diff --git a/.gitignore b/.gitignore index 2ebf3424..e9d48d58 100644 --- a/.gitignore +++ b/.gitignore @@ -88,4 +88,7 @@ ciri/ciri.db* .vscode/ # Documentation -docs/ +docs/html/ + +# Python +__pycache__/ diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..cf4da27a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,96 @@ +# Contributing to Tangle-accelerator + +When contributing to this repository, please first discuss the change you wish to make via issue, +email, or any other method with the owners of this repository before making a change. + +Please note we have a code of conduct, please follow it in all your interactions with the project. + +## Pull Request Process + +1. Ensure any install or build dependencies are removed before the end of the layer when doing a + build. +2. Create a new branch for fixing or developing the changes. +3. Run test with `bazel test //tests/...`, after finishing the changes. +4. Update the README.md with details of changes to the interface, which includes new environment + variables, exposed ports, useful file locations and container parameters. +5. Run `hooks/formatter` before committing the changes. +6. Rebase to the latest `develop` branch. + +## Git Commit Message Guidelines +Read this [blog article](https://chris.beams.io/posts/git-commit/) and [this article](https://www.conventionalcommits.org/en/v1.0.0-beta.2/) and follow the instructions in these articles. +The subject line of git commit message should follow this pattern +`[optional scope]: ` +The `type` includes the following 5 words depending on the content of the commit. 
+ +* feat +* fix +* refactor +* test +* doc + +## Code of Conduct + +### Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +nationality, personal appearance, race, religion, or sexual identity and +orientation. + +### Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +### Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. 
+ +### Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +### Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at [INSERT EMAIL ADDRESS]. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. diff --git a/Doxyfile b/Doxyfile index a33168f3..6de4c82d 100644 --- a/Doxyfile +++ b/Doxyfile @@ -5,11 +5,15 @@ OUTPUT_DIRECTORY = docs/ OPTIMIZE_OUTPUT_FOR_C = YES INPUT = . 
\ accelerator \ - request \ - response \ - serializer \ + accelerator/core \ + accelerator/core/request \ + accelerator/core/response \ + accelerator/core/serializer \ utils \ - connectivity/mqtt + common \ + storage \ + connectivity/mqtt \ + connectivity/http FILE_PATTERNS = *.h \ *.md EXAMPLE_PATH = tests diff --git a/Makefile b/Makefile index 3ae28f75..8d0caf86 100644 --- a/Makefile +++ b/Makefile @@ -23,7 +23,7 @@ $(MOSQITTO_LIB): $(MOSQITTO_DIR) clean: $(MAKE) -C $(DCURL_DIR) clean - $(MAKE) -C $(MOSQITTO_LIB) clean + $(MAKE) -C $(MOSQITTO_DIR) clean distclean: clean $(RM) -r $(DCURL_DIR) diff --git a/README.md b/README.md index 866ac467..a8f3ba5b 100644 --- a/README.md +++ b/README.md @@ -84,99 +84,30 @@ Tangle-accelerator is built and launched through Bazel, it also requires Redis t * [Bazel](https://docs.bazel.build/versions/master/install.html) * [Redis-server](https://redis.io/topics/quickstart) +* cmake (required by dcurl) +* openssl-dev (required by mosquitto) ## Build from Source -Before running tangle-accelerator, please edit binding address/port of accelerator instance, IRI, and redis server in `accelerator/config.h` unless they are all localhost and/or you don't want to provide external connection. With dependency of [entangled](https://github.com/iotaledger/entangled), IRI address doesn't support https at the moment. Here are some configurations you might need to change: +Before running tangle-accelerator, please edit binding address/port of accelerator instance, IRI, and redis server in `accelerator/config.h` unless they are all localhost and/or you don't want to provide external connection. With dependency of [entangled](https://github.com/iotaledger/entangled), IRI address doesn't support https at the moment. 
Here are some configurations and command you might need to change and use: * `TA_HOST`: binding address of accelerator instance * `TA_PORT`: port of accelerator instance * `IRI_HOST`: binding address of IRI * `IRI_PORT`: port of IRI +* `quiet`: Turn off logging message ``` $ make && bazel run //accelerator ``` +### Building Options +Tangle-accelerator supports several different build time options. -### Optional: Build Docker Images +* Docker images +* MQTT connectivity +* External database -If you prefer building a docker image, tangle-accelerator also provides build rules for it. Note that you still have to edit configurations in `accelerator/config.h`. - -``` -$ make && bazel run //accelerator:ta_image -``` - -There's also an easier option to pull image from docker hub then simply run with default configs. Please do remember a redis-server is still required in this way. - -``` -$ docker run -d --net=host --name tangle-accelerator dltcollab/tangle-accelerator -``` - -### Optional: Build and Push Docker Image to Docker Hub - -Before pushing the docker image to Docker Hub, you need to log in the docker registry: - -``` -$ docker login -``` - -Then you could push the docker image with the following command: - -``` -$ make && bazel run //accelerator:push_docker -``` - -If you get the following error message: - -``` -SyntaxError: invalid syntax ----------------- -Note: The failure of target @containerregistry//:digester (with exit code 1) may have been caused by the fact that it is running under Python 3 instead of Python 2. Examine the error to determine if that appears to be the problem. Since this target is built in the host configuration, the only way to change its version is to set --host_force_python=PY2, which affects the entire build. - -If this error started occurring in Bazel 0.27 and later, it may be because the Python toolchain now enforces that targets analyzed as PY2 and PY3 run under a Python 2 and Python 3 interpreter, respectively. 
See https://github.com/bazelbuild/bazel/issues/7899 for more information. ------------- -``` - -Use the `--host_force_python=PY2` parameter to force the Bazel to use the Python2 in entire build. - -``` -$ make && bazel run //accelerator:push_docker --host_force_python=PY2 -``` - -### Optional: Enable MQTT connectivity -MQTT connectivity is an optional feature allowing IoT endpoint devices to collaborate with `Tangle-Accelerator`. - -``` -make MQTT && bazel run //accelerator:accelerator_mqtt -``` - -Note you may need to set up the `MQTT_HOST` and `TOPIC_ROOT` in `config.h` to connect to a MQTT broker. -For more information for MQTT connectivity of `tangle-accelerator`, you could read `connectivity/mqtt/usage.md`. - -### Optional: Enable external database for transaction reattachment -Transaction reattachment is an optional feature. - -You can enable it in the build time by adding option : `--define db=enable` - -Transaction reattachment relies on ScyllDB, you need to install the dependency by following commands. - -For Ubuntu Linux 16.04/x86_64: - -``` -wget https://downloads.datastax.com/cpp-driver/ubuntu/16.04/cassandra/v2.14.1/cassandra-cpp-driver_2.14.1-1_amd64.deb -wget https://downloads.datastax.com/cpp-driver/ubuntu/16.04/cassandra/v2.14.1/cassandra-cpp-driver-dev_2.14.1-1_amd64.deb -sudo dpkg -i cassandra-cpp-driver_2.14.1-1_amd64.deb -sudo dpkg -i cassandra-cpp-driver-dev_2.14.1-1_amd64.deb -``` - -For Ubuntu Linux 18.04/x86_64: - -``` -wget https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.14.1/cassandra-cpp-driver_2.14.1-1_amd64.deb -wget https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.14.1/cassandra-cpp-driver-dev_2.14.1-1_amd64.deb -sudo dpkg -i cassandra-cpp-driver_2.14.1-1_amd64.deb -sudo dpkg -i cassandra-cpp-driver-dev_2.14.1-1_amd64.deb -``` +See [docs/build.md](https://github.com/DLTcollab/tangle-accelerator/docs/build.md) for more information. 
## Developing diff --git a/WORKSPACE b/WORKSPACE index 11f457e7..75dbd19b 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -3,26 +3,20 @@ load("//third_party:third_party.bzl", "third_party_deps") git_repository( name = "rules_iota", - commit = "73f598ad1ce3ba79ff22d747f723d6d5cbf351e1", + commit = "e08b0038f376d6c82b80f5283bb0a86648bb58dc", remote = "https://github.com/iotaledger/rules_iota.git", ) git_repository( name = "iota_toolchains", - commit = "0f93f76fdff9f091dd3a99aa3f19b5d3f4f7f7fc", + commit = "700904f445d15ef948d112bf0bccf7dd3814ae5c", remote = "https://github.com/iotaledger/toolchains.git", ) git_repository( name = "entangled", - remote = "https://github.com/iotaledger/entangled.git", - tag = "cclient-v1.0.0-beta", -) - -git_repository( - name = "served", - commit = "757172e1d8aa7e273e800ce8ec91974c90a2a8b1", - remote = "https://github.com/meltwater/served.git", + commit = "fe3929b8ac6e7377eed82b83aad96369b42d0641", + remote = "https://github.com/DLTcollab/entangled", ) git_repository( diff --git a/accelerator/BUILD b/accelerator/BUILD index ed354d95..63ee420f 100644 --- a/accelerator/BUILD +++ b/accelerator/BUILD @@ -1,66 +1,55 @@ -package(default_visibility = ["//visibility:public"]) - load("@io_bazel_rules_docker//cc:image.bzl", "cc_image") load("@io_bazel_rules_docker//container:container.bzl", "container_push") cc_binary( name = "accelerator", srcs = select({ - "//connectivity/mqtt:mqtt_enable": [ - "conn_mqtt.c", + ":mqtt_enable": [ + "mqtt_main.c", ], "//conditions:default": ["main.c"], }), - copts = [ - "-DLOGGER_ENABLE", - ] + select({ - ":DEBUG_MODE": ["-g"], - ":PROFILING_MODE": [ - "-DNDEBUG", + copts = select({ + ":debug_mode": ["-g"], + ":profile_mode": [ "-pg", ], - "//conditions:default": ["-DNDEBUG"], - }) + select({ - "//connectivity/mqtt:mqtt_enable": [ - "-DMQTT_ENABLE", - ], - "//conditions:default": [], - }) + select({ - "//storage:db_enable": ["-DDB_ENABLE"], "//conditions:default": [], }), deps = [ - ":ta_errors", ":ta_config", - 
":http", + "//connectivity/http", "@entangled//utils/handles:signal", - ":apis", - ":proxy_apis", ] + select({ - "//connectivity/mqtt:mqtt_enable": [ - "//connectivity/mqtt:mqtt_utils", + ":mqtt_enable": [ + "//connectivity/mqtt", ], "//conditions:default": [], }), ) -cc_binary( - name = "accelerator_microhttpd", - srcs = ["main.c"], - copts = ["-DLOGGER_ENABLE"] + select({ - ":DEBUG_MODE": ["-g"], - ":PROFILING_MODE": [ - "-DNDEBUG", - "-pg", - ], - "//conditions:default": ["-DNDEBUG"], - }), +cc_library( + name = "ta_config", + srcs = ["config.c"], + hdrs = ["config.h"], + visibility = ["//visibility:public"], deps = [ - ":http", - ":ta_config", - ":ta_errors", - "@entangled//utils/handles:signal", - ], + ":cli_info", + "//accelerator/core:pow", + "//common", + ":build_option", + "//utils/cache", + "@entangled//cclient/api", + "@yaml", + ] + select({ + ":db_enable": ["//storage"], + "//conditions:default": [], + }), +) + +cc_library( + name = "cli_info", + srcs = ["cli_info.h"], ) cc_image( @@ -78,123 +67,40 @@ container_push( ) cc_library( - name = "apis", - srcs = ["apis.c"], - hdrs = ["apis.h"], - linkopts = ["-lpthread"], - visibility = ["//visibility:public"], - deps = [ - ":common_core", - ":ta_errors", - "//map:mode", - "//serializer", - "@entangled//common/model:bundle", - "@entangled//common/trinary:trit_tryte", - "@entangled//mam/api", - ], -) - -cc_library( - name = "proxy_apis", - srcs = ["proxy_apis.c"], - hdrs = ["proxy_apis.h"], - visibility = ["//visibility:public"], - deps = [ - ":ta_errors", - "//serializer", - "//utils:hash_algo_djb2", - "//utils:ta_logger", - "@entangled//cclient/api", - "@entangled//cclient/request:requests", - "@entangled//cclient/response:responses", - ], -) - -cc_library( - name = "http", - srcs = ["http.c"], - hdrs = ["http.h"], - visibility = ["//visibility:public"], - deps = [ - ":apis", - ":proxy_apis", - ":ta_config", - ":ta_errors", - "@libmicrohttpd", - ], -) - -cc_library( - name = "common_core", - srcs = 
["common_core.c"], - hdrs = ["common_core.h"], - visibility = ["//visibility:public"], - deps = [ - ":ta_config", - ":ta_errors", - "//request", - "//response", - "//utils:bundle_array", - "//utils:timer", - "@com_github_uthash//:uthash", - "@entangled//cclient/api", - "@entangled//cclient/serialization:serializer", - "@entangled//cclient/serialization:serializer_json", - "@entangled//common/model:bundle", - "@entangled//utils:time", - ], -) - -cc_library( - name = "ta_config", - srcs = ["config.c"], - hdrs = ["config.h"], - copts = select({ - "//connectivity/mqtt:mqtt_enable": [ - "-DMQTT_ENABLE", - ], + name = "build_option", + defines = select({ + ":mqtt_enable": ["MQTT_ENABLE"], "//conditions:default": [], }) + select({ - "//storage:db_enable": ["-DDB_ENABLE"], + ":db_enable": ["DB_ENABLE"], "//conditions:default": [], + }) + select({ + ":debug_mode": [], + "//conditions:default": ["NDEBUG"], }), visibility = ["//visibility:public"], - deps = [ - ":message", - ":ta_errors", - "//utils:cache", - "//utils:pow", - "//utils:ta_logger", - "@entangled//cclient/api", - "@yaml", - ] + select({ - "//storage:db_enable": ["//storage"], - "//conditions:default": [], - }), ) config_setting( - name = "DEBUG_MODE", + name = "debug_mode", values = { "define": "build_type=debug", }, ) config_setting( - name = "PROFILING_MODE", + name = "profile_mode", values = { - "define": "build_type=profiling", + "define": "build_type=profile", }, ) -cc_library( - name = "ta_errors", - hdrs = ["errors.h"], - visibility = ["//visibility:public"], +config_setting( + name = "mqtt_enable", + values = {"define": "mqtt=enable"}, ) -cc_library( - name = "message", - srcs = ["message.c"], - hdrs = ["message.h"], +config_setting( + name = "db_enable", + values = {"define": "db=enable"}, ) diff --git a/accelerator/cli_info.h b/accelerator/cli_info.h new file mode 100644 index 00000000..0c60d3fd --- /dev/null +++ b/accelerator/cli_info.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2018-2019 BiiLabs Co., 
Ltd. and Contributors + * All Rights Reserved. + * This is free software; you can redistribute it and/or modify it under the + * terms of the MIT license. A copy of the license can be found in the file + * "LICENSE" at the root of this distribution. + */ + +#ifndef ACCELERATOR_CLI_INFO_H_ +#define ACCELERATOR_CLI_INFO_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @file accelerator/cli_info.h + * @brief Message and options for tangled-accelerator configures + */ + +typedef enum ta_cli_arg_value_e { + /** TA */ + TA_HOST_CLI = 127, + TA_PORT_CLI, + TA_THREAD_COUNT_CLI, + + /** IRI */ + IRI_HOST_CLI, + IRI_PORT_CLI, + + /** MQTT */ + MQTT_HOST_CLI, + MQTT_ROOT_CLI, + + /** REDIS */ + REDIS_HOST_CLI, + REDIS_PORT_CLI, + + /** DB */ + DB_HOST_CLI, + + /** CONFIG */ + MILESTONE_DEPTH_CLI, + MWM_CLI, + SEED_CLI, + CACHE, + CONF_CLI, + PROXY_API, + + /** LOGGER */ + QUIET, +} ta_cli_arg_value_t; + +static struct ta_cli_argument_s { + char const* name; + int has_arg; /* one of: no_argument, required_argument, optional_argument */ + int* flag; + int val; + const char* desc; +} ta_cli_arguments_g[] = { + {"help", no_argument, NULL, 'h', "Show tangle-accelerator usage"}, + {"version", no_argument, NULL, 'v', "tangle-accelerator version"}, + {"ta_host", required_argument, NULL, TA_HOST_CLI, "TA listening host"}, + {"ta_port", required_argument, NULL, TA_PORT_CLI, "TA listening port"}, + {"ta_thread", optional_argument, NULL, TA_THREAD_COUNT_CLI, "TA executing thread"}, + {"iri_host", required_argument, NULL, IRI_HOST_CLI, "IRI listening host"}, + {"iri_port", required_argument, NULL, IRI_PORT_CLI, "IRI listening port"}, + {"mqtt_host", required_argument, NULL, MQTT_HOST_CLI, "MQTT listening host"}, + {"mqtt_root", required_argument, NULL, MQTT_ROOT_CLI, "MQTT listening topic root"}, + {"redis_host", required_argument, NULL, REDIS_HOST_CLI, "Redis server listening host"}, + {"redis_port", required_argument, NULL, REDIS_PORT_CLI, "Redis server listening 
port"}, + {"db_host", required_argument, NULL, DB_HOST_CLI, "DB server listening host"}, + {"milestone_depth", optional_argument, NULL, MILESTONE_DEPTH_CLI, "IRI milestone depth"}, + {"mwm", optional_argument, NULL, MWM_CLI, "minimum weight magnitude"}, + {"seed", optional_argument, NULL, SEED_CLI, "IOTA seed"}, + {"cache", required_argument, NULL, CACHE, "Enable cache server with Y"}, + {"config", required_argument, NULL, CONF_CLI, "Read configuration file"}, + {"proxy_passthrough", no_argument, NULL, PROXY_API, "Pass proxy API directly to IRI without processing"}, + {"quiet", no_argument, NULL, QUIET, "Disable logger"}, + {NULL, 0, NULL, 0, NULL}}; + +static const int cli_cmd_num = sizeof(ta_cli_arguments_g) / sizeof(struct ta_cli_argument_s); + +static inline void ta_usage() { + printf("tangle-accelerator usage:\n"); + for (int i = 0; i < cli_cmd_num; i++) { + printf("--%-34s ", ta_cli_arguments_g[i].name); + printf(" "); + if (ta_cli_arguments_g[i].has_arg == required_argument) { + printf(" arg "); + } else if (ta_cli_arguments_g[i].has_arg == optional_argument) { + printf("[arg]"); + } else { + printf(" "); + } + printf(" %s \n", ta_cli_arguments_g[i].desc); + } +} + +#ifdef __cplusplus +} +#endif + +#endif // ACCELERATOR_CLI_INFO_H_ diff --git a/accelerator/config.c b/accelerator/config.c index 2d4be2a7..29b9929f 100644 --- a/accelerator/config.c +++ b/accelerator/config.c @@ -29,7 +29,7 @@ struct option* cli_build_options() { for (int i = 0; i < cli_cmd_num; ++i) { long_options[i].name = ta_cli_arguments_g[i].name; long_options[i].has_arg = ta_cli_arguments_g[i].has_arg; - long_options[i].flag = NULL; + long_options[i].flag = ta_cli_arguments_g[i].flag; long_options[i].val = ta_cli_arguments_g[i].val; } return long_options; @@ -68,6 +68,16 @@ static status_t cli_core_set(ta_core_t* const core, int key, char* const value) iota_service->http.port = atoi(value); break; +#ifdef MQTT_ENABLE + // MQTT configuration + case MQTT_HOST_CLI: + ta_conf->mqtt_host = 
value; + break; + case MQTT_ROOT_CLI: + ta_conf->mqtt_topic_root = value; + break; +#endif + // Cache configuration case REDIS_HOST_CLI: cache->host = value; @@ -78,6 +88,7 @@ static status_t cli_core_set(ta_core_t* const core, int key, char* const value) #ifdef DB_ENABLE // DB configuration case DB_HOST_CLI: + free(db_service->host); db_service->host = strdup(value); break; #endif @@ -95,9 +106,9 @@ static status_t cli_core_set(ta_core_t* const core, int key, char* const value) cache->cache_state = (toupper(value[0]) == 'T'); break; - // Verbose configuration - case VERBOSE: - verbose_mode = (toupper(value[0]) == 'T'); + // Quiet mode configuration + case QUIET: + quiet_mode = (toupper(value[0]) == 'T'); break; case PROXY_API: @@ -164,8 +175,8 @@ status_t ta_core_default_init(ta_core_t* const core) { ta_log_info("Initializing DB connection\n"); db_service->host = strdup(DB_HOST); #endif - // Turn off verbose mode default - verbose_mode = false; + // Turn off quiet mode default + quiet_mode = false; return ret; } @@ -199,7 +210,7 @@ status_t ta_core_file_init(ta_core_t* const core, int argc, char** argv) { case '?': ret = SC_CONF_UNKNOWN_OPTION; ta_log_error("%s\n", "SC_CONF_UNKNOWN_OPTION"); - break; + continue; case CONF_CLI: ret = cli_core_set(core, key, optarg); break; @@ -285,16 +296,16 @@ status_t ta_core_cli_init(ta_core_t* const core, int argc, char** argv) { case '?': ret = SC_CONF_UNKNOWN_OPTION; ta_log_error("%s\n", "SC_CONF_UNKNOWN_OPTION"); - break; + continue; case 'h': ta_usage(); exit(EXIT_SUCCESS); case 'v': printf("%s\n", TA_VERSION); exit(EXIT_SUCCESS); - case VERBOSE: - // Turn on verbose mode - verbose_mode = true; + case QUIET: + // Turn on quiet mode + quiet_mode = true; // Enable backend_redis logger br_logger_init(); @@ -337,7 +348,7 @@ status_t ta_core_set(ta_core_t* core) { cache_init(cache->cache_state, cache->host, cache->port); #ifdef DB_ENABLE ta_log_info("Initializing db client service\n"); - if ((ret = 
db_client_service_init(db_service)) != SC_OK) { + if ((ret = db_client_service_init(db_service, DB_USAGE_REATTACH)) != SC_OK) { ta_log_error("Initializing DB connection failed\n"); } #endif @@ -347,20 +358,14 @@ status_t ta_core_set(ta_core_t* core) { } void ta_core_destroy(ta_core_t* const core) { - iota_client_service_t* const iota_service = &core->iota_service; -#ifdef DB_ENABLE - db_client_service_t* const db_service = &core->db_service; -#endif - ta_log_info("Destroying IRI connection\n"); iota_client_extended_destroy(); - iota_client_core_destroy(iota_service); + iota_client_core_destroy(&core->iota_service); #ifdef DB_ENABLE - if (db_service->enabled) { - ta_log_info("Destroying DB connection\n"); - db_client_service_free(db_service); - } + ta_log_info("Destroying DB connection\n"); + db_client_service_free(&core->db_service); #endif + pow_destroy(); cache_stop(); logger_helper_release(logger_id); diff --git a/accelerator/config.h b/accelerator/config.h index 997cd458..29edbe7e 100644 --- a/accelerator/config.h +++ b/accelerator/config.h @@ -12,15 +12,15 @@ #include #include -#include "accelerator/message.h" +#include "accelerator/cli_info.h" +#include "accelerator/core/pow.h" #include "cclient/api/core/core_api.h" #include "cclient/api/extended/extended_api.h" #ifdef DB_ENABLE #include "storage/ta_storage.h" #endif -#include "utils/cache.h" -#include "utils/logger.h" -#include "utils/pow.h" +#include "common/logger.h" +#include "utils/cache/cache.h" #define FILE_PATH_SIZE 128 @@ -29,11 +29,11 @@ extern "C" { #endif /** - * @file config.h + * @file accelerator/config.h * @brief Configuration of tangle-accelerator */ -#define TA_VERSION "tangle-accelerator/0.8.0" +#define TA_VERSION "tangle-accelerator/0.9.0" #define TA_HOST "localhost" #ifdef MQTT_ENABLE diff --git a/accelerator/core/BUILD b/accelerator/core/BUILD new file mode 100644 index 00000000..59eacf86 --- /dev/null +++ b/accelerator/core/BUILD @@ -0,0 +1,77 @@ +cc_library( + name = "apis", + srcs = 
["apis.c"], + hdrs = ["apis.h"], + linkopts = ["-lpthread"], + visibility = ["//visibility:public"], + deps = [ + "//accelerator:build_option", + "//accelerator/core", + "//accelerator/core:mam_core", + "//accelerator/core/serializer", + "//common", + ], +) + +cc_library( + name = "proxy_apis", + srcs = ["proxy_apis.c"], + hdrs = ["proxy_apis.h"], + visibility = ["//visibility:public"], + deps = [ + "//accelerator/core/serializer", + "//utils:hash_algo_djb2", + "@entangled//cclient/api", + "@entangled//cclient/request:requests", + "@entangled//cclient/response:responses", + ], +) + +cc_library( + name = "core", + srcs = ["core.c"], + hdrs = ["core.h"], + visibility = ["//visibility:public"], + deps = [ + "//accelerator:ta_config", + "//accelerator/core/request", + "//accelerator/core/response", + "//utils:bundle_array", + "//utils:timer", + "@com_github_uthash//:uthash", + "@entangled//cclient/api", + "@entangled//utils:time", + ], +) + +cc_library( + name = "mam_core", + srcs = ["mam_core.c"], + hdrs = ["mam_core.h"], + visibility = ["//visibility:public"], + deps = [ + "//accelerator/core", + "//accelerator/core/request", + "//accelerator/core/response", + "@entangled//common/trinary:flex_trit", + "@entangled//mam/api", + "@entangled//utils/containers/hash:hash_array", + ], +) + +cc_library( + name = "pow", + srcs = ["pow.c"], + hdrs = ["pow.h"], + visibility = ["//visibility:public"], + deps = [ + "//common:ta_errors", + "//common:ta_logger", + "//third_party:dcurl", + "@com_github_uthash//:uthash", + "@entangled//common/helpers:digest", + "@entangled//common/model:bundle", + "@entangled//common/trinary:flex_trit", + "@entangled//utils:time", + ], +) diff --git a/accelerator/apis.c b/accelerator/core/apis.c similarity index 79% rename from accelerator/apis.c rename to accelerator/core/apis.c index 20c74c97..0d637eee 100644 --- a/accelerator/apis.c +++ b/accelerator/core/apis.c @@ -8,7 +8,7 @@ #include "apis.h" #include -#include "map/mode.h" +#include 
"mam_core.h" #include "utils/handles/lock.h" #define APIS_LOGGER "apis" @@ -341,7 +341,7 @@ status_t api_receive_mam_message(const iota_config_t* const iconf, const iota_cl goto done; } - if (map_api_bundle_read(&mam, bundle, &payload) != RC_OK) { + if (ta_mam_api_bundle_read(&mam, bundle, &payload) != RC_OK) { ret = SC_MAM_FAILED_RESPONSE; ta_log_error("%s\n", "SC_MAM_FAILED_RESPONSE"); goto done; @@ -391,8 +391,8 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien lock_handle_unlock(&cjson_lock); // Creating MAM API - ret = map_mam_init(&mam, iconf, req->seed, req->channel_ord, &psks, &ntru_pks, (tryte_t*)req->psk, - (tryte_t*)req->ntru_pk); + ret = ta_mam_init(&mam, iconf, req->seed, req->channel_ord, &psks, &ntru_pks, (tryte_t*)req->psk, + (tryte_t*)req->ntru_pk); if (ret) { ta_log_error("%d\n", ret); goto done; @@ -400,8 +400,8 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien // Create epid merkle tree and find the smallest unused secret key. // Write both Header and Pakcet into one single bundle. 
- ret = map_written_msg_to_bundle(service, &mam, req->ch_mss_depth, req->ep_mss_depth, chid, epid, psks, ntru_pks, - req->message, &bundle, msg_id, &ch_remain_sk, &ep_remain_sk); + ret = ta_mam_written_msg_to_bundle(service, &mam, req->ch_mss_depth, req->ep_mss_depth, chid, epid, psks, ntru_pks, + req->message, &bundle, msg_id, &ch_remain_sk, &ep_remain_sk); if (ret) { ta_log_error("%d\n", ret); goto done; @@ -444,8 +444,8 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien // Sending bundle if (ch_remain_sk == 1 || ep_remain_sk == 1) { // Send announcement for the next endpoint or channel - ret = map_announce_next_mss_private_key_to_bundle(&mam, req->ch_mss_depth, req->ep_mss_depth, chid, &ch_remain_sk, - &ep_remain_sk, psks, ntru_pks, chid1, &bundle); + ret = ta_mam_announce_next_mss_private_key_to_bundle(&mam, req->ch_mss_depth, req->ep_mss_depth, chid, + &ch_remain_sk, &ep_remain_sk, psks, ntru_pks, chid1, &bundle); if (ret) { ta_log_error("%d\n", ret); goto done; @@ -500,8 +500,7 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien return ret; } -status_t api_send_transfer(const iota_config_t* const iconf, const iota_client_service_t* const service, - const char* const obj, char** json_result) { +status_t api_send_transfer(const ta_core_t* const core, const char* const obj, char** json_result) { status_t ret = SC_OK; ta_send_transfer_req_t* req = ta_send_transfer_req_new(); ta_send_transfer_res_t* res = ta_send_transfer_res_new(); @@ -521,25 +520,31 @@ status_t api_send_transfer(const iota_config_t* const iconf, const iota_client_s goto done; } - ret = ta_send_transfer(iconf, service, req, res); + ret = ta_send_transfer(&core->iota_conf, &core->iota_service, req, res); if (ret) { lock_handle_unlock(&cjson_lock); goto done; } lock_handle_unlock(&cjson_lock); - // return transaction object hash243_queue_push(&txn_obj_req->hashes, hash243_queue_peek(res->hash)); lock_handle_lock(&cjson_lock); - ret = 
ta_find_transaction_objects(service, txn_obj_req, res_txn_array); + ret = ta_find_transaction_objects(&core->iota_service, txn_obj_req, res_txn_array); if (ret) { lock_handle_unlock(&cjson_lock); goto done; } lock_handle_unlock(&cjson_lock); - - ret = ta_send_transfer_res_serialize(res_txn_array, json_result); + res->txn_array = res_txn_array; +#ifdef DB_ENABLE + ret = db_insert_tx_into_identity(&core->db_service, res->hash, PENDING_TXN, res->uuid_string); + if (ret != SC_OK) { + ta_log_error("fail to insert new pending transaction for reattachement\n"); + goto done; + } +#endif + ret = ta_send_transfer_res_serialize(res, json_result); done: ta_send_transfer_req_free(&req); @@ -580,3 +585,90 @@ status_t api_send_trytes(const iota_config_t* const iconf, const iota_client_ser hash_array_free(trytes); return ret; } + +#ifdef DB_ENABLE +status_t api_find_transactions_by_id(const iota_client_service_t* const iota_service, + const db_client_service_t* const db_service, const char* const obj, + char** json_result) { + if (obj == NULL) { + ta_log_error("Invalid NULL pointer to uuid string\n"); + return SC_TA_NULL; + } + status_t ret = SC_OK; + ta_log_info("find transaction by uuid string: %s\n", obj); + db_identity_array_t* db_identity_array = db_identity_array_new(); + ret = db_get_identity_objs_by_uuid_string(db_service, obj, db_identity_array); + if (ret != SC_OK) { + ta_log_error("fail to find transaction by uuid string\n"); + goto exit; + } + + db_identity_t* itr = (db_identity_t*)utarray_front(db_identity_array); + if (itr != NULL) { + ret = api_find_transaction_object_single(iota_service, (const char* const)db_ret_identity_hash(itr), json_result); + } else { + ta_log_error("No corresponding transaction found by uuid string : %s\n", obj); + ret = SC_TA_WRONG_REQUEST_OBJ; + } + +exit: + db_identity_array_free(&db_identity_array); + return ret; +} + +status_t api_get_identity_info_by_hash(const db_client_service_t* const db_service, const char* const obj, + char** 
json_result) { + if (obj == NULL) { + ta_log_error("Invalid NULL pointer to uuid string\n"); + return SC_TA_NULL; + } + status_t ret = SC_OK; + ta_log_info("get identity info by hash : %s\n", obj); + db_identity_array_t* db_identity_array = db_identity_array_new(); + ret = db_get_identity_objs_by_hash(db_service, (const cass_byte_t*)obj, db_identity_array); + if (ret != SC_OK) { + ta_log_error("fail to get identity objs by transaction hash\n"); + goto exit; + } + + db_identity_t* itr = (db_identity_t*)utarray_front(db_identity_array); + if (itr != NULL) { + ret = db_identity_serialize(json_result, itr); + } else { + ta_log_error("No corresponding identity info found by hash : %s\n", obj); + ret = SC_TA_WRONG_REQUEST_OBJ; + } +exit: + db_identity_array_free(&db_identity_array); + return ret; +} + +status_t api_get_identity_info_by_id(const db_client_service_t* const db_service, const char* const obj, + char** json_result) { + if (obj == NULL) { + ta_log_error("Invalid NULL pointer to uuid string\n"); + return SC_TA_NULL; + } + + status_t ret = SC_OK; + ta_log_info("get identity info by uuid string : %s\n", obj); + db_identity_array_t* db_identity_array = db_identity_array_new(); + ret = db_get_identity_objs_by_uuid_string(db_service, obj, db_identity_array); + if (ret != SC_OK) { + ta_log_error("fail to get identity objs by uuid string\n"); + goto exit; + } + + db_identity_t* itr = (db_identity_t*)utarray_front(db_identity_array); + if (itr != NULL) { + ret = db_identity_serialize(json_result, itr); + } else { + ta_log_error("No corresponding identity info found by uuid string : %s\n", obj); + ret = SC_TA_WRONG_REQUEST_OBJ; + } + +exit: + db_identity_array_free(&db_identity_array); + return ret; +} +#endif diff --git a/accelerator/apis.h b/accelerator/core/apis.h similarity index 79% rename from accelerator/apis.h rename to accelerator/core/apis.h index 3d4cf5dc..087e666f 100644 --- a/accelerator/apis.h +++ b/accelerator/core/apis.h @@ -6,10 +6,10 @@ * "LICENSE" at 
the root of this distribution. */ -#ifndef ACCELERATOR_APIS_H_ -#define ACCELERATOR_APIS_H_ +#ifndef CORE_APIS_H_ +#define CORE_APIS_H_ -#include "accelerator/common_core.h" +#include "accelerator/core/core.h" #include "common/trinary/trit_tryte.h" #include "mam/api/api.h" #include "mam/mam/mam_channel_t_set.h" @@ -20,7 +20,7 @@ extern "C" { #endif /** - * @file apis.h + * @file accelerator/core/apis.h * @brief General tangle-accelerator APIs * * tangle-accelerator APIs provide major IOTA APIs wrapper for public usage. @@ -167,8 +167,7 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien * fields include address, value, tag, and message. This API would also try to * find the transactions after bundle sent. * - * @param[in] iconf IOTA API parameter configurations - * @param[in] service IRI node end point service + * @param core[in] Pointer to Tangle-accelerator core configuration structure * @param[in] obj Input data in JSON * @param[out] json_result Result containing transaction objects in json format * @@ -176,8 +175,7 @@ status_t api_mam_send_message(const iota_config_t* const iconf, const iota_clien * - SC_OK on success * - non-zero on error */ -status_t api_send_transfer(const iota_config_t* const iconf, const iota_client_service_t* const service, - const char* const obj, char** json_result); +status_t api_send_transfer(const ta_core_t* const core, const char* const obj, char** json_result); /** * @brief Return transaction object with given single transaction hash. @@ -268,9 +266,57 @@ status_t api_find_transactions_obj_by_tag(const iota_client_service_t* const ser */ status_t api_send_trytes(const iota_config_t* const iconf, const iota_client_service_t* const service, const char* const obj, char** json_result); +#ifdef DB_ENABLE +/** + * @brief Return transaction object with given single identity number. + * + * Explore transaction hash information with given single identity number. 
This would + * return whole transaction object details in json format instead of raw trytes. + * + * @param[in] iota_service IRI node end point service + * @param[in] db_service db client service + * @param[in] obj identity number + * @param[out] json_result Result containing the only one transaction object in json format + * + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t api_find_transactions_by_id(const iota_client_service_t* const iota_service, + const db_client_service_t* const db_service, const char* const obj, + char** json_result); + +/** + * @brief Return db identity object with given single transaction hash. + * + * @param[in] db_service db client service + * @param[in] obj transaction hash + * @param[out] json_result Result containing the only one db identity object in json format + * + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t api_get_identity_info_by_hash(const db_client_service_t* const db_service, const char* const obj, + char** json_result); + +/** + * @brief Return db identity object with given single transaction id. + * + * @param[in] db_service db client service + * @param[in] obj transaction id + * @param[out] json_result Result containing the only one db identity object in json format + * + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t api_get_identity_info_by_id(const db_client_service_t* const db_service, const char* const obj, + char** json_result); +#endif #ifdef __cplusplus } #endif -#endif // ACCELERATOR_APIS_H_ +#endif // CORE_APIS_H_ diff --git a/accelerator/common_core.c b/accelerator/core/core.c similarity index 99% rename from accelerator/common_core.c rename to accelerator/core/core.c index 08746005..72a0d0ec 100644 --- a/accelerator/common_core.c +++ b/accelerator/core/core.c @@ -6,10 +6,10 @@ * "LICENSE" at the root of this distribution. 
*/ -#include "common_core.h" +#include "core.h" #include -#define CC_LOGGER "common_core" +#define CC_LOGGER "core" static logger_id_t logger_id; @@ -112,7 +112,7 @@ status_t ta_send_trytes(const iota_config_t* const iconf, const iota_client_serv } // set the value of attach_res->trytes as output trytes result - memcpy(trytes, attach_res->trytes, hash_array_len(attach_res->trytes) * sizeof(hash8019_array_p)); + memcpy(trytes, attach_res->trytes, hash_array_len(attach_res->trytes) * sizeof(flex_trit_t)); done: get_transactions_to_approve_req_free(&tx_approve_req); @@ -518,12 +518,12 @@ status_t ta_get_bundles_by_addr(const iota_client_service_t* const service, tryt } iota_transaction_t* curr_tx = NULL; + bundle_transactions_t* bundle = NULL; TX_OBJS_FOREACH(obj_res, curr_tx) { + bundle_transactions_new(&bundle); flex_trits_to_trytes(bundle_hash, NUM_TRYTES_BUNDLE, transaction_bundle(curr_tx), NUM_TRITS_BUNDLE, NUM_TRITS_BUNDLE); - bundle_transactions_t* bundle = NULL; - bundle_transactions_new(&bundle); ret = ta_get_bundle(service, bundle_hash, bundle); if (ret != SC_OK) { ta_log_error("%d\n", ret); @@ -537,6 +537,7 @@ status_t ta_get_bundles_by_addr(const iota_client_service_t* const service, tryt } done: + bundle_transactions_free(&bundle); find_transactions_req_free(&txn_req); find_transactions_res_free(&txn_res); ta_find_transaction_objects_req_free(&obj_req); diff --git a/accelerator/common_core.h b/accelerator/core/core.h similarity index 97% rename from accelerator/common_core.h rename to accelerator/core/core.h index 56216d62..11101243 100644 --- a/accelerator/common_core.h +++ b/accelerator/core/core.h @@ -6,14 +6,13 @@ * "LICENSE" at the root of this distribution. 
*/ -#ifndef ACCELERATOR_COMMON_CORE_H_ -#define ACCELERATOR_COMMON_CORE_H_ +#ifndef CORE_CORE_H_ +#define CORE_CORE_H_ -#include #include "accelerator/config.h" +#include "accelerator/core/request/request.h" +#include "accelerator/core/response/response.h" #include "common/model/transfer.h" -#include "request/request.h" -#include "response/response.h" #include "utils/bundle_array.h" #include "utils/time.h" #include "utils/timer.h" @@ -23,7 +22,7 @@ extern "C" { #endif /** - * @file common_core.h + * @file accelerator/core/core.h * @brief General tangle-accelerator core functions * * tangle-accelerator core functions provide major IOTA usage with @@ -220,4 +219,4 @@ status_t ta_get_bundles_by_addr(const iota_client_service_t* const service, tryt } #endif -#endif // ACCELERATOR_COMMON_CORE_H_ +#endif // CORE_CORE_H diff --git a/map/mode.c b/accelerator/core/mam_core.c similarity index 84% rename from map/mode.c rename to accelerator/core/mam_core.c index 118ac85d..5bfb91f0 100644 --- a/map/mode.c +++ b/accelerator/core/mam_core.c @@ -6,7 +6,7 @@ * "LICENSE" at the root of this distribution. 
*/ -#include "map/mode.h" +#include "accelerator/core/mam_core.h" #include "common/model/transfer.h" #include "utils/containers/hash/hash_array.h" @@ -14,9 +14,9 @@ static logger_id_t logger_id; -void map_logger_init() { logger_id = logger_helper_enable(MODE_LOGGER, LOGGER_DEBUG, true); } +void ta_mam_logger_init() { logger_id = logger_helper_enable(MODE_LOGGER, LOGGER_DEBUG, true); } -int map_logger_release() { +int ta_mam_logger_release() { logger_helper_release(logger_id); if (logger_helper_destroy() != RC_OK) { ta_log_error("Destroying logger failed %s.\n", MODE_LOGGER); @@ -35,9 +35,10 @@ static void bundle_transactions_renew(bundle_transactions_t **bundle) { bundle_transactions_new(bundle); } -static retcode_t map_write_header(mam_api_t *const api, tryte_t const *const channel_id, - tryte_t const *const endpoint_id, mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, - bundle_transactions_t *const bundle, trit_t *const msg_id) { +static retcode_t ta_mam_write_header(mam_api_t *const api, tryte_t const *const channel_id, + tryte_t const *const endpoint_id, mam_psk_t_set_t psks, + mam_ntru_pk_t_set_t ntru_pks, bundle_transactions_t *const bundle, + trit_t *const msg_id) { retcode_t ret = RC_OK; // In TA we only write header on endpoint, since in our architecture we only post Message on signatures owned by @@ -59,8 +60,8 @@ static retcode_t map_write_header(mam_api_t *const api, tryte_t const *const cha * * @return return code */ -static retcode_t map_write_packet(mam_api_t *const api, bundle_transactions_t *const bundle, char const *const payload, - trit_t const *const msg_id) { +static retcode_t ta_mam_write_packet(mam_api_t *const api, bundle_transactions_t *const bundle, + char const *const payload, trit_t const *const msg_id) { retcode_t ret = RC_OK; const bool last_packet = true; const mam_msg_checksum_t checksum = MAM_MSG_CHECKSUM_SIG; @@ -197,9 +198,9 @@ static mam_endpoint_t *mam_api_endpoint_get(mam_api_t const *const api, tryte_t * External functions 
***********************************************************************************************************/ -status_t map_mam_init(mam_api_t *const api, const iota_config_t *const iconf, tryte_t const *const seed, - int32_t channel_ord, mam_psk_t_set_t *const psks, mam_ntru_pk_t_set_t *const ntru_pks, - tryte_t const *const psk, tryte_t const *const ntru_pk) { +status_t ta_mam_init(mam_api_t *const api, const iota_config_t *const iconf, tryte_t const *const seed, + int32_t channel_ord, mam_psk_t_set_t *const psks, mam_ntru_pk_t_set_t *const ntru_pks, + tryte_t const *const psk, tryte_t const *const ntru_pk) { status_t ret = SC_OK; if (!api || (!iconf && !seed)) { return SC_MAM_NULL; @@ -234,11 +235,11 @@ status_t map_mam_init(mam_api_t *const api, const iota_config_t *const iconf, tr return SC_OK; } -status_t map_written_msg_to_bundle(const iota_client_service_t *const service, mam_api_t *const api, - const size_t channel_depth, const size_t endpoint_depth, tryte_t *const chid, - tryte_t *const epid, mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, - char const *const payload, bundle_transactions_t **bundle, tryte_t *msg_id, - uint32_t *ch_remain_sk, uint32_t *ep_remain_sk) { +status_t ta_mam_written_msg_to_bundle(const iota_client_service_t *const service, mam_api_t *const api, + const size_t channel_depth, const size_t endpoint_depth, tryte_t *const chid, + tryte_t *const epid, mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, + char const *const payload, bundle_transactions_t **bundle, tryte_t *msg_id, + uint32_t *ch_remain_sk, uint32_t *ep_remain_sk) { status_t ret = SC_OK; if (!service || !api || !chid || !epid || channel_depth < 1 || endpoint_depth < 1) { ta_log_error("%s\n", "SC_MAM_NULL"); @@ -288,7 +289,7 @@ status_t map_written_msg_to_bundle(const iota_client_service_t *const service, m ep_remain_sk_local = 1 << endpoint_depth; while (ep_remain_sk_local) { - if (map_write_header(api, chid, epid, psks, ntru_pks, *bundle, msg_id_trits)) { + if 
(ta_mam_write_header(api, chid, epid, psks, ntru_pks, *bundle, msg_id_trits)) { ret = SC_MAM_FAILED_WRITE; ta_log_error("%s\n", "SC_MAM_FAILED_WRITE"); goto done; @@ -311,7 +312,7 @@ status_t map_written_msg_to_bundle(const iota_client_service_t *const service, m end_loop: // Writing packet to bundle - if (map_write_packet(api, *bundle, payload, msg_id_trits)) { + if (ta_mam_write_packet(api, *bundle, payload, msg_id_trits)) { ret = SC_MAM_FAILED_WRITE; ta_log_error("%s\n", "SC_MAM_FAILED_WRITE"); goto done; @@ -328,11 +329,11 @@ status_t map_written_msg_to_bundle(const iota_client_service_t *const service, m return ret; } -status_t map_announce_next_mss_private_key_to_bundle(mam_api_t *const api, const size_t channel_depth, - const size_t endpoint_depth, tryte_t *const chid, - uint32_t *ch_remain_sk, uint32_t *ep_remain_sk, - mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, - tryte_t *const chid1, bundle_transactions_t **bundle) { +status_t ta_mam_announce_next_mss_private_key_to_bundle(mam_api_t *const api, const size_t channel_depth, + const size_t endpoint_depth, tryte_t *const chid, + uint32_t *ch_remain_sk, uint32_t *ep_remain_sk, + mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, + tryte_t *const chid1, bundle_transactions_t **bundle) { status_t ret = SC_OK; tryte_t epid1[NUM_TRYTES_ADDRESS]; trit_t msg_id[MAM_MSG_ID_SIZE]; @@ -374,7 +375,7 @@ status_t map_announce_next_mss_private_key_to_bundle(mam_api_t *const api, const return ret; } -retcode_t map_api_bundle_read(mam_api_t *const api, bundle_transactions_t *bundle, char **payload_out) { +retcode_t ta_mam_api_bundle_read(mam_api_t *const api, bundle_transactions_t *bundle, char **payload_out) { retcode_t ret = RC_OK; tryte_t *payload_trytes = NULL; size_t payload_size = 0; diff --git a/map/mode.h b/accelerator/core/mam_core.h similarity index 64% rename from map/mode.h rename to accelerator/core/mam_core.h index c6129932..a546440d 100644 --- a/map/mode.h +++ b/accelerator/core/mam_core.h @@ -6,23 
+6,24 @@ * "LICENSE" at the root of this distribution. */ -#ifndef __MAP_MODE_H__ -#define __MAP_MODE_H__ +#ifndef CORE_MAM_CORE_H_ +#define CORE_MAM_CORE_H_ -#include "accelerator/common_core.h" -#include "accelerator/errors.h" +#include "accelerator/core/core.h" #include "common/trinary/flex_trit.h" #include "common/trinary/tryte_ascii.h" #include "mam/api/api.h" #include "mam/mam/mam_channel_t_set.h" -#include "request/request.h" -#include "response/response.h" #include "utarray.h" #ifdef __cplusplus extern "C" { #endif +/** + * @file accelerator/core/mam_core.h + */ + /** * Initialize a mam_api_t object * @@ -32,9 +33,9 @@ extern "C" { * * @return return code */ -status_t map_mam_init(mam_api_t* const api, const iota_config_t* const iconf, tryte_t const* const seed, - int32_t channel_ord, mam_psk_t_set_t* const psks, mam_ntru_pk_t_set_t* const ntru_pks, - tryte_t const* const psk, tryte_t const* const ntru_pk); +status_t ta_mam_init(mam_api_t* const api, const iota_config_t* const iconf, tryte_t const* const seed, + int32_t channel_ord, mam_psk_t_set_t* const psks, mam_ntru_pk_t_set_t* const ntru_pks, + tryte_t const* const psk, tryte_t const* const ntru_pk); /** * @brief Write payload to bundle on the smallest secret key. 
@@ -58,11 +59,11 @@ status_t map_mam_init(mam_api_t* const api, const iota_config_t* const iconf, tr * * @return return code */ -status_t map_written_msg_to_bundle(const iota_client_service_t* const service, mam_api_t* const api, - const size_t channel_depth, const size_t endpoint_depth, tryte_t* const chid, - tryte_t* const epid, mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, - char const* const payload, bundle_transactions_t** bundle, tryte_t* msg_id, - uint32_t* ch_remain_sk, uint32_t* ep_remain_sk); +status_t ta_mam_written_msg_to_bundle(const iota_client_service_t* const service, mam_api_t* const api, + const size_t channel_depth, const size_t endpoint_depth, tryte_t* const chid, + tryte_t* const epid, mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, + char const* const payload, bundle_transactions_t** bundle, tryte_t* msg_id, + uint32_t* ch_remain_sk, uint32_t* ep_remain_sk); /** * @brief Write an announcement to bundle. @@ -83,11 +84,11 @@ status_t map_written_msg_to_bundle(const iota_client_service_t* const service, m * * @return return code */ -status_t map_announce_next_mss_private_key_to_bundle(mam_api_t* const api, const size_t channel_depth, - const size_t endpoint_depth, tryte_t* const chid, - uint32_t* ch_remain_sk, uint32_t* ep_remain_sk, - mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, - tryte_t* const chid1, bundle_transactions_t** bundle); +status_t ta_mam_announce_next_mss_private_key_to_bundle(mam_api_t* const api, const size_t channel_depth, + const size_t endpoint_depth, tryte_t* const chid, + uint32_t* ch_remain_sk, uint32_t* ep_remain_sk, + mam_psk_t_set_t psks, mam_ntru_pk_t_set_t ntru_pks, + tryte_t* const chid1, bundle_transactions_t** bundle); /** * Read the MAM message from a bundle @@ -98,10 +99,10 @@ status_t map_announce_next_mss_private_key_to_bundle(mam_api_t* const api, const * * @return return code */ -retcode_t map_api_bundle_read(mam_api_t* const api, bundle_transactions_t* bundle, char** payload_out); +retcode_t 
ta_mam_api_bundle_read(mam_api_t* const api, bundle_transactions_t* bundle, char** payload_out); #ifdef __cplusplus } #endif -#endif // __MAP_MODE_H__ +#endif // CORE_MAM_CORE_H_ diff --git a/utils/pow.c b/accelerator/core/pow.c similarity index 95% rename from utils/pow.c rename to accelerator/core/pow.c index b4198c0f..531dccff 100644 --- a/utils/pow.c +++ b/accelerator/core/pow.c @@ -8,8 +8,8 @@ #include "pow.h" #include "common/helpers/digest.h" +#include "common/logger.h" #include "third_party/dcurl/src/dcurl.h" -#include "utils/logger.h" #include "utils/time.h" #define POW_LOGGER "pow" @@ -80,8 +80,8 @@ status_t ta_pow(const bundle_transactions_t* bundle, const flex_trit_t* const tr transaction_set_attachment_timestamp_upper(tx, 3812798742493LL); transaction_set_attachment_timestamp_lower(tx, 0); - transaction_serialize_on_flex_trits(tx, tx_trits); - if (tx_trits == NULL) { + size_t offset = transaction_serialize_on_flex_trits(tx, tx_trits); + if (offset != NUM_TRITS_SERIALIZED_TRANSACTION) { ret = SC_CCLIENT_INVALID_FLEX_TRITS; ta_log_error("%s\n", "SC_CCLIENT_INVALID_FLEX_TRITS"); goto done; diff --git a/utils/pow.h b/accelerator/core/pow.h similarity index 90% rename from utils/pow.h rename to accelerator/core/pow.h index 555f08a2..8118d581 100644 --- a/utils/pow.h +++ b/accelerator/core/pow.h @@ -6,12 +6,12 @@ * "LICENSE" at the root of this distribution. 
*/ -#ifndef UTILS_POW_H_ -#define UTILS_POW_H_ +#ifndef CORE_POW_H_ +#define CORE_POW_H_ #include -#include "accelerator/errors.h" #include "common/model/bundle.h" +#include "common/ta_errors.h" #include "common/trinary/flex_trit.h" #include "utarray.h" @@ -20,8 +20,8 @@ extern "C" { #endif /** - * @file pow.h - * @brief Implementation of pow interface + * @file accelerator/core/pow.h + * @brief PoW interface * @example test_pow.c */ @@ -78,4 +78,4 @@ status_t ta_pow(const bundle_transactions_t* bundle, const flex_trit_t* const tr } #endif -#endif // UTILS_POW_H_ +#endif // CORE_POW_H_ diff --git a/accelerator/proxy_apis.c b/accelerator/core/proxy_apis.c similarity index 100% rename from accelerator/proxy_apis.c rename to accelerator/core/proxy_apis.c diff --git a/accelerator/proxy_apis.h b/accelerator/core/proxy_apis.h similarity index 87% rename from accelerator/proxy_apis.h rename to accelerator/core/proxy_apis.h index b299cb6d..b90bf534 100644 --- a/accelerator/proxy_apis.h +++ b/accelerator/core/proxy_apis.h @@ -6,22 +6,20 @@ * "LICENSE" at the root of this distribution. */ -#ifndef ACCELERATOR_PROXY_APIS_H_ -#define ACCELERATOR_PROXY_APIS_H_ +#ifndef CORE_PROXY_APIS_H_ +#define CORE_PROXY_APIS_H_ -#include "accelerator/errors.h" #include "cclient/api/core/core_api.h" #include "cclient/request/requests.h" #include "cclient/response/responses.h" #include "serializer/serializer.h" -#include "utils/logger.h" #ifdef __cplusplus extern "C" { #endif /** - * @file proxy_apis.h + * @file accelerator/core/proxy_apis.h * @brief Implement Proxy APIs * * tangle-accelerator provides major IRI Proxy APIs wrapper. 
@@ -64,7 +62,6 @@ status_t proxy_apis_lock_destroy(); /** * @brief Proxy API of IOTA core functionalities * - * @param[in] iconf IOTA API parameter configurations * @param[in] service IRI node end point service * @param[in] obj IOTA core APIs request body * @param[out] json_result Result of IOTA core APIs @@ -80,4 +77,4 @@ status_t proxy_api_wrapper(const ta_config_t* const iconf, const iota_client_ser } #endif -#endif // ACCELERATOR_PROXY_APIS_H_ +#endif // CORE_PROXY_APIS_H_ diff --git a/request/BUILD b/accelerator/core/request/BUILD similarity index 84% rename from request/BUILD rename to accelerator/core/request/BUILD index 917a1ca9..7e0b2f16 100644 --- a/request/BUILD +++ b/accelerator/core/request/BUILD @@ -6,10 +6,10 @@ cc_library( hdrs = glob([ "*.h", ]), - include_prefix = "request", + include_prefix = "accelerator/core/request", visibility = ["//visibility:public"], deps = [ - "//accelerator:ta_errors", + "//common:ta_errors", "@entangled//common:errors", "@entangled//common/model:transaction", "@entangled//common/trinary:tryte", diff --git a/request/request.h b/accelerator/core/request/request.h similarity index 75% rename from request/request.h rename to accelerator/core/request/request.h index bf0cad91..00a283a7 100644 --- a/request/request.h +++ b/accelerator/core/request/request.h @@ -9,12 +9,12 @@ #ifndef REQUEST_REQUEST_H_ #define REQUEST_REQUEST_H_ -#include "request/ta_find_transaction_objects.h" -#include "request/ta_send_mam.h" -#include "request/ta_send_transfer.h" +#include "ta_find_transaction_objects.h" +#include "ta_send_mam.h" +#include "ta_send_transfer.h" /** - * @file request.h + * @file accelerator/core/request/request.h * @brief Data structure of request type */ diff --git a/request/ta_find_transaction_objects.c b/accelerator/core/request/ta_find_transaction_objects.c similarity index 88% rename from request/ta_find_transaction_objects.c rename to accelerator/core/request/ta_find_transaction_objects.c index b1b04004..76850a9c 100644 
--- a/request/ta_find_transaction_objects.c +++ b/accelerator/core/request/ta_find_transaction_objects.c @@ -19,7 +19,9 @@ ta_find_transaction_objects_req_t* ta_find_transaction_objects_req_new() { } void ta_find_transaction_objects_req_free(ta_find_transaction_objects_req_t** req) { - hash243_queue_free(&(*req)->hashes); - free((*req)); - *req = NULL; + if (*req) { + hash243_queue_free(&(*req)->hashes); + free((*req)); + *req = NULL; + } } diff --git a/request/ta_find_transaction_objects.h b/accelerator/core/request/ta_find_transaction_objects.h similarity index 93% rename from request/ta_find_transaction_objects.h rename to accelerator/core/request/ta_find_transaction_objects.h index cd087169..6392df5b 100644 --- a/request/ta_find_transaction_objects.h +++ b/accelerator/core/request/ta_find_transaction_objects.h @@ -9,8 +9,8 @@ #ifndef REQUEST_TA_GET_TRANSACTION_OBJECT_H_ #define REQUEST_TA_GET_TRANSACTION_OBJECT_H_ -#include "accelerator/errors.h" #include "common/model/transaction.h" +#include "common/ta_errors.h" #include "utils/containers/hash/hash243_queue.h" #ifdef __cplusplus @@ -18,7 +18,7 @@ extern "C" { #endif /** - * @file request/ta_find_transaction_objects.h + * @file accelerator/core/request/ta_find_transaction_objects.h */ /** struct of ta_find_transaction_objects_req_t */ diff --git a/request/ta_send_mam.c b/accelerator/core/request/ta_send_mam.c similarity index 100% rename from request/ta_send_mam.c rename to accelerator/core/request/ta_send_mam.c diff --git a/request/ta_send_mam.h b/accelerator/core/request/ta_send_mam.h similarity index 95% rename from request/ta_send_mam.h rename to accelerator/core/request/ta_send_mam.h index cc8c04ff..bc4b0b8f 100644 --- a/request/ta_send_mam.h +++ b/accelerator/core/request/ta_send_mam.h @@ -11,8 +11,8 @@ #include #include -#include "accelerator/errors.h" #include "common/model/transaction.h" +#include "common/ta_errors.h" #include "common/trinary/tryte.h" #ifdef __cplusplus @@ -20,7 +20,7 @@ extern "C" { 
#endif /** - * @file request/ta_send_mam.h + * @file accelerator/core/request/ta_send_mam.h */ /** struct of ta_send_mam_req_t */ diff --git a/request/ta_send_transfer.c b/accelerator/core/request/ta_send_transfer.c similarity index 100% rename from request/ta_send_transfer.c rename to accelerator/core/request/ta_send_transfer.c diff --git a/request/ta_send_transfer.h b/accelerator/core/request/ta_send_transfer.h similarity index 96% rename from request/ta_send_transfer.h rename to accelerator/core/request/ta_send_transfer.h index a7882650..40440c23 100644 --- a/request/ta_send_transfer.h +++ b/accelerator/core/request/ta_send_transfer.h @@ -19,7 +19,7 @@ extern "C" { #endif /** - * @file request/ta_send_transfer.h + * @file accelerator/core/request/ta_send_transfer.h */ /** struct of ta_send_transfer_req_t */ diff --git a/response/BUILD b/accelerator/core/response/BUILD similarity index 65% rename from response/BUILD rename to accelerator/core/response/BUILD index c2afddfc..ffabce5d 100644 --- a/response/BUILD +++ b/accelerator/core/response/BUILD @@ -6,14 +6,18 @@ cc_library( hdrs = glob([ "*.h", ]), - include_prefix = "response", + include_prefix = "accelerator/core/response", visibility = ["//visibility:public"], deps = [ - "//accelerator:ta_errors", + "//common", + "//accelerator:build_option", "@entangled//common:errors", "@entangled//common/model:transaction", "@entangled//mam/mam:message", "@entangled//utils/containers/hash:hash243_queue", "@entangled//utils/containers/hash:hash243_stack", - ], + ] + select({ + "//accelerator:db_enable": ["//storage"], + "//conditions:default": [], + }), ) diff --git a/response/response.h b/accelerator/core/response/response.h similarity index 62% rename from response/response.h rename to accelerator/core/response/response.h index 0eb7c606..218c5d5d 100644 --- a/response/response.h +++ b/accelerator/core/response/response.h @@ -9,15 +9,15 @@ #ifndef RESPONSE_RESPONSE_H_ #define RESPONSE_RESPONSE_H_ -#include 
"response/ta_find_transactions.h" -#include "response/ta_find_transactions_obj.h" -#include "response/ta_generate_address.h" -#include "response/ta_get_tips.h" -#include "response/ta_send_mam.h" -#include "response/ta_send_transfer.h" +#include "ta_find_transactions.h" +#include "ta_find_transactions_obj.h" +#include "ta_generate_address.h" +#include "ta_get_tips.h" +#include "ta_send_mam.h" +#include "ta_send_transfer.h" /** - * @file response.h + * @file accelerator/core/response/response.h * @brief Data structure of response type */ diff --git a/response/ta_find_transactions.c b/accelerator/core/response/ta_find_transactions.c similarity index 100% rename from response/ta_find_transactions.c rename to accelerator/core/response/ta_find_transactions.c diff --git a/response/ta_find_transactions.h b/accelerator/core/response/ta_find_transactions.h similarity index 95% rename from response/ta_find_transactions.h rename to accelerator/core/response/ta_find_transactions.h index f37acf0b..4f6296a1 100644 --- a/response/ta_find_transactions.h +++ b/accelerator/core/response/ta_find_transactions.h @@ -17,7 +17,7 @@ extern "C" { #endif /** - * @file response/ta_find_transactions.h + * @file accelerator/core/response/ta_find_transactions.h */ /** struct of ta_find_transactions_by_tag_res_t */ diff --git a/response/ta_find_transactions_obj.c b/accelerator/core/response/ta_find_transactions_obj.c similarity index 100% rename from response/ta_find_transactions_obj.c rename to accelerator/core/response/ta_find_transactions_obj.c diff --git a/response/ta_find_transactions_obj.h b/accelerator/core/response/ta_find_transactions_obj.h similarity index 95% rename from response/ta_find_transactions_obj.h rename to accelerator/core/response/ta_find_transactions_obj.h index 50639f19..9c7c4369 100644 --- a/response/ta_find_transactions_obj.h +++ b/accelerator/core/response/ta_find_transactions_obj.h @@ -17,7 +17,7 @@ extern "C" { #endif /** - * @file response/ta_find_transactions_obj.h 
+ * @file accelerator/core/response/ta_find_transactions_obj.h */ /** struct of ta_find_transactions_obj_res_t */ diff --git a/response/ta_generate_address.c b/accelerator/core/response/ta_generate_address.c similarity index 100% rename from response/ta_generate_address.c rename to accelerator/core/response/ta_generate_address.c diff --git a/response/ta_generate_address.h b/accelerator/core/response/ta_generate_address.h similarity index 95% rename from response/ta_generate_address.h rename to accelerator/core/response/ta_generate_address.h index 38fa55d5..b647a5a9 100644 --- a/response/ta_generate_address.h +++ b/accelerator/core/response/ta_generate_address.h @@ -17,7 +17,7 @@ extern "C" { #endif /** - * @file response/ta_generate_address.h + * @file accelerator/core/response/ta_generate_address.h */ /** struct of ta_generate_address_res_t */ diff --git a/response/ta_get_tips.c b/accelerator/core/response/ta_get_tips.c similarity index 100% rename from response/ta_get_tips.c rename to accelerator/core/response/ta_get_tips.c diff --git a/response/ta_get_tips.h b/accelerator/core/response/ta_get_tips.h similarity index 95% rename from response/ta_get_tips.h rename to accelerator/core/response/ta_get_tips.h index 415a0d7a..a588584a 100644 --- a/response/ta_get_tips.h +++ b/accelerator/core/response/ta_get_tips.h @@ -17,7 +17,7 @@ extern "C" { #endif /** - * @file response/ta_get_tips.h + * @file accelerator/core/response/ta_get_tips.h */ /** struct of ta_get_tips_res_t */ diff --git a/response/ta_send_mam.c b/accelerator/core/response/ta_send_mam.c similarity index 100% rename from response/ta_send_mam.c rename to accelerator/core/response/ta_send_mam.c diff --git a/response/ta_send_mam.h b/accelerator/core/response/ta_send_mam.h similarity index 98% rename from response/ta_send_mam.h rename to accelerator/core/response/ta_send_mam.h index f20dc3bd..fd0fe6e7 100644 --- a/response/ta_send_mam.h +++ b/accelerator/core/response/ta_send_mam.h @@ -9,8 +9,8 @@ #ifndef 
RESPONSE_TA_SEND_MAM_H_ #define RESPONSE_TA_SEND_MAM_H_ -#include "accelerator/errors.h" #include "common/model/transaction.h" +#include "common/ta_errors.h" #include "common/trinary/tryte.h" #include "mam/mam/message.h" @@ -19,7 +19,7 @@ extern "C" { #endif /** - * @file response/ta_send_mam.h + * @file accelerator/core/response/ta_send_mam.h */ #define NUM_TRYTES_MAM_MSG_ID MAM_MSG_ID_SIZE / 3 diff --git a/response/ta_send_transfer.c b/accelerator/core/response/ta_send_transfer.c similarity index 100% rename from response/ta_send_transfer.c rename to accelerator/core/response/ta_send_transfer.c diff --git a/response/ta_send_transfer.h b/accelerator/core/response/ta_send_transfer.h similarity index 81% rename from response/ta_send_transfer.h rename to accelerator/core/response/ta_send_transfer.h index e93fad68..1870bad8 100644 --- a/response/ta_send_transfer.h +++ b/accelerator/core/response/ta_send_transfer.h @@ -10,6 +10,10 @@ #define RESPONSE_TA_SEND_TRANSFER_H_ #include +#ifdef DB_ENABLE +#include "storage/ta_storage.h" +#endif +#include "common/model/transaction.h" #include "utils/containers/hash/hash243_queue.h" #ifdef __cplusplus @@ -17,13 +21,17 @@ extern "C" { #endif /** - * @file response/ta_send_transfer.h + * @file accelerator/core/response/ta_send_transfer.h */ /** struct of ta_send_transfer_res_t */ typedef struct { /** Transaction address is a 243 long flex trits hash queue. 
*/ hash243_queue_t hash; + transaction_array_t* txn_array; +#ifdef DB_ENABLE + char uuid_string[DB_UUID_STRING_LENGTH]; +#endif } ta_send_transfer_res_t; /** diff --git a/serializer/BUILD b/accelerator/core/serializer/BUILD similarity index 52% rename from serializer/BUILD rename to accelerator/core/serializer/BUILD index 6795ec0a..ce60923b 100644 --- a/serializer/BUILD +++ b/accelerator/core/serializer/BUILD @@ -1,29 +1,22 @@ -package(default_visibility = ["//visibility:public"]) - cc_library( name = "serializer", srcs = ["serializer.c"], hdrs = ["serializer.h"], - copts = ["-DLOGGER_ENABLE"] + select({ - "//connectivity/mqtt:mqtt_enable": ["-DMQTT_ENABLE"], - "//conditions:default": [], - }), visibility = ["//visibility:public"], deps = [ "//accelerator:ta_config", - "//accelerator:ta_errors", - "//request", - "//response", + "//common", + "//accelerator:build_option", + "//accelerator/core/request", + "//accelerator/core/response", "//utils:fill_nines", - "//utils:ta_logger", "@cJSON", "@entangled//cclient/response:responses", "@entangled//common/trinary:flex_trit", - "@entangled//common/trinary:tryte_ascii", "@entangled//utils:char_buffer", "@entangled//utils/containers/hash:hash_array", ] + select({ - "//connectivity/mqtt:mqtt_enable": ["//connectivity/mqtt:mqtt_common"], + "//accelerator:mqtt_enable": ["//connectivity/mqtt:mqtt_common"], "//conditions:default": [], }), ) diff --git a/serializer/serializer.c b/accelerator/core/serializer/serializer.c similarity index 94% rename from serializer/serializer.c rename to accelerator/core/serializer/serializer.c index b8d85e69..8ebef0cb 100644 --- a/serializer/serializer.c +++ b/accelerator/core/serializer/serializer.c @@ -10,7 +10,8 @@ #ifdef MQTT_ENABLE #include "connectivity/mqtt/mqtt_common.h" #endif -#include "utils/logger.h" +#include "common/logger.h" +#include "time.h" #define SERI_LOGGER "serializer" static logger_id_t logger_id; @@ -45,7 +46,7 @@ status_t ta_get_info_serialize(char** obj, ta_config_t* 
const ta_config, iota_co cJSON_AddNumberToObject(json_root, "redis_port", cache->port); cJSON_AddNumberToObject(json_root, "milestone_depth", tangle->milestone_depth); cJSON_AddNumberToObject(json_root, "mwm", tangle->mwm); - cJSON_AddBoolToObject(json_root, "verbose", verbose_mode); + cJSON_AddBoolToObject(json_root, "quiet", quiet_mode); *obj = cJSON_PrintUnformatted(json_root); if (*obj == NULL) { @@ -56,7 +57,47 @@ status_t ta_get_info_serialize(char** obj, ta_config_t* const ta_config, iota_co cJSON_Delete(json_root); return ret; } +#ifdef DB_ENABLE +status_t db_identity_serialize(char** obj, db_identity_t* id_obj) { + status_t ret = SC_OK; + cJSON* json_root = cJSON_CreateObject(); + if (json_root == NULL) { + ta_log_error("%s\n", "SC_SERIALIZER_JSON_CREATE"); + return SC_SERIALIZER_JSON_CREATE; + } + // uuid + char uuid_str[DB_UUID_STRING_LENGTH]; + db_get_identity_uuid_string(id_obj, uuid_str); + cJSON_AddStringToObject(json_root, "id", uuid_str); + // transaction hash + char hash_trytes[NUM_TRYTES_HASH + 1]; + memcpy(hash_trytes, db_ret_identity_hash(id_obj), NUM_TRYTES_HASH); + hash_trytes[NUM_TRYTES_HASH] = '\0'; + cJSON_AddStringToObject(json_root, "hash", hash_trytes); + + // status + if (db_ret_identity_status(id_obj) == CONFIRMED_TXN) { + cJSON_AddStringToObject(json_root, "status", "CONFIRMED"); + } else { + cJSON_AddStringToObject(json_root, "status", "PENDING"); + } + + // timestamp + time_t raw_time = db_ret_identity_timestamp(id_obj); + struct tm* ts = localtime(&raw_time); + char buf[40]; + strftime(buf, sizeof(buf), "%a %Y-%m-%d %H:%M:%S %Z", ts); + cJSON_AddStringToObject(json_root, "timestamp", buf); + *obj = cJSON_PrintUnformatted(json_root); + if (*obj == NULL) { + ta_log_error("%s\n", "SC_SERIALIZER_JSON_PARSE"); + ret = SC_SERIALIZER_JSON_PARSE; + } + cJSON_Delete(json_root); + return ret; +} +#endif static status_t ta_hash243_stack_to_json_array(hash243_stack_t stack, cJSON* json_root) { size_t array_count = 0; hash243_stack_entry_t* 
s_iter = NULL; @@ -625,7 +666,7 @@ status_t ta_find_transactions_by_tag_res_serialize(const ta_find_transactions_by return ret; } -status_t ta_send_transfer_res_serialize(transaction_array_t* res, char** obj) { +status_t ta_send_transfer_res_serialize(ta_send_transfer_res_t* res, char** obj) { status_t ret = SC_OK; cJSON* json_root = cJSON_CreateObject(); if (json_root == NULL) { @@ -634,11 +675,13 @@ status_t ta_send_transfer_res_serialize(transaction_array_t* res, char** obj) { goto done; } - ret = iota_transaction_to_json_object(transaction_array_at(res, 0), &json_root); + ret = iota_transaction_to_json_object(transaction_array_at(res->txn_array, 0), &json_root); if (ret != SC_OK) { goto done; } - +#ifdef DB_ENABLE + cJSON_AddStringToObject(json_root, "id", res->uuid_string); +#endif *obj = cJSON_PrintUnformatted(json_root); if (*obj == NULL) { ret = SC_SERIALIZER_JSON_PARSE; diff --git a/serializer/serializer.h b/accelerator/core/serializer/serializer.h similarity index 89% rename from serializer/serializer.h rename to accelerator/core/serializer/serializer.h index 1d8f0302..587f1dad 100644 --- a/serializer/serializer.h +++ b/accelerator/core/serializer/serializer.h @@ -10,11 +10,11 @@ #define SERIALIZER_SERIALIZER_H_ #include "accelerator/config.h" +#include "accelerator/core/request/request.h" +#include "accelerator/core/response/response.h" #include "cJSON.h" #include "cclient/response/responses.h" #include "common/trinary/tryte_ascii.h" -#include "request/request.h" -#include "response/response.h" #include "utils/char_buffer.h" #include "utils/containers/hash/hash_array.h" #include "utils/fill_nines.h" @@ -23,7 +23,7 @@ extern "C" { #endif /** - * @file serializer.h + * @file accelerator/core/serializer/serializer.h * @brief Serialization of data strings * @example test_serializer.c */ @@ -73,6 +73,19 @@ int serializer_logger_release(); status_t ta_get_info_serialize(char** obj, ta_config_t* const ta_config, iota_config_t* const tangle, ta_cache_t* const 
cache); +#ifdef DB_ENABLE +/** + * @brief Serialize identity info into JSON + * + * @param[out] obj db identity info in JSON + * @param[in] id_obj pointer to db_identity_t; + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t db_identity_serialize(char** obj, db_identity_t* id_obj); +#endif + /** * @brief Serialze type of ta_generate_address_res_t to JSON string * @@ -97,6 +110,19 @@ status_t ta_generate_address_res_serialize(const ta_generate_address_res_t* cons */ status_t ta_get_tips_res_serialize(const get_tips_res_t* const res, char** obj); +/** + * @brief Serialize the response of api_insert_identity() + * + * @param[in] hash Response transaction hash + * @param[in] uuid_string Response uuid string + * @param[out] obj Input values in JSON + * + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t ta_insert_identity_res_serialize(const char* hash, const char* uuid_string, char** obj); + /** * @brief Deserialze JSON string to type of ta_send_transfer_req_t * @@ -112,14 +138,14 @@ status_t ta_send_transfer_req_deserialize(const char* const obj, ta_send_transfe /** * @brief Serialze the response of api_send_transfer() * - * @param[in] res Response data in type of transaction_array_t + * @param[in] res Response data in type of ta_send_transfer_res_t * @param[out] obj Input values in JSON * * @return * - SC_OK on success * - non-zero on error */ -status_t ta_send_transfer_res_serialize(transaction_array_t* res, char** obj); +status_t ta_send_transfer_res_serialize(ta_send_transfer_res_t* res, char** obj); /** * @brief Deserialze JSON string to hash8019_array_p diff --git a/accelerator/main.c b/accelerator/main.c index b4236a1e..d70a131e 100644 --- a/accelerator/main.c +++ b/accelerator/main.c @@ -1,9 +1,9 @@ #include -#include "accelerator/errors.h" -#include "accelerator/http.h" +#include "common/logger.h" +#include "common/ta_errors.h" +#include "connectivity/http/http.h" #include "utils/handles/signal.h" -#include 
"utils/logger.h" #define MAIN_LOGGER "main" @@ -55,15 +55,6 @@ int main(int argc, char* argv[]) { return EXIT_FAILURE; } - // Enable other loggers when verbose mode is on - if (verbose_mode) { - http_logger_init(); - } else { - // Destroy logger when verbose mode is off - logger_helper_release(logger_id); - logger_helper_destroy(); - } - if (ta_http_init(&ta_http, &ta_core) != SC_OK) { ta_log_error("HTTP initialization failed %s.\n", MAIN_LOGGER); return EXIT_FAILURE; @@ -74,7 +65,20 @@ int main(int argc, char* argv[]) { goto cleanup; } - log_warning(logger_id, "Tangle-accelerator starts running\n"); + log_info(logger_id, "Tangle-accelerator starts running\n"); + + // Disable loggers when quiet mode is on + if (quiet_mode) { + // Destroy logger when quiet mode is on + logger_helper_release(logger_id); + logger_helper_destroy(); + } else { + http_logger_init(); + apis_logger_init(); + cc_logger_init(); + pow_logger_init(); + timer_logger_init(); + } /* pause() cause TA to sleep until it catch a signal, * also the return value and errno should be -1 and EINTR on success. 
@@ -86,16 +90,21 @@ int main(int argc, char* argv[]) { } cleanup: - log_warning(logger_id, "Destroying API lock\n"); + log_info(logger_id, "Destroying API lock\n"); if (apis_lock_destroy() != SC_OK) { ta_log_error("Destroying api lock failed %s.\n", MAIN_LOGGER); return EXIT_FAILURE; } - log_warning(logger_id, "Destroying TA configurations\n"); + log_info(logger_id, "Destroying TA configurations\n"); ta_core_destroy(&ta_core); - if (verbose_mode) { + if (quiet_mode == false) { http_logger_release(); + apis_logger_release(); + cc_logger_release(); + serializer_logger_release(); + pow_logger_release(); + timer_logger_release(); logger_helper_release(logger_id); if (logger_helper_destroy() != RC_OK) { ta_log_error("Destroying logger failed %s.\n", MAIN_LOGGER); diff --git a/accelerator/message.c b/accelerator/message.c deleted file mode 100644 index 5043a709..00000000 --- a/accelerator/message.c +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (C) 2018-2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. - * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -#include "message.h" -#include "stdio.h" - -void ta_usage() { - printf("tangle-accelerator usage:\n"); - for (int i = 0; i < cli_cmd_num; i++) { - printf("--%-34s ", ta_cli_arguments_g[i].name); - printf(" "); - if (ta_cli_arguments_g[i].has_arg == REQUIRED_ARG) { - printf(" arg "); - } else if (ta_cli_arguments_g[i].has_arg == OPTIONAL_ARG) { - printf("[arg]"); - } else { - printf(" "); - } - printf(" %s \n", ta_cli_arguments_g[i].desc); - } -} diff --git a/accelerator/message.h b/accelerator/message.h deleted file mode 100644 index 0ba73a64..00000000 --- a/accelerator/message.h +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (C) 2018-2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. 
- * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -#ifndef ACCELERATOR_MESSAGE_H_ -#define ACCELERATOR_MESSAGE_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * @file accelerator/message.h - * @brief Message and options for tangled-accelerator configures - */ - -typedef enum ta_cli_arg_value_e { - /** TA */ - TA_HOST_CLI = 127, - TA_PORT_CLI, - TA_THREAD_COUNT_CLI, - - /** IRI */ - IRI_HOST_CLI, - IRI_PORT_CLI, - - /** REDIS */ - REDIS_HOST_CLI, - REDIS_PORT_CLI, - - /** DB */ - DB_HOST_CLI, - - /** CONFIG */ - MILESTONE_DEPTH_CLI, - MWM_CLI, - SEED_CLI, - CACHE, - CONF_CLI, - PROXY_API, - - /** LOGGER */ - VERBOSE, -} ta_cli_arg_value_t; - -typedef enum ta_cli_arg_requirement_e { NO_ARG, REQUIRED_ARG, OPTIONAL_ARG } ta_cli_arg_requirement_t; - -static struct ta_cli_argument_s { - char const* name; - int val; - char const* desc; - ta_cli_arg_requirement_t has_arg; -} ta_cli_arguments_g[] = {{"help", 'h', "Show tangle-accelerator usage", NO_ARG}, - {"version", 'v', "tangle-accelerator version", NO_ARG}, - {"ta_host", TA_HOST_CLI, "TA listening host", REQUIRED_ARG}, - {"ta_port", TA_PORT_CLI, "TA listening port", REQUIRED_ARG}, - {"ta_thread", TA_THREAD_COUNT_CLI, "TA executing thread", OPTIONAL_ARG}, - {"iri_host", IRI_HOST_CLI, "IRI listening host", REQUIRED_ARG}, - {"iri_port", IRI_PORT_CLI, "IRI listening port", REQUIRED_ARG}, - {"redis_host", REDIS_HOST_CLI, "Redis server listening host", REQUIRED_ARG}, - {"redis_port", REDIS_PORT_CLI, "Redis server listening port", REQUIRED_ARG}, - {"db_host", DB_HOST_CLI, "DB server listening host", REQUIRED_ARG}, - {"milestone_depth", MILESTONE_DEPTH_CLI, "IRI milestone depth", OPTIONAL_ARG}, - {"mwm", MWM_CLI, "minimum weight magnitude", OPTIONAL_ARG}, - {"seed", SEED_CLI, "IOTA seed", OPTIONAL_ARG}, - {"cache", CACHE, "Enable cache server with Y", 
REQUIRED_ARG}, - {"config", CONF_CLI, "Read configuration file", REQUIRED_ARG}, - {"proxy_passthrough", PROXY_API, "Pass proxy API directly to IRI without processing", NO_ARG}, - {"verbose", VERBOSE, "Enable logger", NO_ARG}}; - -static const int cli_cmd_num = sizeof(ta_cli_arguments_g) / sizeof(struct ta_cli_argument_s); -void ta_usage(); -#ifdef __cplusplus -} -#endif - -#endif // ACCELERATOR_MESSAGE_H_ diff --git a/accelerator/conn_mqtt.c b/accelerator/mqtt_main.c similarity index 83% rename from accelerator/conn_mqtt.c rename to accelerator/mqtt_main.c index d17c0552..ea0d81d5 100644 --- a/accelerator/conn_mqtt.c +++ b/accelerator/mqtt_main.c @@ -1,9 +1,9 @@ #include "accelerator/config.h" +#include "common/ta_errors.h" #include "config.h" #include "connectivity/mqtt/duplex_callback.h" #include "connectivity/mqtt/duplex_utils.h" #include "connectivity/mqtt/mqtt_common.h" -#include "errors.h" #define CONN_MQTT_LOGGER "conn-mqtt" @@ -27,6 +27,11 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } + // Initialize configurations with configuration file + if (ta_core_file_init(&ta_core, argc, argv) != SC_OK) { + return EXIT_FAILURE; + } + // Initialize configurations with CLI value if (ta_core_cli_init(&ta_core, argc, argv) != SC_OK) { return EXIT_FAILURE; @@ -37,16 +42,21 @@ int main(int argc, char *argv[]) { return EXIT_FAILURE; } - if (verbose_mode) { + // Disable loggers when quiet mode is on + if (quiet_mode) { + // Destroy logger when quiet mode is on + logger_helper_release(logger_id); + logger_helper_destroy(); + } else { mqtt_utils_logger_init(); mqtt_common_logger_init(); mqtt_callback_logger_init(); mqtt_pub_logger_init(); mqtt_sub_logger_init(); - } else { - // Destroy logger when verbose mode is off - logger_helper_release(logger_id); - logger_helper_destroy(); + apis_logger_init(); + cc_logger_init(); + pow_logger_init(); + timer_logger_init(); } // Initialize `mosq` and `cfg` @@ -89,12 +99,17 @@ int main(int argc, char *argv[]) { 
mosquitto_lib_cleanup(); mosq_config_free(&cfg); - if (verbose_mode) { + if (quiet_mode == false) { mqtt_utils_logger_release(); mqtt_common_logger_release(); mqtt_callback_logger_release(); mqtt_pub_logger_release(); mqtt_sub_logger_release(); + apis_logger_release(); + cc_logger_release(); + serializer_logger_release(); + pow_logger_release(); + timer_logger_release(); logger_helper_release(logger_id); if (logger_helper_destroy() != RC_OK) { return EXIT_FAILURE; diff --git a/accelerator/server.cc b/accelerator/server.cc deleted file mode 100644 index 4c1d165f..00000000 --- a/accelerator/server.cc +++ /dev/null @@ -1,499 +0,0 @@ -/* - * Copyright (C) 2018-2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. - * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -#include -#include -#include -#include "accelerator/apis.h" -#include "accelerator/config.h" -#include "accelerator/errors.h" -#include "accelerator/proxy_apis.h" -#include "cJSON.h" -#include "utils/logger.h" -#include "utils/macros.h" - -#define SERVER_LOGGER "server" - -static ta_core_t ta_core; -static logger_id_t logger_id; - -void set_method_header(served::response& res, http_method_t method) { - res.set_header("Server", ta_core.ta_conf.version); - res.set_header("Access-Control-Allow-Origin", "*"); - - switch (method) { - case HTTP_METHOD_OPTIONS: - res.set_header("Access-Control-Allow-Methods", "GET, POST, OPTIONS"); - res.set_header("Access-Control-Allow-Headers", "Origin, Content-Type, Accept"); - res.set_header("Access-Control-Max-Age", "86400"); - break; - default: - res.set_header("Content-Type", "application/json"); - break; - } -} - -status_t set_response_content(status_t ret, char** json_result) { - status_t http_ret; - if (ret == SC_OK) { - return SC_HTTP_OK; - } - - cJSON* json_obj = cJSON_CreateObject(); - switch (ret) { - 
case SC_CCLIENT_NOT_FOUND: - case SC_MAM_NOT_FOUND: - http_ret = SC_HTTP_NOT_FOUND; - cJSON_AddStringToObject(json_obj, "message", "Request not found"); - break; - case SC_CCLIENT_JSON_KEY: - case SC_MAM_NO_PAYLOAD: - http_ret = SC_HTTP_BAD_REQUEST; - cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - break; - default: - http_ret = SC_HTTP_INTERNAL_SERVICE_ERROR; - cJSON_AddStringToObject(json_obj, "message", "Internal service error"); - break; - } - *json_result = cJSON_PrintUnformatted(json_obj); - return http_ret; -} - -int main(int argc, char* argv[]) { - served::multiplexer mux; - mux.use_after(served::plugin::access_log); - - // Initialize logger - if (ta_logger_init() != SC_OK) { - return EXIT_FAILURE; - } - - logger_id = logger_helper_enable(SERVER_LOGGER, LOGGER_DEBUG, true); - - // Initialize configurations with default value - if (ta_core_default_init(&ta_core) != SC_OK) { - return EXIT_FAILURE; - } - - // Initialize configurations with file value - if (ta_core_file_init(&ta_core, argc, argv) != SC_OK) { - return EXIT_FAILURE; - } - - // Initialize configurations with CLI value - if (ta_core_cli_init(&ta_core, argc, argv) != SC_OK) { - return EXIT_FAILURE; - } - - if (ta_core_set(&ta_core) != SC_OK) { - ta_log_error("Configure failed %s.\n", SERVER_LOGGER); - return EXIT_FAILURE; - } - - // Initialize apis cJSON lock - if (apis_lock_init() != SC_OK) { - ta_log_error("Lock initialization failed %s.\n", SERVER_LOGGER); - return EXIT_FAILURE; - } - - // Enable other loggers when verbose mode is on - if (verbose_mode) { - apis_logger_init(); - cc_logger_init(); - serializer_logger_init(); - pow_logger_init(); - timer_logger_init(); - } else { - // Destroy logger when verbose mode is off - logger_helper_release(logger_id); - logger_helper_destroy(); - } - - mux.handle("/mam/{bundle:[A-Z9]{81}}") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, 
HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result = NULL; - - ret = api_receive_mam_message(&ta_core.iota_conf, &ta_core.iota_service, req.params["bundle"].c_str(), - &json_result); - ret = set_response_content(ret, &json_result); - - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - mux.handle("/mam") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .post([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - if (req.header("content-type").find("application/json") == std::string::npos) { - cJSON* json_obj = cJSON_CreateObject(); - cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - json_result = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - cJSON_Delete(json_obj); - } else { - ret = api_mam_send_message(&ta_core.iota_conf, &ta_core.iota_service, req.body().c_str(), &json_result); - ret = set_response_content(ret, &json_result); - res.set_status(ret); - } - - set_method_header(res, HTTP_METHOD_POST); - res << json_result; - }); - - /** - * @method {get} /transaction/ Find transaction object with get request - * - * @return {String[]} hash Transaction object - */ - mux.handle("/transaction/{hash:[A-Z9]{81}}") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result = NULL; - - ret = api_find_transaction_object_single(&ta_core.iota_service, req.params["hash"].c_str(), &json_result); - ret = set_response_content(ret, &json_result); - - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << 
json_result; - }); - - /** - * @method {post} /transaction/object Find transaction object - * - * @return {String[]} object Info of entire transaction object - */ - mux.handle("/transaction/object") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .post([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - if (req.header("content-type").find("application/json") == std::string::npos) { - cJSON* json_obj = cJSON_CreateObject(); - cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - json_result = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - cJSON_Delete(json_obj); - } else { - ret = api_find_transaction_objects(&ta_core.iota_service, req.body().c_str(), &json_result); - ret = set_response_content(ret, &json_result); - res.set_status(ret); - } - - set_method_header(res, HTTP_METHOD_POST); - res << json_result; - }); - - /** - * @method {get} /tips Fetch pair tips which base on GetTransactionToApprove - * - * @return {String[]} tips Pair of transaction hashes - */ - mux.handle("/tips/pair") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - UNUSED(req); - status_t ret = SC_OK; - char* json_result; - - ret = api_get_tips_pair(&ta_core.iota_conf, &ta_core.iota_service, &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {get} /tips Fetch all tips - * - * @return {String[]} tips List of transaction hashes - */ - mux.handle("/tips") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - 
set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - UNUSED(req); - status_t ret = SC_OK; - char* json_result; - - ret = api_get_tips(&ta_core.iota_service, &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {get} /address Generate an unused address - * - * @return {String} address hashes - */ - mux.handle("/address") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - UNUSED(req); - status_t ret = SC_OK; - char* json_result; - - ret = api_generate_address(&ta_core.iota_conf, &ta_core.iota_service, &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {get} /tag//hashes Find transaction hash with tag - * - * @return {String} Transaction hashes - */ - mux.handle("/tag/{tag:[A-Z9]{1,27}}/hashes") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - ret = api_find_transactions_by_tag(&ta_core.iota_service, req.params["tag"].c_str(), &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {get} /tag/:tag Find transaction objects by tag - * - * @param {String} tag Must be 27 trytes long - * - * @return {String[]} transactions List of transaction objects - */ - mux.handle("/tag/{tag:[A-Z9]{1,27}}") - .method(served::method::OPTIONS, - 
[&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - ret = api_find_transactions_obj_by_tag(&ta_core.iota_service, req.params["tag"].c_str(), &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {post} /transaction send transfer - * - * @return {String} transaction object - */ - mux.handle("/transaction") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .post([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - if (req.header("content-type").find("application/json") == std::string::npos) { - cJSON* json_obj = cJSON_CreateObject(); - cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - json_result = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - cJSON_Delete(json_obj); - } else { - ret = api_send_transfer(&ta_core.iota_conf, &ta_core.iota_service, req.body().c_str(), &json_result); - ret = set_response_content(ret, &json_result); - res.set_status(ret); - } - - set_method_header(res, HTTP_METHOD_POST); - res << json_result; - }); - - /** - * @method {post} /tryte send trytes - * - * @return {String} transaction object - */ - mux.handle("/tryte") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .post([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - if (req.header("content-type").find("application/json") == std::string::npos) { - cJSON* json_obj = cJSON_CreateObject(); - 
cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - json_result = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - cJSON_Delete(json_obj); - } else { - ret = api_send_trytes(&ta_core.iota_conf, &ta_core.iota_service, req.body().c_str(), &json_result); - ret = set_response_content(ret, &json_result); - res.set_status(ret); - } - - set_method_header(res, HTTP_METHOD_POST); - res << json_result; - }); - - /** - * @method {get} / Dump information about a running accelerator - * - * @return {String[]} object Info of a running accelerator - */ - mux.handle("/info") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([&](served::response& res, const served::request& req) { - UNUSED(req); - status_t ret = SC_OK; - char* json_result = NULL; - - ret = api_get_ta_info(&ta_core.ta_conf, &ta_core.iota_conf, &ta_core.cache, &json_result); - ret = set_response_content(ret, &json_result); - set_method_header(res, HTTP_METHOD_GET); - res.set_status(ret); - res << json_result; - }); - - /** - * @method {get} {*} Client bad request - * @method {options} {*} Get server information - * - * @return {String} message Error message - */ - mux.handle("{*}") - .method(served::method::OPTIONS, - [&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .get([](served::response& res, const served::request&) { - cJSON* json_obj = cJSON_CreateObject(); - cJSON_AddStringToObject(json_obj, "message", "Invalid path"); - const char* json = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - set_method_header(res, HTTP_METHOD_GET); - res << json; - - cJSON_Delete(json_obj); - }); - - /** - * @method {post} / IOTA proxy api - * - * @return {String} IOTA proxy api response - */ - mux.handle("/") - .method(served::method::OPTIONS, - 
[&](served::response& res, const served::request& req) { - UNUSED(req); - set_method_header(res, HTTP_METHOD_OPTIONS); - }) - .post([&](served::response& res, const served::request& req) { - status_t ret = SC_OK; - char* json_result; - - if (req.header("content-type").find("application/json") == std::string::npos) { - cJSON* json_obj = cJSON_CreateObject(); - cJSON_AddStringToObject(json_obj, "message", "Invalid request header"); - json_result = cJSON_PrintUnformatted(json_obj); - - res.set_status(SC_HTTP_BAD_REQUEST); - cJSON_Delete(json_obj); - } else { - ret = proxy_api_wrapper(&ta_core.ta_conf, &ta_core.iota_service, req.body().c_str(), &json_result); - ret = set_response_content(ret, &json_result); - res.set_status(ret); - } - - set_method_header(res, HTTP_METHOD_POST); - res << json_result; - }); - - std::cout << "Starting..." << std::endl; - served::net::server server(ta_core.ta_conf.host, ta_core.ta_conf.port, mux); - server.run(ta_core.ta_conf.thread_count); - - if (apis_lock_destroy() != SC_OK) { - ta_log_error("Destroying api lock failed %s.\n", SERVER_LOGGER); - return EXIT_FAILURE; - } - ta_core_destroy(&ta_core.iota_service, &ta_core.db_service); - - if (verbose_mode) { - apis_logger_release(); - cc_logger_release(); - serializer_logger_release(); - pow_logger_release(); - timer_logger_release(); - logger_helper_release(logger_id); - if (logger_helper_destroy() != RC_OK) { - return EXIT_FAILURE; - } - } - return 0; -} diff --git a/common/BUILD b/common/BUILD new file mode 100644 index 00000000..90e12109 --- /dev/null +++ b/common/BUILD @@ -0,0 +1,23 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "common", + deps = [ + ":ta_errors", + ":ta_logger", + ], +) + +cc_library( + name = "ta_errors", + hdrs = ["ta_errors.h"], +) + +cc_library( + name = "ta_logger", + srcs = ["logger.h"], + defines = ["LOGGER_ENABLE"], + deps = [ + "@entangled//utils:logger_helper", + ], +) diff --git a/utils/logger.h b/common/logger.h 
similarity index 92% rename from utils/logger.h rename to common/logger.h index 2b9e000d..8cd1d731 100644 --- a/utils/logger.h +++ b/common/logger.h @@ -6,12 +6,13 @@ * "LICENSE" at the root of this distribution. */ -#ifndef ACCELERATOR_LOGGER_H_ -#define ACCELERATOR_LOGGER_H_ +#ifndef COMMON_LOGGER_H_ +#define COMMON_LOGGER_H_ #ifdef __cplusplus extern "C" { #endif +#include "common/ta_errors.h" #include "utils/logger_helper.h" #ifdef NDEBUG @@ -20,6 +21,10 @@ extern "C" { #define TA_LOGGER_LEVEL LOGGER_DEBUG #endif +/** + * @file common/logger.h + */ + /** * @brief initialize logger level according to build type. * @return @@ -65,10 +70,10 @@ static inline status_t ta_logger_init() { fflush(stdout); \ } while (0) -bool verbose_mode; /**< flag of verbose mode */ +bool quiet_mode; /**< flag of quiet mode */ #ifdef __cplusplus } #endif -#endif // TA_SCYLLA_API_H_ +#endif // COMMON_LOGGER_H_ diff --git a/accelerator/errors.h b/common/ta_errors.h similarity index 98% rename from accelerator/errors.h rename to common/ta_errors.h index 8d6b8208..226972ee 100644 --- a/accelerator/errors.h +++ b/common/ta_errors.h @@ -6,17 +6,16 @@ * "LICENSE" at the root of this distribution. 
*/ -#ifndef ACCELERATOR_ERRORS_H_ -#define ACCELERATOR_ERRORS_H_ +#ifndef COMMON_TA_ERRORS_H_ +#define COMMON_TA_ERRORS_H_ #include - #ifdef __cplusplus extern "C" { #endif /** - * @file errors.h + * @file common/ta_errors.h * @brief Error Code of tangle-acclerator * * bit division: @@ -238,4 +237,4 @@ typedef enum { } #endif -#endif // ACCELERATOR_ERRORS_H_ +#endif // COMMON_TA_ERRORS_H_ diff --git a/connectivity/http/BUILD b/connectivity/http/BUILD new file mode 100644 index 00000000..cdde0bfd --- /dev/null +++ b/connectivity/http/BUILD @@ -0,0 +1,12 @@ +cc_library( + name = "http", + srcs = ["http.c"], + hdrs = ["http.h"], + visibility = ["//visibility:public"], + deps = [ + "//accelerator/core:apis", + "//accelerator/core:proxy_apis", + "@entangled//utils:macros", + "@libmicrohttpd", + ], +) diff --git a/accelerator/http.c b/connectivity/http/http.c similarity index 78% rename from accelerator/http.c rename to connectivity/http/http.c index 6b69006b..0a41a76e 100644 --- a/accelerator/http.c +++ b/connectivity/http/http.c @@ -1,11 +1,12 @@ #include +#include #include #include #include #include -#include "accelerator/http.h" -#include "cJSON.h" +#include "http.h" +#include "utils/macros.h" #define HTTP_LOGGER "http" @@ -46,7 +47,7 @@ static status_t ta_http_url_matcher(char const *const url, char *const regex_rul // Did not match pattern ret = SC_HTTP_URL_NOT_MATCH; } else { - if (pmatch.rm_eo - pmatch.rm_so != strlen(url)) { + if ((size_t)(pmatch.rm_eo - pmatch.rm_so) != strlen(url)) { ret = SC_HTTP_URL_NOT_MATCH; } } @@ -124,23 +125,25 @@ static int set_response_content(status_t ret, char **json_result) { } static inline int process_find_txns_obj_by_tag_request(ta_http_t *const http, char const *const url, char **const out) { - status_t ret; - char *tag = NULL; - ret = ta_get_url_parameter(url, 1, &tag); - if (ret == SC_OK) { - ret = api_find_transactions_obj_by_tag(&http->core->iota_service, tag, out); - } - return set_response_content(ret, out); + status_t 
ret; + char *tag = NULL; + ret = ta_get_url_parameter(url, 1, &tag); + if (ret == SC_OK) { + ret = api_find_transactions_obj_by_tag(&http->core->iota_service, tag, out); + } + free(tag); + return set_response_content(ret, out); } static inline int process_find_txns_by_tag_request(ta_http_t *const http, char const *const url, char **const out) { - status_t ret; - char *tag = NULL; - ret = ta_get_url_parameter(url, 1, &tag); - if (ret == SC_OK) { - ret = api_find_transactions_by_tag(&http->core->iota_service, tag, out); - } - return set_response_content(ret, out); + status_t ret; + char *tag = NULL; + ret = ta_get_url_parameter(url, 1, &tag); + if (ret == SC_OK) { + ret = api_find_transactions_by_tag(&http->core->iota_service, tag, out); + } + free(tag); + return set_response_content(ret, out); } static inline int process_generate_address_request(ta_http_t *const http, char **const out) { @@ -154,8 +157,9 @@ static inline int process_find_txn_obj_single_request(ta_http_t *const http, cha char *hash = NULL; ret = ta_get_url_parameter(url, 1, &hash); if (ret == SC_OK) { - ret = api_find_transaction_object_single(&http->core->iota_service, hash, out); + ret = api_find_transaction_object_single(&http->core->iota_service, hash, out); } + free(hash); return set_response_content(ret, out); } @@ -179,7 +183,7 @@ static inline int process_get_tips_request(ta_http_t *const http, char **const o static inline int process_send_transfer_request(ta_http_t *const http, char const *const payload, char **const out) { status_t ret; - ret = api_send_transfer(&http->core->iota_conf, &http->core->iota_service, payload, out); + ret = api_send_transfer(http->core, payload, out); return set_response_content(ret, out); } @@ -190,9 +194,48 @@ static inline int process_recv_mam_msg_request(ta_http_t *const http, char const if (ret == SC_OK) { ret = api_receive_mam_message(&http->core->iota_conf, &http->core->iota_service, bundle, out); } + free(bundle); + return set_response_content(ret, out); 
+} +#ifdef DB_ENABLE +static inline int process_get_identity_info_by_hash_request(ta_http_t *const http, char const *const url, + char **const out) { + status_t ret = SC_OK; + char *hash = NULL; + ret = ta_get_url_parameter(url, 2, &hash); + if (ret == SC_OK) { + ret = api_get_identity_info_by_hash(&http->core->db_service, hash, out); + } + free(hash); + + return set_response_content(ret, out); +} + +static inline int process_get_identity_info_by_id_request(ta_http_t *const http, char const *const url, + char **const out) { + status_t ret; + char *buf = NULL; + ret = ta_get_url_parameter(url, 2, &buf); + if (ret == SC_OK) { + ret = api_get_identity_info_by_id(&http->core->db_service, buf, out); + } + free(buf); return set_response_content(ret, out); } +static inline int process_find_transaction_by_id_request(ta_http_t *const http, char const *const url, + char **const out) { + status_t ret; + char *buf = NULL; + ret = ta_get_url_parameter(url, 2, &buf); + if (ret == SC_OK) { + ret = api_find_transactions_by_id(&http->core->iota_service, &http->core->db_service, buf, out); + } + free(buf); + return set_response_content(ret, out); +} +#endif + static inline int process_mam_send_msg_request(ta_http_t *const http, char const *const payload, char **const out) { status_t ret; ret = api_mam_send_message(&http->core->iota_conf, &http->core->iota_service, payload, out); @@ -221,6 +264,7 @@ static inline int process_invalid_path_request(char **const out) { cJSON *json_obj = cJSON_CreateObject(); cJSON_AddStringToObject(json_obj, "message", "Invalid path"); *out = cJSON_PrintUnformatted(json_obj); + cJSON_Delete(json_obj); return MHD_HTTP_BAD_REQUEST; } @@ -228,6 +272,7 @@ static inline int process_method_not_allowed_request(char **const out) { cJSON *json_obj = cJSON_CreateObject(); cJSON_AddStringToObject(json_obj, "message", "Method not allowed"); *out = cJSON_PrintUnformatted(json_obj); + cJSON_Delete(json_obj); return MHD_HTTP_METHOD_NOT_ALLOWED; } @@ -235,6 +280,7 @@ 
static inline int process_options_request(char **const out) { cJSON *json_obj = cJSON_CreateObject(); cJSON_AddStringToObject(json_obj, "message", "OPTIONS request"); *out = cJSON_PrintUnformatted(json_obj); + cJSON_Delete(json_obj); return MHD_HTTP_OK; } @@ -249,17 +295,17 @@ static int ta_http_process_request(ta_http_t *const http, char const *const url, } else if (ta_http_url_matcher(url, "/mam[/]?") == SC_OK) { if (payload != NULL) { return process_mam_send_msg_request(http, payload, out); - } else { - return process_method_not_allowed_request(out); } + return process_method_not_allowed_request(out); + } else if (ta_http_url_matcher(url, "/transaction/[A-Z9]{81}[/]?") == SC_OK) { return process_find_txn_obj_single_request(http, url, out); } else if (ta_http_url_matcher(url, "/transaction/object[/]?") == SC_OK) { if (payload != NULL) { return process_find_txn_obj_request(http, payload, out); - } else { - return process_method_not_allowed_request(out); } + return process_method_not_allowed_request(out); + } else if (ta_http_url_matcher(url, "/tips/pair[/]?") == SC_OK) { return process_get_tips_pair_request(http, out); } else if (ta_http_url_matcher(url, "/tips[/]?") == SC_OK) { @@ -267,26 +313,39 @@ static int ta_http_process_request(ta_http_t *const http, char const *const url, } else if (ta_http_url_matcher(url, "/address[/]?") == SC_OK) { return process_generate_address_request(http, out); } else if (ta_http_url_matcher(url, "/tag/[A-Z9]{1,27}/hashes[/]?") == SC_OK) { - return process_find_txns_by_tag_request(http, url, out); + return process_find_txns_by_tag_request(http, url, out); } else if (ta_http_url_matcher(url, "/tag/[A-Z9]{1,27}[/]?") == SC_OK) { - return process_find_txns_obj_by_tag_request(http, url, out); - } else if (ta_http_url_matcher(url, "/transaction[/]?") == SC_OK) { + return process_find_txns_obj_by_tag_request(http, url, out); + } +#ifdef DB_ENABLE + else if (ta_http_url_matcher(url, "/identity/hash/[A-Z9]{81}[/]?") == SC_OK) { + return 
process_get_identity_info_by_hash_request(http, url, out); + } else if (ta_http_url_matcher(url, "/identity/id/[a-z0-9-]{36}[/]?") == SC_OK) { + return process_get_identity_info_by_id_request(http, url, out); + } else if (ta_http_url_matcher(url, "/transaction/id/[a-z0-9-]{36}[/]?") == SC_OK) { + return process_find_transaction_by_id_request(http, url, out); + } +#endif + else if (ta_http_url_matcher(url, "/transaction[/]?") == SC_OK) { if (payload != NULL) { return process_send_transfer_request(http, payload, out); - } else { - return process_method_not_allowed_request(out); } + return process_method_not_allowed_request(out); + } else if (ta_http_url_matcher(url, "/tryte[/]?") == SC_OK) { if (payload != NULL) { return process_send_trytes_request(http, payload, out); - } else { - return process_method_not_allowed_request(out); } + return process_method_not_allowed_request(out); + } else if (ta_http_url_matcher(url, "/info[/]?") == SC_OK) { - return process_get_ta_info_request(http, out); + return process_get_ta_info_request(http, out); } else if (ta_http_url_matcher(url, "/") == SC_OK) { - // POST request - return process_proxy_api_request(http, payload, out); + if (payload != NULL) { + return process_proxy_api_request(http, payload, out); + } + return process_method_not_allowed_request(out); + } else { ta_log_error("SC_HTTP_URL_NOT_MATCH : %s\n", url); return process_invalid_path_request(out); @@ -295,6 +354,7 @@ static int ta_http_process_request(ta_http_t *const http, char const *const url, } static int ta_http_header_iter(void *cls, enum MHD_ValueKind kind, const char *key, const char *value) { + UNUSED(kind); ta_http_request_t *header = cls; if (0 == strcmp(MHD_HTTP_HEADER_CONTENT_TYPE, key)) { @@ -304,6 +364,8 @@ static int ta_http_header_iter(void *cls, enum MHD_ValueKind kind, const char *k } static int request_log(void *cls, const struct sockaddr *addr, socklen_t addrlen) { + UNUSED(cls); + UNUSED(addrlen); char buf[30]; struct sockaddr_in *addr_ip = 
(struct sockaddr_in *)addr; char *ip = inet_ntoa(addr_ip->sin_addr); @@ -315,6 +377,7 @@ static int request_log(void *cls, const struct sockaddr *addr, socklen_t addrlen static int ta_http_handler(void *cls, struct MHD_Connection *connection, const char *url, const char *method, const char *version, const char *upload_data, size_t *upload_data_size, void **ptr) { + UNUSED(version); int ret = MHD_NO, req_ret = MHD_HTTP_OK; int post = 0, options = 0; ta_http_t *api = (ta_http_t *)cls; @@ -353,8 +416,14 @@ static int ta_http_handler(void *cls, struct MHD_Connection *connection, const c // While upload_data_size > 0 process upload_data if (*upload_data_size > 0) { if (http_req->request == NULL) { - http_req->request = (char *)malloc(*upload_data_size); + http_req->request = (char *)malloc((*upload_data_size) + 1); + if (http_req->request == NULL) { + ta_log_error("%s\n", "Not enough size for allocating HTTP request payload."); + goto cleanup; + } + strncpy(http_req->request, upload_data, *upload_data_size); + http_req->request[*upload_data_size] = 0; } else { ret = MHD_NO; ta_log_error("%s\n", "MHD_NO"); @@ -425,7 +494,7 @@ status_t ta_http_start(ta_http_t *const http) { MHD_start_daemon(MHD_USE_AUTO_INTERNAL_THREAD | MHD_USE_THREAD_PER_CONNECTION | MHD_USE_ERROR_LOG | MHD_USE_DEBUG, atoi(http->core->ta_conf.port), request_log, NULL, ta_http_handler, http, MHD_OPTION_END); if (http->daemon == NULL) { - ta_log_error("%s\n", "SC_HTTP_OOM"); + ta_log_error("%s\n", strerror(errno)); return SC_HTTP_OOM; } return SC_OK; diff --git a/accelerator/http.h b/connectivity/http/http.h similarity index 78% rename from accelerator/http.h rename to connectivity/http/http.h index 61026e8a..12d9c3f5 100644 --- a/accelerator/http.h +++ b/connectivity/http/http.h @@ -1,16 +1,19 @@ -#ifndef ACCELERATOR_HTTP_H_ -#define ACCELERATOR_HTTP_H_ +#ifndef HTTP_HTTP_H_ +#define HTTP_HTTP_H_ #include -#include "accelerator/apis.h" -#include "accelerator/config.h" -#include "accelerator/errors.h" 
-#include "accelerator/proxy_apis.h" +#include "accelerator/core/apis.h" +#include "accelerator/core/proxy_apis.h" #ifdef __cplusplus extern "C" { #endif +/** + * @file connectivity/http/http.h + * @brief Router of HTTP protocol + */ + typedef struct ta_http_s { void *daemon; ta_core_t *core; @@ -62,4 +65,4 @@ status_t ta_http_stop(ta_http_t *const http); } #endif -#endif // ACCELERATOR_HTTP_H_ +#endif // HTTP_HTTP_H_ diff --git a/connectivity/mqtt/BUILD b/connectivity/mqtt/BUILD index 69fb9cda..03910baf 100644 --- a/connectivity/mqtt/BUILD +++ b/connectivity/mqtt/BUILD @@ -1,10 +1,5 @@ -config_setting( - name = "mqtt_enable", - values = {"define": "mqtt=enable"}, -) - cc_library( - name = "mqtt_utils", + name = "mqtt", srcs = [ "duplex_callback.c", "duplex_utils.c", @@ -13,13 +8,11 @@ cc_library( "duplex_callback.h", "duplex_utils.h", ], - copts = ["-DMQTT_ENABLE"], visibility = ["//visibility:public"], deps = [ ":mqtt_common", - "//accelerator:apis", - "//accelerator:common_core", - "//accelerator:ta_errors", + "//accelerator/core:apis", + "//common:ta_errors", "@entangled//common/model:transaction", ], ) @@ -36,11 +29,10 @@ cc_library( "pub_utils.h", "sub_utils.h", ], - copts = ["-DMQTT_ENABLE"], - visibility = ["//serializer:__pkg__"], + visibility = ["//accelerator/core/serializer:__pkg__"], deps = [ "//accelerator:ta_config", - "//accelerator:ta_errors", + "//common:ta_errors", "//third_party:mosquitto", ], ) diff --git a/connectivity/mqtt/duplex_callback.c b/connectivity/mqtt/duplex_callback.c index 04b51953..01706936 100644 --- a/connectivity/mqtt/duplex_callback.c +++ b/connectivity/mqtt/duplex_callback.c @@ -63,7 +63,7 @@ static status_t mqtt_request_handler(mosq_config_t *cfg, char *subscribe_topic, mqtt_transaction_hash_req_deserialize(req, hash); ret = api_find_transaction_object_single(&ta_core.iota_service, hash, &json_result); } else if (!strncmp(p + 12, "send", 4)) { - ret = api_send_transfer(&ta_core.iota_conf, &ta_core.iota_service, req, 
&json_result); + ret = api_send_transfer(&ta_core, req, &json_result); } } else if ((p = strstr(api_sub_topic, "tips"))) { if (!strncmp(p + 5, "all", 3)) { diff --git a/connectivity/mqtt/duplex_callback.h b/connectivity/mqtt/duplex_callback.h index ce4445bc..e090c77c 100644 --- a/connectivity/mqtt/duplex_callback.h +++ b/connectivity/mqtt/duplex_callback.h @@ -6,15 +6,12 @@ * "LICENSE" at the root of this distribution. */ -#ifndef DUPLEX_CALLBACK_H -#define DUPLEX_CALLBACK_H +#ifndef MQTT_DUPLEX_CALLBACK_H_ +#define MQTT_DUPLEX_CALLBACK_H_ -#include "accelerator/apis.h" -#include "accelerator/common_core.h" +#include "accelerator/core/apis.h" #include "common/model/transaction.h" #include "duplex_utils.h" -#include "serializer/serializer.h" -#include "utils/logger.h" #ifdef __cplusplus extern "C" { @@ -22,6 +19,7 @@ extern "C" { /** * @file connectivity/mqtt/duplex_callbacks.h + * @brief Callback functions to handle MQTT requests */ /** @@ -57,4 +55,4 @@ status_t duplex_callback_func_set(struct mosquitto *mosq); } #endif -#endif // DUPLEX_CALLBACK_H +#endif // MQTT_DUPLEX_CALLBACK_H_ diff --git a/connectivity/mqtt/duplex_utils.c b/connectivity/mqtt/duplex_utils.c index 29b1e591..bc3a4db4 100644 --- a/connectivity/mqtt/duplex_utils.c +++ b/connectivity/mqtt/duplex_utils.c @@ -10,7 +10,7 @@ #include #include #include -#include "utils/logger.h" +#include "common/logger.h" #define MQTT_UTILS_LOGGER "mqtt-utils" diff --git a/connectivity/mqtt/duplex_utils.h b/connectivity/mqtt/duplex_utils.h index c499a1b3..16d8bbf2 100644 --- a/connectivity/mqtt/duplex_utils.h +++ b/connectivity/mqtt/duplex_utils.h @@ -6,8 +6,8 @@ * "LICENSE" at the root of this distribution. 
*/ -#ifndef DUPLEX_UTILS_H -#define DUPLEX_UTILS_H +#ifndef MQTT_DUPLEX_UTILS_H_ +#define MQTT_DUPLEX_UTILS_H_ #include "pub_utils.h" #include "sub_utils.h" @@ -18,6 +18,7 @@ extern "C" { /** * @file connectivity/mqtt/duplex_utils.h + * @brief MQTT read/write client */ /** @@ -113,4 +114,4 @@ status_t duplex_client_start(struct mosquitto *loop_mosq, mosq_config_t *loop_cf } #endif -#endif // DUPLEX_UTILS_H +#endif // MQTT_DUPLEX_UTILS_H_ diff --git a/connectivity/mqtt/mqtt_common.c b/connectivity/mqtt/mqtt_common.c index 4515943c..3de1a0ac 100644 --- a/connectivity/mqtt/mqtt_common.c +++ b/connectivity/mqtt/mqtt_common.c @@ -11,7 +11,7 @@ #include #include #include -#include "utils/logger.h" +#include "common/logger.h" #define MQTT_COMMON_LOGGER "mqtt-common" static logger_id_t logger_id; diff --git a/connectivity/mqtt/mqtt_common.h b/connectivity/mqtt/mqtt_common.h index 700904d9..d70649de 100644 --- a/connectivity/mqtt/mqtt_common.h +++ b/connectivity/mqtt/mqtt_common.h @@ -6,15 +6,12 @@ * "LICENSE" at the root of this distribution. */ -#ifndef CLIENT_CONFIG_H -#define CLIENT_CONFIG_H +#ifndef MQTT_MQTT_COMMON_H_ +#define MQTT_MQTT_COMMON_H_ #include -#include "accelerator/errors.h" - -#ifdef MQTT_ENABLE +#include "common/ta_errors.h" #include "third_party/mosquitto/lib/mosquitto.h" -#endif #ifdef __cplusplus extern "C" { @@ -22,6 +19,7 @@ extern "C" { /** * @file connectivity/mqtt/mqtt_common.h + * @brief Common functions and configures of MQTT clients. 
*/ #define ID_LEN 32 @@ -29,7 +27,6 @@ extern "C" { typedef enum client_type_s { client_pub, client_sub, client_duplex } client_type_t; -#ifdef MQTT_ENABLE typedef enum mosq_err_t mosq_retcode_t; // typedef the original enum typedef struct mosq_general_config_s { @@ -209,10 +206,9 @@ status_t mosq_client_connect(struct mosquitto *mosq, mosq_config_t *cfg); * - non-zero on error */ status_t cfg_add_topic(mosq_config_t *cfg, client_type_t client_type, char *topic); -#endif #ifdef __cplusplus } #endif -#endif // CLIENT_CONFIG_H +#endif // MQTT_MQTT_COMMON_H_ diff --git a/connectivity/mqtt/pub_utils.c b/connectivity/mqtt/pub_utils.c index 20218dfb..89bfcf5a 100644 --- a/connectivity/mqtt/pub_utils.c +++ b/connectivity/mqtt/pub_utils.c @@ -10,7 +10,7 @@ #include #include #include -#include "utils/logger.h" +#include "common/logger.h" #define MQTT_PUB_LOGGER "mqtt-pub" static logger_id_t logger_id; diff --git a/connectivity/mqtt/pub_utils.h b/connectivity/mqtt/pub_utils.h index 75d8a42e..2b6efdaf 100644 --- a/connectivity/mqtt/pub_utils.h +++ b/connectivity/mqtt/pub_utils.h @@ -6,8 +6,8 @@ * "LICENSE" at the root of this distribution. */ -#ifndef PUB_UTILS_H -#define PUB_UTILS_H +#ifndef MQTT_PUB_UTILS_H_ +#define MQTT_PUB_UTILS_H_ #include "mqtt_common.h" #undef uthash_free @@ -21,6 +21,7 @@ extern "C" { /** * @file connectivity/mqtt/pub_utils.h + * @brief MQTT publisher utilities. 
*/ /** @@ -83,4 +84,4 @@ status_t init_check_error(mosq_config_t *cfg, client_type_t client_type); } #endif -#endif // PUB_UTILS_H +#endif // MQTT_PUB_UTILS_H_ diff --git a/connectivity/mqtt/sub_utils.c b/connectivity/mqtt/sub_utils.c index 7e8d38b4..5535cc6b 100644 --- a/connectivity/mqtt/sub_utils.c +++ b/connectivity/mqtt/sub_utils.c @@ -10,7 +10,7 @@ #include #include #include -#include "utils/logger.h" +#include "common/logger.h" #define MQTT_SUB_LOGGER "mqtt-sub" static logger_id_t logger_id; diff --git a/connectivity/mqtt/sub_utils.h b/connectivity/mqtt/sub_utils.h index 508ee3d8..375c957a 100644 --- a/connectivity/mqtt/sub_utils.h +++ b/connectivity/mqtt/sub_utils.h @@ -6,8 +6,8 @@ * "LICENSE" at the root of this distribution. */ -#ifndef SUB_UTILS_H -#define SUB_UTILS_H +#ifndef MQTT_SUB_UTILS_H_ +#define MQTT_SUB_UTILS_H_ #include "mqtt_common.h" #include "third_party/mosquitto/config.h" @@ -18,6 +18,7 @@ extern "C" { /** * @file connectivity/mqtt/sub_utils.h + * @brief MQTT subscriber utilities. */ /** @@ -72,4 +73,4 @@ void subscribe_callback_sub_func(struct mosquitto *mosq, void *obj, int mid, int } #endif -#endif // SUB_UTILS_H +#endif // MQTT_SUB_UTILS_H_ diff --git a/MQTT_server.md b/docs/MQTT-mode-intro.md similarity index 100% rename from MQTT_server.md rename to docs/MQTT-mode-intro.md diff --git a/docs/build.md b/docs/build.md new file mode 100644 index 00000000..30cbde1b --- /dev/null +++ b/docs/build.md @@ -0,0 +1,87 @@ +# Building Options + +## Build Docker Images + +If you prefer building a docker image, tangle-accelerator also provides build rules for it. Note that you still have to edit configurations in `accelerator/config.h`. + +``` +$ make && bazel run //accelerator:ta_image +``` + +There's also an easier option to pull image from docker hub then simply run with default configs. Please do remember a redis-server is still required in this way. 
+ +``` +$ docker run -d --net=host --name tangle-accelerator dltcollab/tangle-accelerator +``` + +## Build and Push Docker Image to Docker Hub + +Before pushing the docker image to Docker Hub, you need to log in the docker registry: + +``` +$ docker login +``` + +Then you could push the docker image with the following command: + +``` +$ make && bazel run //accelerator:push_docker +``` + +If you get the following error message: + +``` +SyntaxError: invalid syntax +---------------- +Note: The failure of target @containerregistry//:digester (with exit code 1) may have been caused by the fact that it is running under Python 3 instead of Python 2. Examine the error to determine if that appears to be the problem. Since this target is built in the host configuration, the only way to change its version is to set --host_force_python=PY2, which affects the entire build. + +If this error started occurring in Bazel 0.27 and later, it may be because the Python toolchain now enforces that targets analyzed as PY2 and PY3 run under a Python 2 and Python 3 interpreter, respectively. See https://github.com/bazelbuild/bazel/issues/7899 for more information. +------------ +``` + +Use the `--host_force_python=PY2` parameter to force the Bazel to use the Python2 in entire build. + +``` +$ make && bazel run //accelerator:push_docker --host_force_python=PY2 +``` + +## Enable MQTT connectivity +MQTT connectivity is an optional feature allowing IoT endpoint devices to collaborate with `tangle-accelerator`. + +``` +make MQTT && bazel run --define mqtt=enable //accelerator +``` + +Note you may need to set up the `MQTT_HOST` and `TOPIC_ROOT` in `config.h` to connect to a MQTT broker, or you can use CLI option `--mqtt_host`, and `--mqtt_root` to set MQTT broker address and MQTT topic root, respectively. +For more information for MQTT connectivity of `tangle-accelerator`, you could read `connectivity/mqtt/usage.md`. 
+ +## Enable external database for transaction reattachment +Transaction reattachment is an optional feature. + +You can enable it in the build time with command : + +``` +make && bazel run --define db=enable //accelerator +``` + +When enabling reattachment, every transaction issues from the `tangle-accelerator` API called `Send Transfer Message` will be stored in the specific ScyllaDB host and response a UUID string for each transfer message as the identifier. With a promoting process that monitors the status of storing transactions, persistent pending transactions will be reattached to the Tangle. + +Transaction reattachment relies on ScyllDB, you need to install the dependency by following commands. + +For Ubuntu Linux 16.04/x86_64: + +``` +wget https://downloads.datastax.com/cpp-driver/ubuntu/16.04/cassandra/v2.14.1/cassandra-cpp-driver_2.14.1-1_amd64.deb +wget https://downloads.datastax.com/cpp-driver/ubuntu/16.04/cassandra/v2.14.1/cassandra-cpp-driver-dev_2.14.1-1_amd64.deb +sudo dpkg -i cassandra-cpp-driver_2.14.1-1_amd64.deb +sudo dpkg -i cassandra-cpp-driver-dev_2.14.1-1_amd64.deb +``` + +For Ubuntu Linux 18.04/x86_64: + +``` +wget https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.14.1/cassandra-cpp-driver_2.14.1-1_amd64.deb +wget https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.14.1/cassandra-cpp-driver-dev_2.14.1-1_amd64.deb +sudo dpkg -i cassandra-cpp-driver_2.14.1-1_amd64.deb +sudo dpkg -i cassandra-cpp-driver-dev_2.14.1-1_amd64.deb +``` \ No newline at end of file diff --git a/docs/reattacher.md b/docs/reattacher.md new file mode 100644 index 00000000..945e677f --- /dev/null +++ b/docs/reattacher.md @@ -0,0 +1,22 @@ +# Transaction reattacher + +`Transaction reattacher` is a service that helps persistent pending transactions to be re-attached to the Tangle. A persistent transaction is a transaction that does not be confirmed more than 30 minutes. 
+ +When enabling the external database for transaction reattachment, `Tangle-Accelerator` will store transactions issued by API [Send Transfer Message](https://github.com/DLTcollab/tangle-accelerator/wiki/Send-Transfer-Message). + +`Transaction reattacher` will periodically read pending transactions from a specific ScyllaDB cluster, and get the latest inclusion status of those transactions from an IOTA full node. `Reattacher` will update the newest inclusion status to the ScyllaDB cluster. For persistent transactions, `reattacher` performs reattachment, which will do tips selection and PoW for the original bundle, and reattach it to the Tangle. After reattachment, `reattacher` will update the new transaction hash to the ScyllaDB cluster. + + +See [docs/build.md] for more information about enabling transaction reattachment. + +## Build Instructions + +`bazel build //reattacher` + +The reattacher support following options : + +* `DB_HOST`: binding address of ScyllDB cluster +* `IRI_HOST`: binding address of IRI +* `IRI_PORT`: port of IRI + +If you do not specify `DB_HOST` or `IRI_HOST`, the address will be set as `localhost`. 
diff --git a/hooks/formatter b/hooks/formatter index b3f4f733..da80af2c 100755 --- a/hooks/formatter +++ b/hooks/formatter @@ -4,3 +4,8 @@ for file in $(find $(git rev-parse --show-toplevel) | grep -E "\.(c|cc|cpp|h|hh| do clang-format -style=file -fallback-style=none -i $file done + +for file in $(find $(git rev-parse --show-toplevel) | grep -E "\BUILD\$" | grep -Ev "/third_party/") +do + buildifier $file +done diff --git a/map/BUILD b/map/BUILD deleted file mode 100644 index 2bc145bc..00000000 --- a/map/BUILD +++ /dev/null @@ -1,17 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -cc_library( - name = "mode", - srcs = ["mode.c"], - hdrs = ["mode.h"], - deps = [ - "//accelerator:common_core", - "//request", - "//response", - "@entangled//common/model:bundle", - "@entangled//common/trinary:flex_trit", - "@entangled//common/trinary:tryte_ascii", - "@entangled//mam/api", - "@entangled//utils/containers/hash:hash_array", - ], -) diff --git a/reattacher/BUILD b/reattacher/BUILD new file mode 100644 index 00000000..f3940da3 --- /dev/null +++ b/reattacher/BUILD @@ -0,0 +1,9 @@ +cc_binary( + name = "reattacher", + srcs = ["reattacher_main.c"], + deps = [ + "//accelerator:ta_config", + "//storage", + "@entangled//cclient/api", + ], +) diff --git a/reattacher/reattacher_main.c b/reattacher/reattacher_main.c new file mode 100644 index 00000000..c734ed10 --- /dev/null +++ b/reattacher/reattacher_main.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2020 BiiLabs Co., Ltd. and Contributors + * All Rights Reserved. + * This is free software; you can redistribute it and/or modify it under the + * terms of the MIT license. A copy of the license can be found in the file + * "LICENSE" at the root of this distribution. 
+ */ +#include +#include " +#include "accelerator/config.h" +#include "cclient/api/core/core_api.h" +#include "cclient/api/extended/extended_api.h" +#include "common/model/bundle.h" +#include "storage/ta_storage.h" + +#define PRESISTENT_PENDING_SECOND 1800 /**< 30 mins */ +#define DELAY_INTERVAL 300 /**< 5 mins */ + +#define logger_id scylladb_logger_id + +static status_t init_iota_client_service(iota_client_service_t* const serv) { + if (serv == NULL) { + ta_log_error("Invalid NULL pointer\n"); + return SC_TA_NULL; + } + serv->http.path = "/"; + serv->http.content_type = "application/json"; + serv->http.accept = "application/json"; + serv->http.api_version = 1; + serv->http.ca_pem = NULL; + serv->serializer_type = SR_JSON; + if (iota_client_core_init(serv) != RC_OK) { + ta_log_error("Failed to connect to IRI.\n"); + return SC_TA_OOM; + } + return SC_OK; +} + +static status_t handle_pending_txn(iota_client_service_t* iota_service, db_client_service_t* db_service, + db_identity_t* obj) { + status_t ret = SC_OK; + hash243_queue_t req_txn = NULL; + get_inclusion_states_res_t* res = get_inclusion_states_res_new(); + flex_trit_t trit_hash[NUM_TRITS_HASH]; + char tryte_hash[NUM_TRYTES_HASH]; + flex_trits_from_trytes(trit_hash, NUM_TRITS_HASH, (const tryte_t*)db_ret_identity_hash(obj), NUM_TRYTES_HASH, + NUM_TRYTES_HASH); + + hash243_queue_push(&req_txn, trit_hash); + + if (iota_client_get_latest_inclusion(iota_service, req_txn, res) != RC_OK || + get_inclusion_states_res_states_count(res) != 1) { + ret = SC_CCLIENT_FAILED_RESPONSE; + ta_log_error("Failed to get inclustion status\n"); + db_show_identity_info(obj); + goto exit; + } + if (get_inclusion_states_res_states_at(res, 0)) { + // confirmed transaction + ta_log_info("Find confirmed transaction\n"); + db_set_identity_status(obj, CONFIRMED_TXN); + ret = db_insert_identity_table(db_service, obj); + if (ret != SC_OK) { + ta_log_error("Failed to insert identity table\n"); + db_show_identity_info(obj); + goto exit; + } + 
} else if (db_ret_identity_time_elapsed(obj) > PRESISTENT_PENDING_SECOND) { + // reattach + ta_log_info("Reattach pending transaction\n"); + db_show_identity_info(obj); + bundle_transactions_t* res_bundle_txn; + bundle_transactions_new(&res_bundle_txn); + + if (iota_client_replay_bundle(iota_service, trit_hash, MILESTONE_DEPTH, MWM, NULL, res_bundle_txn) != RC_OK) { + ta_log_error("Failed to reattach to Tangle\n"); + db_show_identity_info(obj); + ret = SC_CCLIENT_FAILED_RESPONSE; + goto reattach_done; + } + + /** + * < get the second transaction in the bundle, + * the first transaction is the original transaction before reattachment + */ + iota_transaction_t* txn = bundle_at(res_bundle_txn, 1); + flex_trits_to_trytes((tryte_t*)tryte_hash, NUM_TRYTES_HASH, transaction_hash(txn), NUM_TRITS_HASH, NUM_TRITS_HASH); + + db_set_identity_hash(obj, (cass_byte_t*)tryte_hash, NUM_TRYTES_HASH); + db_set_identity_timestamp(obj, time(NULL)); + + ret = db_insert_identity_table(db_service, obj); + if (ret != SC_OK) { + ta_log_error("Failed to insert identity table\n"); + goto exit; + } + + reattach_done: + bundle_transactions_free(&res_bundle_txn); + } + +exit: + hash243_queue_free(&req_txn); + get_inclusion_states_res_free(&res); + + return ret; +} + +int main(int argc, char** argv) { + int optIdx; + db_client_service_t db_service; + iota_client_service_t iota_service; + db_service.host = strdup("localhost"); + iota_service.http.host = "localhost"; + iota_service.http.port = 14265; + + const struct option longOpt[] = {{"iri_host", required_argument, NULL, 'h'}, + {"iri_port", required_argument, NULL, 'p'}, + {"db_host", required_argument, NULL, 'd'}, + {NULL, 0, NULL, 0}}; + + /* Parse the command line options */ + /* TODO: Support macOS since getopt_long() is GNU extension */ + while (1) { + int cmdOpt = getopt_long(argc, argv, "b:", longOpt, &optIdx); + if (cmdOpt == -1) break; + + /* Invalid option */ + if (cmdOpt == '?') continue; + + if (cmdOpt == 'h') { + 
iota_service.http.host = optarg; + } + if (cmdOpt == 'p') { + iota_service.http.port = atoi(optarg); + } + if (cmdOpt == 'd') { + free(db_service.host); + db_service.host = strdup(optarg); + } + } + if (ta_logger_init() != SC_OK) { + ta_log_error("logger init fail\n"); + return EXIT_FAILURE; + } + scylladb_logger_init(); + if (db_client_service_init(&db_service, DB_USAGE_REATTACH) != SC_OK) { + ta_log_error("Failed to init db client service\n"); + return EXIT_FAILURE; + } + if (init_iota_client_service(&iota_service) != SC_OK) { + ta_log_error("Failed to init iota client service\n"); + return EXIT_FAILURE; + } + while (1) { + db_identity_array_t* id_array = db_identity_array_new(); + db_get_identity_objs_by_status(&db_service, PENDING_TXN, id_array); + db_identity_t* itr; + IDENTITY_TABLE_ARRAY_FOREACH(id_array, itr) { + if (handle_pending_txn(&iota_service, &db_service, itr) != SC_OK) { + ta_log_warning("Failed to handle pending transaction\n"); + db_show_identity_info(itr); + } + } + db_identity_array_free(&id_array); + sleep(DELAY_INTERVAL); + } + + db_client_service_free(&db_service); + iota_client_core_destroy(&iota_service); + scylladb_logger_release(); + return 0; +} diff --git a/storage/BUILD b/storage/BUILD index 8945fe0d..82efb277 100644 --- a/storage/BUILD +++ b/storage/BUILD @@ -1,15 +1,8 @@ -package(default_visibility = ["//visibility:public"]) - -config_setting( - name = "db_enable", - values = {"define": "db=enable"}, -) - cc_library( name = "storage", hdrs = ["ta_storage.h"], + visibility = ["//visibility:public"], deps = [ - ":scylladb_client", ":scylladb_identity", ":scylladb_permanode", ], @@ -19,11 +12,9 @@ cc_library( name = "scylladb_identity", srcs = ["scylladb_identity.c"], hdrs = ["scylladb_identity.h"], - copts = ["-DLOGGER_ENABLE"], linkopts = ["-lcassandra"], deps = [ ":scylladb_client", - ":scylladb_utils", "@com_github_uthash//:uthash", ], ) @@ -32,11 +23,9 @@ cc_library( name = "scylladb_permanode", srcs = ["scylladb_permanode.c"], 
hdrs = ["scylladb_permanode.h"], - copts = ["-DLOGGER_ENABLE"], linkopts = ["-lcassandra"], deps = [ ":scylladb_client", - ":scylladb_utils", "@com_github_uthash//:uthash", "@entangled//utils/containers/hash:hash243_queue", ], @@ -48,7 +37,6 @@ cc_library( hdrs = [ "scylladb_client.h", ], - copts = ["-DLOGGER_ENABLE"], linkopts = [ "-lcassandra", ], @@ -61,11 +49,9 @@ cc_library( name = "scylladb_utils", srcs = ["scylladb_utils.c"], hdrs = ["scylladb_utils.h"], - copts = ["-DLOGGER_ENABLE"], linkopts = ["-lcassandra"], deps = [ - "//accelerator:ta_errors", - "//utils:ta_logger", + "//common", "@entangled//common/model:bundle", ], ) diff --git a/storage/scylladb_client.c b/storage/scylladb_client.c index cbe4b2b5..114ff6d0 100644 --- a/storage/scylladb_client.c +++ b/storage/scylladb_client.c @@ -9,6 +9,21 @@ #include "scylladb_utils.h" #define logger_id scylladb_logger_id +static struct db_keyspace_names_s { + db_client_usage_t usage; + const char* name; +} db_keyspace_names[] = {{DB_USAGE_REATTACH, "reattachment"}}; +static const int db_keyspace_name_nums = sizeof(db_keyspace_names) / sizeof(struct db_keyspace_names_s); + +static const char* get_keyspace_name(db_client_usage_t usage) { + for (int i = 0; i < db_keyspace_name_nums; i++) { + if (db_keyspace_names[i].usage == usage) { + return db_keyspace_names[i].name; + } + } + return NULL; +} + static void print_error(CassFuture* future) { const char* message; size_t message_length; @@ -22,9 +37,14 @@ static CassCluster* create_cluster(const char* hosts) { return cluster; } -static CassError connect_session(CassSession* session, const CassCluster* cluster) { +static CassError connect_session(CassSession* session, const CassCluster* cluster, const char* keyspace_name) { CassError rc = CASS_OK; - CassFuture* future = cass_session_connect(session, cluster); + CassFuture* future; + if (keyspace_name == NULL) { + future = cass_session_connect(session, cluster); + } else { + future = 
cass_session_connect_keyspace(session, cluster, keyspace_name); + } cass_future_wait(future); rc = cass_future_error_code(future); @@ -36,7 +56,7 @@ static CassError connect_session(CassSession* session, const CassCluster* cluste return rc; } -status_t db_client_service_init(db_client_service_t* service) { +status_t db_client_service_init(db_client_service_t* service, db_client_usage_t usage) { if (service == NULL) { ta_log_error("NULL pointer to ScyllaDB client service for connection endpoint(s)\n"); return SC_TA_NULL; @@ -50,7 +70,8 @@ status_t db_client_service_init(db_client_service_t* service) { service->uuid_gen = cass_uuid_gen_new(); service->session = cass_session_new(); service->cluster = create_cluster(service->host); - if (connect_session(service->session, service->cluster) != CASS_OK) { + const char* keyspace_name = get_keyspace_name(usage); + if (connect_session(service->session, service->cluster, keyspace_name) != CASS_OK) { ta_log_error("connect ScyllaDB cluster with host : %s failed\n", service->host); service->enabled = false; return SC_STORAGE_CONNECT_FAIL; diff --git a/storage/scylladb_client.h b/storage/scylladb_client.h index aa135f9a..63ab92b5 100644 --- a/storage/scylladb_client.h +++ b/storage/scylladb_client.h @@ -5,15 +5,28 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. */ -#ifndef TA_SCYLLADB_CLIENT_H_ -#define TA_SCYLLADB_CLIENT_H_ +#ifndef STORAGE_SCYLLADB_CLIENT_H_ +#define STORAGE_SCYLLADB_CLIENT_H_ #ifdef __cplusplus extern "C" { #endif #include "scylladb_utils.h" +/** + * @file storage/scylladb_client.h + * @brief ScyllaDB client service for connection and executing queries. + */ + #define DB_UUID_STRING_LENGTH CASS_UUID_STRING_LENGTH + +/* + * DB_USAGE_NULL is for non-preserved usage and user-defined keyspace. + * Call db_init_identity_keyspace to give the keyspace name. 
+ * This method could be enhanced by supporting user-defined keyspace name when + * specific the preserved usage. + */ +typedef enum { DB_USAGE_REATTACH = 0, DB_USAGE_NULL, NUM_DB_USAGE } db_client_usage_t; typedef struct { CassCluster* cluster; CassSession* session; @@ -27,11 +40,12 @@ typedef struct { * @brief init ScyllaDB client serivce and connect to specific cluster * * @param[out] service ScyllaDB client service + * @param[in] usage specfic usage for db client serivce * @return * - SC_OK on success * - non-zero on error */ -status_t db_client_service_init(db_client_service_t* service); +status_t db_client_service_init(db_client_service_t* service, db_client_usage_t usage); /** * @brief free ScyllaDB client serivce @@ -47,4 +61,4 @@ status_t db_client_service_free(db_client_service_t* service); } #endif -#endif // TA_SCYLLADB_CLIENT_H_ +#endif // STORAGE_SCYLLADB_CLIENT_H_ diff --git a/storage/scylladb_identity.c b/storage/scylladb_identity.c index f7db4d38..0b84dc8e 100644 --- a/storage/scylladb_identity.c +++ b/storage/scylladb_identity.c @@ -10,16 +10,30 @@ #include "time.h" #define logger_id scylladb_logger_id - #define DB_IDENTITY_UUID_VERSION 4 struct db_identity_s { CassUuid uuid; cass_int64_t timestamp; cass_int8_t status; - cass_byte_t hash[NUM_FLEX_TRITS_BUNDLE]; + cass_byte_t hash[DB_NUM_TRYTES_HASH]; }; +status_t db_show_identity_info(db_identity_t* obj) { + if (obj == NULL) { + ta_log_error("Invaild NULL pointer : obj\n"); + return SC_TA_NULL; + } + char uuid_string[DB_UUID_STRING_LENGTH]; + char hash[DB_NUM_TRYTES_HASH + 1]; + db_get_identity_uuid_string(obj, uuid_string); + memcpy(hash, obj->hash, DB_NUM_TRYTES_HASH); + hash[DB_NUM_TRYTES_HASH] = 0; + ta_log_info("identity info\n uuid string : %s\nhash :%s\ntimestamp : %ld\ntime eclapsed : %ld\nstatus : %d\n", + uuid_string, hash, obj->timestamp, db_ret_identity_time_elapsed(obj), obj->status); + return SC_OK; +} + status_t db_identity_new(db_identity_t** obj) { *obj = 
(db_identity_t*)malloc(sizeof(struct db_identity_s)); if (NULL == *obj) { @@ -122,11 +136,11 @@ status_t db_set_identity_hash(db_identity_t* obj, const cass_byte_t* hash, size_ if (hash == NULL) { ta_log_error("NULL pointer to hash to insert into identity table\n"); } - if (length != NUM_FLEX_TRITS_HASH) { + if (length != DB_NUM_TRYTES_HASH) { ta_log_error("SC_STORAGE_INVAILD_INPUT\n"); return SC_STORAGE_INVAILD_INPUT; } - memcpy(obj->hash, hash, NUM_FLEX_TRITS_HASH); + memcpy(obj->hash, hash, DB_NUM_TRYTES_HASH); return SC_OK; } @@ -153,13 +167,7 @@ static void print_error(CassFuture* future) { ta_log_error("Error: %.*s\n", (int)message_length, message); } -static status_t create_identity_table(CassSession* session, bool need_drop) { - if (need_drop) { - if (execute_query(session, "DROP TABLE IF EXISTS identity;") != CASS_OK) { - ta_log_error("drop identity table fail\n"); - return SC_STORAGE_CASSANDRA_QUREY_FAIL; - } - } +static status_t create_identity_table(CassSession* session, bool need_truncate) { if (execute_query(session, "CREATE TABLE IF NOT EXISTS identity(" "id uuid, ts timestamp, hash blob, status tinyint, PRIMARY KEY (id));") != CASS_OK) { @@ -174,6 +182,12 @@ static status_t create_identity_table(CassSession* session, bool need_drop) { ta_log_error("create identity table index fail\n"); return SC_STORAGE_CASSANDRA_QUREY_FAIL; } + if (need_truncate) { + if (db_truncate_table(session, "identity") != SC_OK) { + ta_log_error("truncate identity table fail\n"); + return SC_STORAGE_CASSANDRA_QUREY_FAIL; + } + } return SC_OK; } @@ -187,7 +201,7 @@ status_t db_insert_tx_into_identity(db_client_service_t* service, const char* ha ta_log_error("db new identity failed\n"); goto exit; } - if ((ret = db_set_identity_hash(identity, (cass_byte_t*)hash, NUM_FLEX_TRITS_HASH)) != SC_OK) { + if ((ret = db_set_identity_hash(identity, (cass_byte_t*)hash, DB_NUM_TRYTES_HASH)) != SC_OK) { ta_log_error("db set identity hash failed\n"); goto exit; } @@ -214,7 +228,7 @@ 
status_t db_insert_tx_into_identity(db_client_service_t* service, const char* ha return ret; } -status_t db_init_identity_keyspace(db_client_service_t* service, bool need_drop, const char* keyspace_name) { +status_t db_init_identity_keyspace(db_client_service_t* service, bool need_truncate, const char* keyspace_name) { status_t ret = SC_OK; CassStatement* use_statement = NULL; char* use_query = NULL; @@ -237,8 +251,7 @@ status_t db_init_identity_keyspace(db_client_service_t* service, bool need_drop, ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; goto exit; } - - if ((ret = create_identity_table(service->session, need_drop)) != SC_OK) { + if ((ret = create_identity_table(service->session, need_truncate)) != SC_OK) { ta_log_error("%s\n", "create identity table fail"); goto exit; } @@ -251,7 +264,7 @@ status_t db_init_identity_keyspace(db_client_service_t* service, bool need_drop, static CassStatement* ret_insert_identity_statement(const CassPrepared* prepared, const db_identity_t* obj) { CassStatement* statement = NULL; statement = cass_prepared_bind(prepared); - cass_statement_bind_bytes_by_name(statement, "hash", obj->hash, NUM_FLEX_TRITS_HASH); + cass_statement_bind_bytes_by_name(statement, "hash", obj->hash, DB_NUM_TRYTES_HASH); cass_statement_bind_int8_by_name(statement, "status", obj->status); cass_statement_bind_int64_by_name(statement, "ts", obj->timestamp); cass_statement_bind_uuid_by_name(statement, "id", obj->uuid); @@ -262,9 +275,7 @@ status_t db_insert_identity_table(db_client_service_t* service, db_identity_t* o status_t ret = SC_OK; const CassPrepared* insert_prepared = NULL; CassStatement* statement = NULL; - const char* insert_query = - "INSERT INTO identity (id, ts, status, hash)" - "VALUES (?, ?, ?, ?);"; + const char* query = "UPDATE identity Set ts = ?, status = ?, hash =? 
Where id = ?"; if (service == NULL) { ta_log_error("NULL pointer to ScyllaDB client service for connection endpoint(s)"); return SC_TA_NULL; @@ -274,7 +285,7 @@ status_t db_insert_identity_table(db_client_service_t* service, db_identity_t* o return SC_TA_NULL; } - if (prepare_query(service->session, insert_query, &insert_prepared) != CASS_OK) { + if (prepare_query(service->session, query, &insert_prepared) != CASS_OK) { ta_log_error("%s\n", "prepare INSERT query fail"); return SC_STORAGE_CASSANDRA_QUREY_FAIL; } @@ -298,17 +309,20 @@ static status_t get_identity_array(CassSession* session, CassStatement* statemen CassIterator* iterator; db_identity_t* identity = NULL; if ((ret = db_identity_new(&identity)) != SC_OK) { + ta_log_error("fail to create db idenetity\n"); goto exit; } + future = cass_session_execute(session, statement); - cass_future_wait(future); + if (cass_future_error_code(future) != CASS_OK) { print_error(future); - ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; + cass_future_free(future); goto exit; } result = cass_future_get_result(future); + cass_future_free(future); iterator = cass_iterator_from_result(result); while (cass_iterator_next(iterator)) { @@ -324,7 +338,7 @@ static status_t get_identity_array(CassSession* session, CassStatement* statemen itr = (db_identity_t*)utarray_back(identity_array); cass_value_get_bytes(cass_row_get_column_by_name(row, "hash"), &hash, &len); - db_set_identity_hash(itr, hash, NUM_FLEX_TRITS_HASH); + db_set_identity_hash(itr, hash, DB_NUM_TRYTES_HASH); cass_value_get_int8(cass_row_get_column_by_name(row, "status"), &value); db_set_identity_status(itr, value); cass_value_get_int64(cass_row_get_column_by_name(row, "ts"), &ts); @@ -337,9 +351,12 @@ static status_t get_identity_array(CassSession* session, CassStatement* statemen cass_iterator_free(iterator); exit: - cass_future_free(future); cass_statement_free(statement); db_identity_free(&identity); + if ((db_identity_t*)utarray_front(identity_array) == NULL) { + 
ta_log_error("no identity is found\n"); + return SC_STORAGE_INVAILD_INPUT; + } return ret; } @@ -370,10 +387,12 @@ status_t db_get_identity_objs_by_uuid_string(db_client_service_t* service, const const char* query = "SELECT * FROM identity WHERE id = ?;"; CassUuid uuid; cass_uuid_from_string(uuid_string, &uuid); + if (prepare_query(service->session, query, &select_prepared) != CASS_OK) { ta_log_error("%s\n", "prepare SELECT query fail"); return SC_STORAGE_CASSANDRA_QUREY_FAIL; } + statement = cass_prepared_bind(select_prepared); cass_statement_bind_uuid_by_name(statement, "id", uuid); get_identity_array(service->session, statement, identity_array); @@ -394,8 +413,8 @@ status_t db_get_identity_objs_by_hash(db_client_service_t* service, const cass_b return SC_STORAGE_CASSANDRA_QUREY_FAIL; } statement = cass_prepared_bind(select_prepared); - cass_statement_bind_bytes_by_name(statement, "hash", hash, NUM_FLEX_TRITS_HASH); - get_identity_array(service->session, statement, identity_array); + cass_statement_bind_bytes_by_name(statement, "hash", hash, DB_NUM_TRYTES_HASH); + ret = get_identity_array(service->session, statement, identity_array); cass_prepared_free(select_prepared); return ret; diff --git a/storage/scylladb_identity.h b/storage/scylladb_identity.h index 03e3db77..a651163b 100644 --- a/storage/scylladb_identity.h +++ b/storage/scylladb_identity.h @@ -5,20 +5,34 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. */ -#ifndef TA_SCYLLADB_IDENTITY_H_ -#define TA_SCYLLADB_IDENTITY_H_ +#ifndef STORAGE_SCYLLADB_IDENTITY_H_ +#define STORAGE_SCYLLADB_IDENTITY_H_ #ifdef __cplusplus extern "C" { #endif #include "scylladb_client.h" -#include "scylladb_utils.h" #include "utarray.h" + +/** + * @file storage/scylladb_identity.h + * @brief Identity table and corresponding insertion and selection functions. + * + * Data stored in the identity table is used for transaction reattachment. 
+ * The identity table contains following 4 columns. + * id : A UUID as the identifer of transactions. + * hash : The transaction hash that would be updated after reattachment. + * status : The inclustion status of transaction. + * ts : The timestamp of update time of status or hash. + */ + typedef struct db_identity_s db_identity_t; typedef UT_array db_identity_array_t; typedef enum { PENDING_TXN = 0, INSERTING_TXN, CONFIRMED_TXN, NUM_OF_TXN_STATUS } db_txn_status_t; +#define DB_NUM_TRYTES_HASH NUM_TRYTES_HASH + /** * Allocate memory of db_identity_array_t */ @@ -157,14 +171,14 @@ status_t db_insert_tx_into_identity(db_client_service_t* service, const char* ha * @brief connect to ScyllaDB cluster and initialize identity keyspace and table * * @param[in] service ScyllaDB client service for connection - * @param[in] need_drop true : drop table, false : keep old table + * @param[in] need_truncate true : clear all data, false : keep old data * @param[in] keyspace_name keyspace name the session should use * * @return * - SC_OK on success * - non-zero on error */ -status_t db_init_identity_keyspace(db_client_service_t* service, bool need_drop, const char* keyspace_name); +status_t db_init_identity_keyspace(db_client_service_t* service, bool need_truncate, const char* keyspace_name); /** * @brief get identity objs with selected status from identity table @@ -241,8 +255,18 @@ status_t db_get_identity_objs_by_hash(db_client_service_t* service, const cass_b */ status_t db_insert_identity_table(db_client_service_t* service, db_identity_t* obj); +/** + * @brief show logger info for details of identity object + * + * @param[in] obj pointer to db_identity_t + * @return + * - SC_OK on success + * - SC_TA_NULL/SC_STORAGE_INVAILD_INPUT on error + */ +status_t db_show_identity_info(db_identity_t* obj); + #ifdef __cplusplus } #endif -#endif // TA_SCYLLADB_IDENTITY_H_ \ No newline at end of file +#endif // STORAGE_SCYLLADB_IDENTITY_H_ \ No newline at end of file diff --git 
a/storage/scylladb_permanode.c b/storage/scylladb_permanode.c index 7d174ded..49d63906 100644 --- a/storage/scylladb_permanode.c +++ b/storage/scylladb_permanode.c @@ -9,7 +9,6 @@ #include "scylladb_permanode.h" #define logger_id scylladb_logger_id - typedef struct select_where_s { cass_byte_t* bundle; cass_byte_t* address; @@ -251,15 +250,9 @@ static status_t get_blob(const CassRow* row, cass_byte_t* target, const char* co return ret; } -static status_t create_bundle_table(CassSession* session, bool need_drop) { +static status_t create_bundle_table(CassSession* session, bool need_truncate) { status_t ret = SC_OK; - if (need_drop) { - if (execute_query(session, "DROP TABLE IF EXISTS bundleTable;") != CASS_OK) { - ta_log_error("Drop bundleTable fail\n"); - ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; - goto exit; - } - } + if (execute_query(session, "CREATE TABLE IF NOT EXISTS bundleTable (" "bundle blob," @@ -275,19 +268,18 @@ static status_t create_bundle_table(CassSession* session, bool need_drop) { ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; goto exit; } + if (need_truncate) { + if (db_truncate_table(session, "bundleTable") != SC_OK) { + ta_log_error("truncate bundleTable fail\n"); + return SC_STORAGE_CASSANDRA_QUREY_FAIL; + } + } exit: return ret; } -static status_t create_edge_table(CassSession* session, bool need_drop) { +static status_t create_edge_table(CassSession* session, bool need_truncate) { status_t ret = SC_OK; - if (need_drop) { - if (execute_query(session, "DROP TABLE IF EXISTS edgeTable;") != CASS_OK) { - ta_log_error("Drop edgeTable fail\n"); - ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; - goto exit; - } - } if (execute_query(session, "CREATE TABLE IF NOT EXISTS edgeTable(" "edge blob," @@ -299,12 +291,18 @@ static status_t create_edge_table(CassSession* session, bool need_drop) { ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; goto exit; } + if (need_truncate) { + if (db_truncate_table(session, "edgeTable") != SC_OK) { + ta_log_error("truncate edgeTable fail\n"); + 
return SC_STORAGE_CASSANDRA_QUREY_FAIL; + } + } exit: return ret; } -status_t db_permanent_keyspace_init(db_client_service_t* service, bool need_drop, const char* keyspace_name) { +status_t db_permanent_keyspace_init(db_client_service_t* service, bool need_truncate, const char* keyspace_name) { status_t ret = SC_OK; CassStatement* use_statement = NULL; char* use_query = NULL; @@ -329,11 +327,11 @@ status_t db_permanent_keyspace_init(db_client_service_t* service, bool need_drop goto exit; } - if ((ret = create_bundle_table(service->session, need_drop)) != SC_OK) { + if ((ret = create_bundle_table(service->session, need_truncate)) != SC_OK) { ta_log_error("%s\n", "create bundle table fail"); goto exit; } - if ((ret = create_edge_table(service->session, need_drop)) != SC_OK) { + if ((ret = create_edge_table(service->session, need_truncate)) != SC_OK) { ta_log_error("%s\n", "create edge table fail"); goto exit; } diff --git a/storage/scylladb_permanode.h b/storage/scylladb_permanode.h index b89aafec..1db26e9c 100644 --- a/storage/scylladb_permanode.h +++ b/storage/scylladb_permanode.h @@ -5,17 +5,21 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. */ -#ifndef TA_SCYLLADB_PERMANODE_H_ -#define TA_SCYLLADB_PERMANODE_H_ +#ifndef STORAGE_SCYLLADB_PERMANODE_H_ +#define STORAGE_SCYLLADB_PERMANODE_H_ #ifdef __cplusplus extern "C" { #endif #include "common/model/transaction.h" #include "scylladb_client.h" -#include "scylladb_utils.h" #include "utils/containers/hash/hash243_queue.h" +/** + * @file storage/scylladb_permanode.h + * @brief Edge and bundle table with insertion and selection functions. 
+ */ + typedef struct scylla_iota_transaction_s scylla_iota_transaction_t; /** @@ -222,14 +226,14 @@ int64_t ret_transaction_timestamp(scylla_iota_transaction_t* obj); * @brief connect to ScyllaDB node and create table * * @param[in] service ScyllaDB db client service - * @param[in] need_drop true : drop table + * @param[in] need_truncate true : clear all data, false : keep old data * @param[in] keyspace_name keyspace name the session should use * * @return * - SC_OK on success * - non-zero on error */ -status_t db_permanent_keyspace_init(db_client_service_t* service, bool need_drop, const char* keyspace_name); +status_t db_permanent_keyspace_init(db_client_service_t* service, bool need_truncate, const char* keyspace_name); /** * @brief insert transactions into bundle table * @@ -293,4 +297,4 @@ status_t get_transactions(db_client_service_t* service, hash243_queue_t* res_que } #endif -#endif // TA_SCYLLADB_PREMANODE_H_ \ No newline at end of file +#endif // STORAGE_SCYLLADB_PERMANODE_H_ \ No newline at end of file diff --git a/storage/scylladb_utils.c b/storage/scylladb_utils.c index e8ec70dc..5905e69c 100644 --- a/storage/scylladb_utils.c +++ b/storage/scylladb_utils.c @@ -108,6 +108,22 @@ status_t make_query(char** result, const char* head_desc, const char* position, return SC_OK; } +status_t db_truncate_table(CassSession* session, const char* table_name) { + status_t ret = SC_OK; + char* query = NULL; + ret = make_query(&query, "TRUNCATE TABLE ", table_name, ""); + if (ret != SC_OK) { + ta_log_error("Fail to make truncate query\n"); + return ret; + } + if (execute_query(session, query) != CASS_OK) { + ta_log_error("Fail to truncate table: %s\n", table_name); + ret = SC_STORAGE_CASSANDRA_QUREY_FAIL; + } + free(query); + return ret; +} + status_t create_keyspace(CassSession* session, const char* keyspace_name) { status_t ret = SC_OK; char* create_query = NULL; diff --git a/storage/scylladb_utils.h b/storage/scylladb_utils.h index e2bf545a..08de81fb 100644 --- 
a/storage/scylladb_utils.h +++ b/storage/scylladb_utils.h @@ -5,15 +5,19 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. */ -#ifndef TA_SCYLLADB_UTILS_H_ -#define TA_SCYLLADB_UTILS_H_ +#ifndef STORAGE_SCYLLADB_UTILS_H_ +#define STORAGE_SCYLLADB_UTILS_H_ #ifdef __cplusplus extern "C" { #endif #include -#include "accelerator/errors.h" #include "cassandra.h" -#include "utils/logger.h" +#include "common/logger.h" + +/** + * @file storage/scylladb_utils.h + * @brief Universal functions, the logger and headers for ScyllaDB driver. + */ logger_id_t scylladb_logger_id; @@ -73,6 +77,17 @@ status_t make_query(char** result, const char* head_desc, const char* position, */ status_t create_keyspace(CassSession* session, const char* keyspace_name); +/** + * @brief clear all data in the specific ScyllaDB table without droping the table + * + * @param[in] session used to execute queries and maintains cluster state + * @param[in] table_name The name of table to be truncated + * @return + * - SC_OK on success + * - non-zero on error + */ +status_t db_truncate_table(CassSession* session, const char* table_name); + /** * Initialize logger */ @@ -91,4 +106,4 @@ int scylladb_logger_release(); } #endif -#endif // TA_SCYLLADB_UTILS_H_ \ No newline at end of file +#endif // STORAGE_SCYLLADB_UTILS_H_ \ No newline at end of file diff --git a/storage/ta_storage.h b/storage/ta_storage.h index 3d95eb3a..93254bdc 100644 --- a/storage/ta_storage.h +++ b/storage/ta_storage.h @@ -5,18 +5,22 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. 
*/ -#ifndef TA_STORAGE_H_ -#define TA_SCYLLADB_TA_API_H_ +#ifndef STORAGE_TA_STORAGE_H_ +#define STORAGE_TA_STORAGE_H_ #ifdef __cplusplus extern "C" { #endif -#include "scylladb_client.h" #include "scylladb_identity.h" #include "scylladb_permanode.h" +/** + * @file storage/ta_storage.h + * @brief The high level header for TA storage driver. + */ + #ifdef __cplusplus } #endif -#endif // TA_STORAGE_H_ \ No newline at end of file +#endif // STORAGE_TA_STORAGE_H_ \ No newline at end of file diff --git a/tests/BUILD b/tests/BUILD index f97c96c8..c701f1f1 100644 --- a/tests/BUILD +++ b/tests/BUILD @@ -1,13 +1,11 @@ -package(default_visibility = ["//visibility:public"]) - cc_test( - name = "test_common", + name = "test_core", srcs = [ - "test_common.cc", + "test_core.cc", ], deps = [ ":iota_api_mock", - "//accelerator:common_core", + "//accelerator/core", ], ) @@ -19,7 +17,7 @@ cc_library( hdrs = ["iota_api_mock.hh"], deps = [ ":test_define", - "//accelerator:common_core", + "//accelerator/core", "@com_google_googletest//:gtest_main", "@entangled//cclient/api", ], @@ -30,19 +28,10 @@ cc_test( srcs = [ "driver.c", ], - copts = select({ - "//connectivity/mqtt:mqtt_enable": [ - "-DMQTT_ENABLE", - ], - "//conditions:default": [], - }) + select({ - "//storage:db_enable": ["-DDB_ENABLE"], - "//conditions:default": [], - }), deps = [ ":test_define", - "//accelerator:apis", - "//accelerator:proxy_apis", + "//accelerator/core:apis", + "//accelerator/core:proxy_apis", ], ) @@ -51,11 +40,11 @@ cc_binary( srcs = [ "driver.c", ], - copts = ["-DENABLE_STAT"], + defines = ["ENABLE_STAT"], deps = [ ":test_define", - "//accelerator:apis", - "//accelerator:proxy_apis", + "//accelerator/core:apis", + "//accelerator/core:proxy_apis", ], ) @@ -66,7 +55,7 @@ cc_test( ], deps = [ ":test_define", - "//utils:cache", + "//utils/cache", "@unity", ], ) @@ -78,7 +67,7 @@ cc_test( ], deps = [ ":test_define", - "//serializer", + "//accelerator/core/serializer", ], ) @@ -89,23 +78,10 @@ cc_test( ], deps 
= [ ":test_define", - "//utils:pow", + "//accelerator/core:pow", ], ) -""" -cc_test( - name = "test_map_mode", - srcs = [ - "test_map_mode.c", - ], - deps = [ - ":test_define", - "//map:mode", - ], -) -""" - cc_library( name = "test_define", hdrs = ["test_define.h"], @@ -117,14 +93,12 @@ cc_library( cc_test( name = "test_scylladb", - srcs = select({ - "//storage:db_enable": ["test_scylladb.c"], - "//conditions:default": ["empty_test.c"], - }), - deps = [ - ":test_define", - ] + select({ - "//storage:db_enable": ["//storage"], + srcs = ["test_scylladb.c"], + deps = select({ + "//accelerator:db_enable": [ + "//storage", + ":test_define", + ], "//conditions:default": [], }), ) diff --git a/tests/driver.c b/tests/driver.c index db0fa696..726ff600 100644 --- a/tests/driver.c +++ b/tests/driver.c @@ -7,8 +7,8 @@ */ #include -#include "accelerator/apis.h" -#include "accelerator/proxy_apis.h" +#include "accelerator/core/apis.h" +#include "accelerator/core/proxy_apis.h" #include "test_define.h" static ta_core_t ta_core; @@ -44,6 +44,14 @@ static const int proxy_apis_num = sizeof(proxy_apis_g) / sizeof(struct proxy_api #define TEST_COUNT 1 #endif +#ifdef DB_ENABLE +static struct identity_s { + char uuid_string[DB_UUID_STRING_LENGTH]; + char hash[NUM_FLEX_TRITS_HASH + 1]; + int8_t status; +} identities[TEST_COUNT]; +#endif + static void gen_rand_tag(char* tag) { const char tryte_alpahbet[] = "NOPQRSTUVWXYZ9ABCDEFGHIJKLM"; @@ -130,8 +138,26 @@ void test_send_transfer(void) { for (size_t count = 0; count < TEST_COUNT; count++) { test_time_start(&start_time); - TEST_ASSERT_EQUAL_INT32(SC_OK, api_send_transfer(&ta_core.iota_conf, &ta_core.iota_service, json, &json_result)); + TEST_ASSERT_EQUAL_INT32(SC_OK, api_send_transfer(&ta_core, json, &json_result)); test_time_end(&start_time, &end_time, &sum); +#ifdef DB_ENABLE + cJSON* json_obj = cJSON_Parse(json_result); + cJSON* json_item = NULL; + json_item = cJSON_GetObjectItemCaseSensitive(json_obj, "id"); + + TEST_ASSERT(json_item != 
NULL && json_item->valuestring != NULL && + (strnlen(json_item->valuestring, DB_UUID_STRING_LENGTH - 1) == (DB_UUID_STRING_LENGTH - 1))); + memcpy(identities[count].uuid_string, json_item->valuestring, DB_UUID_STRING_LENGTH); + + json_item = cJSON_GetObjectItemCaseSensitive(json_obj, "hash"); + TEST_ASSERT(json_item != NULL && json_item->valuestring != NULL && + (strnlen(json_item->valuestring, NUM_TRYTES_HASH) == NUM_TRYTES_HASH)); + memcpy(identities[count].hash, json_item->valuestring, NUM_TRYTES_HASH); + identities[count].hash[NUM_TRYTES_HASH] = '\0'; + identities[count].status = PENDING_TXN; + + cJSON_Delete(json_obj); +#endif free(json_result); } printf("Average time of send_transfer: %lf\n", sum / TEST_COUNT); @@ -179,6 +205,50 @@ void test_find_transactions_by_tag(void) { printf("Average time of find_transactions_by_tag: %lf\n", sum / TEST_COUNT); } +#ifdef DB_ENABLE +void test_find_transactions_by_id(void) { + char* json_result; + double sum = 0; + for (size_t count = 0; count < TEST_COUNT; count++) { + test_time_start(&start_time); + + TEST_ASSERT_EQUAL_INT32(SC_OK, api_find_transactions_by_id(&ta_core.iota_service, &ta_core.db_service, + identities[count].uuid_string, &json_result)); + test_time_end(&start_time, &end_time, &sum); + free(json_result); + } + printf("Average time of find_transactions_by_id: %lf\n", sum / TEST_COUNT); +} + +void test_api_get_identity_info_by_id(void) { + char* json_result; + double sum = 0; + for (size_t count = 0; count < TEST_COUNT; count++) { + test_time_start(&start_time); + + TEST_ASSERT_EQUAL_INT32( + SC_OK, api_get_identity_info_by_id(&ta_core.db_service, identities[count].uuid_string, &json_result)); + test_time_end(&start_time, &end_time, &sum); + free(json_result); + } + printf("Average time of get_identity_info_by_id: %lf\n", sum / TEST_COUNT); +} + +void test_api_get_identity_info_by_hash(void) { + char* json_result; + double sum = 0; + for (size_t count = 0; count < TEST_COUNT; count++) { + 
test_time_start(&start_time); + + TEST_ASSERT_EQUAL_INT32(SC_OK, + api_get_identity_info_by_hash(&ta_core.db_service, identities[count].hash, &json_result)); + test_time_end(&start_time, &end_time, &sum); + free(json_result); + } + printf("Average time of get_identity_info_by_hash: %lf\n", sum / TEST_COUNT); +} +#endif + void test_find_transactions_obj_by_tag(void) { char* json_result; double sum = 0; @@ -193,7 +263,6 @@ void test_find_transactions_obj_by_tag(void) { } printf("Average time of find_tx_obj_by_tag: %lf\n", sum / TEST_COUNT); } - void test_send_mam_message(void) { double sum = 0; const char* json = @@ -280,6 +349,11 @@ int main(int argc, char* argv[]) { // RUN_TEST(test_receive_mam_message); RUN_TEST(test_find_transactions_by_tag); RUN_TEST(test_find_transactions_obj_by_tag); +#ifdef DB_ENABLE + RUN_TEST(test_api_get_identity_info_by_hash); + RUN_TEST(test_api_get_identity_info_by_id); + RUN_TEST(test_find_transactions_by_id); +#endif RUN_TEST(test_proxy_apis); ta_core_destroy(&ta_core); return UNITY_END(); diff --git a/tests/empty_test.c b/tests/empty_test.c deleted file mode 100644 index d1b80f24..00000000 --- a/tests/empty_test.c +++ /dev/null @@ -1,16 +0,0 @@ -/* - * Copyright (C) 2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. - * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -/* To disable some checks of sanitizer tools to certain Bazel tests, one way is - * redirecting test source files to empty_test.c in the Bazel BULID file. Using - * Bazel `select()` function with user-defined `config_setting` to achieve the - * ignorement of sanitrizer tools. For exmaple, command `bazel test //tests/test_scylladb` - * will perform tests in `test_scylladb.c` only when users add `--define db=enable` - * at build time. Otherwise, the tests will be redirecting to this file. 
- */ -int main(void) { return 0; } \ No newline at end of file diff --git a/tests/iota_api_mock.hh b/tests/iota_api_mock.hh index 95e55d05..fed79855 100644 --- a/tests/iota_api_mock.hh +++ b/tests/iota_api_mock.hh @@ -6,7 +6,7 @@ * "LICENSE" at the root of this distribution. */ -#include "accelerator/common_core.h" +#include "accelerator/core/core.h" #include "cclient/api/core/core_api.h" #include "cclient/api/extended/extended_api.h" #include "gmock/gmock.h" diff --git a/tests/regression/0_build.sh b/tests/regression/0_build.sh deleted file mode 100755 index 6be9772d..00000000 --- a/tests/regression/0_build.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash - -make - -bazel run //accelerator & -pid=$! - -kill $pid -wait $pid - -trap 'exit 0' TERM diff --git a/tests/regression/1_run_TA_API.sh b/tests/regression/1_run_TA_API.sh deleted file mode 100755 index bf626359..00000000 --- a/tests/regression/1_run_TA_API.sh +++ /dev/null @@ -1,19 +0,0 @@ -make - -redis-server & - -bazel run -- accelerator --ta_port=$1& -TA=$! -sleep $2 # TA takes time to be built - -pip install --user -r tests/regression/requirements.txt -python3 tests/regression/runner.py $3 $4 $5 -rc=$? 
- -if [ $rc -ne 0 ] -then - exit -1; -fi - -wait $(kill -9 $TA) -trap 'exit 0' TERM diff --git a/tests/regression/common.py b/tests/regression/common.py new file mode 100644 index 00000000..d19fb259 --- /dev/null +++ b/tests/regression/common.py @@ -0,0 +1,132 @@ +import sys +import json +import random +import logging +import requests +import statistics +import subprocess +import argparse + +TIMES_TOTAL = 100 +TIMEOUT = 100 # [sec] +STATUS_CODE_500 = "500" +STATUS_CODE_405 = "405" +STATUS_CODE_404 = "404" +STATUS_CODE_400 = "400" +STATUS_CODE_200 = "200" +EMPTY_REPLY = "000" +LEN_TAG = 27 +LEN_ADDR = 81 +LEN_MSG_SIGN = 2187 +TRYTE_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZ9" +URL = "" + + +def parse_cli_arg(): + global URL + parser = argparse.ArgumentParser('Regression test runner program') + parser.add_argument('-u', + '--url', + dest='raw_url', + default="localhost:8000") + parser.add_argument('-d', '--debug', dest="debug", action="store_true") + parser.add_argument('--nostat', dest="no_stat", action="store_true") + args = parser.parse_args() + + if args.no_stat: + global TIMES_TOTAL + TIMES_TOTAL = 2 + if args.debug: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + URL = "http://" + args.raw_url + + +def eval_stat(time_cost, func_name): + avg = statistics.mean(time_cost) + var = statistics.variance(time_cost) + print("Average Elapsed Time of `" + str(func_name) + "`:" + str(avg) + + " sec") + print("With the range +- " + str(2 * var) + + "sec, including 95% of API call time consumption") + + +def fill_nines(trytes, output_len): + out_str = trytes + "9" * (output_len - len(trytes)) + + return out_str + + +def gen_rand_trytes(tryte_len): + trytes = "" + for i in range(tryte_len): + trytes = trytes + TRYTE_ALPHABET[random.randint(0, 26)] + return trytes + + +def test_logger(f): + logger = logging.getLogger(f.__module__) + name = f.__name__ + + def decorate(instance): + logger.debug(f"Testing case = {name}") + return 
instance + + return decorate(f) + + +def valid_trytes(trytes, trytes_len): + if len(trytes) != trytes_len: + return False + + for char in trytes: + if char not in TRYTE_ALPHABET: + return False + + return True + + +def map_field(key, value): + ret = {} + for k, v in zip(key, value): + ret.update({k: v}) + return json.dumps(ret) + + +def API(get_query, get_data=None, post_data=None): + global URL + command = "curl {} -X POST -H 'Content-Type: application/json' -w \", %{{http_code}}\" -d '{}'" + try: + response = {} + if get_data is not None: + command = str(URL + get_query + get_data) + r = requests.get(command, timeout=TIMEOUT) + response = {"content": r.text, "status_code": str(r.status_code)} + + elif post_data is not None: + command = command.format(URL + get_query, post_data) + p = subprocess.Popen(command, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + out, err = p.communicate() + curl_response = str(out.decode('ascii')) + response = { + "content": curl_response.split(", ")[0], + "status_code": curl_response.split(", ")[1] + } + else: + logging.error("Wrong request method") + response = None + + except BaseException: + logging.error(URL, "Timeout!") + logging.error('\n ' + repr(sys.exc_info())) + return None + if not response: + response = None + + logging.debug(f"Command = {command}, response = {response}") + + return response diff --git a/tests/regression/common.sh b/tests/regression/common.sh new file mode 100644 index 00000000..91897f51 --- /dev/null +++ b/tests/regression/common.sh @@ -0,0 +1,54 @@ +# Build options +setup_build_opts() { + # The options are separated by '|', the format is as follows + # | + OPTIONS=( + "|" + "|--iri_host ${IRI_HOST}" + "|--iri_port ${IRI_PORT}" + "|--ta_host ${TA_HOST}" + "|--db_host ${DB_HOST}" + "|--verbose" + "|--proxy_passthrough" + "--define db=enable|" + "--define build_type=debug|" + "--define build_type=profile|" + ) + success=() + fail=() +} + +# Check environment variables +check_env() { + 
ENV_NAME=( + "IRI_HOST" + "IRI_PORT" + "TA_HOST" + "TA_PORT" + "DB_HOST" + ) + + echo "Checking environment variables" + echo "==============================" + + for (( i = 0; i < ${#ENV_NAME[@]}; i++ )); do + name=${ENV_NAME[${i}]} + if [[ -z ${!name} ]]; then + echo "${name} not set" + fail=1 + else + echo "${name} is set to ${!name}" + fi + done + + echo "==============================" + + [ -z ${fail} ] || exit 1 +} + +# Parse command line arguments +get_cli_args () { + sleep_time=$1 + shift + remaining_args=$@ # Get the remaining arguments +} diff --git a/tests/regression/requirements.txt b/tests/regression/requirements.txt index c20f36f2..69c65be5 100644 --- a/tests/regression/requirements.txt +++ b/tests/regression/requirements.txt @@ -1 +1,5 @@ +certifi==2019.11.28 +chardet==3.0.4 +idna==2.7 requests==2.20.0 +urllib3==1.24.3 diff --git a/tests/regression/run-api-with-mqtt.sh b/tests/regression/run-api-with-mqtt.sh new file mode 100755 index 00000000..fa15c2e8 --- /dev/null +++ b/tests/regression/run-api-with-mqtt.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +source tests/regression/common.sh + +check_env +setup_build_opts + +# Get command line arguments +# Current arguments parsed are +get_cli_args $@ + +# Install prerequisites +make MQTT +pip install --user -r tests/regression/requirements.txt +redis-server & + +# Iterate over all available build options +for (( i = 0; i < ${#OPTIONS[@]}; i++ )); do + option=${OPTIONS[${i}]} + cli_arg=${option} | cut -d '|' -f 1 + build_arg=${option} | cut -d '|' -f 2 + + bazel run accelerator ${build_arg} -- --ta_port=${TA_PORT} ${cli_arg} & + TA=$! + sleep ${sleep_time} # TA takes time to be built + trap "kill -9 ${TA};" INT # Trap SIGINT from Ctrl-C to stop TA + + python3 tests/regression/runner.py ${remaining_args} --url localhost:${TA_PORT} + rc=$? 
+ + if [ $rc -ne 0 ] + then + echo "Build option '${option}' failed" + fail+=("${option}") + else + success+=("${option}") + fi + + bazel clean + wait $(kill -9 ${TA}) +done + +echo "--------- Successful build options ---------" +for (( i = 0; i < ${#success[@]}; i++ )); do echo ${success[${i}]}; done +echo "----------- Failed build options -----------" +for (( i = 0; i < ${#fail[@]}; i++ )); do echo ${fail[${i}]}; done diff --git a/tests/regression/run-api.sh b/tests/regression/run-api.sh new file mode 100755 index 00000000..6bb08348 --- /dev/null +++ b/tests/regression/run-api.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +source tests/regression/common.sh + +check_env +setup_build_opts + +# Get command line arguments +# Current arguments parsed are +get_cli_args $@ + +# Install prerequisites +make +pip install --user -r tests/regression/requirements.txt +redis-server & + +# Iterate over all available build options +for (( i = 0; i < ${#OPTIONS[@]}; i++ )); do + option=${OPTIONS[${i}]} + cli_arg=${option} | cut -d '|' -f 1 + build_arg=${option} | cut -d '|' -f 2 + + bazel run accelerator ${build_arg} -- --ta_port=${TA_PORT} ${cli_arg} & + TA=$! + sleep ${sleep_time} # TA takes time to be built + trap "kill -9 ${TA};" INT # Trap SIGINT from Ctrl-C to stop TA + + python3 tests/regression/runner.py ${remaining_args} --url localhost:${TA_PORT} + rc=$? 
+ + if [ $rc -ne 0 ] + then + echo "Build option '${option}' failed" + fail+=("${option}") + else + success+=("${option}") + fi + + bazel clean + wait $(kill -9 ${TA}) +done + +echo "--------- Successful build options ---------" +for (( i = 0; i < ${#success[@]}; i++ )); do echo ${success[${i}]}; done +echo "----------- Failed build options -----------" +for (( i = 0; i < ${#fail[@]}; i++ )); do echo ${fail[${i}]}; done diff --git a/tests/regression/runner.py b/tests/regression/runner.py index c0b12ec3..bfc55331 100755 --- a/tests/regression/runner.py +++ b/tests/regression/runner.py @@ -7,534 +7,26 @@ # "LICENSE" at the root of this distribution. # Run in Python3 -import json -import requests +from common import * +import os import sys -import subprocess import unittest -import statistics -import time -import random import logging -DEBUG_FLAG = False -TIMES_TOTAL = 100 -if len(sys.argv) == 2: - raw_url = sys.argv[1] -elif len(sys.argv) == 4: - raw_url = sys.argv[1] - if sys.argv[2] == 'Y': - DEBUG_FLAG = True - - # the 3rd arg is the option which determine if use the debugging mode of statistical tests - if sys.argv[3] == 'Y': - TIMES_TOTAL = 2 -else: - raw_url = "localhost:8000" -url = "http://" + raw_url -headers = {'content-type': 'application/json'} - -# Utils: -TIMEOUT = 100 # [sec] -STATUS_CODE_500 = "500" -STATUS_CODE_405 = "405" -STATUS_CODE_404 = "404" -STATUS_CODE_400 = "400" -STATUS_CODE_200 = "200" -EMPTY_REPLY = "000" -LEN_TAG = 27 -LEN_ADDR = 81 -LEN_MSG_SIGN = 2187 -tryte_alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ9" - - -def eval_stat(time_cost, func_name): - avg = statistics.mean(time_cost) - var = statistics.variance(time_cost) - print("Average Elapsed Time of `" + str(func_name) + "`:" + str(avg) + - " sec") - print("With the range +- " + str(2 * var) + - "sec, including 95% of API call time consumption") - - -def fill_nines(trytes, output_len): - out_str = trytes + "9" * (output_len - len(trytes)) - - return out_str - - -def 
gen_rand_trytes(tryte_len): - trytes = "" - for i in range(tryte_len): - trytes = trytes + tryte_alphabet[random.randint(0, 26)] - return trytes - - -def valid_trytes(trytes, trytes_len): - if len(trytes) != trytes_len: - return False - - for char in trytes: - if char not in tryte_alphabet: - return False - - return True - - -def API(get_query, get_data=None, post_data=None): - try: - response = {} - if get_data is not None: - r = requests.get(str(url + get_query + get_data), timeout=TIMEOUT) - response = {"content": r.text, "status_code": str(r.status_code)} - - elif post_data is not None: - command = "curl " + str( - url + get_query - ) + " -X POST -H 'Content-Type: application/json' -w \", %{http_code}\" -d '" + str( - post_data) + "'" - logging.debug("curl command = " + command) - p = subprocess.Popen(command, - shell=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = p.communicate() - curl_response = str(out.decode('ascii')) - response = { - "content": curl_response.split(", ")[0], - "status_code": curl_response.split(", ")[1] - } - else: - logging.error("Wrong request method") - response = None - - except BaseException: - logging.error(url, "Timeout!") - logging.error('\n ' + repr(sys.exc_info())) - return None - if not response: - response = None - - return response - - -class Regression_Test(unittest.TestCase): - def test_send_transfer(self): - logging.debug( - "\n================================send transfer================================" - ) - # cmd - # 0. positive value, tryte maessage, tryte tag, tryte address - # 1. zero value, tryte message, tryte tag, tryte address - # 2. chinese value, tryte message, tryte tag, tryte address - # 3. zero value, chinese message, tryte tag, tryte address - # 4. zero value, tryte message, chinese tag, tryte address - # 5. negative value, tryte maessage, tryte tag, tryte address - # 6. no value, tryte maessage, tryte tag, tryte address - # 7. zero value, no maessage, tryte tag, tryte address - # 8. 
zero value, tryte maessage, no tag, tryte address - # 9. zero value, tryte maessage, tryte tag, no address - # 10. zero value, tryte maessage, tryte tag, unicode address - rand_msg = gen_rand_trytes(30) - rand_tag = gen_rand_trytes(27) - rand_addr = gen_rand_trytes(81) - query_string = [[420, rand_msg, rand_tag, rand_addr], - [0, rand_msg, rand_tag, rand_addr], - ["生而為人, 我很抱歉", rand_msg, rand_tag, rand_addr], - [0, "生而為人, 我很抱歉", rand_tag, rand_addr], - [0, rand_msg, "生而為人, 我很抱歉", rand_addr], - [-5, rand_msg, rand_tag, rand_addr], - [None, rand_msg, rand_tag, rand_addr], - [0, None, rand_tag, rand_addr], - [0, rand_msg, None, rand_addr], - [0, rand_msg, rand_tag, None], - [0, rand_msg, rand_tag, "我思故我在"]] - - response = [] - for i in range(len(query_string)): - logging.debug("testing case = " + repr(query_string[i])) - post_data = { - "value": query_string[i][0], - "message": query_string[i][1], - "tag": query_string[i][2], - "address": query_string[i][3] - } - logging.debug("post_data = " + repr(post_data)) - post_data_json = json.dumps(post_data) - response.append(API("/transaction/", post_data=post_data_json)) - - for i in range(len(response)): - logging.debug("send transfer i = " + str(i) + ", res = " + - response[i]["content"] + ", status code = " + - response[i]["status_code"]) - - pass_case = [0, 1, 2, 5, 6, 7, 8, 9, 10] - for i in range(len(response)): - if i in pass_case: - self.assertEqual(STATUS_CODE_200, response[i]["status_code"]) - res_json = json.loads(response[i]["content"]) - - # we only send zero tx at this moment - self.assertEqual(0, res_json["value"]) - self.assertTrue(valid_trytes(res_json["tag"], LEN_TAG)) - self.assertTrue(valid_trytes(res_json["address"], LEN_ADDR)) - self.assertTrue( - valid_trytes(res_json["trunk_transaction_hash"], LEN_ADDR)) - self.assertTrue( - valid_trytes(res_json["branch_transaction_hash"], - LEN_ADDR)) - self.assertTrue(valid_trytes(res_json["bundle_hash"], - LEN_ADDR)) - 
self.assertTrue(valid_trytes(res_json["hash"], LEN_ADDR)) - self.assertTrue( - valid_trytes(res_json["signature_and_message_fragment"], - LEN_MSG_SIGN)) - else: - self.assertEqual(STATUS_CODE_500, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - rand_msg = gen_rand_trytes(30) - rand_tag = gen_rand_trytes(27) - rand_addr = gen_rand_trytes(81) - for i in range(TIMES_TOTAL): - start_time = time.time() - post_data = { - "value": 0, - "message": rand_msg, - "tag": rand_tag, - "address": rand_addr - } - post_data_json = json.dumps(post_data) - API("/transaction/", post_data=post_data_json) - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "send transfer") - - def test_get_transactions_object(self): - logging.debug( - "\n================================find transaction objects================================" - ) - # cmd - # 0. 81 trytes transaction hash - # 1. multiple 81 trytes transaction hash - # 2. 20 trytes transaction hash - # 3. 100 trytes transaction hash - # 4. unicode transaction hash - # 5. 
Null transaction hash - sent_transaction_tmp = [] - for i in range(3): - rand_tag = gen_rand_trytes(27) - rand_msg = gen_rand_trytes(30) - rand_addr = gen_rand_trytes(81) - tx_post_data = { - "value": 0, - "message": rand_msg, - "tag": rand_tag, - "address": rand_addr - } - tx_post_data_json = json.dumps(tx_post_data) - sent_transaction_obj = API("/transaction/", - post_data=tx_post_data_json) - - logging.debug("sent_transaction_obj = " + - repr(sent_transaction_obj)) - self.assertEqual(STATUS_CODE_200, sent_transaction_obj["status_code"]) - sent_transaction_obj_json = json.loads( - sent_transaction_obj["content"]) - sent_transaction_tmp.append(sent_transaction_obj_json) - sent_transaction = [[sent_transaction_tmp[0]], - [sent_transaction_tmp[1], sent_transaction_tmp[2]]] - query_string = [[sent_transaction_tmp[0]["hash"]], - [ - sent_transaction_tmp[1]["hash"], - sent_transaction_tmp[2]["hash"] - ], - gen_rand_trytes(19), - gen_rand_trytes(100), "工程師批哩趴啦的生活", ""] - - response = [] - for t_case in query_string: - logging.debug("testing case = " + repr(t_case)) - post_data_json = json.dumps({"hashes": t_case}) - response.append( - API("/transaction/object", post_data=post_data_json)) - - for i in range(len(response)): - logging.debug("response find transaction objects i = " + str(i) + - ", " + repr(response[i])) - pass_case = [0, 1] - - for i in range(len(response)): - if i in pass_case: - expect_txs = sent_transaction[i] - res_txs = json.loads(response[i]["content"]) - - for j in range(len(expect_txs)): - did_exmine = False - for k in range(len(res_txs)): - if expect_txs[j]["hash"] == res_txs[k]["hash"]: - self.assertEqual( - expect_txs[j] - ["signature_and_message_fragment"], - res_txs[k]["signature_and_message_fragment"]) - self.assertEqual(expect_txs[j]["address"], - res_txs[k]["address"]) - self.assertEqual(expect_txs[j]["value"], - res_txs[k]["value"]) - self.assertEqual(expect_txs[j]["obsolete_tag"], - res_txs[k]["obsolete_tag"]) - 
self.assertEqual(expect_txs[j]["timestamp"], - res_txs[k]["timestamp"]) - self.assertEqual(expect_txs[j]["last_index"], - res_txs[k]["last_index"]) - self.assertEqual(expect_txs[j]["bundle_hash"], - res_txs[k]["bundle_hash"]) - self.assertEqual( - expect_txs[j]["trunk_transaction_hash"], - res_txs[k]["trunk_transaction_hash"]) - self.assertEqual( - expect_txs[j]["branch_transaction_hash"], - res_txs[k]["branch_transaction_hash"]) - self.assertEqual(expect_txs[j]["tag"], - res_txs[k]["tag"]) - self.assertEqual( - expect_txs[j]["attachment_timestamp"], - res_txs[k]["attachment_timestamp"]) - self.assertEqual( - expect_txs[j] - ["attachment_timestamp_lower_bound"], - res_txs[k]["attachment_timestamp_lower_bound"]) - self.assertEqual( - expect_txs[j] - ["attachment_timestamp_upper_bound"], - res_txs[k]["attachment_timestamp_upper_bound"]) - self.assertEqual(expect_txs[j]["nonce"], - res_txs[k]["nonce"]) - did_exmine = True - break - - self.assertTrue(did_exmine) - - else: - self.assertEqual(STATUS_CODE_500, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - post_data_json = json.dumps({"hashes": query_string[0]}) - for i in range(TIMES_TOTAL): - start_time = time.time() - API("/transaction/object", post_data=post_data_json) - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "find transaction objects") - - def test_get_tips(self): - logging.debug( - "\n================================get_tips================================" - ) - # cmd - # 0. call get_tips normally - # 1. call get_tips with unwanted ascii string - # 2. 
call get_tips with unwanted unicode string - rand_tag_27 = gen_rand_trytes(27) - query_string = ["", rand_tag_27, "飛天義大利麵神教"] - - response = [] - for t_case in query_string: - logging.debug("testing case = " + repr(t_case)) - response.append(API("/tips/", get_data=t_case)) - - for i in range(len(response)): - logging.debug("get_tips i = " + str(i) + ", res = " + - repr(response[i]["content"]) + ", status code = " + - repr(response[i]["status_code"])) - - pass_case = [0] - for i in range(len(response)): - if i in pass_case: - tips_hashes_array = json.loads(response[i]["content"]) - - for tx_hashes in tips_hashes_array: - self.assertTrue(valid_trytes(tx_hashes, LEN_ADDR)) - else: - self.assertEqual(STATUS_CODE_400, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - for i in range(TIMES_TOTAL): - start_time = time.time() - API("/tips/", get_data="") - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "get_tips") - - def test_get_tips_pair(self): - logging.debug( - "\n================================get_tips_pair================================" - ) - # cmd - # 0. call get_tips normally - # 1. call get_tips with unwanted ascii string - # 2. 
call get_tips with unwanted unicode string - rand_tag_27 = gen_rand_trytes(27) - query_string = ["", rand_tag_27, "飛天義大利麵神教"] - - response = [] - for t_case in query_string: - logging.debug("testing case = " + repr(t_case)) - response.append(API("/tips/pair/", get_data=t_case)) - - for i in range(len(response)): - logging.debug("get_tips i = " + str(i) + ", res = " + - repr(response[i]["content"]) + ", status code = " + - repr(response[i]["status_code"])) - - pass_case = [0] - for i in range(len(response)): - if i in pass_case: - self.assertEqual(STATUS_CODE_200, response[i]["status_code"]) - res_json = json.loads(response[i]["content"]) - - self.assertTrue( - valid_trytes(res_json["trunkTransaction"], LEN_ADDR)) - self.assertTrue( - valid_trytes(res_json["branchTransaction"], LEN_ADDR)) - else: - self.assertEqual(STATUS_CODE_400, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - for i in range(TIMES_TOTAL): - start_time = time.time() - API("/tips/pair/", get_data="") - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "get_tips_pair") - - def test_generate_address(self): - logging.debug( - "\n================================generate_address================================" - ) - # cmd - # 0. call generate_address normally - # 1. call generate_address with unwanted ascii string - # 2. 
call generate_address with unwanted unicode string - rand_tag_81 = gen_rand_trytes(81) - query_string = ["", rand_tag_81, "飛天義大利麵神教"] - - response = [] - for t_case in query_string: - logging.debug("testing case = " + repr(t_case)) - response.append(API("/address/", get_data=t_case)) - - for i in range(len(response)): - logging.debug("generate_address i = " + str(i) + ", res = " + - repr(response[i]["content"]) + ", status code = " + - repr(response[i]["status_code"])) - - pass_case = [0] - for i in range(len(response)): - if i in pass_case: - res_json = json.loads(response[i]["content"]) - - self.assertTrue(valid_trytes(res_json[0], LEN_ADDR)) - else: - self.assertEqual(STATUS_CODE_400, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - for i in range(TIMES_TOTAL): - start_time = time.time() - API("/address", get_data="") - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "generate_address") - - def test_send_trytes(self): - logging.debug( - "\n================================send_trytes================================" - ) - # cmd - # 0. single 2673 trytes legal transaction object - # 1. multiple 2673 ligal trytes transaction object - # 2. single 200 trytes illegal transaction object - # 3. single single 3000 trytes illegal transaction object - # 4. single unicode illegal transaction object - # 5. empty trytes list - # 6. 
empty not trytes list object - rand_trytes = [] - for i in range(2): - all_9_context = fill_nines("", 2673 - 81 * 3) - tips_response = API("/tips/pair/", get_data="") - self.assertEqual(STATUS_CODE_200, tips_response["status_code"]) - res_json = json.loads(tips_response["content"]) - - rand_trytes.append(all_9_context + res_json["trunkTransaction"] + - res_json["branchTransaction"] + - fill_nines("", 81)) - - query_string = [[rand_trytes[0]], [rand_trytes[0], rand_trytes[1]], - [gen_rand_trytes(200)], [gen_rand_trytes(3000)], - ["逼類不司"], [""], ""] - - response = [] - for i in range(len(query_string)): - logging.debug("testing case = " + repr(query_string[i])) - post_data = {"trytes": query_string[i]} - logging.debug("post_data = " + repr(post_data)) - post_data_json = json.dumps(post_data) - response.append(API("/tryte", post_data=post_data_json)) - - for i in range(len(response)): - logging.debug("send_trytes i = " + str(i) + ", res = " + - response[i]["content"] + ", status code = " + - response[i]["status_code"]) - - pass_case = [0, 1] - for i in range(len(response)): - logging.debug("send_trytes i = " + str(i) + ", res = " + - response[i]["content"] + ", status code = " + - response[i]["status_code"]) - if i in pass_case: - res_json = json.loads(response[i]["content"]) - - self.assertEqual(query_string[i], res_json["trytes"]) - else: - self.assertEqual(STATUS_CODE_500, response[i]["status_code"]) - - # Time Statistics - time_cost = [] - post_data = {"trytes": [rand_trytes[0]]} - post_data_json = json.dumps(post_data) - for i in range(TIMES_TOTAL): - start_time = time.time() - API("/tryte", post_data=post_data_json) - time_cost.append(time.time() - start_time) - - eval_stat(time_cost, "send trytes") - - -""" - API List - mam_recv_msg: GET - mam_send_msg: POST - Find transactions by tag: GET - Get transaction object - Find transaction objects by tag - Get transaction object - Find transaction objects by tag - Fetch pair tips which base on GetTransactionToApprove 
- Fetch all tips - Generate an unused address - send transfer: POST - Client bad request -""" - # Run all the API Test here if __name__ == '__main__': - if DEBUG_FLAG == True: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - unittest.main(argv=['first-arg-is-ignored'], exit=True) + ver = sys.version_info + if ver.major < 3 or (ver.major == 3 and ver.minor < 6): + raise Exception("Must be using Python 3.6 or greater") + + parse_cli_arg() + + suite_path = os.path.join(os.path.dirname(__file__), "test_suite") + sys.path.append(suite_path) + for module in os.listdir(suite_path): + if module[-3:] == ".py": + mod = __import__(module[:-3], locals(), globals()) + suite = unittest.TestLoader().loadTestsFromModule(mod) + result = unittest.TextTestRunner().run(suite) + if not result.wasSuccessful(): + exit(1) diff --git a/tests/regression/test_suite/generate_address.py b/tests/regression/test_suite/generate_address.py new file mode 100644 index 00000000..61c5a1bf --- /dev/null +++ b/tests/regression/test_suite/generate_address.py @@ -0,0 +1,45 @@ +from common import * +import json +import unittest +import time +import logging + + +class GenerateAddress(unittest.TestCase): + + # Without additional GET parameter (pass) + @test_logger + def test_normal(self): + res = API("/address/", get_data=self.query_string[0]) + res_json = json.loads(res["content"]) + + self.assertEqual(STATUS_CODE_200, res["status_code"]) + self.assertTrue(valid_trytes(res_json[0], LEN_ADDR)) + + # Ascii string (fail) + @test_logger + def test_ascii_string(self): + res = API("/address/", get_data=self.query_string[1]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Unicode string (fail) + @test_logger + def test_unicode_string(self): + res = API("/address/", get_data=self.query_string[2]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + for i in 
range(TIMES_TOTAL): + start_time = time.time() + API("/address/", get_data="") + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "generate_address") + + @classmethod + def setUpClass(cls): + rand_tag_27 = gen_rand_trytes(27) + cls.query_string = ["", rand_tag_27, "飛天義大利麵神教"] diff --git a/tests/regression/test_suite/get_tips.py b/tests/regression/test_suite/get_tips.py new file mode 100644 index 00000000..4fd968ac --- /dev/null +++ b/tests/regression/test_suite/get_tips.py @@ -0,0 +1,46 @@ +from common import * +import json +import unittest +import time +import logging + + +class GetTips(unittest.TestCase): + + # Without additional GET parameter (pass) + @test_logger + def test_normal(self): + res = API("/tips/", get_data=self.query_string[0]) + self.assertEqual(STATUS_CODE_200, res["status_code"]) + tips_hashes = json.loads(res["content"]) + + for tx_hash in tips_hashes: + self.assertTrue(valid_trytes(tx_hash, LEN_ADDR)) + + # Ascii string (fail) + @test_logger + def test_ascii_string(self): + res = API("/tips/", get_data=self.query_string[1]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Unicode string (fail) + @test_logger + def test_unicode_string(self): + res = API("/tips/", get_data=self.query_string[2]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + for i in range(TIMES_TOTAL): + start_time = time.time() + API("/tips/", get_data="") + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "get_tips") + + @classmethod + def setUpClass(cls): + rand_tag_27 = gen_rand_trytes(27) + cls.query_string = ["", rand_tag_27, "飛天義大利麵神教"] diff --git a/tests/regression/test_suite/get_tips_pair.py b/tests/regression/test_suite/get_tips_pair.py new file mode 100644 index 00000000..a24da189 --- /dev/null +++ b/tests/regression/test_suite/get_tips_pair.py @@ -0,0 +1,46 @@ +from common import * +import json +import unittest +import 
time +import logging + + +class GetTipsPair(unittest.TestCase): + + # Without additional GET parameter (pass) + @test_logger + def test_normal(self): + res = API("/tips/pair/", get_data=self.query_string[0]) + self.assertEqual(STATUS_CODE_200, res["status_code"]) + tips_hash = json.loads(res["content"]) + + self.assertTrue(valid_trytes(tips_hash["trunkTransaction"], LEN_ADDR)) + self.assertTrue(valid_trytes(tips_hash["branchTransaction"], LEN_ADDR)) + + # Ascii string (fail) + @test_logger + def test_ascii_string(self): + res = API("/tips/pair/", get_data=self.query_string[1]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Unicode string (fail) + @test_logger + def test_unicode_string(self): + res = API("/tips/pair/", get_data=self.query_string[2]) + self.assertEqual(STATUS_CODE_400, res["status_code"]) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + for i in range(TIMES_TOTAL): + start_time = time.time() + API("/tips/pair/", get_data="") + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "get tips pair") + + @classmethod + def setUpClass(cls): + rand_tag_27 = gen_rand_trytes(27) + cls.query_string = ["", rand_tag_27, "飛天義大利麵神教"] diff --git a/tests/regression/test_suite/get_transactions_object.py b/tests/regression/test_suite/get_transactions_object.py new file mode 100644 index 00000000..c0004761 --- /dev/null +++ b/tests/regression/test_suite/get_transactions_object.py @@ -0,0 +1,96 @@ +from common import * +import json +import unittest +import time +import logging + + +class GetTransactionsObject(unittest.TestCase): + + # 81 trytes transaction hash (pass) + @test_logger + def test_81_trytes_hash(self): + res = API("/transaction/object", + post_data=map_field(self.post_field, [self.query_string[0]])) + self._verify_pass(res, 0) + + # Multiple 81 trytes transaction hash (pass) + @test_logger + def test_mult_81_trytes_hash(self): + res = API("/transaction/object", + 
post_data=map_field(self.post_field, [self.query_string[1]])) + self._verify_pass(res, 1) + + # 20 trytes transaction hash (fail) + @test_logger + def test_20_trytes_hash(self): + res = API("/transaction/object", + post_data=map_field(self.post_field, [self.query_string[2]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # 100 trytes transaction hash (fail) + @test_logger + def test_100_trytes_hash(self): + res = API("/transaction/object", + post_data=map_field(self.post_field, [self.query_string[3]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Unicode transaction hash (fail) + @test_logger + def test_unicode_hash(self): + res = API("/transaction/object", + post_data=map_field(self.post_field, [self.query_string[4]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Null Transaction hash (fail) + @test_logger + def test_null_hash(self): + res = API("/transaction/object", + post_data=map_field(self.post_field, [self.query_string[5]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + post_data_json = json.dumps({"hashes": self.query_string[0]}) + for i in range(TIMES_TOTAL): + start_time = time.time() + API("/transaction/object", post_data=post_data_json) + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "find transaction objects") + + @classmethod + def setUpClass(cls): + sent_txn_tmp = [] + for i in range(3): + tx_post_data = { + "value": 0, + "message": gen_rand_trytes(27), + "tag": gen_rand_trytes(30), + "address": gen_rand_trytes(81) + } + tx_post_data_json = json.dumps(tx_post_data) + sent_txn_obj = API("/transaction/", post_data=tx_post_data_json) + + logging.debug(f"sent_transaction_obj = {sent_txn_obj}") + + unittest.TestCase().assertEqual(STATUS_CODE_200, + sent_txn_obj["status_code"]) + sent_txn_obj_json = json.loads(sent_txn_obj["content"]) + sent_txn_tmp.append(sent_txn_obj_json) + cls.sent_txn 
= [[sent_txn_tmp[0]], [sent_txn_tmp[1], sent_txn_tmp[2]]] + cls.post_field = ["hashes"] + cls.response_field = [] + cls.query_string = [[sent_txn_tmp[0]["hash"]], + [sent_txn_tmp[1]["hash"], sent_txn_tmp[2]["hash"]], + gen_rand_trytes(19), + gen_rand_trytes(100), "工程師批哩趴啦的生活", ""] + + def _verify_pass(self, res, idx): + expected_txns = self.sent_txn[idx] + self.assertEqual(STATUS_CODE_200, res["status_code"]) + res_txn = json.loads(res["content"]) + for txn in expected_txns: + self.assertIn(txn, res_txn) diff --git a/tests/regression/test_suite/send_transfer.py b/tests/regression/test_suite/send_transfer.py new file mode 100644 index 00000000..a8e79d1d --- /dev/null +++ b/tests/regression/test_suite/send_transfer.py @@ -0,0 +1,141 @@ +from common import * +import json +import unittest +import time +import logging + + +class SendTransfer(unittest.TestCase): + + # Positive value, tryte maessage, tryte tag, tryte address (pass) + @test_logger + def test_normal(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[0])) + self._verify_pass(res) + + # Zero value, tryte message, tryte tag, tryte address (pass) + @test_logger + def test_zero_value(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[1])) + self._verify_pass(res) + + # Chinese value, tryte message, tryte tag, tryte address (pass) + @test_logger + def test_chinese_value(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[2])) + + # Zero value, chinese message, tryte tag, tryte address (fail) + @test_logger + def test_chinese_message(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[3])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Zero value, tryte message, chinese tag, tryte address (fail) + @test_logger + def test_chinese_tag(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[4])) + 
self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Negative value, tryte maessage, tryte tag, tryte address (pass) + @test_logger + def test_negative_value(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[5])) + self._verify_pass(res) + + # No value, tryte maessage, tryte tag, tryte address (pass) + @test_logger + def test_no_value(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[6])) + self._verify_pass(res) + + # Zero value, no maessage, tryte tag, tryte address (pass) + @test_logger + def test_no_message(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[7])) + self._verify_pass(res) + + # Zero value, tryte maessage, no tag, tryte address (pass) + @test_logger + def test_no_tag(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[8])) + self._verify_pass(res) + + # Zero value, tryte maessage, tryte tag, no address (pass) + @test_logger + def test_no_address(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[9])) + self._verify_pass(res) + + # Zero value, tryte maessage, tryte tag, unicode address (pass) + @test_logger + def test_unicode_address(self): + res = API("/transaction/", + post_data=map_field(self.post_field, self.query_string[10])) + self._verify_pass(res) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + rand_msg = gen_rand_trytes(30) + rand_tag = gen_rand_trytes(27) + rand_addr = gen_rand_trytes(81) + for i in range(TIMES_TOTAL): + start_time = time.time() + post_data = { + "value": 0, + "message": rand_msg, + "tag": rand_tag, + "address": rand_addr + } + post_data_json = json.dumps(post_data) + API("/transaction/", post_data=post_data_json) + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "send transfer") + + @classmethod + def setUpClass(cls): + rand_msg = 
gen_rand_trytes(30) + rand_tag = gen_rand_trytes(27) + rand_addr = gen_rand_trytes(81) + cls.post_field = ["value", "message", "tag", "address"] + cls.query_string = [[420, rand_msg, rand_tag, rand_addr], + [0, rand_msg, rand_tag, rand_addr], + ["生而為人, 我很抱歉", rand_msg, rand_tag, rand_addr], + [0, "生而為人, 我很抱歉", rand_tag, rand_addr], + [0, rand_msg, "生而為人, 我很抱歉", rand_addr], + [-5, rand_msg, rand_tag, rand_addr], + [None, rand_msg, rand_tag, rand_addr], + [0, None, rand_tag, rand_addr], + [0, rand_msg, None, rand_addr], + [0, rand_msg, rand_tag, None], + [0, rand_msg, rand_tag, "我思故我在"]] + + def _verify_pass(self, res): + self.assertEqual(STATUS_CODE_200, res["status_code"]) + res_json = json.loads(res["content"]) + + # We only send zero tx at this moment + self.assertEqual(0, res_json["value"]) + self.assertTrue(valid_trytes(res_json["tag"], LEN_TAG)) + self.assertTrue(valid_trytes(res_json["address"], LEN_ADDR)) + self.assertTrue( + valid_trytes(res_json["trunk_transaction_hash"], LEN_ADDR)) + self.assertTrue( + valid_trytes(res_json["branch_transaction_hash"], LEN_ADDR)) + self.assertTrue(valid_trytes(res_json["bundle_hash"], LEN_ADDR)) + self.assertTrue(valid_trytes(res_json["hash"], LEN_ADDR)) + self.assertTrue( + valid_trytes(res_json["signature_and_message_fragment"], + LEN_MSG_SIGN)) diff --git a/tests/regression/test_suite/send_trytes.py b/tests/regression/test_suite/send_trytes.py new file mode 100644 index 00000000..7506f762 --- /dev/null +++ b/tests/regression/test_suite/send_trytes.py @@ -0,0 +1,94 @@ +from common import * +import json +import unittest +import time +import logging + + +class SendTrytes(unittest.TestCase): + + # Single 2673 trytes legal transaction object (pass) + @test_logger + def test_single_legal_txn(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[0]])) + self.assertEqual(STATUS_CODE_200, res["status_code"]) + + res_json = json.loads(res["content"]) + self.assertEqual(self.query_string[0], 
res_json[self.post_field[0]]) + + # Multiple 2673 trytes legal transaction object (pass) + @test_logger + def test_mult_legal_txn(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[1]])) + self.assertEqual(STATUS_CODE_200, res["status_code"]) + + res_json = json.loads(res["content"]) + self.assertEqual(self.query_string[1], res_json[self.post_field[0]]) + + # Single 200 trytes illegal transaction object (fail) + @test_logger + def test_200_trytes_txn(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[2]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Single 3000 trytes illegal transaction object (fail) + @test_logger + def test_3000_trytes_txn(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[3]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Single unicode illegal transaction object (fail) + @test_logger + def test_unicode_txn(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[4]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Empty trytes list (fail) + @test_logger + def test_empty_trytes(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[5]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Empty not trytes list object (fail) + @test_logger + def test_empty_not_trytes_list(self): + res = API("/tryte", + post_data=map_field(self.post_field, [self.query_string[6]])) + self.assertEqual(STATUS_CODE_500, res["status_code"]) + + # Time statistics + @test_logger + def test_time_statistics(self): + time_cost = [] + post_data = {"trytes": self.query_string[0]} + post_data_json = json.dumps(post_data) + for i in range(TIMES_TOTAL): + start_time = time.time() + API("/tryte", post_data=post_data_json) + time_cost.append(time.time() - start_time) + + eval_stat(time_cost, "send trytes") + + @classmethod + def setUpClass(cls): + 
rand_trytes = [] + for i in range(2): + all_9_context = fill_nines("", 2673 - 81 * 3) + res = API("/tips/pair/", get_data="") + unittest.TestCase().assertEqual(STATUS_CODE_200, + res["status_code"]) + res_json = json.loads(res["content"]) + rand_trytes.append(all_9_context + res_json["trunkTransaction"] + + res_json["branchTransaction"] + + fill_nines("", 81)) + + cls.query_string = [[rand_trytes[0]], rand_trytes, + [gen_rand_trytes(200)], [gen_rand_trytes(3000)], + ["逼類不司"], [""], ""] + cls.post_field = ["trytes"] diff --git a/tests/test_cache.c b/tests/test_cache.c index 95a49d3c..89a038f3 100644 --- a/tests/test_cache.c +++ b/tests/test_cache.c @@ -7,7 +7,7 @@ */ #include "test_define.h" -#include "utils/cache.h" +#include "utils/cache/cache.h" void test_cache_del(void) { const char* key = TRYTES_81_1; diff --git a/tests/test_common.cc b/tests/test_core.cc similarity index 99% rename from tests/test_common.cc rename to tests/test_core.cc index b4c2a91d..353a245e 100644 --- a/tests/test_common.cc +++ b/tests/test_core.cc @@ -8,7 +8,7 @@ #include #include -#include "accelerator/common_core.h" +#include "accelerator/core/core.h" #include "iota_api_mock.hh" using ::testing::_; diff --git a/tests/test_define.h b/tests/test_define.h index 25683782..46622f83 100644 --- a/tests/test_define.h +++ b/tests/test_define.h @@ -23,11 +23,11 @@ extern "C" { "TFKQZVPZVWLXBJGNEPPVZNZYJFFPDMEQGGDPGSRMNXAURIELGLUCSSPGDGEQQFANGOWVXPUHNI" \ "DOZ9999" #define TRYTES_81_2 \ - "ZPQAZKUPNDGFKGMOVUZVPDLJMOXLR9WCFJEWKPCNAHESFJHYYJQMGKTZQMZCHZRWDXNXWBIHPY" \ - "F999999" + "PQNAYDPZYRMEZMOKSDFYRKRHQCHKZ9HIKDPV99TTWNZSHXYAHUVEUJDXX9BUDRDJEEKTPCV9WT" \ + "BV99999" #define TRYTES_81_3 \ - "JQFZWSGFSRFSUXENQXFZYHGWHIR9ECZ9XQMSPTZN9WWXJGNCZQ9ICJJV9ADIGJSKKB9VFQLKYE" \ - "GN99999" + "LI9UAGLVZITPIDZCXNRKLSOEFRPUSRHYEYMWNOSIYEBKYZVGIJTJDF9RSBRKK9WPGVGPIHLMQJ" \ + "ITZ9999" #define BUNDLE_HASH \ "LVXEVZABVCIFEDSCONKEVEYBSIRMXGHLJDKSKQHTKZC9ULEAPSLKOOWCCZJGWSIISDDSEVUQHV" \ "GPFOSIW" diff --git 
a/tests/test_map_mode.c b/tests/test_map_mode.c deleted file mode 100644 index 3c94b547..00000000 --- a/tests/test_map_mode.c +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright (C) 2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. - * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -#include "map/mode.h" -#include "test_define.h" - -#define CHID "UANFAVTSAXZMYUWRECNAOJDAQVTTORVGJCCISMZYAFFU9EYLBMZKEJ9VNXVFFGUTCHONEYVWVUTBTDJLO" -#define NEW_CHID "ONMTPDICUWBGEGODWKGBGMLNAZFXNHCJITSSTBTGMXCXBXJFBPOPXFPOJTXKOOSAJOZAYANZZBFKYHJ9N" -#define EPID "KI99YKKLFALYRUVRXKKRJCPVFISPMNCQQSMB9BGUWIHZTYFQOBZWYSVRNKVFJLSPPLPSFNBNREJWOR99U" - -void test_channel_create(void) { - mam_api_t mam; - tryte_t channel_id[MAM_CHANNEL_ID_TRYTE_SIZE + 1]; - mam_api_init(&mam, (tryte_t *)TRYTES_81_1); - - map_channel_create(&mam, channel_id, 1); - channel_id[MAM_CHANNEL_ID_TRYTE_SIZE] = '\0'; - TEST_ASSERT_EQUAL_STRING(CHID, channel_id); - - mam_api_destroy(&mam); -} - -void test_announce_channel(void) { - mam_api_t mam; - bundle_transactions_t *bundle = NULL; - bundle_transactions_new(&bundle); - tryte_t channel_id[MAM_CHANNEL_ID_TRYTE_SIZE + 1]; - trit_t msg_id[MAM_MSG_ID_SIZE]; - mam_api_init(&mam, (tryte_t *)TRYTES_81_1); - - map_channel_create(&mam, channel_id, 1); - map_announce_channel(&mam, channel_id, bundle, msg_id, channel_id); - channel_id[MAM_CHANNEL_ID_TRYTE_SIZE] = '\0'; - TEST_ASSERT_EQUAL_STRING(NEW_CHID, channel_id); - - bundle_transactions_free(&bundle); - mam_api_destroy(&mam); -} - -void test_announce_endpoint(void) { - mam_api_t mam; - bundle_transactions_t *bundle = NULL; - bundle_transactions_new(&bundle); - tryte_t channel_id[MAM_CHANNEL_ID_TRYTE_SIZE + 1]; - trit_t msg_id[MAM_MSG_ID_SIZE]; - mam_api_init(&mam, (tryte_t *)TRYTES_81_1); - - map_channel_create(&mam, channel_id, 1); - // Channel_id is 
actually the new endpoint id - map_announce_endpoint(&mam, channel_id, bundle, msg_id, channel_id); - channel_id[MAM_CHANNEL_ID_TRYTE_SIZE] = '\0'; - TEST_ASSERT_EQUAL_STRING(EPID, channel_id); - - bundle_transactions_free(&bundle); - mam_api_destroy(&mam); -} - -void test_write_message(void) { - mam_api_t mam; - bundle_transactions_t *bundle = NULL; - bundle_transactions_new(&bundle); - tryte_t channel_id[MAM_CHANNEL_ID_TRYTE_SIZE + 1]; - trit_t msg_id[MAM_MSG_ID_SIZE]; - mam_api_init(&mam, (tryte_t *)TRYTES_81_1); - retcode_t ret = RC_ERROR; - - map_channel_create(&mam, channel_id, 1); - ret = map_write_header_on_channel(&mam, channel_id, bundle, msg_id); - TEST_ASSERT_EQUAL(RC_OK, ret); - - ret = map_write_packet(&mam, bundle, TEST_PAYLOAD, msg_id, true); - TEST_ASSERT_EQUAL(RC_OK, ret); - - bundle_transactions_free(&bundle); - mam_api_destroy(&mam); -} - -void test_bundle_read(void) { - retcode_t ret; - char *payload = NULL; - mam_api_t mam; - bundle_transactions_t *bundle = NULL; - bundle_transactions_new(&bundle); - - ret = mam_api_init(&mam, (tryte_t *)SEED); - TEST_ASSERT_EQUAL(RC_OK, ret); - - flex_trit_t chid_trits[NUM_TRITS_HASH]; - flex_trits_from_trytes(chid_trits, NUM_TRITS_HASH, (const tryte_t *)CHID_BUNDLE, NUM_TRYTES_HASH, NUM_TRYTES_HASH); - mam_api_add_trusted_channel_pk(&mam, chid_trits); - - flex_trit_t hash[NUM_TRITS_SERIALIZED_TRANSACTION]; - flex_trits_from_trytes(hash, NUM_TRITS_SERIALIZED_TRANSACTION, (tryte_t const *)TEST_MAM_TRANSACTION_TRYTES_1, - NUM_TRYTES_SERIALIZED_TRANSACTION, NUM_TRYTES_SERIALIZED_TRANSACTION); - iota_transaction_t *txn = transaction_deserialize(hash, false); - bundle_transactions_add(bundle, txn); - transaction_free(txn); - - flex_trits_from_trytes(hash, NUM_TRITS_SERIALIZED_TRANSACTION, (tryte_t const *)TEST_MAM_TRANSACTION_TRYTES_2, - NUM_TRYTES_SERIALIZED_TRANSACTION, NUM_TRYTES_SERIALIZED_TRANSACTION); - txn = transaction_deserialize(hash, false); - bundle_transactions_add(bundle, txn); - 
transaction_free(txn); - - flex_trits_from_trytes(hash, NUM_TRITS_SERIALIZED_TRANSACTION, (tryte_t const *)TEST_MAM_TRANSACTION_TRYTES_3, - NUM_TRYTES_SERIALIZED_TRANSACTION, NUM_TRYTES_SERIALIZED_TRANSACTION); - txn = transaction_deserialize(hash, false); - bundle_transactions_add(bundle, txn); - - ret = map_api_bundle_read(&mam, bundle, &payload); - TEST_ASSERT_EQUAL(RC_OK, ret); - - TEST_ASSERT_EQUAL_STRING(TEST_PAYLOAD, payload); - transaction_free(txn); - bundle_transactions_free(&bundle); - mam_api_destroy(&mam); - free(payload); -} - -int main(void) { - UNITY_BEGIN(); - RUN_TEST(test_channel_create); - RUN_TEST(test_announce_channel); - RUN_TEST(test_announce_endpoint); - RUN_TEST(test_write_message); - RUN_TEST(test_bundle_read); - return UNITY_END(); -} diff --git a/tests/test_pow.c b/tests/test_pow.c index 9204138d..087b396c 100644 --- a/tests/test_pow.c +++ b/tests/test_pow.c @@ -6,8 +6,8 @@ * "LICENSE" at the root of this distribution. */ +#include "accelerator/core/pow.h" #include "test_define.h" -#include "utils/pow.h" void test_pow_flex(void) { int mwm = 9; diff --git a/tests/test_scylladb.c b/tests/test_scylladb.c index 401e621b..3cfae96e 100644 --- a/tests/test_scylladb.c +++ b/tests/test_scylladb.c @@ -6,6 +6,8 @@ * "LICENSE" at the root of this distribution. 
*/ +#ifdef DB_ENABLE + #include "storage/ta_storage.h" #include "test_define.h" @@ -136,7 +138,7 @@ void test_permanode(void) { size_t tx_num = sizeof(hashes) / (NUM_FLEX_TRITS_HASH); scylla_iota_transaction_t* transaction; db_client_service.host = strdup(host); - TEST_ASSERT_EQUAL_INT(db_client_service_init(&db_client_service), SC_OK); + TEST_ASSERT_EQUAL_INT(db_client_service_init(&db_client_service, DB_USAGE_NULL), SC_OK); TEST_ASSERT_EQUAL_INT(db_permanent_keyspace_init(&db_client_service, true, keyspace_name), SC_OK); new_scylla_iota_transaction(&transaction); @@ -176,8 +178,8 @@ void test_db_get_identity_objs_by_status(db_client_service_t* db_client_service) db_get_identity_uuid_string(itr, uuid_string); TEST_ASSERT_EQUAL_STRING(uuid_string, identities[idx].uuid_string); - TEST_ASSERT_EQUAL_MEMORY(db_ret_identity_hash(itr), (flex_trit_t*)identities[idx].hash, - sizeof(flex_trit_t) * NUM_FLEX_TRITS_HASH); + TEST_ASSERT_EQUAL_MEMORY(db_ret_identity_hash(itr), (cass_byte_t*)identities[idx].hash, + sizeof(cass_byte_t) * DB_NUM_TRYTES_HASH); idx++; } db_identity_array_free(&db_identity_array); @@ -192,8 +194,8 @@ void test_db_get_identity_objs_by_uuid_string(db_client_service_t* db_client_ser db_identity_t* itr; int idx = 0; IDENTITY_TABLE_ARRAY_FOREACH(db_identity_array, itr) { - TEST_ASSERT_EQUAL_MEMORY(db_ret_identity_hash(itr), (flex_trit_t*)identities[idx].hash, - sizeof(flex_trit_t) * NUM_FLEX_TRITS_HASH); + TEST_ASSERT_EQUAL_MEMORY(db_ret_identity_hash(itr), (cass_byte_t*)identities[idx].hash, + sizeof(cass_byte_t) * DB_NUM_TRYTES_HASH); TEST_ASSERT_EQUAL_INT(db_ret_identity_status(itr), identities[idx].status); idx++; } @@ -221,7 +223,7 @@ void test_db_get_identity_objs_by_hash(db_client_service_t* db_client_service) { void test_db_identity_table(void) { db_client_service_t db_client_service; db_client_service.host = strdup(host); - TEST_ASSERT_EQUAL_INT(db_client_service_init(&db_client_service), SC_OK); + 
TEST_ASSERT_EQUAL_INT(db_client_service_init(&db_client_service, DB_USAGE_NULL), SC_OK); TEST_ASSERT_EQUAL_INT(db_init_identity_keyspace(&db_client_service, true, keyspace_name), SC_OK); for (int i = 0; i < identity_num; i++) { db_insert_tx_into_identity(&db_client_service, identities[i].hash, identities[i].status, identities[i].uuid_string); @@ -232,12 +234,14 @@ void test_db_identity_table(void) { db_client_service_free(&db_client_service); } +#endif // DB_ENABLE int main(int argc, char** argv) { +#ifdef DB_ENABLE int cmdOpt; int optIdx; const struct option longOpt[] = { - {"host", required_argument, NULL, 'h'}, {"keyspace", required_argument, NULL, 'k'}, {NULL, 0, NULL, 0}}; + {"db_host", required_argument, NULL, 'h'}, {"keyspace", required_argument, NULL, 'k'}, {NULL, 0, NULL, 0}}; keyspace_name = "test_scylla"; /* Parse the command line options */ @@ -247,7 +251,7 @@ int main(int argc, char** argv) { if (cmdOpt == -1) break; /* Invalid option */ - if (cmdOpt == '?') break; + if (cmdOpt == '?') continue; if (cmdOpt == 'h') { host = optarg; @@ -264,7 +268,9 @@ int main(int argc, char** argv) { } scylladb_logger_init(); RUN_TEST(test_db_identity_table); - RUN_TEST(test_permanode); scylladb_logger_release(); return UNITY_END(); +#else + return 0; +#endif // DB_ENABLE } diff --git a/tests/test_serializer.c b/tests/test_serializer.c index f9a84c93..51a1944b 100644 --- a/tests/test_serializer.c +++ b/tests/test_serializer.c @@ -6,8 +6,10 @@ * "LICENSE" at the root of this distribution. 
*/ +#ifdef MQTT_ENABLE #include "connectivity/mqtt/mqtt_common.h" -#include "serializer/serializer.h" +#endif +#include "accelerator/core/serializer/serializer.h" #include "test_define.h" void test_serialize_ta_generate_address(void) { @@ -327,7 +329,7 @@ void test_serialize_ta_send_trytes_res(void) { hash_array_free(trytes); free(json_result); } - +#ifdef MQTT_ENABLE void test_mqtt_device_id_deserialize(void) { const char* json = "{\"device_id\":\"" DEVICE_ID "\", \"trytes\":[\"" TRYTES_2673_1 "\",\"" TRYTES_2673_2 "\"]}"; const int id_len = 32; @@ -352,6 +354,7 @@ void test_mqtt_transaction_hash_req_deserialize(void) { TEST_ASSERT_EQUAL_STRING(hash, TRYTES_81_1); } +#endif void test_proxy_apis_command_req_deserialize(void) { const char* json = @@ -381,9 +384,11 @@ int main(void) { RUN_TEST(test_send_mam_message_request_deserialize); RUN_TEST(test_deserialize_ta_send_trytes_req); RUN_TEST(test_serialize_ta_send_trytes_res); +#ifdef MQTT_ENABLE RUN_TEST(test_mqtt_device_id_deserialize); RUN_TEST(test_mqtt_tag_req_deserialize); RUN_TEST(test_mqtt_transaction_hash_req_deserialize); +#endif RUN_TEST(test_proxy_apis_command_req_deserialize); serializer_logger_release(); return UNITY_END(); diff --git a/utils/BUILD b/utils/BUILD index 4fbb3489..beb78936 100644 --- a/utils/BUILD +++ b/utils/BUILD @@ -1,39 +1,10 @@ package(default_visibility = ["//visibility:public"]) -cc_library( - name = "cache", - srcs = ["backend_redis.c"], - hdrs = ["cache.h"], - deps = [ - ":ta_logger", - "//accelerator:ta_errors", - "@entangled//common/trinary:flex_trit", - "@hiredis", - ], -) - -cc_library( - name = "pow", - srcs = ["pow.c"], - hdrs = ["pow.h"], - deps = [ - ":ta_logger", - "//accelerator:ta_errors", - "//third_party:dcurl", - "@com_github_uthash//:uthash", - "@entangled//common/helpers:digest", - "@entangled//common/model:bundle", - "@entangled//common/trinary:flex_trit", - "@entangled//utils:time", - ], -) - cc_library( name = "fill_nines", - srcs = ["fill_nines.c"], - hdrs = 
["fill_nines.h"], + srcs = ["fill_nines.h"], deps = [ - "//accelerator:ta_errors", + "//common:ta_errors", "@entangled//common/model:transaction", ], ) @@ -47,14 +18,6 @@ cc_library( ], ) -cc_library( - name = "ta_logger", - srcs = ["logger.h"], - deps = [ - "@entangled//utils:logger_helper", - ], -) - cc_library( name = "hash_algo_djb2", hdrs = ["hash_algo_djb2.h"], @@ -69,7 +32,7 @@ cc_library( "-lrt", ], deps = [ - ":ta_logger", - "//accelerator:ta_errors", + "//common:ta_errors", + "//common:ta_logger", ], ) diff --git a/utils/bundle_array.h b/utils/bundle_array.h index 2de7d2e3..d595488c 100644 --- a/utils/bundle_array.h +++ b/utils/bundle_array.h @@ -17,7 +17,7 @@ extern "C" { #include "utarray.h" /** - * @file bundle_array.h + * @file utils/bundle_array.h * @brief Implementation of bundle array object. This object would be used when we fetch multiple * `bundle_transactions_t` objects. It provides an easier way to save and traverse all the bundles. */ diff --git a/utils/cache/BUILD b/utils/cache/BUILD new file mode 100644 index 00000000..a3b8dec8 --- /dev/null +++ b/utils/cache/BUILD @@ -0,0 +1,13 @@ +package(default_visibility = ["//visibility:public"]) + +cc_library( + name = "cache", + srcs = ["backend_redis.c"], + hdrs = ["cache.h"], + deps = [ + "//common:ta_errors", + "//common:ta_logger", + "@entangled//common/trinary:flex_trit", + "@hiredis", + ], +) diff --git a/utils/backend_redis.c b/utils/cache/backend_redis.c similarity index 99% rename from utils/backend_redis.c rename to utils/cache/backend_redis.c index 13ebf585..8e01e0f0 100644 --- a/utils/backend_redis.c +++ b/utils/cache/backend_redis.c @@ -8,7 +8,7 @@ #include #include "cache.h" -#include "utils/logger.h" +#include "common/logger.h" #define BR_LOGGER "backend_redis" diff --git a/utils/cache.h b/utils/cache/cache.h similarity index 95% rename from utils/cache.h rename to utils/cache/cache.h index 1f1bcd53..4bded4b3 100644 --- a/utils/cache.h +++ b/utils/cache/cache.h @@ -13,7 +13,7 @@ 
#include #include #include -#include "accelerator/errors.h" +#include "common/ta_errors.h" #include "common/trinary/flex_trit.h" #ifdef __cplusplus @@ -21,8 +21,8 @@ extern "C" { #endif /** - * @file cache.h - * @brief Implementation of cache interface + * @file utils/cache/cache.h + * @brief Caching service interface * @example test_cache.c */ diff --git a/utils/fill_nines.c b/utils/fill_nines.c deleted file mode 100644 index c91f2dcb..00000000 --- a/utils/fill_nines.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2019 BiiLabs Co., Ltd. and Contributors - * All Rights Reserved. - * This is free software; you can redistribute it and/or modify it under the - * terms of the MIT license. A copy of the license can be found in the file - * "LICENSE" at the root of this distribution. - */ - -#include "fill_nines.h" - -status_t fill_nines(char* new_str, const char* const old_str, size_t new_str_len) { - if (!new_str || !old_str || new_str_len != NUM_TRYTES_TAG) { - return SC_SERIALIZER_NULL; - } - - int old_str_len = strlen(old_str); - strncpy(new_str, old_str, old_str_len); - - int diff = new_str_len - old_str_len; - if (diff) { - memset((new_str + old_str_len), '9', diff); - } else { - return SC_UTILS_WRONG_REQUEST_OBJ; - } - new_str[new_str_len] = '\0'; - - return SC_OK; -} diff --git a/utils/fill_nines.h b/utils/fill_nines.h index 651fb31e..c2ff17c4 100644 --- a/utils/fill_nines.h +++ b/utils/fill_nines.h @@ -12,8 +12,8 @@ #include #include #include -#include "accelerator/errors.h" #include "common/model/transaction.h" +#include "common/ta_errors.h" #ifdef __cplusplus extern "C" { @@ -21,6 +21,11 @@ extern "C" { /** * @file utils/fill_nines.h + * @brief Padding string with 9s to assigned length + * + * 9 would be mapped to 0 in the trinary system of IOTA. Therefore, when the legnth of the input string is less than the + * length of a certain IOTA transaction field, the user can use this function to use 9's as padding and make the input + * string long enough. 
*/ /** @@ -34,7 +39,25 @@ extern "C" { * - SC_OK on success * - non-zero on error */ -status_t fill_nines(char* new_str, const char* const old_str, size_t new_str_len); +static inline status_t fill_nines(char* new_str, const char* const old_str, size_t new_str_len) { + if (!new_str || !old_str || new_str_len != NUM_TRYTES_TAG) { + return SC_SERIALIZER_NULL; + } + + int old_str_len = strlen(old_str); + strncpy(new_str, old_str, old_str_len); + + int diff = new_str_len - old_str_len; + if (diff) { + memset((new_str + old_str_len), '9', diff); + } else { + return SC_UTILS_WRONG_REQUEST_OBJ; + } + new_str[new_str_len] = '\0'; + + return SC_OK; +} + #ifdef __cplusplus } #endif diff --git a/utils/hash_algo_djb2.h b/utils/hash_algo_djb2.h index ce36453d..d78e76ef 100644 --- a/utils/hash_algo_djb2.h +++ b/utils/hash_algo_djb2.h @@ -5,13 +5,18 @@ * terms of the MIT license. A copy of the license can be found in the file * "LICENSE" at the root of this distribution. */ -#ifndef HASH_ALGO_DJB2_H_ -#define HASH_ALGO_DJB2_H_ +#ifndef UTILS_HASH_ALGO_DJB2_H_ +#define UTILS_HASH_ALGO_DJB2_H_ #ifdef __cplusplus extern "C" { #endif +/** + * @file utils/hash_algo_djb2.h + * @brief Hash function DJB2 + */ + // source http://www.cse.yorku.ca/~oz/hash.html static inline uint32_t hash_algo_djb2(char const* str) { uint32_t hash = 5381; @@ -28,4 +33,4 @@ static inline uint32_t hash_algo_djb2(char const* str) { } #endif -#endif // HASH_ALGO_DJB2_H_ +#endif // UTILS_HASH_ALGO_DJB2_H_ diff --git a/utils/timer.c b/utils/timer.c index f1405e32..903f2dba 100644 --- a/utils/timer.c +++ b/utils/timer.c @@ -7,7 +7,7 @@ */ #include "timer.h" -#include "utils/logger.h" +#include "common/logger.h" #define TIMER_LOGGER "timer" diff --git a/utils/timer.h b/utils/timer.h index 2bd6fb39..cb3b2e8b 100644 --- a/utils/timer.h +++ b/utils/timer.h @@ -14,9 +14,11 @@ extern "C" { #endif /** - * @file timer.h - * @brief Implementation of one-shot timer. 
The wrapper wraps and executes the callback, - * executes in a different thread, and cancels the thread after the given tiemout. + * @file utils/timer.h + * @brief Implementation of one-shot timer. + * + * The wrapper wraps and executes the callback, executes in a different thread, and cancels the thread after the given + * tiemout. */ #include @@ -24,7 +26,7 @@ extern "C" { #include #include #include -#include "accelerator/errors.h" +#include "common/ta_errors.h" /** * Initialize logger