diff --git a/docs/roadmap.md b/docs/roadmap.md index 4b2811acfc..4ce525df63 100644 --- a/docs/roadmap.md +++ b/docs/roadmap.md @@ -4,7 +4,7 @@ sidebar_position: 1 # Development Roadmap -The development roadmap of Apache EventMesh is an overview of the planned features and milestones involved in the next several releases. The recent features and bug fixes are documented in the [Release Notes](https://eventmesh.apache.org/events/release-notes/v1.9.0/). The order of the features listed below doesn't correspond to their priorities. +The development roadmap of Apache EventMesh is an overview of the planned features and milestones involved in the next several releases. The recent features and bug fixes are documented in the [Release Notes](https://eventmesh.apache.org/events/release-notes/v1.10.0/). The order of the features listed below doesn't correspond to their priorities. ## List of Features and Milestones diff --git a/docusaurus.config.js b/docusaurus.config.js index d04e122098..1d3b8d4d74 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -62,7 +62,7 @@ module.exports = { { type: 'doc', docsPluginId: 'events', - docId: 'release-notes/v1.9.0', + docId: 'release-notes/v1.10.0', position: 'left', label: 'Events', }, @@ -107,7 +107,7 @@ module.exports = { }, { label: 'Events', - to: '/events/release-notes/v1.9.0', + to: '/events/release-notes/v1.10.0', }, { label: 'Releases', diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0.json new file mode 100644 index 0000000000..13166fff17 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0.json @@ -0,0 +1,30 @@ +{ + "version.label": { + "message": "v1.10.0", + "description": "The label for version v1.10.0" + }, + "sidebar.tutorialSidebar.category.Installation and Deployment": { + "message": "安装与部署", + "description": "The label for category Installation and Deployment in sidebar tutorialSidebar" + }, + "sidebar.tutorialSidebar.category.EventMesh SDK for Java": { + "message": "EventMesh SDK for Java", + "description": "The label for category EventMesh SDK for Java in sidebar tutorialSidebar" + }, + "sidebar.tutorialSidebar.category.Design Document": { + "message": "设计文档", + "description": "The label for category Design Document in sidebar tutorialSidebar" + }, + "sidebar.tutorialSidebar.category.Upgrade Guide": { + "message": "升级指南", + "description": "The label for category Upgrade Guide in sidebar tutorialSidebar" + }, + "sidebar.tutorialSidebar.category.Event Handling and Integration": { + "message": "事件处理和集成", + "description": "The label for category Event Handling and Integration in sidebar tutorialSidebar" + }, + "sidebar.tutorialSidebar.category.Observability": { + "message": "可观测性", + "description": "The label for category Observability in sidebar tutorialSidebar" + } +} diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md new file mode 100644 index 0000000000..66615e53aa --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md @@ -0,0 +1,418 @@ +# TCP 协议文档 + +#### 1. 
协议格式 + +**消息组成详解:** + +``` +魔术字:9位,当前值为“EventMesh” + +通信协议版本号:4位,当前值为“0000” + +消息总长度值(length):4位,int类型 + +消息头长度值(headerLength):4位,int类型 + +消息头(header):长度 = headerLength + +消息体(body):长度 = length - headerLength - 4 - 4 +``` + +#### 2. 业务逻辑层 + ++ 消息组成 + +消息头(header)+ 消息体(body) + +```java +public class Package { + + private Header header; + private Object body; +} + + +public class Header { + + private Command cmd; + private int code; + private String msg; + private String seq; +} +``` + ++ 详解 + +消息头(header):类型为Header,Header中有Command字段,用于区分不同的消息类型 + +消息体(body):对于不同的消息类型,body的类型不同 + +| 消息命令字 | body类型 | +| ------------------------------------------------------------ | ------------ | +| HEARTBEAT_REQUEST, HEARTBEAT_RESPONSE, HELLO_RESPONSE, CLIENT_GOODBYE_REQUEST, CLIENT_GOODBYE_RESPONSE, SERVER_GOODBYE_REQUEST, SERVER_GOODBYE_RESPONSE, LISTEN_REQUEST, LISTEN_RESPONSE, UNSUBSCRIBE_REQUEST, SUBSCRIBE_RESPONSE, UNSUBSCRIBE_RESPONSE, ASYNC_MESSAGE_TO_SERVER_ACK, BROADCAST_MESSAGE_TO_SERVER_ACK | 无 | +| HELLO_REQUEST | UserAgent | +| SUBSCRIBE_REQUEST | Subscription | +| REQUEST_TO_SERVER, REQUEST_TO_CLIENT, RESPONSE_TO_SERVER, RESPONSE_TO_CLIENT, ASYNC_MESSAGE_TO_SERVER, ASYNC_MESSAGE_TO_CLIENT, BROADCAST_MESSAGE_TO_SERVER, BROADCAST_MESSAGE_TO_CLIENT, ASYNC_MESSAGE_TO_CLIENT_ACK, BROADCAST_MESSAGE_TO_CLIENT_ACK, RESPONSE_TO_CLIENT_ACK, REQUEST_TO_CLIENT_ACK | OpenMessage | +| REDIRECT_TO_CLIENT | RedirectInfo | + +#### 3. Client 与 Eventmesh-Runtime(Server)交互场景详解 + +```java +public enum Command { + + //心跳 + HEARTBEAT_REQUEST(0), //client发给server的心跳包 + HEARTBEAT_RESPONSE(1), //server回复client的心跳包 + + //握手 + HELLO_REQUEST(2), //client发给server的握手请求 + HELLO_RESPONSE(3), //server回复client的握手请求 + + //断连 + CLIENT_GOODBYE_REQUEST(4), //client主动断连时通知server + CLIENT_GOODBYE_RESPONSE(5), //server回复client的主动断连通知 + SERVER_GOODBYE_REQUEST(6), //server主动断连时通知client + SERVER_GOODBYE_RESPONSE(7), //client回复server的主动断连通知 + + //订阅管理 + SUBSCRIBE_REQUEST(8), //client发给server的订阅请求 + SUBSCRIBE_RESPONSE(9), //server回复client的订阅请求 + UNSUBSCRIBE_REQUEST(10), //client发给server的取消订阅请求 + UNSUBSCRIBE_RESPONSE(11), //server回复client的取消订阅请求 + + //监听 + LISTEN_REQUEST(12), //client发给server的启动监听请求 + LISTEN_RESPONSE(13), //server回复client的监听请求 + + //RR + REQUEST_TO_SERVER(14), //client将RR请求发送给server + REQUEST_TO_CLIENT(15), //server将RR请求推送给client + REQUEST_TO_CLIENT_ACK(16), //client收到RR请求后ACK给server + RESPONSE_TO_SERVER(17), //client将RR回包发送给server + RESPONSE_TO_CLIENT(18), //server将RR回包推送给client + RESPONSE_TO_CLIENT_ACK(19), //client收到回包后ACK给server + + //异步事件 + ASYNC_MESSAGE_TO_SERVER(20), //client将异步事件发送给server + ASYNC_MESSAGE_TO_SERVER_ACK(21), //server收到异步事件后ACK给client + ASYNC_MESSAGE_TO_CLIENT(22), //server将异步事件推送给client + ASYNC_MESSAGE_TO_CLIENT_ACK(23), //client收到异步事件后ACK给server + + //广播 + BROADCAST_MESSAGE_TO_SERVER(24), //client将广播消息发送给server + BROADCAST_MESSAGE_TO_SERVER_ACK(25), //server收到广播消息后ACK给client + BROADCAST_MESSAGE_TO_CLIENT(26), //server将广播消息推送给client + BROADCAST_MESSAGE_TO_CLIENT_ACK(27), //client收到广播消息后ACK给server + + //重定向指令 + REDIRECT_TO_CLIENT(30), //server将重定向指令推动给client +} +``` + +#### 4. 
Client发起交互 + +| 场景 | Client向Server发送消息命令字 | Server回复Client消息的命令字 | 说明 | +| -------------- | ---------------------------- | ------------------------------- | ---- | +| 握手 | HELLO_REQUEST | HELLO_RESPONSE | | +| 心跳 | HEARTBEAT_REQUEST | HEARTBEAT_RESPONSE | | +| 订阅 | SUBSCRIBE_REQUEST | SUBSCRIBE_RESPONSE | | +| 取消订阅 | UNSUBSCRIBE_REQUEST | UNSUBSCRIBE_RESPONSE | | +| 开始监听消息 | LISTEN_REQUEST | LISTEN_RESPONSE | | +| 发送RR请求 | REQUEST_TO_SERVER | RESPONSE_TO_CLIENT | | +| 发送RR回包 | RESPONSE_TO_SERVER | 无 | | +| 发送异步事件 | ASYNC_MESSAGE_TO_SERVER | ASYNC_MESSAGE_TO_SERVER_ACK | | +| 发送广播事件 | BROADCAST_MESSAGE_TO_SERVER | BROADCAST_MESSAGE_TO_SERVER_ACK | | +| 客户端主动断连 | CLIENT_GOODBYE_REQUEST | CLIENT_GOODBYE_RESPONSE | | + +#### 5. Server发起交互 + +| 场景 | Server向Client发送消息命令字 | Client回复Server消息命令字 | 说明 | +| ------------------ | ---------------------------- | ------------------------------- | ---- | +| 客户端接收RR请求 | REQUEST_TO_CLIENT | REQUEST_TO_CLIENT_ACK | | +| 客户端接收RR回包 | RESPONSE_TO_CLIENT | RESPONSE_TO_CLIENT_ACK | | +| 客户端接收异步事件 | ASYNC_MESSAGE_TO_CLIENT | ASYNC_MESSAGE_TO_CLIENT_ACK | | +| 客户端接收广播事件 | BROADCAST_MESSAGE_TO_CLIENT | BROADCAST_MESSAGE_TO_CLIENT_ACK | | +| 服务端主动断连 | SERVER_GOODBYE_REQUEST | 无 | | +| 服务端进行重定向 | REDIRECT_TO_CLIENT | 无 | | +| | | | | + +#### 6. 消息类型 + ++ 发送RR消息 + +![rr-msg](/images/design-document/sync-message.png) + ++ 发送异步单播消息 + +![async-msg](/images/design-document/async-message.png) + ++ 发送广播消息 + +![broadcast-msg](/images/design-document/broadcast-message.png) + +## HTTP协议文档 + +Java类`LiteMessage`的`content`字段表示一个特殊的协议,因此,如果您要使用eventmesh-sdk-java的http-client,则只需设计协议的`content`即可。`LiteMessage`组成如下: + +```java +public class LiteMessage { + + private String bizSeqNo; + + private String uniqueId; + + private String topic; + + private String content; + + private Map prop; + + private long createTime = System.currentTimeMillis(); +} +``` + +#### 1. 消息发送方式与组成 + +**消息发送方式**:POST方式 + +**消息组成**:请求头(RequestHeader) + 请求体(RequestBody) + ++ 心跳消息 + +**RequestHeader** + +| Key | 说明 | +| -------- | ---------------- | +| Env | client所属环境 | +| Region | client所属区域 | +| Idc | client所属IDC | +| Dcn | client所在DCN | +| Sys | client所属子系统 | +| Pid | client进程号 | +| Ip | client Ip | +| Username | client 用户名 | +| Passwd | client 密码 | +| Version | 协议版本 | +| Language | 语言描述 | +| Code | 请求码 | + +**RequestBody** + +| Key | 说明 | +| ----------------- | ------------------------------ | +| clientType | 客户端类型 | +| heartbeatEntities | 心跳实体,包含topic、url等信息 | + ++ 订阅消息: + +**RequestHeader** + +与心跳消息一致 + +**RequestBody** + +| Key | 说明 | +| ----- | ----------------- | +| topic | 客户端订阅的topic | +| url | topic对应的url | + ++ 取消订阅消息: + +**RequestHeader** + +与心跳消息一致 + +**RequestBody** + +与订阅消息一致 + ++ 发送异步事件: + +**RequestHeader** + +与心跳消息一致 + +**RequestBody** + +| Key | 说明 | +| -------- | ----------------------- | +| topic | 客户端请求的topic | +| content | 客户端发送的topic的内容 | +| ttl | 客户端请求超时时间 | +| bizSeqNo | 客户端请求业务流水号 | +| uniqueId | 客户端请求消息唯一标识 | + +#### 2. Client发起交互 + +| 场景 | Client向Server发送消息请求码 | Server回复Client消息的响应码 | 说明 | +| ------------ | ---------------------------- | --------------------------------------- | ---- | +| 心跳 | HEARTBEAT(203) | SUCCESS(0)/EVENTMESH_HEARTBEAT_ERROR(19) | | +| 订阅 | SUBSCRIBE(206) | SUCCESS(0)/EVENTMESH_SUBSCRIBE_ERROR(17) | | +| 取消订阅 | UNSUBSCRIBE(207) | SUCCESS(0)/EVENTMESH_UNSUBSCRIBE_ERROR(18) | | +| 发送异步事件 | MSG_SEND_ASYNC(104) | SUCCESS(0)/EVENTMESH_SEND_ASYNC_MSG_ERR(14) | | + +#### 3. 
Server发起交互 + +| 场景 | Server向Client发送消息请求码 | Client回复Server消息响应码 | 说明 | +| ------------------ | ---------------------------- | -------------------------- | ---------------------- | +| 客户端接收异步事件 | HTTP_PUSH_CLIENT_ASYNC(105) | retCode | retCode值为0时代表成功 | + +## gRPC 协议文档 + +#### 1. protobuf + +在 `eventmesh-protocol-gprc` 模块有 Eventmesh gRPC 客户端的 protobuf 文件. the protobuf 文件路径是 `/src/main/proto/eventmesh-client.proto`. + +用gradle build 生成 gRPC 代码在 `/build/generated/source/proto/main`. 生成代码用于 `eventmesh-sdk-java` 模块. + +#### 2. gRPC 数据模型 + ++ 消息 + +以下消息数据模型用于 `publish()`, `requestReply()` 和 `broadcast()` APIs. + +``` +message RequestHeader { + string env = 1; + string region = 2; + string idc = 3; + string ip = 4; + string pid = 5; + string sys = 6; + string username = 7; + string password = 8; + string language = 9; + string protocolType = 10; + string protocolVersion = 11; + string protocolDesc = 12; +} + +message SimpleMessage { + RequestHeader header = 1; + string producerGroup = 2; + string topic = 3; + string content = 4; + string ttl = 5; + string uniqueId = 6; + string seqNum = 7; + string tag = 8; + map properties = 9; +} + +message BatchMessage { + RequestHeader header = 1; + string producerGroup = 2; + string topic = 3; + + message MessageItem { + string content = 1; + string ttl = 2; + string uniqueId = 3; + string seqNum = 4; + string tag = 5; + map properties = 6; + } + + repeated MessageItem messageItem = 4; +} + +message Response { + string respCode = 1; + string respMsg = 2; + string respTime = 3; +} +``` + ++ 订阅 + +以下订阅数据模型用于 `subscribe()` 和 `unsubscribe()` APIs. + +``` +message Subscription { + RequestHeader header = 1; + string consumerGroup = 2; + + message SubscriptionItem { + enum SubscriptionMode { + CLUSTERING = 0; + BROADCASTING = 1; + } + + enum SubscriptionType { + ASYNC = 0; + SYNC = 1; + } + + string topic = 1; + SubscriptionMode mode = 2; + SubscriptionType type = 3; + } + + repeated SubscriptionItem subscriptionItems = 3; + string url = 4; +} +``` + ++ 心跳 + +以下心跳数据模型用于 `heartbeat()` API. + +``` +message Heartbeat { + enum ClientType { + PUB = 0; + SUB = 1; + } + + RequestHeader header = 1; + ClientType clientType = 2; + string producerGroup = 3; + string consumerGroup = 4; + + message HeartbeatItem { + string topic = 1; + string url = 2; + } + + repeated HeartbeatItem heartbeatItems = 5; +} +``` + +#### 3. 
gRPC 服务接口 + ++ 事件生产端服务 APIs + +``` +service PublisherService { + # 异步事件生产 + rpc publish(SimpleMessage) returns (Response); + + # 同步事件生产 + rpc requestReply(SimpleMessage) returns (Response); + + # 批量事件生产 + rpc batchPublish(BatchMessage) returns (Response); +} +``` + ++ 事件消费端服务 APIs + +``` +service ConsumerService { + # 所消费事件通过 HTTP Webhook推送事件 + rpc subscribe(Subscription) returns (Response); + + # 所消费事件通过 TCP stream推送事件 + rpc subscribeStream(Subscription) returns (stream SimpleMessage); + + rpc unsubscribe(Subscription) returns (Response); +} +``` + ++ 客户端心跳服务 API + +``` +service HeartbeatService { + rpc heartbeat(Heartbeat) returns (Response); +} +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md new file mode 100644 index 0000000000..63b9a315db --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md @@ -0,0 +1,32 @@ +# HTTPS + +1.在eventmesh-runtime 中配置 + +``` +eventMesh.properties (添加如下配置) +eventMesh.server.useTls.enabled=true // 默认值 false + + +config env varible +-Dssl.server.protocol=TLSv1.1 // 默认值 TLSv1.1 +-Dssl.server.cer=sChat2.jks // 把文件放到启动脚本start.sh 指定的conPath目录下 +-Dssl.server.pass=sNetty +``` + +2.在eventmesh-sdk-java 中配置 + +``` +// 创建producer +LiteClientConfig eventMeshHttpClientConfig = new eventMeshHttpClientConfig(); +... + +// 设置开启TLS +eventMeshHttpClientConfig.setUseTls(true); +LiteProducer producer = new LiteProducer(eventMeshHttpClientConfig); + + +// 配置环境变量 +-Dssl.client.protocol=TLSv1.1 // 默认值 TLSv1.1 +-Dssl.client.cer=sChat2.jks // 把文件放到应用指定的conPath目录下 +-Dssl.client.pass=sNetty +``` \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md new file mode 100644 index 0000000000..fb9d163793 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md @@ -0,0 +1,102 @@ +# CloudEvents 集成 + +## 介绍 + +[CloudEvents](https://github.com/cloudevents/spec) 是一种描述事件数据的格式规范,它提供了跨服务、平台与系统的互操作性。 + +截止至 2021 年 5 月,EventMesh 包含了以下主要组件:`eventmesh-runtime`, `eventmesh-sdk-java` 和 `eventmesh-connector-rocketmq`。 + +对于使用 EventMesh 的用户,`eventmesh-runtime` 可以被部署为微服务来在生产者和消费者间传输用户的事件。 +用户的应用程序可以通过 `eventmesh-sdk-java` 来与 `eventmesh-runtime` 进行交互,即发布或订阅指定主题的事件。 + +EventMesh 的用户非常渴望能得到对 CloudEvents 的支持。有许多理由使得用户倾向于使用集成了 CloudEvents 支持的 SDK: + +- CloudEvents 是一种更为广泛接受和支持的描述事件的方式。目前,`eventmesh-sdk-java` 使用的是 `LiteMessage` 结构 + 来描述事件,其标准化程度较低。 +- CloudEvents 的 Java SDK 有更广泛的分发方式。比如,目前 EventMesh 的用户需要使用 SDK 的 tar 包,或对每个 EventMesh 的 + 发布版本从源码编译。有了 CloudEvents 的支持,用户可以更方便地通过 CloudEvents 的公开分发(比如,配置 Maven)来添加 + EventMesh SDK 依赖项。 +- CloudEvents 的 SDK 支持多种语言。尽管目前 EventMesh 只提供了 Java SDK,但在未来,如果要为更多语言提供支持,将 Java SDK + 与 CloudEvents 绑定的经验将使工作变得容易。 + +## 需求 + +### 功能需求 + +| 需求 ID | 需求描述 | 备注 | +| ------ | ------- | --- | +| F-1 | EventMesh 用户应能使用公共 SDK 依赖项来发布或订阅 CloudEvents 格式的事件 | 功能性 | +| F-2 | EventMesh 用户应能在提供了 CloudEvents 支持的 SDK 中继续使用现有的 EventMesh 客户端功能(如负载均衡) | 功能等价 | +| F-3 | EventMesh 的开发者应不需要付出特别多努力/痛苦来在 `eventmesh-sdk-java` 和提供了 CloudEvents 支持的 SDK 之间同步 | 可维护性 | +| F-4 | EventMesh 
支持可插拔的协议,以便开发者整合其他协议(例如:CloudEvents / EventMesh MessageOpenMessage / MQTT...) | 功能性 | +| F-5 | EventMesh 支持统一的 API 以供从/向事件库发布或订阅事件 | 功能性 | + +### 性能需求 + +| 需求 ID | 需求描述 | 备注 | +| ------ | ------- | --- | +| P-1 | 提供了 CloudEvents 支持的 SDK 应具有与目前的 SDK 相近的客户端延迟 | | + +## 设计细节 + +与 CloudEvents 的 Java SDK 绑定(这与 Kafka 已经完成的工作类似,请在附录中的参考资料了解更多细节)是达成上述需求的一种简单方法。 + +### 可插拔协议 + +![可插拔协议](/images/design-document/cloudevents-pluggable-protocols.png) + +### EventMesh 集成 CloudEvents 进度表 + +#### TCP + +##### SDK 端发布 + +- 在 `package` 首部中添加 CloudEvents 标识符 +- 使用 `CloudEventBuilder` 构造 CloudEvent,并将其放入 `package` 体中 + +##### SDK 端订阅 + +- 在 `ReceiveMsgHook` 接口下添加 `convert` 函数,其用于将 `package` 体转换为具有 `package` 首部标识符的特定协议 +- 不同协议应实现 `ReceiveMsgHook` 接口 + +##### 服务端发布 + +- 设计包含 `decodeMessage` 接口的协议转换 API,其可以把包体转换为 CloudEvent +- 更新 `MessageTransferTask` 下的 `Session.upstreamMsg()`,将入参 `Message` 改为 `CloudEvent`,这使用了 + 上一步的 `decodeMessage` API 来进行对 CloudEvent 的转换 +- 更新 `SessionSender.send()`,将入参 `Message` 改为 `CloudEvent` +- 更新 `MeshMQProducer` API,支持在运行时发送 `CloudEvents` +- 在 `connector-plugin` 中实现支持向 EventStore 中发送 `CloudEvents` + +##### 服务端订阅 + +- 支持将连接器插件中的 `RocketMessage` 改为 `CloudEvent +- 重写 `AsyncMessageListener.consume()` 函数,将入参 `Message` 改为 `CloudEvent` +- 更新 `MeshMQPushConsumer.updateOffset()`,将入参 `Message` 改为 `CloudEvent` +- 更新 `DownStreamMsgContext`,将入参 `Message` 改为 `CloudEvent`,更新 `DownStreamMsgContext.ackMsg` + +#### HTTP + +##### SDK 端发布 + +- 支持 `LiteProducer.publish(cloudEvent)` +- 在 http 请求头中添加 CloudEvents 标识符 + +##### SDK 端订阅 + +##### 服务端发布 + +- 支持根据 `HttpCommand` 首部中的协议类型,通过可插拔的协议插件构造 `HttpCommand.body` +- 支持在消息处理器中发布 CloudEvent + +##### 服务端订阅 + +- 更新 `EventMeshConsumer.subscribe()` +- 更新 `HandleMsgContext`, 将入参 `Message` 改为 `CloudEvent` +- 更新 `AsyncHttpPushRequest.tryHTTPRequest()` + +## 附录 + +### 参考资料 + +- diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md new file mode 100644 index 0000000000..0bacec2808 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md @@ -0,0 +1,156 @@ +# Event Bridge + +![event-bridge](/images/eventmesh-bridge.png) + +Event Bridge 可以支持跨mesh集群的消息投递,下面展示这一功能的详细设计与体验步骤 + +![event-bridge-detail](/images/design-document/event-bridge-detail.png) + +> 注:在本地体验这一功能时需要启动两台eventmesh实例,同时要修改`eventmesh-runtime`目录下的`eventmesh.properties`文件中的端口配置,避免端口冲突。便于下文描述,event-bridge特性按照上图信息进行表述。 + +## 01 远程订阅 + +**描述**:向cluster2 eventmesh发起远程订阅指令,cluster2 eventmesh收到指令后会携带订阅信息调用cluster1 eventmesh的本地订阅接口 + +**URL**: http://{cluster2 address}/eventmesh/subscribe/remote + +**请求方式**:POST + +**请求参数:**application/json 格式 + +| 参数名 | 类型 | 是否必填 | 说明 | +| ------------- | ------ | -------- | ------------------------------------------------------------ | +| url | String | 是 | 标识订阅url信息,暂时无用,后续可移除,目前仅为强校验,实际会被(/eventmesh/bridge/publish)替换 | +| consumerGroup | String | 是 | 标识消费组信息,实际会被cluster2的eventmesh配置信息替换 | +| topic | List | 是 | 标识订阅信息列表 | +| mode | String | 是 | 标识消费模式,分为集群模式和广播模式 | +| topic | String | 是 | 标识订阅的topic | +| type | String | 是 | 标识消费类型,分为同步和异步 | +| remoteMesh | String | 否 | 标识远程mesh地址,优先根据topic从注册中心获取,获取不到使用该字段替换 | + +**请求样例:** + +```json +{ + "url": "http://127.0.0.1:8088/sub/test", + "consumerGroup": "TEST-GROUP", + "topic": [ + { + "mode": "CLUSTERING", + "topic": "TEST-TOPIC-HTTP-ASYNC", + 
"type": "ASYNC" + } + ], + "remoteMesh" : "http://127.0.0.1:10105/eventmesh/subscribe/local" +} +``` + +## 02 本地订阅 + +**描述**:向cluster2的EventMesh实例发起本地订阅指令,cluster2的EventMesh收到订阅指令后会启动本地监听从event store收下来的消息,并推送给订阅信息中的url。 + +**URL**: http://{cluster2 address}/eventmesh/subscribe/local + +**请求方式**:POST + +**请求参数:**application/json 格式 + +| 参数名 | 类型 | 是否必填 | 说明 | +| ------------- | ------ | -------- | ------------------------------------ | +| url | String | 是 | 标识订阅url信息 | +| consumerGroup | String | 是 | 标识消费组信息 | +| topic | List | 是 | 标识订阅信息列表 | +| mode | String | 是 | 标识消费模式,分为集群模式和广播模式 | +| topic | String | 是 | 标识订阅的topic | +| type | String | 是 | 标识消费类型,分为同步和异步 | + +**请求样例:** + +```JSON +{ + "url": "http://127.0.0.1:8088/sub/test", + "consumerGroup": "TEST-GROUP", + "topic": [ + { + "mode": "CLUSTERING", + "topic": "TEST-TOPIC-HTTP-ASYNC", + "type": "ASYNC" + } + ] +} +``` + +## 03 发送消息 + +**描述**:向cluster1的EventMesh实例发送消息,cluster1的EventMesh收到消息后会发送到event store,再从event store收下来消息推送给cluster2的EventMesh url `/eventmesh/bridge/publish`。 + +**URL**: http://{cluster1 address}/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC + +**请求方式**:POST + +**请求参数:**application/json 格式 + +**请求样例:** + +```json +{ + "name":"test", + "age":"19" +} +``` + +## 04远程去订阅 + +**描述**:向cluster2的EventMesh实例发送去除订阅指令,cluster2的EventMesh收到指令后会发送cluster1的EventMesh,cluster1的EventMesh会本地执行去除订阅 + +**URL**: http://{cluster2 address}/eventmesh/unsubscribe/remote + +**请求方式**:POST + +**请求参数:**application/json 格式 + +| 参数名 | 类型 | 是否必填 | 说明 | +| ------------- | ------ | -------- | ------------------------------------------------------------ | +| url | String | 是 | 标识要去除订阅url信息,暂时无用,后续可移除,目前仅为强校验,实际会被(/eventmesh/bridge/publish)替换 | +| consumerGroup | String | 是 | 标识要去除的消费组信息,实际会使用EventMesh cluster2的group信息替换 | +| topic | List | 是 | 标识订阅topic信息列表 | + +**请求样例:** + +```json +{ + "consumerGroup": "EventMeshTest-consumerGroup", + "url": "http://127.0.0.1:8088/sub/test", + "topic": [ + "TEST-TOPIC-HTTP-ASYNC" + ] +} +``` + +## 05本地去订阅 + +**描述**:向cluster2的EventMesh实例发送去除订阅指令,cluster2的EventMesh收到指令后会本地执行去除订阅 + +**URL**: http://{cluster2 address}/eventmesh/unsubscribe/local + +**请求方式**:POST + +**请求参数:**application/json 格式 + +| 参数名 | 类型 | 是否必填 | 说明 | +| ------------- | ------ | -------- | ---------------------- | +| url | String | 是 | 标识要去除订阅url信息 | +| consumerGroup | String | 是 | 标识要去除的消费组信息 | +| topic | List | 是 | 标识订阅topic信息列表 | + +**请求样例:** + +```json +{ + "consumerGroup": "EventMeshTest-consumerGroup", + "url": "http://127.0.0.1:8088/sub/test", + "topic": [ + "TEST-TOPIC-HTTP-ASYNC" + ] +} +``` diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md new file mode 100644 index 0000000000..9fb85a898b --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md @@ -0,0 +1,332 @@ +# 使用 Webhook 订阅事件 + +## Webhook 使用流程 + +### 第一步:在 eventmesh 配置 Webhook 相关信息并且启动 + +配置说明: + +``` +# 是否启动Webhook admin服务 +eventMesh.webHook.admin.start=true + +# Webhook事件配置存储模式。目前只支持file与nacos +eventMesh.webHook.operationMode=file +# 文件存储模式的文件存放路径,如果写上#{eventMeshHome},在eventMesh根目录 +eventMesh.webHook.fileMode.filePath= #{eventMeshHome}/webhook + +# nacos存储模式,配置命名规则是eventMesh.webHook.nacosMode.{nacos 原生配置key} 具体的配置请看 [nacos github 
api](https://github.com/alibaba/nacos/blob/develop/api/src/main/java/com/alibaba/nacos/api/SystemPropertyKeyConst.java) +## nacos的地址 +eventMesh.webHook.nacosMode.serverAddr=127.0.0.1:8848 + +# Webhook CloudEvent 发送模式。与 eventMesh.connector.plugin.type 配置一样 +eventMesh.webHook.producer.connector=standalone +``` + +### 第二步:添加 Webhook 配置信息 + +配置信息说明: + +```java + /** + * 厂商发送事件时调用的地址。[http or https]://[domain or IP]:[port]/webhook/[callbackPath] + * 在厂商的Webhook配置中需要填写完整url,比如:http://127.0.0.1:10504/webhook/test/event + * callbackPath 唯一 + * manufacturer callback path + */ + private String callbackPath; + + /** + * 厂商的名字 + * manufacturer name ,like github + */ + private String manufacturerName; + + /** + * 厂商的域名 + * manufacturer domain name, like www.github.com + */ + private String manufacturerDomain; + + /** + * 厂商的事件名 + * Webhook event name ,like rep-push + */ + private String manufacturerEventName; + + /** + * + * http header content type + */ + private String contentType = "application/json"; + + /** + * 说明 + * description of this WebHookConfig + */ + private String description; + + /** + * 有一些厂商使用验签方式, + * secret key ,for authentication + */ + private String secret; + + /** + * 有一些厂商使用验签方式,使用账户密码方式 + * userName ,for HTTP authentication + */ + private String userName; + + /** + * 有一些厂商使用验签方式,使用账户密码方式 + * password ,for HTTP authentication + */ + private String password; + + + + /** + * 事件发送到那个 topic + * roll out event name ,like topic to mq + */ + private String cloudEventName; + + /** + * roll out data format -> CloudEvent serialization mode + * If HTTP protocol is used, the request header contentType needs to be marked + */ + private String dataContentType = "application/json"; + + /** + * cloudEvent 事件对象唯一标识符识别方式,uuid 或者 manufacturerEventId(厂商 id) + * id of cloudEvent ,like uuid/manufacturerEventId + */ + private String cloudEventIdGenerateMode; + +``` + +#### 添加接口 + +路径: /webhook/insertWebHookConfig + +方法:POST + +contentType: application/json + +输入参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| -- | -- | -- | -- | -- | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 厂商名 | string | 是 | null | +| manufacturerDomain | 厂商的域名 | string | 是 | null | +| manufacturerEventName | 厂商事件名 | string | 是 | null | +| contentType | http connettype | string | 否 | application/json | +| description | 配置说明 | string | 否 | null | +| secret | 验签密钥 | string | 是 | null | +| userName | 用户名 | string | 否 | null | +| password | 用户密码 | string | 否 | null | +| cloudEventName | 事件名 | string | 是 | null | +| cloudEventIdGenerateMode | cloudEvent 事件对象唯一标识符识别方式,uuid 或者 manufacturerEventId(厂商 id) | string | 否 | manufacturerEventId | + +例子: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github", + "manufacturerDomain":"www.github.com", + "manufacturerEventName":"all", + "cloudEventName":"github-eventmesh", + "secret": "testSecret" +} +``` + +输出参数:1 成功,0 失败 + +#### 通过 callbackPath 查询 WebHookConfig + +路径: /webhook/queryWebHookConfigById + +方法:POST + +contentType: application/json + +输入参数: +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| -- | -- | -- | -- | -- | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 调用地址的提供方 | string | 是 | null | + +例子: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github" +} +``` + +输出参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| -- | -- | -- | -- | -- | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 厂商名 | string | 是 | null | +| manufacturerDomain | 厂商的域名 | string | 是 | null | +| 
manufacturerEventName | 厂商事件名 | string | 是 | null | +| contentType | http connettype | string | 否 | application/json | +| description | 配置说明 | string | 否 | null | +| secret | 验签密钥 | string | 是 | null | +| userName | 用户名 | string | 否 | null | +| password | 用户密码 | string | 否 | null | +| cloudEventName | 事件名() | string | 是 | null | +| cloudEventIdGenerateMode | cloudEvent 事件对象唯一标识符识别方式,uuid 或者 manufacturerEventId(厂商 id) | string | 否 | manufacturerEventId | + +#### 通过 manufacturer 查询 WebHookConfig 列表 + +路径: /webhook/queryWebHookConfigByManufacturer + +方法:POST + +contentType: application/json + +输入参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| -- | -- | -- | -- | -- | +| manufacturerName | 厂商名 | string | 是 | null | +| pageNum | 分页查询中的页数 | string | 是 | null | +| pageSize | 每一页的结果数量 | string | 是 | null | + +例子: + +```json +{ + "manufacturerName":"github", + "pageNum":1, + "pageSize":2 +} +``` + +输出参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| -- | -- | -- | -- | -- | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 厂商名 | string | 是 | null | +| manufacturerDomain | 厂商的域名 | string | 是 | null | +| manufacturerEventName | 厂商事件名 | string | 是 | null | +| contentType | http connettype | string | 否 | application/json | +| description | 配置说明 | string | 否 | null | +| secret | 验签密钥 | string | 是 | null | +| userName | 用户名 | string | 否 | null | +| password | 用户密码 | string | 否 | null | +| cloudEventName | 事件名() | string | 是 | null | +| cloudEventIdGenerateMode | cloudEvent 事件对象唯一标识符识别方式,uuid 或者 manufacturerEventId(厂商 id) | string | 否 | manufacturerEventId | + +#### 更新接口 + +路径: /webhook/updateWebHookConfig + +方法:POST + +contentType: application/json + +输入参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| ------------------------ | ------------------------------------------------------------ | ------ | ---- | ------------------- | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 厂商名 | string | 是 | null | +| manufacturerDomain | 厂商的域名 | string | 是 | null | +| manufacturerEventName | 厂商事件名 | string | 是 | null | +| contentType | http connettype | string | 否 | application/json | +| description | 配置说明 | string | 否 | null | +| secret | 验签密钥 | string | 是 | null | +| userName | 用户名 | string | 否 | null | +| password | 用户密码 | string | 否 | null | +| cloudEventName | 事件名 | string | 是 | null | +| cloudEventIdGenerateMode | cloudEvent 事件对象唯一标识符识别方式,uuid 或者 manufacturerEventId(厂商 id) | string | 否 | manufacturerEventId | + +例子: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github", + "manufacturerDomain":"www.github.com", + "manufacturerEventName":"all", + "cloudEventName":"github-eventmesh", + "secret": "testSecret" +} +``` + +输出参数:1 成功,0 失败 + +#### 删除接口 + +路径: /webhook/deleteWebHookConfig + +方法:POST + +contentType: application/json + +输入参数: + +| 字段 | 说明 | 类型 | 必须 | 默认值 | +| ---------------- | ------------------ | ------ | ---- | ------ | +| callbackPath | 调用地址,唯一地址 | string | 是 | null | +| manufacturerName | 调用地址的提供方 | string | 是 | null | + +例子: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github" +} +``` + +输出参数:1 成功,0 失败 + +### 第三步:查看配置是否成功 + +1. file 存储模式。请到 eventMesh.webHook.fileMode.filePath 目录下查看。文件名为`/`转换为`.`的 callbackPath。 +2. 
nacos 存储模式。请到 eventMesh.webHook.nacosMode.serverAddr 配置的 nacos 服务中查看。 + +### 第四步:配置 cloudevent 的消费者 + + +### 第五步:在厂商配置 Webhook 相关信息 + +> 厂商操作请看[厂商 Webhook 操作说明](#厂商-Webhook-操作说明) + +## 厂商 Webhook 操作说明 + +### github 注册 + +#### 第一步:进入对应的项目 + +#### 第二步:点击setting + +![](/images/design-document/webhook/webhook-github-setting.png) + +#### 第三步:点击Webhooks + +![](/images/design-document/webhook/webhook-github-webhooks.png) + +#### 第四步:点击 Add webhook + +![](/images/design-document/webhook/webhook-github-add.png) + +#### 第五步: 填写webhook信息 + +![](/images/design-document/webhook/webhook-github-info.png) + +Payload URL: EventMesh 服务地址和调用地址,需包含协议头。例如,当调用地址 `callbackPath` 为 `/webhook/github/eventmesh/all` 时,Payload URL 为 `http://www.example.com:10105/webhook/github/eventmesh/all` + +Content Type: http header content type + +Secret: 验签字符串 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md new file mode 100644 index 0000000000..0eb2cc8a20 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md @@ -0,0 +1,259 @@ +# EventMesh 工作流 + +## 业务场景 + +图中你正在构建一个简单的电商订单管理系统,系统能够接收和调配新的订单,调配流程需要处理所有的订单创建,付款处理以及发货处理。 + +为了实现高可用和高性能,你可以使用事件驱动架构(EDA)构建微服务应用去处理商店前端,订单管理,支付处理和发货管理。你可以在云上部署整个系统。要处理高并发,你可以利用消息系统缓冲,并扩展多个微服务实例。架构类似于: + +![Workflow Use Case](/images/design-document/workflow-use-case.jpg) + +当每个微服务都在自己的事件通道上运行时,EventMesh在执行事件编排方面发挥着至关重要的作用。 + +我们使用 [CNCF Serverless工作流](https://serverlessworkflow.io/) 来描述此事件工作流编排。 + +## CNCF Serverless工作流 + +CNCF Serverless工作流定义了一个厂商中立、开源和完全社区驱动的生态系统,用于定义和运行针对Serverless技术领域的基于DSL的工作流。 + +Serverless工作流定义了一种领域特定语言(DSL)来描述有状态和无状态的基于工作流的serverless函数和微服务编排。 + +详见[官方github](https://github.com/serverlessworkflow/specification) + +## EventMesh工作流 + +我们利用Serverless工作流DSL来描述EventMesh工作流。根据其规范,工作流由一系列用于描述控制流逻辑的工作流状态组成。目前,我们仅支持与事件相关的工作流状态。请参见[工作流DSL设计](#workflow-dsl-design-wip)中支持的状态。 + +`工作流状态`可以包含通用的`操作`,或在工作流执行期间应调用的服务/函数。这些`操作`可以引用可复用的`函数`定义(应如何调用这些函数/服务),还可以引用触发基于事件的服务调用的事件,以及要等待的事件,这些事件表示这种基于事件的服务调用完成。 + +在EDA解决方案中,我们通常使用AsyncAPI定义事件驱动的微服务。Serverless工作流“函数”定义支持使用AsyncAPI定义调用语义。有关详细信息,请参见[Using Funtions for AsyncAPI Service](https://github.com/serverlessworkflow/specification/blob/main/specification.md#using-functions-for-async-api-service-invocations)。 + +### AsyncAPI + +AsyncAPI是一项开源计划,旨在改善事件驱动体系结构(EDA)的当前状态。我们的长期目标是让使用EDA和使用REST API一样容易。包括从文档到代码生成、发现到事件管理。现在应用于REST API的大多数流程也适用于事件驱动/异步API。 + +详见[AsyncAPI官网](https://www.asyncapi.com/docs/guides) + +### 工作流示例 + +在本示例中,我们构建了上面订单管理系统的事件驱动工作流。 + +首先,我们需要为我们的微服务应用定义AsyncAPI。 + +- 在线商店应用程序 + +```yaml +asyncapi: 2.2.0 +info: + title: Online Store application + version: '0.1.0' +channels: + store/order: + subscribe: + operationId: newStoreOrder + message: + $ref : '#/components/NewOrder' + +``` + +- 订单服务 + +```yaml +asyncapi: 2.2.0 +info: + title: Order Service + version: '0.1.0' +channels: + order/inbound: + publish: + operationId: sendOrder + message: + $ref : '#/components/Order' + order/outbound: + subscribe: + operationId: processedOrder + message: + $ref : '#/components/Order' +``` + +- 支付服务 + +```yaml +asyncapi: 2.2.0 +info: + title: Payment Service + version: '0.1.0' +channels: + payment/inbound: + publish: + operationId: sendPayment + message: + $ref : '#/components/OrderPayment' + payment/outbound: + subscribe: + operationId: 
paymentReceipt + message: + $ref : '#/components/OrderPayment' +``` + +- 物流服务 + +```yaml +asyncapi: 2.2.0 +info: + title: Shipment Service + version: '0.1.0' +channels: + shipment/inbound: + publish: + operationId: sendShipment + message: + $ref : '#/components/OrderShipment' +``` + +接下来,定义描述订单管理业务逻辑的订单工作流。 + +```yaml +id: storeorderworkflow +version: '1.0' +specVersion: '0.8' +name: Store Order Management Workflow +states: + - name: Receive New Order Event + type: event + onEvents: + - eventRefs: + - NewOrderEvent + actions: + - eventRef: + triggerEventRef: OrderServiceSendEvent + resultEventRef: OrderServiceResultEvent + - eventRef: + triggerEventRef: PaymentServiceSendEvent + resultEventRef: PaymentServiceResultEvent + transition: Check Payment Status + - name: Check Payment Status + type: switch + dataConditions: + - name: Payment Successfull + condition: "${ .payment.status == 'success' }" + transition: Send Order Shipment + - name: Payment Denied + condition: "${ .payment.status == 'denied' }" + end: true + defaultCondition: + end: true + - name: Send Order Shipment + type: operation + actions: + - eventRef: + triggerEventRef: ShipmentServiceSendEvent + end: true +events: + - name: NewOrderEvent + source: file://onlineStoreApp.yaml#newStoreOrder + type: asyncapi + kind: consumed + - name: OrderServiceSendEvent + source: file://orderService.yaml#sendOrder + type: asyncapi + kind: produced + - name: OrderServiceResultEvent + source: file://orderService.yaml#processedOrder + type: asyncapi + kind: consumed + - name: PaymentServiceSendEvent + source: file://paymentService.yaml#sendPayment + type: asyncapi + kind: produced + - name: PaymentServiceResultEvent + source: file://paymentService.yaml#paymentReceipt + type: asyncapi + kind: consumed + - name: ShipmentServiceSendEvent + source: file://shipmentService.yaml#sendShipment + type: asyncapi + kind: produced +``` + +对应的工作流图如下: + +![Workflow Diagram](/images/design-document/workflow-diagram.png) + +## EventMesh工作流引擎 + +在下面的体系结构图中, EventMesh目录, EventMesh工作流引擎 和 EventMesh Runtime在三个不同的处理器中运行。 + +![Workflow Architecture](/images/design-document/workflow-architecture.jpg) + +运行工作流的步骤如下: + +1. 在环境中部署发布者和订阅者应用程序。 + 使用AsyncAPI描述应用程序API,生成asyncAPI yaml。 + 使用AsyncAPI在EventMesh目录中注册发布者和订阅者应用程序。 + +2. 在EventMesh工作流引擎中注册Serverless工作流DSL。 + +3. 工作流引擎从EventMesh目录查询发布服务器和订阅服务器的需要的工作流DSL`函数`。 + +4. 事件驱动App将事件发布到EventMesh Runtime触发工作流。EventMesh工作流引擎发布和订阅事件、编排事件。 + +### EventMesh Catalog 设计 + +EventMesh目录存储发布者、订阅者和通道元数据。由以下模块组成: + +- AsyncAPI解析器 + + 使用AsyncAPI社区提供的SDK ([tool list](https://www.asyncapi.com/docs/community/tooling)), + 解析并验证AsyncAPI yaml输入,并生成AsyncAPI定义。 + +- 发布者, 通道, 订阅者模块 + + 从AsyncAPI定义存储发布者、订阅者和通道信息。 + +### EventMesh工作流引擎设计 + +工作流引擎由以下模块组成: + +- 工作流解析器 + + 使用Serverless Workflow社区提供的SDK([SDKs](https://github.com/serverlessworkflow/specification#sdks)), + 解析和验证工作流DSL输入,并生成工作流定义。 + +- 工作流模块 + + 管理工作流实例的生命周期,从创建、启动、停止到销毁。 + +- 状态模块 + + 管理工作流状态生命周期。支持与事件相关的状态,and the supported state list below is Work-in-Progress. 
+ + | 工作流状态 | 描述 | + | --- | --- | + | Operation | 执行Actions中定义的AsyncAPI函数 | + | Event | 检查定义的事件是否匹配,如果匹配,执行定义的AsyncAPI函数 | + | Switch | 检查事件是否与事件条件匹配,并执行定义的AsyncAPI函数 | + | Parallel | 并行执行定义的AsyncAPI函数 | + | ForEach | 迭代输入集合并执行定义的AsyncAPI函数 | + +- 行为模块 + + 管理函数中的行为。 + +- 函数模块 + + 通过在EventMesh Runtime中创建发布者和/或订阅者来管理AsyncAPI函数,并管理发布者/订阅者生命周期。 + + | AsyncAPI 操作 | EventMesh Runtime | + | --- | --- | + | Publish | Publisher | + | Subscribe | Subscriber | + +- 事件模块 + + 使用工作流DSL中定义的规则管理CloudEvent数据模型,包括事件过滤器、关联和转换。 + +- 重试模块 + + 管理事件发布到EventMesh Runtime的重试逻辑。 + \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md new file mode 100644 index 0000000000..d7c67380ea --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md @@ -0,0 +1,91 @@ +# Knative Connector插件 + +## 准备 +### 创建Knative Source和Sink +我们使用 *cloudevents-player* [Knative服务](https://knative.dev/docs/serving/)作为例子。如果您不知道如何创建 *cloudevents-player* Knative服务作为source和sink,请按照这个[链接](https://knative.dev/docs/getting-started/first-source/#creating-your-first-source)的步骤进行创建。 + +### EventMesh配置文件 +- 将以下配置加入 [eventmesh-starter/build.gradle](https://github.com/apache/eventmesh/blob/master/eventmesh-starter/build.gradle) 文件 +```bash +plugins { + id 'application' +} + +application { + mainClass = project.hasProperty("mainClass") ? project.getProperty("mainClass") : 'org.apache.eventmesh.starter.StartUp' + applicationDefaultJvmArgs = [ + '-Dlog4j.configurationFile=../eventmesh-runtime/conf/log4j2.xml', '-Deventmesh.log.home=../eventmesh-runtime/logs', '-Deventmesh.home=../eventmesh-runtime', '-DconfPath=../eventmesh-runtime/conf' + ] +} + +dependencies { + implementation project(":eventmesh-connector-plugin:eventmesh-connector-knative") + implementation project(":eventmesh-runtime") +} +``` +- 将以下配置加入 [eventmesh-examples/build.gradle](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/build.gradle)文件 +```bash +plugins { + id 'application' +} + +application { + mainClass = project.hasProperty("mainClass") ? 
project.getProperty("mainClass") : 'NULL' +} +``` +- 在 [eventmesh-runtime/conf/eventmesh.properties](https://github.com/apache/eventmesh/blob/master/eventmesh-runtime/conf/eventmesh.properties) 文件中设置```eventMesh.connector.plugin.type=knative```变量 + +## 演示 +### Knative发布事件消息/EventMesh订阅 +#### 步骤1:启动一台EventMesh服务器 +```bash +$ cd eventmesh-starter +$ ../gradlew -PmainClass=org.apache.eventmesh.starter.StartUp run +``` + +#### 步骤2:从Knative Source发布一条消息 +```bash +$ curl -i http://cloudevents-player.default.127.0.0.1.sslip.io -H "Content-Type: application/json" -H "Ce-Id: 123456789" -H "Ce-Specversion: 1.0" -H "Ce-Type: some-type" -H "Ce-Source: command-line" -d '{"msg":"Hello CloudEvents!"}' +``` + +#### 步骤3:从EventMesh订阅 +- 在 [ExampleConstants.java](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/common/ExampleConstants.java) 文件中设置 ```public static final String EVENTMESH_HTTP_ASYNC_TEST_TOPIC = "messages";```变量 +```bash +$ cd eventmesh-examples +$ ../gradlew -PmainClass=org.apache.eventmesh.http.demo.sub.SpringBootDemoApplication run +``` +#### 预期结果 +以下```data```为```Hello CloudEvents!```的消息将会打印在EventMesh服务器的控制台上。 +```bash +2022-09-05 16:37:58,237 INFO [eventMesh-clientManage-] DefaultConsumer(DefaultConsumer.java:60) - \ +[{"event":{"attributes":{"datacontenttype":"application/json","id":"123456789","mediaType":"application/json",\ +"source":"command-line","specversion":"1.0","type":"some-type"},"data":{"msg":"Hello CloudEvents!"},"extensions":{}},\ +"id":"123456789","receivedAt":"2022-09-05T10:37:49.537658+02:00[Europe/Madrid]","type":"RECEIVED"}] +``` + +### EventMessh发布事件消息/Knative订阅 +#### 步骤1:启动一台EventMesh服务器 +```bash +$ cd eventmesh-starter +$ ../gradlew -PmainClass=org.apache.eventmesh.starter.StartUp run +``` + +#### 步骤2:从EventMesh发布一条消息 +我们用Knative Connector的测试程序来演示这个功能。 +```bash +$ cd eventmesh-connector-plugin/eventmesh-connector-knative +$ ../../gradlew clean test --tests KnativeProducerImplTest.testPublish +``` + +#### 步骤3:从Knative订阅 +```bash +$ curl http://cloudevents-player.default.127.0.0.1.sslip.io/messages +``` + +#### 预期结果 +以下```data```为```Hello Knative from EventMesh!```的消息将会打印在EventMesh服务器的控制台上。 +```bash +2022-09-05 16:52:41,633 INFO [eventMesh-clientManage-] DefaultConsumer(DefaultConsumer.java:60) - \ +[{"event":{"attributes":{"datacontenttype":"application/json","id":"1234","mediaType":"application/json",\ +"source":"java-client","specversion":"1.0","type":"some-type"},"data":{"msg":["Hello Knative from EventMesh!"]},"extensions":{}},"id":"1234","receivedAt":"2022-09-05T10:52:32.999273+02:00[Europe/Madrid]","type":"RECEIVED"}] +``` \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json new file mode 100644 index 0000000000..7706f45770 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Event Handling and Integration", + "collapsed": false +} diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-spi.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-spi.md new file mode 100644 index 0000000000..5fb301f408 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/01-spi.md @@ -0,0 
+1,111 @@ +# EventMesh SPI + +## 介绍 + +为了提高扩展性,EventMesh通过引入SPI(Service Provider Interface)机制,能够在运行时自动寻找扩展接口的具体实现类,动态加载。 +在EventMesh中,一切扩展点都利用SPI采用插件的实现方式,用户可以通过实现扩展接口,开发自定义的插件,在运行时通过简单的配置,声明式的选择所需要运行的插件。 + +## eventmesh-spi模块 + +SPI相关的代码位于eventmesh-spi模块下,其中主要包括EventMeshExtensionFactory, EventMeshSPI, ExtensionClassLoader这三个类。 + +### EventMeshSPI + +EventMeshSPI是SPI注解,所有需要采用SPI实现扩展的接口都需要使用@EventMeshSPI注解标记。 + +```java +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.TYPE}) +public @interface EventMeshSPI { + + /** + * If true, the spi instance is singleton + */ + boolean isSingleton() default false; + +} +``` + +这么做的原因是可以通过注解的方式声明接口为SPI扩展接口,提高代码的可读性。同时,@EventMeshSPI注解中包含一个isSingleton属性, +用来声明该扩展接口是否采用单例的实现方式,如果为true,那么该接口的实现类将会使用单例的实现方式,在一个JVM进程中全局唯一。 + +### EventMeshExtensionFactory + +EventMeshExtensionFactory是SPI实现类的获取工厂,包含一个静态方法`getExtension(Class extensionType, String extensionName)`, +接收扩展接口字节码对象和扩展实例名称,用于获取扩展接口的具体实现类。 + +```java +public enum EventMeshExtensionFactory { + ; + /** + * @param extensionType extension plugin class type + * @param extensionName extension instance name + * @param the type of the plugin + * @return plugin instance + */ + public static T getExtension(Class extensionType, String extensionName) { + } +} +``` + +所有需要获取扩展实现的地方都应该通过EventMeshExtensionFactory获取。 + +### ExtensionClassLoader + +ExtensionClassLoader是扩展接口实现类的加载接口,包含两个实现子类MetaInfExtensionClassLoader和JarExtensionClassLoader。 + +```java +/** + * Load extension class + *
+ * <ul>
+ *     <li>{@link MetaInfExtensionClassLoader}</li>
+ *     <li>{@link JarExtensionClassLoader}</li>
+ * </ul>
+ */ +public interface ExtensionClassLoader { + + /** + * load + * + * @param extensionType extension type class + * @param extension type + * @return extension instance name to extension instance class + */ + Map> loadExtensionClass(Class extensionType); +} +``` + +MetaInfExtensionClassLoader用于从classPath直接加载实现类,JarExtensionClassLoader用于从配置目录下通过加载Jar包的方式加载实现类,未来可能还会提供通过从Maven仓库下加载实现类。 + +## SPI使用示例 + +下面以eventmesh-connector-plugin为例,介绍SPI具体的使用过程。 + +首先定义一个eventmesh-connector-api模块,并且定义扩展接口MeshMQProducer。在MeshMQProducer接口上使用@EventMeshSPI注解进行声明,表明该接口是一个SPI扩展接口 + +```java +@EventMeshSPI(isSingleton = false) +public interface MeshMQProducer extends Producer { +... +} +``` + +eventmesh-connector-rocketmq模块中包含采用rocketmq的具体实现方式RocketMQProducerImpl。 + +```java +public class RocketMQProducerImpl implements MeshMQProducer { +... +} +``` + +同时,还需要在eventmesh-connector-rocketmq模块中resource/META-INF/eventmesh目录下创建文件名为SPI接口全限定名的文件 +org.apache.eventmesh.api.producer.Producer + +文件内容为扩展实例名和对应的实例全类名 + +```properties +rocketmq=org.apache.eventmesh.connector.rocketmq.producer.RocketMQProducerImpl +``` + +至此,一个SPI扩展模块就完成了。在使用的时候只需要通过EventMeshExtensionFactory.getExtension(MeshMQProducer.class, “rocketmq”)就可以获取RocketMQProducerImpl实现类。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md new file mode 100644 index 0000000000..8120cdbcb3 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md @@ -0,0 +1,47 @@ +# EventMesh 指标(OpenTelemetry 和 Prometheus) + +## 介绍 + +[EventMesh](https://github.com/apache/eventmesh) 是一个动态的云原生事件基础设施。 + +## OpenTelemetry 概述 + +OpenTelemetry 是工具、API 和 SDK 的集合。您可以使用它来检测、生成、收集和导出遥测数据(指标、日志和跟踪)以进行分析,以便了解您的软件的性能和行为。 + +## 概述 Prometheus + +使用领先的开源监控解决方案为您的指标和警报提供支持。 + +- 尺寸数据 +- 强大的查询 +- 伟大的可视化 +- 高效存储 +- 操作简单 +- 精准预警 +- 许多客户端库 +- 许多集成 + +## 要求 + +### 功能要求 + +| Requirement ID | Requirement Description | Comments | +| :------------- | ------------------------------------------------------------ | ------------- | +| F-1 | EventMesh users should be able to observe HTTP metrics from Prometheus | Functionality | +| F-2 | EventMesh users should be able to observe TCP metrics from Prometheus | Functionality | + +## 设计 细节 + +使用由提供的儀表儀器 OpenTelemetry 觀察指標存在於 EventMesh 然後導出到 Prometheus. + +1、初始化儀表儀器 + +2、設置 Prometheus 服務器 + +3、创建了不同的指标观察者 + +## 附录 + +### 参考资料 + + diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/02-tracing.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/02-tracing.md new file mode 100644 index 0000000000..8a3addea6d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/02-tracing.md @@ -0,0 +1,87 @@ +# 分布式追踪 + +## OpenTelemetry概述 + +OpenTelemetry是一组API和SDK的工具,您可以使用它来仪器化、生成、收集和导出遥测数据(指标、日志和追踪),以便进行分析,以了解您的软件性能和行为。 + +## 需求 + +- 设置追踪器 +- 不同的导出器 +- 在服务器中开始和结束跨度 + +## 设计细节 + +- 跨度处理器:BatchSpanProcessor + +- 导出器:默认为日志,可以从属性中更改 + +```java +// Configure the batch spans processor. This span processor exports span in batches. +BatchSpanProcessor batchSpansProcessor = + BatchSpanProcessor.builder(exporter) + .setMaxExportBatchSize(512) // set the maximum batch size to use + .setMaxQueueSize(2048) // set the queue size. 
This must be >= the export batch size + .setExporterTimeout( + 30, TimeUnit.SECONDS) // set the max amount of time an export can run before getting + // interrupted + .setScheduleDelay(5, TimeUnit.SECONDS) // set time between two different exports + .build(); +OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder().addSpanProcessor(batchSpansProcessor).build()) + .build(); +``` + +1. 当使用`EventMeshHTTPServer`类的`init()`方法时,类`AbstractHTTPServer`将获取跟踪器。 + +```java +super.openTelemetryTraceFactory = new OpenTelemetryTraceFactory(eventMeshHttpConfiguration); +super.tracer = openTelemetryTraceFactory.getTracer(this.getClass().toString()); +super.textMapPropagator = openTelemetryTraceFactory.getTextMapPropagator(); +``` + +2. 然后,在类`AbstractHTTPServer`中的跟踪将起作用。 + +## 问题 + +### 如何在类“OpenTelemetryTraceFactory”中设置不同的导出器?(已解决) + +在从属性中获取导出器类型之后,如何处理它。 + +`logExporter`只需要创建新实例即可。 + +但是,“zipkinExporter”需要新建并使用“getZipkinExporter()”方法。 + +## 解决方案 + +### 不同导出器的解决方案 + +使用反射获取导出器。 + +首先,不同的导出器必须实现接口“EventMeshExporter”。 + +然后,我们从配置中获取导出器名称,并反射到该类。 + +```java +//different spanExporter +String exporterName = configuration.eventMeshTraceExporterType; +//use reflection to get spanExporter +String className = String.format("org.apache.eventmesh.runtime.exporter.%sExporter",exporterName); +EventMeshExporter eventMeshExporter = (EventMeshExporter) Class.forName(className).newInstance(); +spanExporter = eventMeshExporter.getSpanExporter(configuration); +``` + +另外,这将包含try catch。如果无法成功获取指定的导出器,则将使用默认的日志导出器。 + +#### 不同导出器的改进 + +SPI(待完成) + +## 附录 + +### 参考资料 + +- + +- diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/03-prometheus.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/03-prometheus.md new file mode 100644 index 0000000000..0d796e7a67 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/03-prometheus.md @@ -0,0 +1,36 @@ +# 通过 Prometheus 观察 Metrics + +## 下载 Prometheus + +官网:https://prometheus.io/ + +本地下载Prometheus:https://prometheus.io/download/ + +选择自己电脑对应的版本下载并解压缩 + +### 2、在prometheus.yml中添加配置 + +如果你是Prometheus的新手,可以直接复制eventmesh-runtime/conf/prometheus.yml替换 + +如果你十分了解Prometheus,可以自行配置,eventmesh默认的导出的端口为19090。 + +ps:如果需要更换端口的话,请修改eventmesh-runtime/conf/eventmesh.properties中的 + +```properties +#prometheusPort +eventMesh.metrics.prometheus.port=19090 +``` + +## 运行 Prometheus 和 EventMesh + +双击Prometheus.exe运行 + +运行eventmesh-starter(参考[eventmesh-runtime-quickstart](../../instruction/03-runtime.md)) + +运行eventmesh-example(参考[eventmesh-sdk-java-quickstart](../../instruction/05-demo.md)) + +打开浏览器访问:http://localhost:9090/ + +### 输入想观察的 Metrics + +输入’**eventmesh_**‘ 就会出现相关的指标的提示 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/04-zipkin.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/04-zipkin.md new file mode 100644 index 0000000000..08f66eb3a5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/04-zipkin.md @@ -0,0 +1,49 @@ +# 通过 Zipkin 观察 Trace + +### 1、下载和运行Zipkin + +请参考https://zipkin.io/pages/quickstart.html + + + +### 2、运行eventmesh + +运行eventmesh-starter(参考[eventmesh-runtime-quickstart](../../instruction/03-runtime.md)) + +运行eventmesh-example(参考[eventmesh-sdk-java-quickstart](../../instruction/05-demo.md)) + + + +### 3、相关的设置 + +eventmesh-runtime/conf/eventmesh.properties中: + 
+默认的exporter是log,需要手动改成Zipkin + +```properties +#trace exporter +eventmesh.trace.exporter.type=Zipkin +``` +下面是关于Zipkin的各种配置 +```properties +#set the maximum batch size to use +eventmesh.trace.exporter.max.export.size=512 +#set the queue size. This must be >= the export batch size +eventmesh.trace.exporter.max.queue.size=2048 +#set the max amount of time an export can run before getting(TimeUnit=SECONDS) +eventmesh.trace.exporter.export.timeout=30 +#set time between two different exports(TimeUnit=SECONDS) +eventmesh.trace.exporter.export.interval=5 + +#zipkin +eventmesh.trace.export.zipkin.ip=localhost +eventmesh.trace.export.zipkin.port=9411 +``` + +以上都是相关的配置,如果你十分熟悉Zipkin的话可以自行修改。 + + + +### 4、观察 + +浏览器打开: **localhost:9411** diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/05-jaeger.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/05-jaeger.md new file mode 100644 index 0000000000..3e04ac41fe --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/05-jaeger.md @@ -0,0 +1,44 @@ +# 通过 Jaeger 观察 Trace + +## Jaeger + +[Jaeger](https://www.jaegertracing.io/) 是 [Uber](https://uber.github.io/) 开发的分布式跟踪系统,现已成为 [CNCF](https://cncf.io/) 开源项目,其灵感来源于 Google 的 [Dapper](https://research.google.com/pubs/pub36356.html) 和 Twitter 的 [Zipkin](https://zipkin.io/),用于监控基于微服务的分布式系统。 + +Jaeger 的安装可以参考[官方文档](https://www.jaegertracing.io/docs/latest/getting-started/),推荐使用官方的 Docker 镜像 `jaegertracing/all-in-one` 来快速搭建环境进行测试。 + +## 配置 + +为了启用 EventMesh Runtime 的 trace exporter,请将 `conf/eventmesh.properties` 文件中的 `eventMesh.server.trace.enabled` 字段设置为 true。 + +```conf +# Trace plugin +eventMesh.server.trace.enabled=true +eventMesh.trace.plugin=jaeger +``` + +为了定义 trace exporter 的行为,如超时时间或导出间隔,请编辑 `exporter.properties` 文件。 + +```conf +# Set the maximum batch size to use +eventmesh.trace.max.export.size=512 +# Set the queue size. 
This must be >= the export batch size +eventmesh.trace.max.queue.size=2048 +# Set the max amount of time an export can run before getting(TimeUnit=SECONDS) +eventmesh.trace.export.timeout=30 +# Set time between two different exports (TimeUnit=SECONDS) +eventmesh.trace.export.interval=5 +``` + +为了将导出的 trace 数据发送到 Jaeger,请编辑 `conf/jaeger.properties` 文件中的 `eventmesh.trace.jaeger.ip` 和 `eventmesh.trace.jaeger.port` 字段,来匹配 Jaeger 服务器的配置。 + +```conf +# Jaeger's IP and Port +eventmesh.trace.jaeger.ip=localhost +eventmesh.trace.jaeger.port=14250 +``` + +## 从 Zipkin 迁移 + +Jaeger 采集器服务暴露了与 Zipkin 兼容的 REST API,`/api/v1/spans` 可以接收 Thrift 和 JSON,`/api/v2/spans` 可以接收 JSON 和 Proto。 + +因此你也可以使用 `eventmesh-trace-zipkin` 插件来通过 Jaeger 观察 trace,具体配置细节请参考 `eventmesh-trace-zipkin` 的文档。默认情况下这个特性在 Jaeger 中是关闭的,可以通过 `--collector.zipkin.host-port=:9411` 启用。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/_category_.json new file mode 100644 index 0000000000..9a251b1a4d --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-observability/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "Observability", + "collapsed": false +} diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-stream.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-stream.md new file mode 100644 index 0000000000..20e6412f2a --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/02-stream.md @@ -0,0 +1,115 @@ +# EventMesh Stream + +## 事件流概述 + +事件流是发布/订阅架构模式的一种实现,它包括以下几个部分 + +- 消息或事件:状态变化。 + +- 主题:消息中间件代理中的分区。 + +- 消费者:可以从代理主题订阅读取事件。 + +- 生产者:生成事件 + +事件流是事件的连续流动,为了维持事件之间的秩序,事件流应该以特定的方式从生产者流向消费者。 + +## 要求 + +### 功能要求 + +| 需求编号 | 需求描述 | 注释 | +| -------------- | ----------------------- | -------- | +| F-1 | EventMesh用户应该能够在 EventMesh 中实现事件流功能 | 功能性 | +| F-2 | EventMesh用户可以为路由、过滤、转换等应用动态用户特定逻辑 | 功能性 | + +## 设计细节 + +我们引入了 EventMesh Stream 组件,允许我们在 Apache Camel 中本地使用来自 Spring Cloud Stream 的编程模型和绑定器抽象。 + +[Spring-Cloud-Stream](https://spring.io/projects/spring-cloud-stream) Spring Cloud Stream是一个用于构建 +与共享消息传递系统连接的、高度可扩展的事件驱动微服务框架。 + +[Apache Camel](https://camel.apache.org/) Camel 是一个开源集成框架,使您能够快速轻松地集成各种消费或生产数据的系统。 + +## 架构 + +![Stream Architecture](/images/design-document/stream-architecture.png) + +## 设计 + +### EventMesh-Stream 组件 + +- Event(事件) +- Event Channel(事件通道) +- Event EndPoint(事件端点) +- Event Pipes & Filters(事件管道和过滤器) +- Event Routes(事件路由器) +- Event Converter(事件转换器) + +#### Event(事件) + +> 事件是系统中传输数据的最小单位。它的结构分为标题、正文和附件。 + +#### Event Channel(事件通道) + +> 事件通道是系统中的逻辑通道,我们是通过 Spring Cloud Stream 编程模型实现的,它具有围绕消息通道的抽象功能(截至目前用的是 Spring `MessageChannel`)。 + +#### Event EndPoint(事件端点) + +> 事件端点是应用程序和消息传递系统之间的接口。我们可以定义两种类型的端点 + +- 消费者端点 - 出现在路由开始并从传入通道读取传入事件。 +- 生产者端点 - 出现在路由的末尾并将传入事件写入传出通道。 + +#### Event Pipes & Filters(事件管道和过滤器) + +> 我们可以通过创建过滤器链(Apache Camel `Processor`)来构建路由,其中一个过滤器的输出被用于管道中下一个过滤器的输入。管道的主要优点是可以创建复杂的事件处理逻辑。 + +#### Event Routes(事件路由器) + +> 事件路由器是消费者的一种过滤器,并根据决策标准将它们重定向到适当的目标端点。 + +#### Event Converter(事件转换器) + +> 事件转换器用于修改事件内容,将其转换为不同的格式(换而言之 cloudevents -> Event (Camel) -> Binder Message(Spring Message),反之亦然)。 + +## EventMesh-Stream 组件接口 + +### Component(组件) + +Component 接口是主要的入口点,您可以使用 Component 对象作为工厂来创建 EndPoint 对象。 + +![Stream Component Interface](/images/design-document/stream-component-interface.png) + +### EndPoint(端点) + +EndPoint 
作为创建消费者、生产者和事件对象的工厂。 + +- `createConsumer()` — 创建消费者端点,该端点表示路由开始的源端点。 +- `createProducer()` — 创建生产者端点,该端点表示路由末端的目标端点。 + +![Stream Component Routes](/images/design-document/stream-component-routes.png) + +#### Producer(生产者) + +用户可以创建以下类型的生产者 +> 同步生产者:处理线程阻塞,直到生产者完成事件处理。 + +![Stream Sync Producer](/images/design-document/stream-sync-producer.png) + +未来将会实现的生产者类型: + +- 异步生产者 - 生产者在子线程中处理事件。 + +#### Consumer(消费者) + +用户可以创建以下类型的消费者 +> 事件驱动的消费者:当消息绑定器调用消费者中的方法时,开始处理传入请求。 + +![Stream Event-Driven Consumer](/images/design-document/stream-event-driven-consumer.png) + +未来将会实现的消费者类型: + +- 定时轮训消费者 +- 自定义轮询消费者 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/03-schema-registry.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/03-schema-registry.md new file mode 100644 index 0000000000..bfe925c476 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/03-schema-registry.md @@ -0,0 +1,135 @@ +# EventMesh 模式注册中心 (OpenSchema) + +## Schema 和 Schema 注册概述 + +### Schema + +模式代表对序列化实例(字符串/流/s文件/……)的描述,具有两个属性。首先,它也是序列化类型的格式。其次,它定义了这些序列化实例应满足的要求。 + +除了描述序列化实例,模式还可用于验证实例是否合法。因为它定义了序列化实例的 ```type```(和其他属性)以及内部的键。以 JSON 模式为例,它不仅可用于描述 JSON 字符串,还可用于验证字符串是否满足模式[[1]](#References).中定义的属性。 + +常见的模式有 JSON 模式、Protobuf 模式和 Avro 模式。 + +### Schema 注册中心 + +模式注册中心是一个提供 RESTful 接口的服务器。它可以接收和存储来自客户端的模式,并为其他客户端从中检索模式提供接口。 + +它可用于验证过程和(去)序列化过程 + +### 不同项目中 Schema 注册表的比较 + +项目 | 应用程序 +:---: | :--- +EMQ[[2]](#References) | 主要用于(去)序列化过程。使用 "模式注册表 "和 "规则匹配 "将信息从一种序列化格式传输到另一种序列化格式。serialization format to another. +Pulsar[[3]](#References) | 主要用于验证过程。使用 "模式注册表 "验证报文。 +Confluentinc[[4]](#References) | 在验证和(去)序列化过程中。 + +## OpenSchema 概览 + +OpenSchema[[5]](#References) 提出了在越来越多的现代云原生应用程序中交换消息和事件时的数据模式规范。它从三个方面(主题/模式/兼容性)设计了用于存储和检索 Avro、JSON Schema 和 Protobuf3 模式的 RESTful 接口。 + + +## 需求(目标) + +| 需求 ID | 需求描述 | 评价 | +| :------------- | ------------------------------------------------------------ | ------------- | +| F-1 | 在传输过程中,信息无需包含模式信息,从而提高效率。| 功能性 | +| F-2 | 可根据模式验证来自生产者的信息内容是否正确序列化。 | 功能性 | + +## 详细设计 + +### 架构设计 + +![OpenSchema](/images/design-document/schema-registry-architecture.png) + +### Schema Registry 下的信息传输过程 + +![Process](/images/design-document/schema-registry-process.jpg) + +信息传输的高级流程包括以下 10 个步骤: + +- 1: 消费者从 EventMesh 订阅 "主题 "信息。 +- 2: 生产者向 EventMesh 注册模式。 +- 3: EventMesh 向模式注册中心注册模式。 +- 4: 模式注册中心返回新创建模式的 ID;EventMesh 缓存该 ID 和模式。 +- 5: EventMesh 将模式的 ID 返回给生产者。 +- 6: Producer 在信息前面修补 ID,并将信息发送到 EventMesh。 +- 7: EventMesh 验证入口端口中的报文并将其发送到 EventStore;EventMesh 从 EventStore 中检索报文。 +- 8: EventMesh 解封 id 并将其发送至模式注册表(如果本地缓存中不存在此类``)。 +- 9: Schema Registry 返回模式,EventMesh 对其进行缓存。 +- 10: EventMesh 在消息前修补模式,并将其推送给消费者。 + +## 当前进度 + +### 状态 + +**当前状态**: 开发中 + +**讨论issue**: ISSUE #339 + +### 修改建议 + +该提案有两个方面。 + +首先是一个独立的开放模式注册表,其中包括模式的存储和兼容性检查。 +该提案正在制定中。 + +其次是 Eventmesh 中 Open Schema 的集成,其中包括架构验证。 该提案有待制定。 + +对于第一个提案,一些进展情况如下。 + +#### 状态代码和异常代码 + +No. | 状态码 | 异常码 | 描述 | 状态 +--- | :---: | :---: | :---: | :---: +1 | 401 | 40101 | 未授权异常 | ✔ +2 | 404 | 40401 | Schema 不存在异常 | ✔ +3 | ^ | 40402 | Subject 不存在异常 | ✔ +4 | ^ | 40403 | 版本不存在异常 | ✔ +5 | 409 | 40901 | 兼容性异常 | ✔ +6 | 422 | 42201 | Schema 格式异常 | ✔ +7 | ^ | 42202 | Subject 格式异常 | ✔ +8 | ^ | 42203 | 版本格式异常 | ✔ +9 | ^ | 42204 | 兼容性格式异常 | ✔ +10 | 500 | 50001 | 存储服务异常 | ✔ +11 | ^ | 50002 | 超时异常 | ✔ + +#### API 开发状态 + +No. 
| 类型 | URL | 响应 | 异常 | 代码是否完成 | 测试是否完成 +--- | --- | --- | --- | --- | --- | --- +1 | GET | /schemas/ids/{string: id} | `Schema.class` | 40101\40401\50001 | ✔ | ❌ +2 | GET | /schemas/ids/{string: id}/subjects | `SubjectAndVersion.class` | 40101\40401\50001 | ✔ | ❌ +3 | GET | /subjects | `List\` | 40101\50001 | ✔ | ❌ +4 | GET | /subjects/{string: subject}/versions | `List\` | 40101\40402\50001 | ✔ | ❌ +5 | DELETE | /subjects/(string: subject) | `List\` | 40101\40402\50001 | ✔ | ❌ +6 | GET | /subjects/(string: subject) | `Subject.class` | 40101\40402\50001 | ✔ | ❌ +7 | GET | /subjects/(string: subject)/versions/(version: version)/schema | `SubjectWithSchema.class` | 40101\40402\40403\50001 | ✔ | ❌ +8 | POST | /subjects/(string: subject)/versions | `SchemaIdResponse.class` | 40101\40901\42201\50001\50002 | - | ❌ +9 | POST | /subjects/(string: subject)/ | `Subject.class` | 40101\40901\42202\50001\50002 | ✔ | ❌ +10 | DELETE | /subjects/(string: subject)/versions/(version: version) | `int` | 40101\40402\40403\40901\50001| - | ❌ +11 | POST | /compatibility/subjects/(string: subject)/versions/(version: version) | `CompatibilityResultResponse.class` | 40101\40402\40403\42201\42203\50001| - | ❌ +12 | GET | /compatibility/(string: subject) | `Compatibility.class` | 40101\40402\50001 | ✔ | ❌ +13 | PUT | /compatibility/(string: subject) | `Compatibility.class` | 40101\40402\40901\42204\50001 | - | ❌ + +#### 项目总体结构 + +```SchemaController.java```+```SchemaService.java``` : ```OpenSchema 7.1.1~7.1.2 (API 1~2)``` + +```SubjectController.java```+```SubjectService.java``` : ```OpenSchema 7.2.1~7.2.8 (API 3~10)``` + +```CompatibilityController.java```+```CompatibilityService.java``` : ```OpenSchema 7.3.1~7.3.3 (API 11~13)``` + ```Check for Compatibility``` + +![项目结构](/images/design-document/schema-registry-project-structure.png) + +## 参考文献 + +[1] [Schema 验证器 (github.com)](https://github.com/search?q=schema+validator) + +[2] [EMQ: Schema Registry](https://www.jianshu.com/p/33e0655c642b) + +[3] [Pulsar: Schema Registry](https://mp.weixin.qq.com/s/PaB66-Si00cX80py5ig5Mw) + +[4] [confluentinc/schema-registry](https://github.com/confluentinc/schema-registry) + +[5] [openmessaging/openschema](https://github.com/openmessaging/openschema) diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/_category_.json new file mode 100644 index 0000000000..2a0150571e --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/design-document/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 6, + "label": "开发文档", + "collapsed": false +} diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/00-eclipse.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/00-eclipse.md new file mode 100644 index 0000000000..a69af82c36 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/00-eclipse.md @@ -0,0 +1,38 @@ +# 导入 Eclipse 快速入门说明 + +我们推荐使用 `Intellij IDEA` 进行开发,如果您希望使用 `Eclipse`,可以参考下面的步骤导入项目。 + +### 依赖 + +``` +64位JDK 1.8+; +Gradle至少为7.0, 推荐 7.0.* +eclipse 已安装gradle插件或者eclipse自带gradle插件 +``` + +### 下载源码 + +git init + +git clone + +### 项目编译eclipse环境 + +打开命令行终端,运行gradlew cleanEclipse eclipse + +### 配置修改 + +修改工程名称和settings.gradle 配置文件参数rootProject.name 参数一致 + +### 修改eclipse.init配置文件,配置lombok以1.18.8版本为例 + +-javaagent:lombok-1.18.8.jar +-XBootclasspath/a:lombok-1.18.8.jar + +### 202106版本eclipse,eclipse.init增加配置参数 + +--illegal-access=permit 
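下面给出一份整合上述参数后的 eclipse.ini(即 Eclipse 安装目录下的启动配置文件)片段示意,仅供参考:示例假设 lombok-1.18.8.jar 与 eclipse.ini 位于同一目录,实际路径请按本机环境调整;JVM 参数的标准写法为小写的 -Xbootclasspath/a:,且这些参数通常追加在 -vmargs 段之后。

```
-vmargs
...(原有 JVM 参数)
-javaagent:lombok-1.18.8.jar
-Xbootclasspath/a:lombok-1.18.8.jar
--illegal-access=permit
```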
+ +### 导入gradle + +打开eclipse,导入gradle项目到IDE里 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/01-store.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/01-store.md new file mode 100644 index 0000000000..79ee18cc86 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/01-store.md @@ -0,0 +1,49 @@ +# 部署 EventMesh 的事件存储 + +## 1 依赖 + +``` +建议使用64位操作系统,建议使用Linux/Unix; +64位JDK 1.8+; +Gradle至少为7.0, 推荐7.0.* +4g+可用磁盘用于eventmesh-store服务器 +eventmesh在非standalone模式下,依赖RocketMQ作为存储层;若采用standalone模式,则可跳过该步,直接进行runtime的部署 +``` + +### 2 下载 + +从[RocketMQ官方网站](https://rocketmq.apache.org/download/) 下载Binary代码(推荐使用4.9.*版本),这里以4.9.4为例: + +``` +unzip rocketmq-all-4.9.4-bin-release.zip +cd rocketmq-all-4.9.4-bin-release/ +``` + +![rocketmq_1](/images/install/rocketmq_1.png) + +### 3 启动 + +启动Name Server: + +``` +nohup sh bin/mqnamesrv & tail -f ~/logs/rocketmqlogs/namesrv.log +``` + +如果看到The Name Server boot success...,则说明Name Server启动成功。 + +![rocketmq_2](/images/install/rocketmq_2.png) + +启动Broker: + +``` +nohup sh bin/mqbroker -n localhost:9876 & +tail -f ~/logs/rocketmqlogs/broker.log +``` + +如果看到The broker boot success...,则说明Broker启动成功。 + +至此eventmesh-store的部署已完成,请转至下一步完成 [eventmesh-runtime](https://github.com/apache/incubator-eventmesh/blob/master/docs/zh/instruction/03-runtime.md) 的部署 + + +## 参考 +关于RocketMQ的更多资料,请参考 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/02-store-with-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/02-store-with-docker.md new file mode 100644 index 0000000000..beea3a5e5a --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/02-store-with-docker.md @@ -0,0 +1,71 @@ +# 部署 EventMesh 的事件存储(使用 Docker) + +eventmesh在非standalone模式下,依赖RocketMQ作为存储层;若采用standalone模式,则可跳过该步,直接进行runtime的部署。 + +## 1. 依赖 + +``` +建议使用64位操作系统,建议使用Linux/Unix; +64位JDK 1.8+; +Gradle至少为7.0, 推荐7.0.*; +4g+可用磁盘用于eventmesh-store服务器; +``` + +## 2. 
Docker部署 + +### 2.1 拉取镜像 +在命令行输入如下命令直接从 docker hub 上获取 RocketMQ 镜像: + +```shell +#获取 rocketmq 镜像 +sudo docker pull apache/rocketmq:4.9.4 +``` + +您可以使用以下命令列出并查看本地已有的镜像: + +```shell +sudo docker images +``` + +如果终端显示如下所示的镜像信息,则说明 RocketMQ 镜像已经成功下载到本地。 + +```shell +REPOSITORY TAG IMAGE ID CREATED SIZE +apache/rocketmq 4.9.4 a2a50ca263c3 13 months ago 548MB +``` + +![rocketmq_docker_1](/images/install/rocketmq_docker_1.png) + +### 2.2 运行容器 + +运行namerv容器和broker容器: + +```shell +sudo docker run -d -p 9876:9876 \ + -v `pwd`/data/namesrv/logs:/root/logs \ + -v `pwd`/data/namesrv/store:/root/store \ + --name rmqnamesrv \ + apache/rocketmq:4.9.4 \ + sh mqnamesrv +``` + +运行broker容器: + +```shell +sudo docker run -d -p 10911:10911 -p 10909:10909 \ + -v `pwd`/data/broker/logs:/root/logs \ + -v `pwd`/data/broker/store:/root/store \ + --name rmqbroker \ + --link rmqnamesrv:namesrv \ + -e "NAMESRV_ADDR=namesrv:9876" \ + apache/rocketmq:4.9.4 \ + sh mqbroker -c ../conf/broker.conf + +``` + +![rocketmq_docker_2](/images/install/rocketmq_docker_2.png) + +请注意 **rocketmq-broker ip** 是 **pod ip**, 如果你想修改这个ip, 可以通过挂载容器中 **broker.conf** 文件的方式并修改文件中的 **brokerIP1** 配置项为自定义值。 + + +至此eventmesh-store的部署已完成,请转至下一步完成 [eventmesh-runtime](04-runtime-with-docker.md) 的部署。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/03-runtime.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/03-runtime.md new file mode 100644 index 0000000000..02756b4c01 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/03-runtime.md @@ -0,0 +1,208 @@ +# Eventmesh-runtime 快速入门说明 + +EventMesh Runtime 是 EventMesh 集群中有状态的 Mesh 节点,负责 Source Connector 与 Sink Connector 之间的事件传输,并可以使用 EventMesh Storage 作为事件的存储队列。 + +## 1 本地构建运行 + +### 1.1 源码启动 + +#### 1.1.1 依赖 + +``` +建议使用64位操作系统,建议使用Linux / Unix; +64位JDK 1.8+; +Gradle至少为7.0, 推荐 7.0.* +``` + +#### 1.1.2 下载源码 + +从 [EventMesh download](https://eventmesh.apache.org/download) 下载并提取最新版本的源代码。比如目前最新版,您将获得`apache-eventmesh-1.9.0-source.tar.gz`。 + +#### 1.1.3 本地启动 + +**1.1.3.1 项目结构说明:** + +- eventmesh-common : eventmesh公共类与方法模块 +- eventmesh-connector-api : eventmesh connector插件接口定义模块 +- eventmesh-connector-plugin : eventmesh connector插件模块 +- eventmesh-runtime : eventmesh运行时模块 +- eventmesh-sdk-java : eventmesh java客户端sdk +- eventmesh-starter : eventmesh本地启动运行项目入口 +- eventmesh-spi : eventmesh SPI加载模块 + +> 注:插件模块遵循 eventmesh 定义的SPI规范, 自定义的SPI接口需要使用注解 @EventMeshSPI 标识. +> 插件实例需要在对应模块中的 /main/resources/META-INF/eventmesh 下配置相关接口与实现类的映射文件,文件名为SPI接口全类名. 
+> 文件内容为插件实例名到插件实例的映射, 具体可以参考 eventmesh-connector-rocketmq 插件模块 + +**1.1.3.2 插件说明** + +***1.1.3.2.1 安装插件*** + +有两种方式安装插件 + +- classpath加载:本地开发可以通过在 eventmesh-starter 模块 build.gradle 中进行声明,例如声明使用 rocketmq 插件 + +```gradle + implementation project(":eventmesh-connectors:eventmesh-connector-rocketmq") +``` + +- 文件加载:通过将插件安装到插件目录,EventMesh 在运行时会根据条件自动加载插件目录下的插件,可以通过执行以下命令安装插件 + +```shell +./gradlew clean jar dist && ./gradlew installPlugin +``` + +***1.1.3.2.2 使用插件*** + +EventMesh 会默认加载 dist/plugin 目录下的插件,可以通过`-DeventMeshPluginDir=your_plugin_directory`来改变插件目录。运行时需要使用的插件实例可以在 +`confPath`目录下面的`eventmesh.properties`中进行配置。例如通过以下设置声明在运行时使用rocketmq插件。 + +```properties +#connector plugin +eventMesh.connector.plugin.type=rocketmq +``` + +**1.1.3.3 配置VM启动参数** + +```properties +-Dlog4j.configurationFile=eventmesh-runtime/conf/log4j2.xml +-Deventmesh.log.home=eventmesh-runtime/logs +-Deventmesh.home=eventmesh-runtime +-DconfPath=eventmesh-runtime/conf +``` + +> 注:如果操作系统为Windows, 可能需要将文件分隔符换成'\' + +**1.1.3.4 启动运行** + +``` +运行org.apache.eventmesh.starter.StartUp的主要方法 +``` + +### 1.2 本地二进制构建 + +#### 1.2.1 依赖 + +``` +建议使用64位操作系统,建议使用Linux / Unix; +64位JDK 1.8+; +Gradle至少为7.0, 推荐 7.0.* +``` + +Gradle 是 Apache EventMesh 使用的构建自动化工具。请参考 [官方指南](https://docs.gradle.org/current/userguide/installation.html) 安装最新版本的 Gradle。 + +#### 1.2.2 下载源码 + +从 [EventMesh download](https://eventmesh.apache.org/download) 下载并提取最新版本的源代码。比如目前最新版,您将获得`apache-eventmesh-1.9.0-source.tar.gz`。 + +```console +tar -xvzf apache-eventmesh-1.9.0-source.tar.gz +cd apache-eventmesh-1.9.0-src/ +``` + +使用 Gradle 构建源代码。 + +```console +gradle clean dist +``` + +![runtime_2](/images/install/runtime_2.png) + +编辑 `eventmesh.properties` 以更改 EventMesh Runtime 的配置(如 TCP 端口、客户端黑名单)。 + +```console +cd dist +vim conf/eventmesh.properties +``` + +#### 1.2.3 构建并加载插件 + +Apache EventMesh引入了 SPI 机制,使 EventMesh 能够在运行时发现并加载插件。有两种方式安装插件: + +1. Gradle 依赖项: 在 `eventmesh-starter/build.gradle` 中将插件声明为构建依赖项。 + +```gradle +dependencies { + implementation project(":eventmesh-runtime") + + // 示例: 加载 RocketMQ 插件 + implementation project(":eventmesh-connectors:eventmesh-connector-rocketmq") +} +``` + +2. 
插件目录: EventMesh 会根据 `eventmesh.properties` 加载 `dist/plugin` 目录中的插件。Gradle 的 `installPlugin` 任务会构建插件并将其移动到 `dist/plugin` 目录中。 + +```console +gradle installPlugin +``` + +#### 1.2.4 启动Runtime + +执行 `start.sh` 脚本启动 EventMesh Runtime 服务器。 + +```console +bash bin/start.sh +``` +![runtime_4](/images/install/runtime_4.png) + +查看输出日志: + +```console +tail -f logs/eventmesh.out +``` +![runtime_3](/images/install/runtime_3.png) + +## 2 远程部署 + +### 2.1 依赖 + +``` +建议使用64位操作系统,建议使用Linux / Unix; +64位JDK 1.8+; +Gradle至少为7.0, 推荐 7.0.* +``` + +### 2.2 下载 + +在 [EventMesh download](https://eventmesh.apache.org/download) 页面选择1.5.0版本的 Binary Distribution 进行下载, 您将获得`apache-eventmesh-1.9.0-bin.tar.gz`。 + +```console +# 解压 +tar -xvzf apache-eventmesh-1.9.0-bin.tar.gz +cd apache-eventmesh-1.9.0 +``` + +### 2.3 部署 + +编辑 `eventmesh.properties` 以更改 EventMesh Runtime 的配置(如 TCP 端口、客户端黑名单)。 + +```console +vim conf/eventmesh.properties +``` + +执行 `start.sh` 脚本启动 EventMesh Runtime 服务器。 + +```console +bash bin/start.sh +``` +如果看到"EventMeshTCPServer[port=10000] started....",则说明设置成功。 + +![runtime_6](/images/install/runtime_6.png) + + +查看输出日志: + +```console +cd /root/apache-eventmesh-1.9.0/logs +tail -f eventmesh.out +``` +![runtime_7](/images/install/runtime_7.png) + +停止: + +```console +bash bin/stop.sh +``` + +![runtime_8](/images/install/runtime_8.png) +![runtime_9](/images/install/runtime_9.png) diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/04-runtime-with-docker.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/04-runtime-with-docker.md new file mode 100644 index 0000000000..f15bf57fb0 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/04-runtime-with-docker.md @@ -0,0 +1,157 @@ +# 使用 Docker 快速入门 EventMesh(暂时只支持到1.4.0版本) + +本篇快速入门将详细介绍使用 docker 部署 EventMesh,以 RocketMQ 作为对接的中间件。 + +## 1. 前提 + +1. 建议使用64位的 linux 系统; +2. 请预先安装 Docker Engine。 Docker 的安装过程可以参考 [docker 官方文档](https://docs.docker.com/engine/install/); +3. 建议掌握基础的 docker 概念和命令行,例如注册中心、挂载等等。不过这不是必须的,因为本次操作所需的命令都已为您列出; +4. 若您选择非standalone模式,请确保 [RocketMQ 已成功启动](https://rocketmq.apache.org/docs/quick-start/) 并且可以使用 ip 地址访问到;若您选择standalone模式,则无需启动 RocketMQ 。 + +## 2. 获取 EventMesh 镜像 + +首先,你可以打开一个命令行,并且使用下面的 ```pull``` 命令从 [Docker Hub](https://registry.hub.docker.com/r/eventmesh/eventmesh/tags) 中下载[最新发布的 EventMesh](https://eventmesh.apache.org/events/release-notes/v1.3.0/) 。 + +```shell +sudo docker pull eventmesh/eventmesh:v1.4.0 +``` + +您可以使用以下命令列出并查看本地已有的镜像。 + +```shell +sudo docker images +``` + +如果终端显示如下所示的镜像信息,则说明 EventMesh 镜像已经成功下载到本地。 + +```shell +$ sudo docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +eventmesh/eventmesh v1.4.0 6e2964599c78 16 months ago 937MB +``` + +![runtime_docker_1](/images/install/runtime_docker_1.png) + +## 3. 创建配置文件 + +在根据 EventMesh 镜像运行对应容器之前,你需要创建两个配置文件,分别是:```eventMesh.properties``` 和 ```rocketmq-client.properties```。 + +首先,你需要使用下面的命令创建这两个文件。 + +```shell +sudo mkdir -p /data/eventmesh/rocketmq/conf +cd /data/eventmesh/rocketmq/conf +sudo touch eventmesh.properties +sudo touch rocketmq-client.properties +``` +![runtime_docker_2](/images/install/runtime_docker_2.png) + +### 4. 
配置 eventMesh.properties + +这个配置文件中包含 EventMesh 运行时环境和集成进来的其他插件所需的参数。 + +使用下面的 ```vim``` 命令编辑 ```eventmesh.properties```。 + +```shell +sudo vim eventmesh.properties +``` + +你可以直接将 GitHub 仓库中的对应配置文件中的内容复制过来,链接为: 。 + +请检查配置文件里的默认端口是否已被占用,如果被占用请修改成未被占用的端口: + +| 属性 | 默认值 | 备注 | +|----------------------------|-------|----------------------------| +| eventMesh.server.http.port | 10105 | EventMesh http server port | +| eventMesh.server.tcp.port | 10000 | EventMesh tcp server port | +| eventMesh.server.grpc.port | 10205 | EventMesh grpc server port | + +### 5. 配置 rocketmq-client.properties + +这个配置文件中包含 RocketMQ nameserver 的信息。 + +使用下面的 ```vim``` 命令编辑 ```rocketmq-client.properties```。 + +```shell +sudo vim rocketmq-client.properties +``` + +你可以直接将 GitHub 仓库中的对应配置文件中的内容复制过来,链接为: 。请注意,如果您正在运行的 namesetver 地址不是配置文件中的默认值,请将其修改为实际正在运行的nameserver地址。 + +请检查配置文件里的默认namesrvAddr是否已被占用,如果被占用请修改成未被占用的地址: + +| 属性 | 默认值 | 备注 | +|---------------------------------------|-------------------------------|----------------------------------| +| eventMesh.server.rocketmq.namesrvAddr | 127.0.0.1:9876;127.0.0.1:9876 | RocketMQ namesrv default address | + +## 6. 运行 EventMesh + +现在你就可以开始根据下载好的 EventMesh 镜像运行容器了。 + +使用到的命令是 ```docker run```,有以下两点内容需要格外注意。 + +1. 绑定容器端口和宿主机端口:使用 ```docker run``` 的 ```-p``` 选项。 +2. 将宿主机中的两份配置文件挂在到容器中:使用 ```docker run``` 的 ```-v``` 选项。 + +综合一下,对应的启动命令为: + +```shell +sudo docker run -d \ + -p 10000:10000 -p 10105:10105 \ + -v /data/eventmesh/rocketmq/conf/eventMesh.properties:/data/app/eventmesh/conf/eventMesh.properties \ + -v /data/eventmesh/rocketmq/conf/rocketmq-client.properties:/data/app/eventmesh/conf/rocketmq-client.properties \ + eventmesh/eventmesh:v1.4.0 +``` + +如果运行命令之后看到新输出一行字符串,那么运行 EventMesh 镜像的容器就启动成功了。 + +接下来,你可以使用下面的命令查看容器的状态。 + +```shell +sudo docker ps +``` + +如果成功的话,你会看到终端打印出了如下所示容器的信息,其中就有运行 EventMesh 镜像的容器。 + +```shell +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +5bb6b6092672 eventmesh/eventmesh:v1.4.0 "/bin/sh -c 'sh star…" 5 seconds ago Up 3 seconds 0.0.0.0:10000->10000/tcp, :::10000->10000/tcp, 0.0.0.0:10105->10105/tcp, :::10105->10105/tcp eager_driscoll +``` + +![runtime_docker_3](/images/install/runtime_docker_3.png) + +从这个信息中可以看出,```container id``` 是 ```5bb6b6092672```,```name``` 是 ```eager_driscoll```,它们都可以用来唯一标识这个容器。**注意**:在你的电脑中,它们的值可能跟这里的不同。 + +## 7. 管理 EventMesh 容器 + +在成功的运行了 EventMesh 容器后,你可以通过进入容器、查看日志、删除容器等方式管理容器。 + +**进入容器** 命令示例: + +```shell +sudo docker exec -it [your container id or name] /bin/bash +``` + +在容器中 **查看日志** 命令示例: + +```shell +cd ../logs +tail -f eventmesh.out +``` + +![runtime_docker_4](/images/install/runtime_docker_4.png) + +**删除容器** 命令示例: + +```shell +sudo docker rm -f [your container id or name] +``` + +![runtime_docker_5](/images/install/runtime_docker_5.png) + +## 8. 探索更多 + +现在 EventMesh 已经通过容器运行了,你可以参考 [```eventmesh-examples``` 模块](https://github.com/apache/eventmesh/tree/master/eventmesh-examples) 编写并测试自己的代码了。 + +希望你享受这个过程并获得更多收获! 
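在动手编写自己的代码之前,也可以先用一条 curl 命令对刚启动的容器做个快速连通性验证。下面是一个发布测试事件的示意(参数均为示例:假设已按上文将 10105 端口映射到宿主机,且事件存储中已创建主题 TEST-TOPIC-HTTP-ASYNC;HTTP 接口路径请以所用版本的文档为准):

```shell
# 通过宿主机映射的 10105 端口向 EventMesh 发布一条测试事件(主题与消息体仅为示例)
curl -H "Content-Type:application/json" -X POST \
  -d '{"name": "admin", "pass":"12345678"}' \
  http://127.0.0.1:10105/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC
```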
diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/05-demo.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/05-demo.md new file mode 100644 index 0000000000..6a08fcbf19 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/05-demo.md @@ -0,0 +1,236 @@ +# 运行 eventmesh-sdk-java demo + +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java) + +> EventMesh-sdk-java作为客户端,与eventmesh-runtime通信,用于完成消息的发送和接收。 +> +> EventMesh-sdk-java支持异步消息和广播消息。异步消息表示生产者只发送消息,不关心回复消息。广播消息表示生产者发送一次消息,所有订阅广播主题的消费者都将收到消息 +> +> EventMesh-sdk-java支持HTTP,TCP 和 GRPC 协议。 + +TCP, HTTP 和 GRPC 示例都在**eventmesh-examples**模块下 + +## 1. TCP +### 1.1 异步消息 + +- 创建主题TEST-TOPIC-TCP-ASYNC,可以通过 rocketmq-console 或者 rocketmq tools 命令 + +- 启动消费者,订阅上一步骤已经创建的Topic + +``` +运行 org.apache.eventmesh.tcp.demo.sub.eventmeshmessage.AsyncSubscribe 的main方法 +``` + +- 启动发送端,发送消息 + +``` +运行 org.apache.eventmesh.tcp.demo.pub.eventmeshmessage.AsyncPublish 的main方法 +``` + +### 1.2 广播消息 + +- 创建主题TEST-TOPIC-TCP-BROADCAST,可以通过 rocketmq-console 或者 rocketmq tools 命令 + +- 启动消费端,订阅上一步骤已经创建的Topic + +``` +运行 org.apache.eventmesh.tcp.demo.sub.eventmeshmessage.AsyncSubscribeBroadcast 的main方法 +``` + +- 启动发送端,发送广播消息 + +``` +运行 org.apache.eventmesh.tcp.demo.pub.eventmeshmessage.AsyncPublishBroadcast 的main方法 +``` + +更多关于TCP部分的内容,请参考 [EventMesh TCP](../sdk-java/03-tcp.md) + +## 2. HTTP + +> 对于HTTP,eventmesh-sdk-java对对于异步事件实现了发送与订阅 +> +>在演示中,Java类`LiteMessage`的`content`字段表示一个特殊的协议,因此,如果您要使用eventmesh-sdk-java的http-client,则只需设计协议的内容并在同一时间提供消费者的应用程序。 + +### 2.1 异步事件 + +> 生产者将事件发送给下游即可,无需等待响应 + +- 创建主题TEST-TOPIC-HTTP-ASYNC,可以通过rocketmq-console或者rocketmq tools 命令 + +- 启动消费端,订阅Topic + + 异步事件消费端为spring boot demo,运行demo即可启动服务并完成Topic订阅 + +``` +运行 org.apache.eventmesh.http.demo.sub.SpringBootDemoApplication 的main方法 +``` + +- 启动发送端,发送消息 + +``` +运行 org.apache.eventmesh.http.demo.pub.eventmeshmessage.AsyncPublishInstance 的main方法 +``` +更多关于HTTP部分的内容,请参考 [EventMesh HTTP](../sdk-java/02-http.md) + +## 3. GRPC + +> eventmesh-sdk-java 实现了 gRPC 协议. 它能异步和同步发送事件到 eventmesh-runtime. +> 它可以通过webhook和事件流方式订阅消费事件, 同时也支持 CNCF CloudEvents 协议. + +### 3.1 异步事件发送 和 webhook订阅 + +> Async生产者 异步发送事件到 eventmesh-runtime, 不需要等待事件储存到 `event-store` +> 在webhook 消费者, 事件推送到消费者的http endpoint url。这个URL在消费者的 `Subscription` 模型定于. 这方法跟前面的Http eventmsh client类似。 + +- 在rocketmq 创建主题 TEST-TOPIC-GRPC-ASYNC +- 启动 publisher 发送事件 + +``` +运行 org.apache.eventmesh.grpc.pub.eventmeshmessage.AsyncPublishInstance 的main方法 +``` + +- 启动 webhook 消费者 + +``` +运行 org.apache.eventmesh.grpc.sub.app.SpringBootDemoApplication 的main方法 +``` + +### 3.2 同步事件发送和事件流订阅 + +> 同步生产者 发送事件到 eventmesh-runtime, 同时等待事件储存到 `event-store` +> 在事件流消费者,事件以流的形式推送到 `ReceiveMsgHook` 客户端。 这方法类似 eventmesh client. + +- 在rocketmq 创建主题 TEST-TOPIC-GRPC-RR +- 启动 Request-Reply publisher 发送事件 + +``` +运行 org.apache.eventmesh.grpc.pub.eventmeshmessage.RequestReplyInstance 的main方法 +``` + +- 启动 stream subscriber + +``` +运行 org.apache.eventmesh.grpc.sub.EventmeshAsyncSubscribe 的main方法 +``` + +### 3.3 批量事件发布 + +> 批量发布多个事件到 eventmesh-runtime. 这是异步操作 + +- 在rocketmq 创建主题 TEST-TOPIC-GRPC-ASYNC +- 启动 publisher 来批量发布事件 + +``` +运行 org.apache.eventmesh.grpc.pub.eventmeshmessage.BatchPublishInstance 的main方法 +``` + +更多关于 gRPC 部分的内容,请参考 [EventMesh gRPC](../sdk-java/04-grpc.md) + +## 4. 
测试 + +请参考[EventMesh Store](./01-store.md) 和 [EventMesh Runtime](./03-runtime.md) 完成运行环境的部署 + +完成 store 和 runtime 的部署后,就可以在 eventmesh-examples 模块下运行我们的 demo 来体验 eventmesh 了: + +gradle编译: + +```shell +cd apache-eventmesh-1.9.0-src/eventmesh-examples +gradle clean dist + +cd ./dist/bin +``` + +![demo_1](/images/install/demo_1.png) + +### 4.1 TCP + +TCP Sub + +```shell +bash tcp_eventmeshmessage_sub.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_sub.out +``` +![demo_2](/images/install/demo_2.png) + + +TCP Pub + +```shell +bash tcp_pub_eventmeshmessage.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_pub.out +``` + +![demo_3](/images/install/demo_3.png) + +### 4.2 TCP Broadcast + +TCP Sub Broadcast + +```shell +sh tcp_sub_eventmeshmessage_broadcast.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_sub_broadcast.out +``` + +![demo_4](/images/install/demo_3.png) + +TCP Pub Broadcast + +```shell +sh tcp_pub_eventmeshmessage_broadcast.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_pub_broadcast.out +``` + +![demo_5](/images/install/demo_5.png) + +### 4.3 HTTP + +HTTP Sub + +```shell +sh http_sub.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_http_sub.out +``` + +![demo_6](/images/install/demo_6.png) + +HTTP Pub + +```shell +sh http_pub_eventmeshmessage.sh +``` + +打开对应log文件查看日志: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_http_pub.out +``` + +![demo_7](/images/install/demo_7.png) + +你可以在 `/logs` 目录下面看到不同模式的运行日志。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/_category_.json new file mode 100644 index 0000000000..bc305c5ad5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/instruction/_category_.json @@ -0,0 +1,6 @@ +{ + "position": 2, + "label": "安装部署", + "collapsed": false +} + diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/introduction.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/introduction.md new file mode 100644 index 0000000000..de44fcb2c5 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/introduction.md @@ -0,0 +1,33 @@ +--- +sidebar_position: 0 +--- + +# Apache EventMesh + +[![CI status](https://img.shields.io/github/actions/workflow/status/apache/eventmesh/ci.yml?logo=github&style=for-the-badge)](https://github.com/apache/eventmesh/actions/workflows/ci.yml) +[![CodeCov](https://img.shields.io/codecov/c/gh/apache/eventmesh/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/apache/eventmesh) +[![License](https://img.shields.io/github/license/apache/eventmesh?style=for-the-badge)](https://www.apache.org/licenses/LICENSE-2.0.html) +[![GitHub Release](https://img.shields.io/github/v/release/apache/eventmesh?style=for-the-badge)](https://github.com/apache/eventmesh/releases) +[![Slack Status](https://img.shields.io/badge/slack-join_chat-blue.svg?logo=slack&style=for-the-badge)](https://join.slack.com/t/the-asf/shared_invite/zt-1y375qcox-UW1898e4kZE_pqrNsrBM2g) + +**Apache EventMesh** 是一个动态的云原生事件驱动架构基础设施,用于分离应用程序和后端中间件层,它支持广泛的用例,包括复杂的混合云、使用了不同技术栈的分布式架构。 + +## 特性 + +- **通信协议**: EventMesh 可以使用 
TCP、HTTP 或 gRPC 与客户端通信。 +- **CloudEvents**: EventMesh 支持[CloudEvents](https://cloudevents.io) 规范作为事件的格式。CloudEvents 是一种描述事件数据的公共格式的规范,用于在服务、平台和系统之间提供互操作性。 +- **Schema 注册**: EventMesh 实现了schema注册,该schema注册可以接收并存储来自客户端的模式,并提供其他客户端检索模式的接口。 +- **可观察性**: EventMesh 暴露了一系列metrics,例如 HTTP 协议的平均延迟和传递消息数。这些metrics可以使用 Prometheus 或 OpenTelemetry 收集和分析。 +- **事件工作流程编排**:EventMesh Workflow 可以接收事件,并根据工作流定义和当前工作流状态决定触发哪个命令。工作流定义可以使用 [Serverless Workflow](https://serverlessworkflow.io) DSL 编写。 + +## 组件 + +Apache EventMesh 由多个组件组成,这些组件集成了不同的中间件和消息协议,以增强应用程序运行时的功能。 + +- **eventmesh-runtime**:中间件,在生产者和消费者之间传输事件,支持云原生应用程序和微服务。 +- **eventmesh-sdk-java**:支持HTTP,TCP和[gRPC](https://grpc.io/)协议的Java SDK。 +- **eventmesh-connector-plugin**:插件集合,连接中间件,例如[Apache Kafka](https://kafka.apache.org/),[Apache RocketMQ](https://rocketmq.apache.org/),[Apache Pulsar](https://pulsar.apache.org/)和[Redis](https://redis.io/)。 +- **eventmesh-registry-plugin**:插件集合,集成服务注册表,例如[Nacos](https://nacos.io/)和[etcd](https://etcd.io/)。 +- **eventmesh-security-plugin**:插件集合,实现安全机制,例如ACL(访问控制列表),身份验证和授权。 +- **eventmesh-protocol-plugin**:插件集合,实现消息协议,例如[CloudEvents](https://cloudevents.io/)和[MQTT](https://mqtt.org/)。 +- **eventmesh-admin**:控制面板,管理客户端,主题和订阅。 diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/roadmap.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/roadmap.md new file mode 100644 index 0000000000..43c766b378 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/roadmap.md @@ -0,0 +1,46 @@ +--- +sidebar_position: 1 +--- + +# EventMesh产品路线图 + +下表列出了EventMesh的新特性和bug修复情况,详情请参考 [release notes](https://eventmesh.apache.org/events/release-notes/v1.10.0/). + +## List of Features and Milestones + +| Status | Description | Reference | +|-------------------------------------------|---------------------------------| --- | +| **Implemented in 1.0.0** | Support HTTP | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.0.0** | Support TCP | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.0.0** | Support Pub/Sub Event | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.1.1** | Provide Java SDK | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.1.1** | Support HTTPS | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.2.0** | Support RocketMQ as EventStore | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.2.0** | Support Heartbeat | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.3.0** | Integrate with OpenSchema | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.3.0** | Integrate with OpenTelemetry | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.3.0** | Support CloudEvents | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.4.0** | Support gRPC | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.5.0** | Provide Golang SDK | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.5.0** | Support Nacos Registry | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.5.0** | Support Mesh Bridge | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.5.0** | Support Federal Government | [GitHub 
Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.6.0 (to be released)** | Integrate with Consul | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.6.0 (to be released)** | Support Webhook | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **Implemented in 1.6.0 (to be released)** | Support etcd | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **In Progress** | Knative Eventing Infrastructure | [GitHub Issue](https://github.com/apache/eventmesh/issues/790), [GSoC '22](https://issues.apache.org/jira/browse/COMDEV-463) | +| **In Progress** | Dashboard | [GitHub Issue](https://github.com/apache/eventmesh/issues/700), [GSoC '22](https://issues.apache.org/jira/browse/COMDEV-465) | +| **In Progress** | Support Kafka as EventStore | [GitHub Issue](https://github.com/apache/eventmesh/issues/676) | +| **In Progress** | Support Pulsar as EventStore | [GitHub Issue](https://github.com/apache/eventmesh/issues/676) | +| **In Progress** | Support Dledger | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **In Progress** | Workflow | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **In Progress** | Support Redis | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **In Progress** | Support Mesh Bridge | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| **In Progress** | Support Zookeeper | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| Planned | Provide NodeJS SDK | [GitHub Issue](https://github.com/apache/eventmesh/issues/417) | +| Planned | Transaction Event | [GitHub Issue](https://github.com/apache/eventmesh/issues/697) | +| Planned | Event Query Language (EQL) | [GitHub Issue](https://github.com/apache/eventmesh/issues/778) | +| Planned | Metadata consistency persistent | [GitHub Issue](https://github.com/apache/eventmesh/issues/817) | +| Planned | Rust SDK | [GitHub Issue](https://github.com/apache/eventmesh/issues/815) | +| Planned | WebAssembly Runtime | [GitHub Issue](https://github.com/apache/eventmesh/issues/576) | +| Planned | Filter Chain | [GitHub Issue](https://github.com/apache/eventmesh/issues/664) | diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/01-intro.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/01-intro.md new file mode 100644 index 0000000000..80a0db79a9 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/01-intro.md @@ -0,0 +1,47 @@ +# 安装 SDK + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java/badge.svg?style=for-the-badge)](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java) + +EventMesh Java SDK 是在一个 Java 应用中集成 Eventmesh 所需的 Java 组件集合。SDK 支持使用 TCP、HTTP 和 gRPC 协议来发送和接收同步消息、异步消息和广播消息。SDK 实现了 EventMesh 消息、CloudEvents 和 OpenMessaging 形式。您可以在 [`eventmesh-example`](https://github.com/apache/eventmesh/tree/master/eventmesh-examples) 模块中查看示例项目。 + + + + + + + + +​ 使用 Gradle 安装 EventMesh Java SDK,您需要在模块的 `build.gradle` 文件的依赖块中将 `org.apache.eventmesh:eventmesh-sdk-java` 声明为 `implementation`。 + +```groovy +dependencies { + implementation 'org.apache.eventmesh:eventmesh-sdk-java:1.4.0' +} +``` + + + + + + + + +使用 Maven 安装 EventMesh Java SDK,您需要在项目 `pom.xml` 文件的依赖块中声明 `org.apache.eventmesh:eventmesh-sdk-java`。 + +```xml + + + 
org.apache.eventmesh + eventmesh-sdk-java + 1.4.0 + + +``` + + + + + \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/02-http.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/02-http.md new file mode 100644 index 0000000000..0ce80ee406 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/02-http.md @@ -0,0 +1,118 @@ +# HTTP 协议 + +EventMesh Java SDK 实现了 HTTP 异步消息的生产者和消费者。二者都需要一个 `EventMeshHttpClientConfig` 类实例来指定 EventMesh HTTP 客户端的配置信息。其中的 `liteEventMeshAddr`、`userName` 和 `password` 字段需要和 EventMesh runtime `eventmesh.properties` 文件中的相匹配。 + +```java +import org.apache.eventmesh.client.http.conf.EventMeshHttpClientConfig; +import org.apache.eventmesh.common.utils.IPUtils; +import org.apache.eventmesh.common.utils.ThreadUtils; + +public class HTTP { + public static void main(String[] args) throws Exception { + EventMeshHttpClientConfig eventMeshClientConfig = EventMeshHttpClientConfig.builder() + .liteEventMeshAddr("localhost:10105") + .producerGroup("TEST_PRODUCER_GROUP") + .env("env") + .idc("idc") + .ip(IPUtils.getLocalAddress()) + .sys("1234") + .pid(String.valueOf(ThreadUtils.getPID())) + .userName("eventmesh") + .password("password") + .build(); + /* ... */ + } +} +``` + +## HTTP 消费者 + +类 `EventMeshHttpConsumer` 实现了 `heartbeat`、`subscribe` 和 `unsubscribe` 方法。`subscribe` 方法接收一个 `SubscriptionItem` 对象的列表,其中定义了要订阅的话题和回调的 URL 地址。 + +```java +import org.apache.eventmesh.client.http.consumer.EventMeshHttpConsumer; +import org.apache.eventmesh.common.protocol.SubscriptionItem; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; +import com.google.common.collect.Lists; + +public class HTTP { + final String url = "http://localhost:8080/callback"; + final List topicList = Lists.newArrayList( + new SubscriptionItem("eventmesh-async-topic", SubscriptionMode.CLUSTERING, SubscriptionType.ASYNC) + ); + + public static void main(String[] args) throws Exception { + /* ... */ + eventMeshHttpConsumer = new EventMeshHttpConsumer(eventMeshClientConfig); + eventMeshHttpConsumer.heartBeat(topicList, url); + eventMeshHttpConsumer.subscribe(topicList, url); + /* ... */ + eventMeshHttpConsumer.unsubscribe(topicList, url); + } +} +``` + +EventMesh runtime 将发送一个包含 [CloudEvents 格式](https://github.com/cloudevents/spec) 信息的 POST 请求到这个回调的 URL 地址。类 [SubController.java](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/http/demo/sub/controller/SubController.java) 实现了 Spring Boot controller,它将接收并解析回调信息。 + +## HTTP 生产者 + +类 `EventMeshHttpProducer` 实现了 `publish` 方法。`publish` 方法接收将被发布的消息和一个可选的 timeout 值。消息应是下列类的一个实例: + +- `org.apache.eventmesh.common.EventMeshMessage` +- `io.cloudevents.CloudEvent` +- `io.openmessaging.api.Message` + +```java +import org.apache.eventmesh.client.http.producer.EventMeshHttpProducer; +import org.apache.eventmesh.client.tcp.common.EventMeshCommon; +import org.apache.eventmesh.common.Constants; +import org.apache.eventmesh.common.utils.JsonUtils; + +import io.cloudevents.CloudEvent; +import io.cloudevents.core.builder.CloudEventBuilder; + +public class HTTP { + public static void main(String[] args) throws Exception { + /* ... 
*/ + EventMeshHttpProducer eventMeshHttpProducer = new EventMeshHttpProducer(eventMeshClientConfig); + Map content = new HashMap<>(); + content.put("content", "testAsyncMessage"); + + CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject("eventmesh-async-topic") + .withSource(URI.create("/")) + .withDataContentType("application/cloudevents+json") + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); + eventMeshHttpProducer.publish(event); + } +} +``` + +## 使用Curl 命令 + +本段落介绍通过Curl命令体验事件的收发功能 + +### 事件发送 + +启动EventMesh Runtime服务后,可以使用Curl命令将事件用HTTP POST方法发布到指定的主题,Body内容必须是JSON格式,执行命令示例如下: + +```shell +curl -H "Content-Type:application/json" -X POST -d '{"name": "admin", "pass":"12345678"}' http://127.0.0.1:10105/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC +``` + + + +### 事件订阅 + +启动EventMesh Runtime服务后,可以使用Curl命令用HTTP POST方法订阅指定的主题列表,Body内容必须是JSON格式,执行命令示例如下: + +```shell +curl -H "Content-Type:application/json" -X POST -d '{"url": "http://127.0.0.1:8088/sub/test", "consumerGroup":"TEST-GROUP", "topic":[{"mode":"CLUSTERING","topic":"TEST-TOPIC-HTTP-ASYNC","type":"ASYNC"}]}' http://127.0.0.1:10105/eventmesh/subscribe/local +``` + +你可以在项目`eventmesh-examples`模块中看到这个例子。 \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/03-tcp.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/03-tcp.md new file mode 100644 index 0000000000..3027b53dc6 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/03-tcp.md @@ -0,0 +1,118 @@ +# TCP 协议 + +EventMesh Java SDK 实现了同步、异步和广播 TCP 消息的生产者和消费者。 二者都需要一个 `EventMeshHttpClientConfig` 类实例来指定 EventMesh TCP 客户端的配置信息。其中的 `host` 和 `port` 字段需要和 EventMesh runtime `eventmesh.properties` 文件中的相匹配。 + +```java +import org.apache.eventmesh.client.tcp.conf.EventMeshTCPClientConfig; +import org.apache.eventmesh.client.tcp.common.ReceiveMsgHook; +import io.cloudevents.CloudEvent; + +public class AsyncSubscribe implements ReceiveMsgHook { + public static void main(String[] args) throws InterruptedException { + EventMeshTCPClientConfig eventMeshTcpClientConfig = EventMeshTCPClientConfig.builder() + .host(eventMeshIp) + .port(eventMeshTcpPort) + .userAgent(userAgent) + .build(); + /* ... 
*/ + } +} +``` + +## TCP 消费者 + +消费者应该实现 `ReceiveMsgHook` 类,其被定义在 [ReceiveMsgHook.java](https://github.com/apache/eventmesh/blob/master/eventmesh-sdk-java/src/main/java/org/apache/eventmesh/client/tcp/common/ReceiveMsgHook.java)。 + +```java +public interface ReceiveMsgHook { + Optional handle(ProtocolMessage msg); +} +``` + +类 `EventMeshTCPClient` 实现了 `subscribe` 方法。该方法接收话题、`SubscriptionMode` 和 `SubscriptionType`。`handle` 方法将会在消费者从订阅的话题中收到消息时被调用。如果 `SubscriptionType` 是 `SYNC`,`handle` 的返回值将被发送回生产者。 + +```java +import org.apache.eventmesh.client.tcp.EventMeshTCPClient; +import org.apache.eventmesh.client.tcp.EventMeshTCPClientFactory; +import org.apache.eventmesh.client.tcp.common.ReceiveMsgHook; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; +import io.cloudevents.CloudEvent; + +public class TCPConsumer implements ReceiveMsgHook { + public static TCPConsumer handler = new TCPConsumer(); + private static EventMeshTCPClient client; + + public static void main(String[] args) throws Exception { + client = EventMeshTCPClientFactory.createEventMeshTCPClient( + eventMeshTcpClientConfig, + CloudEvent.class + ); + client.init(); + + client.subscribe( + "eventmesh-sync-topic", + SubscriptionMode.CLUSTERING, + SubscriptionType.SYNC + ); + + client.registerSubBusiHandler(handler); + client.listen(); + } + + @Override + public Optional handle(CloudEvent message) { + log.info("Messaged received: {}", message); + return Optional.of(message); + } +} +``` + +## TCP 生产者 + +### 异步生产者 + +类 `EventMeshTCPClient` 实现了 `public` 方法。该方法接收将被发布的消息和一个可选的 timeout 值,并返回来自消费者的响应消息。 + +```java +/* ... */ +client = EventMeshTCPClientFactory.createEventMeshTCPClient(eventMeshTcpClientConfig, CloudEvent.class); +client.init(); + +CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); +client.publish(event, 1000); +``` + +### 同步生产者 + +类 `EventMeshTCPClient` 实现了 `rr` 方法。该方法接收将被发布的消息和一个可选的 timeout 值,并返回来自消费者的响应消息。 + +```java +/* ... 
*/ +client = EventMeshTCPClientFactory.createEventMeshTCPClient(eventMeshTcpClientConfig, CloudEvent.class); +client.init(); + +CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); + +Package response = client.rr(event, 1000); +CloudEvent replyEvent = EventFormatProvider + .getInstance() + .resolveFormat(JsonFormat.CONTENT_TYPE) + .deserialize(response.getBody().toString().getBytes(StandardCharsets.UTF_8)); +``` \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/04-grpc.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/04-grpc.md new file mode 100644 index 0000000000..ce8d241d41 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/04-grpc.md @@ -0,0 +1,174 @@ +# gRPC 协议 + +EventMesh Java SDK 实现了 gRPC 同步、异步和广播消息的生产者和消费者。二者都需要一个 `EventMeshHttpClientConfig` 类实例来指定 EventMesh gRPC 客户端的配置信息。其中的 `liteEventMeshAddr`、`userName` 和 `password` 字段需要和 EventMesh runtime `eventmesh.properties` 文件中的相匹配。 + +```java +import org.apache.eventmesh.client.grpc.config.EventMeshGrpcClientConfig; +import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook; +import io.cloudevents.CloudEvent; + +public class CloudEventsAsyncSubscribe implements ReceiveMsgHook { + public static void main(String[] args) throws InterruptedException { + EventMeshGrpcClientConfig eventMeshClientConfig = EventMeshGrpcClientConfig.builder() + .serverAddr("localhost") + .serverPort(10205) + .consumerGroup(ExampleConstants.DEFAULT_EVENTMESH_TEST_CONSUMER_GROUP) + .env("env").idc("idc") + .sys("1234").build(); + /* ... */ + } +} +``` + +## gRPC 消费者 + +### 流消费者 + +EventMesh runtime 会将来自生产者的信息作为一系列事件流向流消费者发送。消费者应实现 `ReceiveHook` 类,其被定义在 [ReceiveMsgHook.java](https://github.com/apache/eventmesh/blob/master/eventmesh-sdk-java/src/main/java/org/apache/eventmesh/client/grpc/consumer/ReceiveMsgHook.java)。 + +```java +public interface ReceiveMsgHook { + Optional handle(T msg) throws Throwable; + String getProtocolType(); +} +``` + +类 `EventMeshGrpcConsumer` 实现了 `registerListener`、`subscribe` 和 `unsubscribe` 方法。`subscribe` 方法接收一个 `SubscriptionItem` 对象的列表,其中定义了要订阅的话题。`registerListener` 接收一个实现了 `ReceiveMsgHook` 的实例。`handle` 方法将会在消费者收到订阅的主题消息时被调用。如果 `SubscriptionType` 是 `SYNC`,`handle` 的返回值将被发送回生产者。 + +```java +import org.apache.eventmesh.client.grpc.consumer.EventMeshGrpcConsumer; +import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook; +import org.apache.eventmesh.client.tcp.common.EventMeshCommon; +import org.apache.eventmesh.common.protocol.SubscriptionItem; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; +import io.cloudevents.CloudEvent; + +public class CloudEventsAsyncSubscribe implements ReceiveMsgHook { + public static CloudEventsAsyncSubscribe handler = new CloudEventsAsyncSubscribe(); + public static void main(String[] args) throws InterruptedException { + /* ... 
*/ + SubscriptionItem subscriptionItem = new SubscriptionItem( + "eventmesh-async-topic", + SubscriptionMode.CLUSTERING, + SubscriptionType.ASYNC + ); + EventMeshGrpcConsumer eventMeshGrpcConsumer = new EventMeshGrpcConsumer(eventMeshClientConfig); + + eventMeshGrpcConsumer.init(); + eventMeshGrpcConsumer.registerListener(handler); + eventMeshGrpcConsumer.subscribe(Collections.singletonList(subscriptionItem)); + /* ... */ + eventMeshGrpcConsumer.unsubscribe(Collections.singletonList(subscriptionItem)); + } + + @Override + public Optional handle(CloudEvent message) { + log.info("Messaged received: {}", message); + return Optional.empty(); + } + + @Override + public String getProtocolType() { + return EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME; + } +} +``` + +### Webhook 消费者 + +类 `EventMeshGrpcConsumer` 的 `subscribe` 方法接收一个 `SubscriptionItem` 对象的列表,其中定义了要订阅的主题和一个可选的 timeout 值。如果提供了回调 URL,EventMesh runtime 将向回调 URL 地址发送一个包含 [CloudEvents 格式](https://github.com/cloudevents/spec) 消息的 POST 请求。[SubController.java](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/grpc/sub/app/controller/SubController.java) 实现了一个接收并解析回调信息的 Spring Boot controller。 + +```java +import org.apache.eventmesh.client.grpc.consumer.EventMeshGrpcConsumer; +import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook; +import org.apache.eventmesh.client.tcp.common.EventMeshCommon; +import org.apache.eventmesh.common.protocol.SubscriptionItem; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; + +@Component +public class SubService implements InitializingBean { + final String url = "http://localhost:8080/callback"; + + public void afterPropertiesSet() throws Exception { + /* ... */ + eventMeshGrpcConsumer = new EventMeshGrpcConsumer(eventMeshClientConfig); + eventMeshGrpcConsumer.init(); + + SubscriptionItem subscriptionItem = new SubscriptionItem( + "eventmesh-async-topic", + SubscriptionMode.CLUSTERING, + SubscriptionType.ASYNC + ); + + eventMeshGrpcConsumer.subscribe(Collections.singletonList(subscriptionItem), url); + /* ... */ + eventMeshGrpcConsumer.unsubscribe(Collections.singletonList(subscriptionItem), url); + } +} +``` + +## gRPC 生产者 + +### 异步生产者 + +类 `EventMeshGrpcProducer` 实现了 `publish` 方法。`publish` 方法接收将被发布的消息和一个可选的 timeout 值。消息应是下列类的一个实例: + +- `org.apache.eventmesh.common.EventMeshMessage` +- `io.cloudevents.CloudEvent` + +```java +/* ... 
*/ +EventMeshGrpcProducer eventMeshGrpcProducer = new EventMeshGrpcProducer(eventMeshClientConfig); +eventMeshGrpcProducer.init(); + +Map content = new HashMap<>(); +content.put("content", "testAsyncMessage"); + +CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); +eventMeshGrpcProducer.publish(event); +``` + +### 同步生产者 + +类 `EventMeshGrpcProducer` 实现了 `requestReply` 方法。`requestReply` 方法接收将被发布的消息和一个可选的 timeout 值。方法会返回消费者返回的消息。消息应是下列类的一个实例: + +- `org.apache.eventmesh.common.EventMeshMessage` +- `io.cloudevents.CloudEvent` + +### 批量生产者 + +类 `EventMeshGrpcProducer` 重写了 `publish` 方法,该方法接收一个将被发布的消息列表和一个可选的 timeout 值。列表中的消息应是下列类的一个实例: + +- `org.apache.eventmesh.common.EventMeshMessage` +- `io.cloudevents.CloudEvent` + +```java +/* ... */ +List cloudEventList = new ArrayList<>(); +for (int i = 0; i < 5; i++) { + CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); + + cloudEventList.add(event); +} + +eventMeshGrpcProducer.publish(cloudEventList); +/* ... */ +``` \ No newline at end of file diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/_category_.json new file mode 100644 index 0000000000..9d0a27c562 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/sdk-java/_category_.json @@ -0,0 +1,4 @@ +{ + "label": "EventMesh SDK for Java", + "collapsed": false +} diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md new file mode 100644 index 0000000000..733b5e1901 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md @@ -0,0 +1,16 @@ +# EventMesh 升级指引 + +> 本文简单介绍EventMesh从1.2.0版本升级到最新版本的注意事项。 + +## 1. 注意事项 + +**如果您是首次接触并使用EventMesh,您可以忽略该章节。** + +## 2. 服务升级安装 + +EventMesh运行时模块的升级和启动可以按照 [设计文档](https://eventmesh.apache.org/docs/instruction/runtime) 完成. 
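下面给出一个参照上述部署文档整理的升级操作示意(目录名与版本号仅为示例,升级前请先备份旧版本的 conf 目录,并结合目标版本的 release notes 确认配置项差异):

```shell
# 在旧版本的部署目录中停止 EventMesh Runtime
bash bin/stop.sh

# 解压新版本的二进制包(版本号仅为示例)
tar -xvzf apache-eventmesh-1.10.0-bin.tar.gz
cd apache-eventmesh-1.10.0

# 对照旧版本的配置迁移自定义项(如 TCP 端口、存储插件等),然后启动
vim conf/eventmesh.properties
bash bin/start.sh
```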
+ +版本之间的差异和变化,请参考不同版本的[release notes](https://eventmesh.apache.org/events/release-notes)。可以满足不同版本间的兼容性。 + +如果需要使用最新的功能,按照版本说明升级到相应的版本即可,不同的插件模块组件可以单独打包配置。可以参考相应的[功能设计文档和指南](https://eventmesh.apache.org/docs/design-document/) + diff --git a/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/_category_.json b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/_category_.json new file mode 100644 index 0000000000..af764643e9 --- /dev/null +++ b/i18n/zh/docusaurus-plugin-content-docs/version-v1.10.0/upgrade-guide/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 7, + "label": "Upgrade Guide", + "collapsed": false +} diff --git a/src/pages/download.tsx b/src/pages/download.tsx index 4024b0fdd9..00e0196792 100644 --- a/src/pages/download.tsx +++ b/src/pages/download.tsx @@ -27,6 +27,21 @@ interface Release { } const releaseList: Release[] = [ + { + version: 'v1.10.0', + date: new Date(2023, 11, 25), + notes: 'https://eventmesh.apache.org/events/release-notes/v1.10.0', + source: { + tar: 'https://www.apache.org/dyn/closer.lua/eventmesh/1.10.0/apache-eventmesh-1.10.0-source.tar.gz', + asc: 'https://downloads.apache.org/eventmesh/1.10.0/apache-eventmesh-1.10.0-source.tar.gz.asc', + sha512: 'https://downloads.apache.org/eventmesh/1.10.0/apache-eventmesh-1.10.0-source.tar.gz.sha512', + }, + binary: { + tar: 'https://www.apache.org/dyn/closer.lua/eventmesh/1.10.0/apache-eventmesh-1.10.0-bin.tar.gz', + asc: 'https://downloads.apache.org/eventmesh/1.10.0/apache-eventmesh-1.10.0-bin.tar.gz.asc', + sha512: 'https://downloads.apache.org/eventmesh/1.10.0/apache-eventmesh-1.10.0-bin.tar.gz.sha512', + }, + }, { version: 'v1.9.0', date: new Date(2023, 6, 4), diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md new file mode 100644 index 0000000000..a5056ce3cc --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/01-runtime-protocol.md @@ -0,0 +1,401 @@ +# EventMesh Runtime Protocol + +## TCP Protocol + +### Protocol Format + +|Name|Size|Description| +|-|-|-| +|Magic Code|9 bytes|Default: `EventMesh`| +|Protocol Version|4 bytes|Default: `0000`| +|Message Size|4 bytes|The total length of the message| +|Header Size|4 bytes|The length of the message header| +|Message Body||The content of the message| + +### Message Object in the Business Logic Layer + +#### Message Composition + +The `Package` class in the [`Package.java` file](https://github.com/apache/eventmesh/blob/master/eventmesh-common/src/main/java/org/apache/eventmesh/common/protocol/tcp/Package.java) is the TCP message object used in business logic layer. The class contains the `header` and `body` fields. + +```java +public class Package { + private Header header; + private Object body; +} + +public class Header { + private Command cmd; + private int code; + private String msg; + private String seq; +} +``` + +#### Specification + +- Message Header (the `header` field): The `cmd` field in the `Header` class specifies the different types of messages. +- Message Body (the `body` field): The type of the message body should be defined based on `cmd` field in the `Header` class. 
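As a rough illustration of how the two parts fit together, a client hello could be assembled as sketched below, with the body type chosen according to the mapping table that follows. This is a sketch only: it assumes conventional setters on the `Package` and `Header` POJOs shown above, and `userAgent` stands for a pre-built `UserAgent` instance.

```java
// Sketch only: assumes standard getters/setters on the Package and Header classes shown above.
Package hello = new Package();          // the TCP message object from the business logic layer
Header header = new Header();
header.setCmd(Command.HELLO_REQUEST);   // cmd decides which body type is expected
header.setCode(0);
header.setSeq("1");
hello.setHeader(header);
hello.setBody(userAgent);               // HELLO_REQUEST carries a UserAgent body (see the table below)
```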
+ +|Command|Type of Body| +|-|-| +| HEARTBEAT_REQUEST, HEARTBEAT_RESPONSE, HELLO_RESPONSE, CLIENT_GOODBYE_REQUEST, CLIENT_GOODBYE_RESPONSE, SERVER_GOODBYE_REQUEST, SERVER_GOODBYE_RESPONSE, LISTEN_REQUEST, LISTEN_RESPONSE, UNSUBSCRIBE_REQUEST, SUBSCRIBE_RESPONSE, UNSUBSCRIBE_RESPONSE, ASYNC_MESSAGE_TO_SERVER_ACK, BROADCAST_MESSAGE_TO_SERVER_ACK|N/A| +|HELLO_REQUEST|UserAgent| +|SUBSCRIBE_REQUEST|Subscription| +| REQUEST_TO_SERVER, REQUEST_TO_CLIENT, RESPONSE_TO_SERVER, RESPONSE_TO_CLIENT, ASYNC_MESSAGE_TO_SERVER, ASYNC_MESSAGE_TO_CLIENT, BROADCAST_MESSAGE_TO_SERVER, BROADCAST_MESSAGE_TO_CLIENT, ASYNC_MESSAGE_TO_CLIENT_ACK, BROADCAST_MESSAGE_TO_CLIENT_ACK, RESPONSE_TO_CLIENT_ACK, REQUEST_TO_CLIENT_ACK|OpenMessage| +|REDIRECT_TO_CLIENT|RedirectInfo| + +### Example of Client-Server Interaction + +```java +public enum Command { + // Heartbeat + HEARTBEAT_REQUEST(0), // Client send heartbeat request to server + HEARTBEAT_RESPONSE(1), // Server reply heartbeat response to client + + // Hello + HELLO_REQUEST(2), // Client send connect request to server + HELLO_RESPONSE(3), // Server reply connect response to client + + // Disconncet + CLIENT_GOODBYE_REQUEST(4), // Client send disconnect request to server + CLIENT_GOODBYE_RESPONSE(5), // Server reply disconnect response to client + SERVER_GOODBYE_REQUEST(6), // Server send disconncet request to client + SERVER_GOODBYE_RESPONSE(7), // Client reply disconnect response to server + + // Subscribe and UnSubscribe + SUBSCRIBE_REQUEST(8), // Slient send subscribe request to server + SUBSCRIBE_RESPONSE(9), // Server reply subscribe response to client + UNSUBSCRIBE_REQUEST(10), // Client send unsubscribe request to server + UNSUBSCRIBE_RESPONSE(11), // Server reply unsubscribe response to client + + // Listen + LISTEN_REQUEST(12), // Client send listen request to server + LISTEN_RESPONSE(13), // Server reply listen response to client + + // Send sync message + REQUEST_TO_SERVER(14), // Client (Producer) send sync message to server + REQUEST_TO_CLIENT(15), // Server push sync message to client(Consumer) + REQUEST_TO_CLIENT_ACK(16), // Client (Consumer) send ack of sync message to server + RESPONSE_TO_SERVER(17), // Client (Consumer) send reply message to server + RESPONSE_TO_CLIENT(18), // Server push reply message to client(Producer) + RESPONSE_TO_CLIENT_ACK(19), // Client (Producer) send acknowledgement of reply message to server + + // Send async message + ASYNC_MESSAGE_TO_SERVER(20), // Client send async msg to server + ASYNC_MESSAGE_TO_SERVER_ACK(21), // Server reply ack of async msg to client + ASYNC_MESSAGE_TO_CLIENT(22), // Server push async msg to client + ASYNC_MESSAGE_TO_CLIENT_ACK(23), // Client reply ack of async msg to server + + // Send broadcast message + BROADCAST_MESSAGE_TO_SERVER(24), // Client send broadcast msg to server + BROADCAST_MESSAGE_TO_SERVER_ACK(25), // Server reply ack of broadcast msg to client + BROADCAST_MESSAGE_TO_CLIENT(26), // Server push broadcast msg to client + BROADCAST_MESSAGE_TO_CLIENT_ACK(27), // Client reply ack of broadcast msg to server + + // Redirect + REDIRECT_TO_CLIENT(30), // Server send redirect instruction to client +} +``` + +### Client-Initiated Interaction + +|Scene|Client Request|Server Response| +|-|-|-| +| Hello | HELLO_REQUEST | HELLO_RESPONSE | | +| Heartbeat | HEARTBEAT_REQUEST | HEARTBEAT_RESPONSE | | +| Subscribe | SUBSCRIBE_REQUEST | SUBSCRIBE_RESPONSE | | +| Unsubscribe | UNSUBSCRIBE_REQUEST | UNSUBSCRIBE_RESPONSE | | +| Listen | LISTEN_REQUEST | LISTEN_RESPONSE | | +| Send sync message | 
REQUEST_TO_SERVER | RESPONSE_TO_CLIENT | | +| Send the response of sync message| RESPONSE_TO_SERVER | N/A | | +| Send async message | ASYNC_MESSAGE_TO_SERVER | ASYNC_MESSAGE_TO_SERVER_ACK | | +| Send broadcast message | BROADCAST_MESSAGE_TO_SERVER | BROADCAST_MESSAGE_TO_SERVER_ACK | | +| Client start to disconnect | CLIENT_GOODBYE_REQUEST | CLIENT_GOODBYE_RESPONSE | | + +### Server-Initiated Interaction + +|Scene|Server Request|Client Response|Remark| +|-|-| ------------------------------- | ---- | +| Push sync message to client | REQUEST_TO_CLIENT | REQUEST_TO_CLIENT_ACK | | +| Push the response message of sync message to client | RESPONSE_TO_CLIENT | RESPONSE_TO_CLIENT_ACK | | +| Push async message to client | ASYNC_MESSAGE_TO_CLIENT | ASYNC_MESSAGE_TO_CLIENT_ACK | | +| Push broadcast message to client | BROADCAST_MESSAGE_TO_CLIENT | BROADCAST_MESSAGE_TO_CLIENT_ACK | | +| Server start to disconnect | SERVER_GOODBYE_REQUEST | -- | | +| Server send redirect | REDIRECT_TO_CLIENT | -- | | + +### Type of Message + +#### Sync Message + +![Sync Message](/images/design-document/sync-message.png) + +#### Async Message + +![Async Message](/images/design-document/async-message.png) + +#### Boardcast Message + +![Boardcast Message](/images/design-document/broadcast-message.png) + +## HTTP Protocol + +### Protocol Format + +The `EventMeshMessage` class in the [`EventMeshMessage.java` file](https://github.com/apache/eventmesh/blob/master/eventmesh-common/src/main/java/org/apache/eventmesh/common/EventMeshMessage.java) is the HTTP message definition of EventMesh Runtime. + +```java +public class EventMeshMessage { + private String bizSeqNo; + + private String uniqueId; + + private String topic; + + private String content; + + private Map prop; + + private final long createTime = System.currentTimeMillis(); +} +``` + +### HTTP Post Request + +#### Heartbeat Message + +##### Request Header + +| Key | Description | +| -------- | ---------------- | +| Env | Enviroment of Client | +| Region | Region of Client | +| Idc | IDC of Client | +| Dcn | DCN of Client | +| Sys | Subsystem ID of Client | +| Pid | Client Process ID | +| Ip | Client Ip | +| Username | Client username | +| Passwd | Client password | +| Version | Protocol version | +| Language | Develop language | +| Code | Request Code | + +##### Request Body + +|Key|Description| +|-|-| +|`clientType`|`ClientType.PUB` for Producer, `ClientType.SUB` for Consumer| +|`heartbeatEntities`|Topic, URL, etc.| + +#### Subscribe Message + +##### Request Header + +The request header of the Subscribe message is identical to the request header of the Heartbeat message. + +##### Request Body + +|Key|Description| +|-|-| +|`topic`|The topic that the client requested to subscribe to| +|`url`|The callback URL of the client| + +#### Unsubscribe Message + +##### Request Header + +The request header of the Unsubscribe message is identical to the request header of the Heartbeat message. + +##### Request Body + +The request body of the Unsubscribe message is identical to the request body of the Subscribe message. + +#### Send Async Message + +##### Request Header + +The request header of the Send Async message is identical to the request header of the Heartbeat message. 
+
+##### Request Body
+
+|Key|Description|
+|-|-|
+|`topic`|Topic of the message|
+|`content`|The content of the message|
+|`ttl`|The time-to-live of the message|
+|`bizSeqNo`|The biz sequence number of the message|
+|`uniqueId`|The unique ID of the message|
+
+### Client-Initiated Interaction
+
+|Scene|Client Request|Server Response|Remark|
+|-|-|-|-|
+| Heartbeat | HEARTBEAT(203) | SUCCESS(0) or EVENTMESH_HEARTBEAT_ERROR(19) | |
+| Subscribe | SUBSCRIBE(206) | SUCCESS(0) or EVENTMESH_SUBSCRIBE_ERROR(17) | |
+| Unsubscribe | UNSUBSCRIBE(207) | SUCCESS(0) or EVENTMESH_UNSUBSCRIBE_ERROR(18) | |
+| Send async message | MSG_SEND_ASYNC(104) | SUCCESS(0) or EVENTMESH_SEND_ASYNC_MSG_ERR(14) | |
+
+### Server-Initiated Interaction
+
+|Scene|Client Request|Server Response|Remark|
+|-|-|-|-|
+|Push async message to the client|HTTP_PUSH_CLIENT_ASYNC(105)|`retCode`|The push is successful if the `retCode` is `0`|
+
+## gRPC Protocol
+
+### Protobuf
+
+The `eventmesh-protocol-grpc` module contains the [protobuf definition file](https://github.com/apache/eventmesh/blob/master/eventmesh-protocol-plugin/eventmesh-protocol-grpc/src/main/proto/eventmesh-client.proto) of the EventMesh client. The `gradle build` command generates the gRPC code, which is located in `/build/generated/source/proto/main`. The generated gRPC code is used in the `eventmesh-sdk-java` module.
+
+### Data Model
+
+#### Message
+
+The message data model used by `publish()`, `requestReply()` and `broadcast()` APIs is defined as:
+
+```protobuf
+message RequestHeader {
+    string env = 1;
+    string region = 2;
+    string idc = 3;
+    string ip = 4;
+    string pid = 5;
+    string sys = 6;
+    string username = 7;
+    string password = 8;
+    string language = 9;
+    string protocolType = 10;
+    string protocolVersion = 11;
+    string protocolDesc = 12;
+}
+
+message SimpleMessage {
+    RequestHeader header = 1;
+    string producerGroup = 2;
+    string topic = 3;
+    string content = 4;
+    string ttl = 5;
+    string uniqueId = 6;
+    string seqNum = 7;
+    string tag = 8;
+    map<string, string> properties = 9;
+}
+
+message BatchMessage {
+    RequestHeader header = 1;
+    string producerGroup = 2;
+    string topic = 3;
+
+    message MessageItem {
+        string content = 1;
+        string ttl = 2;
+        string uniqueId = 3;
+        string seqNum = 4;
+        string tag = 5;
+        map<string, string> properties = 6;
+    }
+
+    repeated MessageItem messageItem = 4;
+}
+
+message Response {
+    string respCode = 1;
+    string respMsg = 2;
+    string respTime = 3;
+}
+```
+
+#### Subscription
+
+The subscription data model used by `subscribe()` and `unsubscribe()` APIs is defined as:
+
+```protobuf
+message Subscription {
+    RequestHeader header = 1;
+    string consumerGroup = 2;
+
+    message SubscriptionItem {
+        enum SubscriptionMode {
+            CLUSTERING = 0;
+            BROADCASTING = 1;
+        }
+
+        enum SubscriptionType {
+            ASYNC = 0;
+            SYNC = 1;
+        }
+
+        string topic = 1;
+        SubscriptionMode mode = 2;
+        SubscriptionType type = 3;
+    }
+
+    repeated SubscriptionItem subscriptionItems = 3;
+    string url = 4;
+}
+```
+
+#### Heartbeat
+
+The heartbeat data model used by the `heartbeat()` API is defined as:
+
+```protobuf
+message Heartbeat {
+    enum ClientType {
+        PUB = 0;
+        SUB = 1;
+    }
+
+    RequestHeader header = 1;
+    ClientType clientType = 2;
+    string producerGroup = 3;
+    string consumerGroup = 4;
+
+    message HeartbeatItem {
+        string topic = 1;
+        string url = 2;
+    }
+
+    repeated HeartbeatItem heartbeatItems = 5;
+}
+```
+
+### Service Definition
+
+#### Event Publisher Service
+
+```protobuf
+service PublisherService {
+   // Async event publish
+   rpc publish(SimpleMessage) returns 
(Response); + + // Sync event publish + rpc requestReply(SimpleMessage) returns (Response); + + // Batch event publish + rpc batchPublish(BatchMessage) returns (Response); +} +``` + +#### Event Consumer Service + +```protobuf +service ConsumerService { + // The subscribed event will be delivered by invoking the webhook url in the Subscription + rpc subscribe(Subscription) returns (Response); + + // The subscribed event will be delivered through stream of Message + rpc subscribeStream(Subscription) returns (stream SimpleMessage); + + rpc unsubscribe(Subscription) returns (Response); +} +``` + +#### Client Hearthbeat Service + +```protobuf +service HeartbeatService { + rpc heartbeat(Heartbeat) returns (Response); +} +``` diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md new file mode 100644 index 0000000000..8dcd90ee37 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/02-https.md @@ -0,0 +1,30 @@ +# HTTPS + +1. Configuration in eventmesh-runtime + +``` +eventMesh.properties (add the following configurations) +eventMesh.server.useTls.enabled=true // Default value: false + +Configuring environment variable +-Dssl.server.protocol=TLSv1.1 // Default value: TLSv1.1 +-Dssl.server.cer=sChat2.jks // Place the file in the conPath directory specified by the startup script start.sh +-Dssl.server.pass=sNetty +``` + +2. Configuration in eventmesh-sdk-java + +``` +// Create a producer +LiteClientConfig eventMeshHttpClientConfig = new LiteClientConfig(); +... + +// Enable TLS +eventMeshHttpClientConfig.setUseTls(true); +LiteProducer producer = new LiteProducer(eventMeshHttpClientConfig); + +// Configure environment variables +-Dssl.client.protocol=TLSv1.1 // Default value: TLSv1.1 +-Dssl.client.cer=sChat2.jks // Place the file in the conPath directory specified by the application +-Dssl.client.pass=sNetty +``` \ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md new file mode 100644 index 0000000000..bff91b4575 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/03-cloudevents.md @@ -0,0 +1,106 @@ +# CloudEvents Integration + +## Introduction + +[CloudEvents](https://github.com/cloudevents/spec) is a specification for describing event data in common formats to provide interoperability across services, platforms and systems. + +As of May 2021, EventMesh contains the following major components: `eventmesh-runtime`, `eventmesh-sdk-java` and `eventmesh-connector-rocketmq`. +For a customer to use EventMesh, `eventmesh-runtime` can be deployed as microservices to transmit +customer's events between event producers and consumers. Customer's applications can then interact +with `eventmesh-runtime` using `eventmesh-sdk-java` to publish/subscribe for events on given topics. + +CloudEvents support has been a highly desired feature by EventMesh users. There are many reasons +for users to prefer using a SDK with CloudEvents support: + +- CloudEvents is a more widely accepted and supported way to describe events. `eventmesh-sdk-java` + currently uses the `LiteMessage` structure to describe events, which is less standardized. 
+
+- CloudEvents's Java SDK has a wider range of distribution methods. For example, EventMesh users
+  currently need to use the SDK tarball or build from source for every EventMesh release. With
+  CloudEvents support, it's easier for users to take a dependency on EventMesh's SDK using CloudEvents's public distributions (e.g. through a Maven configuration).
+- CloudEvents's SDK supports multiple languages. Although EventMesh currently only supports a Java SDK, if more languages need to be supported in the future, the extensions will be easier to build with the experience of binding the Java SDK with CloudEvents.
+
+## Requirements
+
+### Functional Requirements
+
+| Requirement ID | Requirement Description | Comments |
+| -------------- | ----------------------- | -------- |
+| F-1 | EventMesh users should be able to depend on a public SDK to publish/subscribe events in CloudEvents format | Functionality |
+| F-2 | EventMesh users should continue to have access to existing EventMesh client features (e.g. load balancing) with an SDK that supports CloudEvents | Feature Parity |
+| F-3 | EventMesh developers should be able to sync `eventmesh-sdk-java` and an SDK with CloudEvents support without much effort/pain | Maintainability |
+| F-4 | EventMesh should support pluggable protocols so that developers can integrate other protocols (e.g. CloudEvents\EventMesh Message\OpenMessage\MQTT ...) | Functionality |
+| F-5 | EventMesh should support a unified API for publishing/subscribing events to/from the event store | Functionality |
+
+### Performance Requirements
+
+| Requirement ID | Requirement Description | Comments |
+| -------------- | ----------------------- | -------- |
+| P-1 | Client-side latency for the SDK with CloudEvents support should be similar to that of the current SDK | |
+
+## Design Details
+
+Binding with the CloudEvents Java SDK (similar to what Kafka already did, see References for more details)
+should be an easy way to achieve the requirements.
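+
+As a rough sketch of what that binding looks like (assuming the official CloudEvents Java SDK, `io.cloudevents:cloudevents-core`; the id, source, type, and payload below are made up for illustration), building an event with `CloudEventBuilder` could look like this:
+
+```java
+import java.net.URI;
+import java.nio.charset.StandardCharsets;
+
+import io.cloudevents.CloudEvent;
+import io.cloudevents.core.builder.CloudEventBuilder;
+
+public class CloudEventSketch {
+
+    public static void main(String[] args) {
+        // Build a spec-compliant event with the CloudEvents Java SDK.
+        CloudEvent event = CloudEventBuilder.v1()
+                .withId("36d1f6e4-0001-example")               // unique event id (illustrative)
+                .withSource(URI.create("/eventmesh/example"))  // event source (illustrative)
+                .withType("eventmesh.example.created")         // event type (illustrative)
+                .withDataContentType("application/json")
+                .withData("{\"msg\":\"Hello CloudEvents!\"}".getBytes(StandardCharsets.UTF_8))
+                .build();
+
+        // A CloudEvents-aware SDK (e.g. LiteProducer.publish(cloudEvent), mentioned below)
+        // can then hand the event to EventMesh without a proprietary message structure.
+        System.out.println(event.getId() + " " + event.getType());
+    }
+}
+```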
+
+### Pluggable Protocols
+
+![Pluggable Protocols](/images/design-document/cloudevents-pluggable-protocols.png)
+
+### Process of CloudEvents under EventMesh
+
+#### For TCP
+
+##### SDK side for publish
+
+- add the CloudEvents identifier in the `package` header
+- use `CloudEventBuilder` to build the CloudEvent and put it into the `package` body
+
+##### SDK side for subscribe
+
+- add a `convert` function to the `ReceiveMsgHook` interface for converting the `package` body to the specific protocol indicated by the identifier in the `package` header
+- different protocols should implement the `ReceiveMsgHook` interface
+
+##### Server side for publish
+
+- design the protocol conversion API, which contains a `decodeMessage` interface that converts the package's body to a CloudEvent
+- update `Session.upstreamMsg()` in `MessageTransferTask` to change the input parameter from `Message` to `CloudEvent`; the CloudEvent is produced by the `decodeMessage` API from the previous step
+- update `SessionSender.send()` to change the input parameter from `Message` to `CloudEvent`
+- update the `MeshMQProducer` API to support sending `CloudEvents` in the runtime
+- support the implementation in `connector-plugin` for sending `CloudEvents` to the event store
+
+##### Server side for subscribe
+
+- support changing `RocketMessage` to `CloudEvent` in the connector-plugin
+
+- override the `AsyncMessageListener.consume()` function to change the input parameter from `Message` to `CloudEvent`
+
+- update the `MeshMQPushConsumer.updateOffset()` implementation to change the input parameter from `Message` to `CloudEvent`
+
+- update `DownStreamMsgContext` to change the input parameter from `Message` to `CloudEvent`, and update `DownStreamMsgContext.ackMsg`
+
+#### For HTTP
+
+##### SDK side for publish
+
+- support `LiteProducer.publish(cloudEvent)`
+- add the CloudEvents identifier in the HTTP request header
+
+##### SDK side for subscribe
+
+##### Server side for publish
+
+- support building the `HttpCommand.body` with pluggable protocol plugins according to the protocol type in the `HttpCommand` header
+- support publishing the CloudEvent in the message processors
+
+##### Server side for subscribe
+
+- update `EventMeshConsumer.subscribe()`
+
+- update `HandleMsgContext` to change the input parameter from `Message` to `CloudEvent`
+- update `AsyncHttpPushRequest.tryHTTPRequest()`
+
+## Appendix
+
+### References
+
+- 
diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md
new file mode 100644
index 0000000000..0bacec2808
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/04-event-bridge.md
@@ -0,0 +1,156 @@
+# Event Bridge
+
+![event-bridge](/images/eventmesh-bridge.png)
+
+Event Bridge supports message delivery across mesh clusters. The detailed design of this feature and the steps to try it out are shown below.
+
+![event-bridge-detail](/images/design-document/event-bridge-detail.png)
+
+> Note: To try this feature locally you need to start two EventMesh instances, and you must change the port configuration in the `eventmesh.properties` file under the `eventmesh-runtime` directory to avoid port conflicts. For ease of description, the event-bridge feature is described below according to the figure above.
+
+## 01 Remote Subscribe
+
+**Description**: Send a remote subscription command to the cluster2 EventMesh. After receiving the command, the cluster2 EventMesh calls the local subscription interface of the cluster1 EventMesh with the subscription information.
+
+**URL**: http://{cluster2 address}/eventmesh/subscribe/remote
+
+**Method**: POST
+
+**Request Parameters**: application/json format
+
+| Parameter | Type | Required | Description |
+| ------------- | ------ | -------- | ------------------------------------------------------------ |
+| url | String | Yes | The subscription URL; currently unused and may be removed later, it is only strictly validated and is actually replaced with (/eventmesh/bridge/publish) |
+| consumerGroup | String | Yes | The consumer group; it is actually replaced with the configuration of the cluster2 EventMesh 
| topic | List | Yes | The list of subscription items |
+| mode | String | Yes | The consumption mode, either clustering or broadcasting |
+| topic | String | Yes | The topic to subscribe to |
+| type | String | Yes | The consumption type, either sync or async |
+| remoteMesh | String | No | The address of the remote mesh; the address obtained from the registry by topic takes priority, and this field is used only when that lookup fails |
+
+**Request Example**:
+
+```json
+{
+    "url": "http://127.0.0.1:8088/sub/test",
+    "consumerGroup": "TEST-GROUP",
+    "topic": [
+        {
+            "mode": "CLUSTERING",
+            "topic": "TEST-TOPIC-HTTP-ASYNC",
+            "type": "ASYNC"
+        }
+    ],
+    "remoteMesh" : "http://127.0.0.1:10105/eventmesh/subscribe/local"
+}
+```
+
+## 02 Local Subscribe
+
+**Description**: Send a local subscription command to the cluster2 EventMesh instance. After receiving the subscription command, the cluster2 EventMesh starts listening locally for messages received from the event store and pushes them to the URL in the subscription information.
+
+**URL**: http://{cluster2 address}/eventmesh/subscribe/local
+
+**Method**: POST
+
+**Request Parameters**: application/json format
+
+| Parameter | Type | Required | Description |
+| ------------- | ------ | -------- | ------------------------------------ |
+| url | String | Yes | The subscription URL |
+| consumerGroup | String | Yes | The consumer group |
+| topic | List | Yes | The list of subscription items |
+| mode | String | Yes | The consumption mode, either clustering or broadcasting |
+| topic | String | Yes | The topic to subscribe to |
+| type | String | Yes | The consumption type, either sync or async |
+
+**Request Example**:
+
+```JSON
+{
+    "url": "http://127.0.0.1:8088/sub/test",
+    "consumerGroup": "TEST-GROUP",
+    "topic": [
+        {
+            "mode": "CLUSTERING",
+            "topic": "TEST-TOPIC-HTTP-ASYNC",
+            "type": "ASYNC"
+        }
+    ]
+}
+```
+
+## 03 Send Message
+
+**Description**: Send a message to the cluster1 EventMesh instance. After receiving the message, the cluster1 EventMesh sends it to the event store, receives it back from the event store, and then pushes it to the cluster2 EventMesh URL `/eventmesh/bridge/publish`.
+
+**URL**: http://{cluster1 address}/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC
+
+**Method**: POST
+
+**Request Parameters**: application/json format
+
+**Request Example**:
+
+```json
+{
+    "name":"test",
+    "age":"19"
+}
+```
+
+## 04 Remote Unsubscribe
+
+**Description**: Send an unsubscribe command to the cluster2 EventMesh instance. After receiving the command, the cluster2 EventMesh forwards it to the cluster1 EventMesh, and the cluster1 EventMesh performs the unsubscription locally.
+
+**URL**: http://{cluster2 address}/eventmesh/unsubscribe/remote
+
+**Method**: POST
+
+**Request Parameters**: application/json format
+
+| Parameter | Type | Required | Description |
+| ------------- | ------ | -------- | ------------------------------------------------------------ |
+| url | String | Yes | The subscription URL to remove; currently unused and may be removed later, it is only strictly validated and is actually replaced with (/eventmesh/bridge/publish) |
+| consumerGroup | String | Yes | The consumer group to remove; it is actually replaced with the group information of the cluster2 EventMesh |
+| topic | List | Yes | The list of subscribed topics |
+
+**Request Example**:
+
+```json
+{
+    "consumerGroup": "EventMeshTest-consumerGroup",
+    "url": "http://127.0.0.1:8088/sub/test",
+    "topic": [
+        "TEST-TOPIC-HTTP-ASYNC"
+    ]
+}
+```
+
+## 05 Local Unsubscribe
+
+**Description**: Send an unsubscribe command to the cluster2 EventMesh instance. After receiving the command, the cluster2 EventMesh performs the unsubscription locally.
+
+**URL**: http://{cluster2 address}/eventmesh/unsubscribe/local
+
+**Method**: POST
+
+**Request Parameters**: application/json format
+
+| Parameter | Type | Required | Description |
+| ------------- | ------ | -------- | ---------------------- |
+| url | String | Yes | The subscription URL to remove |
+| consumerGroup | String | Yes | The consumer group to remove |
+| topic | List | Yes | The list of subscribed topics |
+
+**Request Example**:
+
+```json
+{
+    "consumerGroup": "EventMeshTest-consumerGroup",
+    "url": "http://127.0.0.1:8088/sub/test",
+    "topic": [
+        "TEST-TOPIC-HTTP-ASYNC"
+    ]
+}
+```
diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md
new file mode 100644
index 0000000000..84aab72720
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/05-webhook.md
@@ -0,0 +1,320 @@
+# Use Webhook to subscribe to events
+## Webhook usage process
+
+### The first step: Configure Webhook related information in EventMesh and start
+
+Configuration:
+
+```
+# Whether 
to start the Webhook admin service +eventMesh.webHook.admin.start=true + +# Webhook event configuration storage mode. But currently only supports file and nacos +eventMesh.webHook.operationMode=file + +# The file path of fileMode. If you write #{eventMeshHome}, in the eventMesh root directory +eventMesh.webHook.fileMode.filePath= #{eventMeshHome}/webhook + +# The nacos storage mode. The configuration naming rule is eventMesh.webHook.nacosMode.{nacos native configuration key} For the specific configuration, please see [nacos github api](https://github.com/alibaba/nacos/blob/develop/api/src/main/java /com/alibaba/nacos/api/SystemPropertyKeyConst.java) +## address of nacos +eventMesh.webHook.nacosMode.serverAddr=127.0.0.1:8848 + +# Webhook CloudEvent sending mode. This property is the same as the eventMesh.storage.plugin.type configuration. +eventMesh.webHook.producer.connector=standalone +``` + +### The second step: Add Webhook configuration information + +Configuration information description: + +```java + /** + * The path called by the manufacturer. Manufacturer event call address, [http or https]://[domain or IP]:[port]/webhook/[callbackPath] + * for example: http://127.0.0.1:10504/webhook/test/event , The full url needs to be filled in the manufacturer call input + * callbackPath is the only + */ + private String callbackPath; + + /** + * manufacturer name, like github + */ + private String manufacturerName; + + /** + * manufacturer domain name, like www.github.com + */ + private String manufacturerDomain; + + /** + * Webhook event name, like rep-push + */ + private String manufacturerEventName; + + /** + * http header content type + */ + private String contentType = "application/json"; + + /** + * description of this WebHookConfig + */ + private String description; + + /** + * secret key, for authentication + */ + private String secret; + + /** + * userName, for HTTP authentication + */ + private String userName; + + /** + * password, for HTTP authentication + */ + private String password; + + + /** + * roll out event name, like topic to mq + */ + private String cloudEventName; + + /** + * roll out data format -> CloudEvent serialization mode + * If HTTP protocol is used, the request header contentType needs to be marked + */ + private String dataContentType = "application/json"; + + /** + * id of cloudEvent, like uuid/manufacturerEventId + */ + private String cloudEventIdGenerateMode; + +``` + +#### Add WebHook config + +path: /webhook/insertWebHookConfig + +method: POST + +contentType: application/json + +input params: + +| field | desc | type | necessary | default | +| -- | -- | -- | -- | -- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | manufacturer name | string | Y | null | +| manufacturerDomain | manufacturer domain name | string | Y | null | +| manufacturerEventName | manufacturer event name | string | Y | null | +| contentType | http connettype | string | N | application/json | +| description | configuration instructions | string | N | null | +| secret | signature string | string | N | null | +| userName | username | string | N | null | +| password | password | string | N | null | +| cloudEventName | cloudEvent name | string | Y | null | +| cloudEventIdGenerateMode | cloudEvent event object identification method, uuid or event id | string | N |manufacturerEventId| + +E.g: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github", + "manufacturerDomain":"www.github.com", + 
"manufacturerEventName":"all", + "cloudEventName":"github-eventmesh" +} +``` + +Output params: 1 for success, 0 for failure + +#### Query WebHook config by callback path + +path: /webhook/queryWebHookConfigById + +method: POST + +contentType: application/json + +input params: + +| field | desc | type | necessary | default | +| -- | -- | -- | -- | -- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | the caller of this callbackPath belongs to | string | Y | null | + +E.g: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github" +} +``` + +Output params: + +| field | desc | type | necessary | default | +| -- | -- | -- | -- | -- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | manufacturer name | string | Y | null | +| manufacturerDomain | manufacturer domain name | string | Y | null | +| manufacturerEventName | manufacturer event name | string | Y | null | +| contentType | http connettype | string | N | application/json | +| description | configuration instructions | string | N | null | +| secret | signature key | string | N | null | +| userName | user name | string | N | null | +| password | password | string | N | null | +| cloudEventName | cloudEvent name | string | Y | null | +| cloudEventIdGenerateMode | cloudEvent event object identification method, uuid or event id | string | N | manufacturerEventId | + +#### Query WebHook config by manufacturer + +path: /webhook/queryWebHookConfigByManufacturer + +method: POST + +contentType: application/json + +input params: + +| field | desc | type | necessary | default | +| -- | -- | -- | -- | -- | +| manufacturerName | manufacturer name | string | Y | null | +| pageNum | page number of paging query | string | Y | null | +| pageSize | page size of each page | string | Y | null | + +E.g: + +```json +{ + "manufacturerName":"github", + "pageNum":1, + "pageSize":2 +} +``` + +Output params: + +| field | desc | type | necessary | default | +| -- | -- | -- | -- | -- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | manufacturer name | string | Y | null | +| manufacturerDomain | manufacturer domain name | string | Y | null | +| manufacturerEventName | manufacturer event name | string | Y | null | +| contentType | http connettype | string | N | application/json | +| description | configuration instructions | string | N | null | +| secret | signature key | string | N | null | +| userName | user name | string | N | null | +| password | password | string | N | null | +| cloudEventName | cloudEvent name | string | Y | null | +| cloudEventIdGenerateMode | cloudEvent event object identification method, uuid or event id | string | N | manufacturerEventId | + +#### Update WebHook config + +path: /webhook/updateWebHookConfig + +method: POST + +contentType: application/json + +input params: + +| field | desc | type | necessary | default | +| ------------------------ | ------------------------------------------------------------ | ------ | --------- | ------------------- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | manufacturer name | string | Y | null | +| manufacturerDomain | manufacturer domain name | string | Y | null | +| manufacturerEventName | manufacturer event name | string | Y | null | +| contentType | http connettype | string | N | application/json | +| description | configuration instructions | string | N | null | +| secret | signature string | 
string | N | null | +| userName | username | string | N | null | +| password | password | string | N | null | +| cloudEventName | cloudEvent name | string | Y | null | +| cloudEventIdGenerateMode | cloudEvent event object identification method, uuid or event id | string | N | manufacturerEventId | + +E.g: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github", + "manufacturerDomain":"www.github.com", + "manufacturerEventName":"all", + "cloudEventName":"github-eventmesh" +} +``` + +Output params: 1 for success, 0 for failure + +#### Delete WebHook config + +path: /webhook/deleteWebHookConfig + +method: POST + +contentType: application/json + +input params: + +| field | desc | type | necessary | default | +| ---------------- | ------------------------------------------ | ------ | --------- | ------- | +| callbackPath | call address, unique address | string | Y | null | +| manufacturerName | the caller of this callbackPath belongs to | string | Y | null | + +E.g: + +```json +{ + "callbackPath":"/webhook/github/eventmesh/all", + "manufacturerName":"github" +} +``` + +Output params: 1 for success, 0 for failure + +### The third step: Check if the configuration is successful + +1. file storage mode. Please go to the eventMesh.webHook.fileMode.filePath directory to view. The Filename is callbackPath. +2. nacos storage mode. Please go to the nacos service configured by eventMesh.webHook.nacosMode.serverAddr to see this. + +### The fourth step: Configure the consumer of cloudevent + +### The fifth step: Configure Webhook related information in the manufacturer + +> For manufacturer's operation, please refer to [Manufacturer's Webhook operation instructions](#Manufacturer's-Webhook-operation-instructions). + +## Manufacturer's Webhook operation instructions + +### GitHub sign up + +#### The first step: Enter the corresponding project + +#### The second step: click setting + +![](/images/design-document/webhook/webhook-github-setting.png) + +#### The third step: click Webhooks + +![](/images/design-document/webhook/webhook-github-webhooks.png) + +#### The fourth step: Click on Add Webhook + +![](/images/design-document/webhook/webhook-github-add.png) + +#### The fifth step: Fill in the Webhook information + +![](/images/design-document/webhook/webhook-github-info.png) + +Payload URL: EventMesh service address and callbackPath, which must include the protocol header. For example, when the callback address `callbackPath` is `/webhook/github/eventmesh/all`, the Payload URL is `http://www.example.com:10105/webhook/github/eventmesh/all`. + +[http or https]://[domain or IP]:[port]/webhook/[callbackPath] + +Content type: http header content type + +Secret: signature string \ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md new file mode 100644 index 0000000000..eff287bad6 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/06-workflow.md @@ -0,0 +1,261 @@ +# EventMesh Workflow + +## Business Problem + +Imaging you are building a simple Order Management System for an E-Commerce Store. The system should be able to receive and provision new orders from a store website. The provisioning process should be able to process all orders, handle payments, as well as process shipments. 
+
+For high availability and high performance, you architect the system using event-driven architecture (EDA), and build microservice apps to handle store frontend, order management, payment processing, and shipment management. You deploy the whole system in a cloud environment. To handle high workloads, you leverage a messaging system to buffer the loads, and scale up multiple instances of microservices. The architecture could look similar to:
+
+![Workflow Use Case](/images/design-document/workflow-use-case.jpg)
+
+While each microservice is acting on its own event channels, EventMesh plays a crucial role in event orchestration.
+
+We use [CNCF Serverless Workflow](https://serverlessworkflow.io/) to describe this Event Workflow Orchestration.
+
+## CNCF Serverless Workflow
+
+CNCF Serverless Workflow defines a vendor-neutral, open-source, and fully community-driven ecosystem for defining and running DSL-based workflows that target the Serverless technology domain.
+
+Serverless Workflow defines a Domain Specific Language (DSL) to describe stateful and stateless workflow-based orchestrations of serverless functions and microservices.
+
+More details can be found in its [official GitHub repository](https://github.com/serverlessworkflow/specification).
+
+## EventMesh Workflow
+
+We leverage the Serverless Workflow DSL to describe the EventMesh workflow. Based on its spec, the workflow consists of a series of workflow states used to describe the control-flow logic. At this time, we only support event-related workflow states. See the supported states in [Workflow DSL Design](#workflow-dsl-design-wip).
+
+A `workflow state` can include applicable `actions`, or services/functions that should be invoked during workflow execution. These `actions` can reference reusable `function` definitions which define how these functions/services should be invoked. They can also reference events that trigger event-based service invocations, and events to wait for that denote the completion of such event-based service invocations.
+
+In an EDA solution, we usually define our event-driven microservices using AsyncAPI. Serverless Workflow `function` definitions support defining invocation semantics using AsyncAPI. See [Using Functions for AsyncAPI Service](https://github.com/serverlessworkflow/specification/blob/main/specification.md#using-functions-for-async-api-service-invocations) for more information.
+
+### AsyncAPI
+
+AsyncAPI is an open source initiative that seeks to improve the current state of Event-Driven Architectures (EDA).
+Its long-term goal is to make working with EDAs as easy as it is to work with REST APIs.
+That goes from documentation to code generation, discovery to event management.
+Most of the processes you apply to your REST APIs nowadays would be applicable to your event-driven/asynchronous APIs too.
+
+See the AsyncAPI details on the [official site](https://www.asyncapi.com/docs/guides).
+
+### Workflow Example
+
+In this example, we build the event-driven workflow of the Order Management System described above.
+
+First, we need to define AsyncAPI definitions for our microservice apps.
+ +- Online Store App + +```yaml +asyncapi: 2.2.0 +info: + title: Online Store application + version: '0.1.0' +channels: + store/order: + subscribe: + operationId: newStoreOrder + message: + $ref : '#/components/NewOrder' + +``` + +- Order Service + +```yaml +asyncapi: 2.2.0 +info: + title: Order Service + version: '0.1.0' +channels: + order/inbound: + publish: + operationId: sendOrder + message: + $ref : '#/components/Order' + order/outbound: + subscribe: + operationId: processedOrder + message: + $ref : '#/components/Order' +``` + +- Payment Service + +```yaml +asyncapi: 2.2.0 +info: + title: Payment Service + version: '0.1.0' +channels: + payment/inbound: + publish: + operationId: sendPayment + message: + $ref : '#/components/OrderPayment' + payment/outbound: + subscribe: + operationId: paymentReceipt + message: + $ref : '#/components/OrderPayment' +``` + +- Shipment Service + +```yaml +asyncapi: 2.2.0 +info: + title: Shipment Service + version: '0.1.0' +channels: + shipment/inbound: + publish: + operationId: sendShipment + message: + $ref : '#/components/OrderShipment' +``` + +Once that is defined, we define the order workflow that describes our Order Management business logic. + +```yaml +id: storeorderworkflow +version: '1.0' +specVersion: '0.8' +name: Store Order Management Workflow +states: + - name: Receive New Order Event + type: event + onEvents: + - eventRefs: + - NewOrderEvent + actions: + - eventRef: + triggerEventRef: OrderServiceSendEvent + resultEventRef: OrderServiceResultEvent + - eventRef: + triggerEventRef: PaymentServiceSendEvent + resultEventRef: PaymentServiceResultEvent + transition: Check Payment Status + - name: Check Payment Status + type: switch + dataConditions: + - name: Payment Successfull + condition: "${ .payment.status == 'success' }" + transition: Send Order Shipment + - name: Payment Denied + condition: "${ .payment.status == 'denied' }" + end: true + defaultCondition: + end: true + - name: Send Order Shipment + type: operation + actions: + - eventRef: + triggerEventRef: ShipmentServiceSendEvent + end: true +events: + - name: NewOrderEvent + source: file://onlineStoreApp.yaml#newStoreOrder + type: asyncapi + kind: consumed + - name: OrderServiceSendEvent + source: file://orderService.yaml#sendOrder + type: asyncapi + kind: produced + - name: OrderServiceResultEvent + source: file://orderService.yaml#processedOrder + type: asyncapi + kind: consumed + - name: PaymentServiceSendEvent + source: file://paymentService.yaml#sendPayment + type: asyncapi + kind: produced + - name: PaymentServiceResultEvent + source: file://paymentService.yaml#paymentReceipt + type: asyncapi + kind: consumed + - name: ShipmentServiceSendEvent + source: file://shipmentService.yaml#sendShipment + type: asyncapi + kind: produced +``` + +The corresponding workflow diagram is the following: + +![Workflow Diagram](/images/design-document/workflow-diagram.png) + +## EventMesh Workflow Engine + +In the following architecture diagram, the EventMesh Catalog, EventMesh Workflow Engine and EventMesh Runtime are running in three different processors. + +![Workflow Architecture](/images/design-document/workflow-architecture.jpg) + +The steps running the workflow is the followings: + +1. Deploy the Publisher and Subscriber Apps in the environment. + Describe the App APIs using AsyncAPI, generate the asyncAPI yaml. + Register the Publisher and Subscriber Apps in EventMesh Catalog using AsyncAPI. + +2. Register the Serverless Workflow DSL in EventMesh Workflow Engine. + +3. 
EventMesh Workflow Engine query the EventMesh Catalog for Publisher and Subscribers required in Workflow DSL `function` + +4. Event-driven Apps are publish events to EventMesh Runtime to trigger the Workflow. EventMesh Workflow Engine also publish and subscribe events for orchestrating the events. + +### EventMesh Catalog Design + +EventMesh Catalog store the Publisher, Subscriber and Channel metadata. consists of the following modules: + +- AsyncAPI Parser + + Using the SDK provided by AsyncAPI community (see [tool list](https://www.asyncapi.com/docs/community/tooling)), + parse and validated the AsyncAPI yaml inputs, and generate the AsyncAPI definition. + +- Publisher, Channel, Subscriber Modules + + From the AsyncAPI definition store the Publisher, Subscriber and Channel information. + +### EventMesh Workflow Engine Design + +EventMesh Workflow Engine consists of the following modules: + +- Workflow Parser + + Using the SDK provided by Serverless Workflow community (see supported [SDKs](https://github.com/serverlessworkflow/specification#sdks)), + parse and validated the workflow DSL inputs, and generate workflow definition. + +- Workflow Module + + It manages a workflow instance life cycle, from create, start, stop to destroy. + +- State Module + + It manages workflow state life cycle. We support the event-related states, and the supported state list below is Work-in-Progress. + + | Workflow State | Description | + | --- | --- | + | Operation | Execute the AsyncAPI functions defined in the Actions | + | Event | Check if the defined Event matched, if so execute the defined AsyncAPI functions | + | Switch | Check the event is matched with the event-conditions, and execute teh defined AsyncAPI functions | + | Parallel | Execute the defined AsyncAPI functions in parallel | + | ForEach | Iterate the inputCollection and execute the defined AsyncAPI functions | + +- Action Module + + It managed the functions inside the action. + +- Function Module + + It manages the AsyncAPI functions by creating the publisher and/or subscriber in EventMesh Runtime, and manage the publisher/subscriber life cycle. + + | AsyncAPI Operation | EventMesh Runtime | + | --- | --- | + | Publish | Publisher | + | Subscribe | Subscriber | + +- Event Module + + It manages the CloudEvents data model, including event filter, correlation and transformation using the rules defined in the workflow DSL. + +- Retry Module + + It manages the retry logic of the event publishing into EventMesh Runtime. diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md new file mode 100644 index 0000000000..a8e722da20 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/07-knative-connector.md @@ -0,0 +1,93 @@ +# Knative Connector + +## Prerequisite +### Create Knative Source and Sink +We use the *cloudevents-player* [Knative service](https://knative.dev/docs/serving/) as an example. If you do not know how to create *cloudevents-player* Knative service as source and sink, please follow the steps in this [link](https://knative.dev/docs/getting-started/first-source/#creating-your-first-source). + + +### Set up EventMesh Configuration +- Add the following lines to [eventmesh-starter/build.gradle](https://github.com/apache/eventmesh/blob/master/eventmesh-starter/build.gradle) file. 
+``` +plugins { + id 'application' +} + +application { + mainClass = project.hasProperty("mainClass") ? project.getProperty("mainClass") : 'org.apache.eventmesh.starter.StartUp' + applicationDefaultJvmArgs = [ + '-Dlog4j.configurationFile=../eventmesh-runtime/conf/log4j2.xml', '-Deventmesh.log.home=../eventmesh-runtime/logs', '-Deventmesh.home=../eventmesh-runtime', '-DconfPath=../eventmesh-runtime/conf' + ] +} + +dependencies { + implementation project(":eventmesh-connector-plugin:eventmesh-connector-knative") + implementation project(":eventmesh-runtime") +} +``` +- Add the following lines to [eventmesh-examples/build.gradle](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/build.gradle) file. +``` +plugins { + id 'application' +} + +application { + mainClass = project.hasProperty("mainClass") ? project.getProperty("mainClass") : 'NULL' +} +``` +- Set ```eventMesh.connector.plugin.type=knative``` in [eventmesh-runtime/conf/eventmesh.properties](https://github.com/apache/eventmesh/blob/master/eventmesh-runtime/conf/eventmesh.properties) file. + +## Demo +### Publish an Event Message from Knative and Subscribe from EventMesh +#### Step 1: Start an Eventmesh-Runtime Server +```bash +$ cd eventmesh-starter +$ ../gradlew -PmainClass=org.apache.eventmesh.starter.StartUp run +``` + +#### Step 2: Publish an Event Message from Knative +```bash +$ curl -i http://cloudevents-player.default.127.0.0.1.sslip.io -H "Content-Type: application/json" -H "Ce-Id: 123456789" -H "Ce-Specversion: 1.0" -H "Ce-Type: some-type" -H "Ce-Source: command-line" -d '{"msg":"Hello CloudEvents!"}' +``` + +#### Step 3: Subscribe from an EventMesh +- Set ```public static final String EVENTMESH_HTTP_ASYNC_TEST_TOPIC = "messages";``` in [ExampleConstants.java](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/common/ExampleConstants.java) file. +```bash +$ cd eventmesh-examples +$ ../gradlew -PmainClass=org.apache.eventmesh.http.demo.sub.SpringBootDemoApplication run +``` + +#### Expected Result +The following message with ```data``` field as ```Hello CloudEvents!``` will be printed on the console of EventMesh server. +```bash +2022-09-05 16:37:58,237 INFO [eventMesh-clientManage-] DefaultConsumer(DefaultConsumer.java:60) - \ +[{"event":{"attributes":{"datacontenttype":"application/json","id":"123456789","mediaType":"application/json",\ +"source":"command-line","specversion":"1.0","type":"some-type"},"data":{"msg":"Hello CloudEvents!"},"extensions":{}},\ +"id":"123456789","receivedAt":"2022-09-05T10:37:49.537658+02:00[Europe/Madrid]","type":"RECEIVED"}] +``` + +### Publish an Event Message from EventMesh and Subscribe from Knative +#### Step 1: Start an Eventmesh-Runtime Server +```bash +$ cd eventmesh-starter +$ ../gradlew -PmainClass=org.apache.eventmesh.starter.StartUp run +``` + +#### Step 2: Publish an Event Message from EventMesh +We use a test program to demo this function. +```bash +$ cd eventmesh-connector-plugin/eventmesh-connector-knative +$ ../../gradlew clean test --tests KnativeProducerImplTest.testPublish +``` + +#### Step 3: Subscribe from Knative +```bash +$ curl http://cloudevents-player.default.127.0.0.1.sslip.io/messages +``` + +#### Expected Result +The following message with ```data``` field as ```Hello Knative from EventMesh!``` will be printed on the console of EventMesh server. 
+```bash
+2022-09-05 16:52:41,633 INFO [eventMesh-clientManage-] DefaultConsumer(DefaultConsumer.java:60) - \
+[{"event":{"attributes":{"datacontenttype":"application/json","id":"1234","mediaType":"application/json",\
+"source":"java-client","specversion":"1.0","type":"some-type"},"data":{"msg":["Hello Knative from EventMesh!"]},"extensions":{}},"id":"1234","receivedAt":"2022-09-05T10:52:32.999273+02:00[Europe/Madrid]","type":"RECEIVED"}]
+```
\ No newline at end of file
diff --git a/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json
new file mode 100644
index 0000000000..7706f45770
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/01-event-handling-and-integration/_category_.json
@@ -0,0 +1,4 @@
+{
+  "label": "Event Handling and Integration",
+  "collapsed": false
+}
diff --git a/versioned_docs/version-v1.10.0/design-document/01-spi.md b/versioned_docs/version-v1.10.0/design-document/01-spi.md
new file mode 100644
index 0000000000..94da666e7f
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/01-spi.md
@@ -0,0 +1,113 @@
+# Service Provider Interface
+
+## Introduction
+
+In order to improve scalability, EventMesh introduces the SPI (Service Provider Interface) mechanism, which can help to automatically find the concrete implementation
+class of the extended interface at runtime and load it dynamically. In EventMesh, all extension modules are implemented as plugins.
+Users can develop custom plugins by simply implementing the extended interfaces, and select the plugin to run at runtime by declaring it in the configuration.
+
+## eventmesh-spi module
+
+The implementation of SPI is in the eventmesh-spi module, which has three main classes: `EventMeshSPI`, `EventMeshExtensionFactory` and `ExtensionClassLoader`.
+
+### EventMeshSPI
+
+EventMeshSPI is the SPI declaration annotation; every extended interface that is meant to be implemented should be declared with @EventMeshSPI.
+
+```java
+@Documented
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.TYPE})
+public @interface EventMeshSPI {
+    /**
+     * If true, the spi instance is singleton
+     */
+    boolean isSingleton() default false;
+}
+```
+
+Using an annotation to declare that an interface is an SPI extended interface improves the readability of the code.
+On the other hand, @EventMeshSPI contains an isSingleton attribute which is used to declare whether the extension instance is a singleton.
+If this attribute is true, the instance of this interface will be a singleton.
+
+### EventMeshExtensionFactory
+
+EventMeshExtensionFactory is a factory used to get the SPI extension instance. It contains a static method `getExtension(Class<T> extensionType, String extensionName)`.
+
+```java
+public enum EventMeshExtensionFactory {
+    /**
+     * @param extensionType extension plugin class type
+     * @param extensionName extension instance name
+     * @param <T>           the type of the plugin
+     * @return plugin instance
+     */
+    public static <T> T getExtension(Class<T> extensionType, String extensionName) {
+        /* ... */
+    }
+}
+```
+
+If you want to get the extension instance, you should use EventMeshExtensionFactory.
+
+### ExtensionClassLoader
+
+ExtensionClassLoader is used to load extension instance classes; it has two subclasses, MetaInfExtensionClassLoader and JarExtensionClassLoader.
+
+```java
+/**
+ * Load extension class
+ * <ul>
+ *     <li>{@link MetaInfExtensionClassLoader}</li>
+ *     <li>{@link JarExtensionClassLoader}</li>
+ * </ul>
+ */
+public interface ExtensionClassLoader {
+    /**
+     * load
+     *
+     * @param extensionType extension type class
+     * @param <T>           extension type
+     * @return extension instance name to extension instance class
+     */
+    <T> Map<String, Class<? extends T>> loadExtensionClass(Class<T> extensionType);
+}
+```
+
+MetaInfExtensionClassLoader is used to load classes from the classpath, and JarExtensionClassLoader is used to load classes from extension jars in the plugin directory.
+In the future, we might support loading from the Maven repository.
+
+## SPI use case
+
+The following is an example of how to use the SPI to declare a plugin.
+
+First, we create an eventmesh-connector-api module and define the extension interface MeshMQProducer. We use @EventMeshSPI on MeshMQProducer,
+which indicates that MeshMQProducer is an SPI interface.
+
+```java
+@EventMeshSPI(isSingleton = false)
+public interface MeshMQProducer extends Producer {
+    /* ... */
+}
+```
+
+Then we create an eventmesh-connector-rocketmq module, which contains the concrete implementation named RocketMQProducerImpl.
+
+```java
+public class RocketMQProducerImpl implements MeshMQProducer {
+    /* ... */
+}
+```
+
+At the same time, we need to create a file with the fully qualified name of the SPI interface under the resource/META-INF/eventmesh directory
+in the eventmesh-connector-rocketmq module.
+
+org.apache.eventmesh.api.producer.Producer
+
+The content of the file is the extension instance name and the corresponding full class name of the instance:
+
+```properties
+rocketmq=org.apache.eventmesh.connector.rocketmq.producer.RocketMQProducerImpl
+```
+
+At this point, an SPI extension module is complete. We can use `EventMeshExtensionFactory.getExtension(MeshMQProducer.class, "rocketmq")` to get the `RocketMQProducerImpl` instance.
diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md b/versioned_docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md
new file mode 100644
index 0000000000..0fb8dbb5da
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/02-observability/01-metrics-export.md
@@ -0,0 +1,47 @@
+# EventMesh Metrics (OpenTelemetry and Prometheus)
+
+## Introduction
+
+[EventMesh](https://github.com/apache/eventmesh) is a dynamic cloud-native eventing infrastructure.
+
+## An overview of OpenTelemetry
+
+OpenTelemetry is a collection of tools, APIs, and SDKs. You can use it to instrument, generate, collect, and export telemetry data (metrics, logs, and traces) for analysis in order to understand your software's performance and behavior.
+
+## An overview of Prometheus
+
+Prometheus powers your metrics and alerting with a leading open-source monitoring solution:
+
+- Dimensional data
+- Powerful queries
+- Great visualization
+- Efficient storage
+- Simple operation
+- Precise alerting
+- Many client libraries
+- Many integrations
+
+## Requirements
+
+### Functional Requirements
+
+| Requirement ID | Requirement Description | Comments |
+| :------------- | ------------------------------------------------------------ | ------------- |
+| F-1 | EventMesh users should be able to observe HTTP metrics from Prometheus | Functionality |
+| F-2 | EventMesh users should be able to observe TCP metrics from Prometheus | Functionality |
+
+## Design Details
+
+Use the meter instrument provided by OpenTelemetry to observe the metrics that exist in EventMesh and then export them to Prometheus.
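+
+Purely as a hypothetical illustration of the numbered steps that follow (this is not EventMesh's actual wiring), a recent OpenTelemetry Java SDK plus its Prometheus exporter could be assembled roughly as follows; the metric name and instrumentation scope are invented, and 19090 is the default Prometheus port mentioned later in this document:
+
+```java
+import io.opentelemetry.api.metrics.LongCounter;
+import io.opentelemetry.api.metrics.Meter;
+import io.opentelemetry.exporter.prometheus.PrometheusHttpServer;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+
+public class MetricsExportSketch {
+
+    public static void main(String[] args) {
+        // Set up the Prometheus server: expose a scrape endpoint on the port
+        // configured by eventMesh.metrics.prometheus.port (19090 by default).
+        PrometheusHttpServer prometheusReader = PrometheusHttpServer.builder().setPort(19090).build();
+
+        // Initialize a meter instrument backed by the Prometheus reader.
+        SdkMeterProvider meterProvider = SdkMeterProvider.builder()
+                .registerMetricReader(prometheusReader)
+                .build();
+        Meter meter = meterProvider.get("eventmesh-metrics-sketch");
+
+        // Build a metric observer; the metric name below is illustrative only.
+        LongCounter httpRequests = meter.counterBuilder("eventmesh.http.request.count").build();
+        httpRequests.add(1);
+    }
+}
+```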
+ +1、Initialize a meter instrument + +2、set the Prometheus server + +3、different metrics observer built + +## Appendix + +### References + + diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/02-tracing.md b/versioned_docs/version-v1.10.0/design-document/02-observability/02-tracing.md new file mode 100644 index 0000000000..1b191965da --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/02-observability/02-tracing.md @@ -0,0 +1,87 @@ +# Distributed Tracing + +## Overview of OpenTelemetry + +OpenTelemetry is a collection of tools, APIs, and SDKs. You can use it to instrument, generate, collect, and export telemetry data (metrics, logs, and traces) for analysis in order to understand your software's performance and behavior. + +## Requirements + +- set tracer +- different exporter +- start and end span in server + +## Design Details + +- SpanProcessor: BatchSpanProcessor + +- Exporter: log(default), would be changed from properties + +```java +// Configure the batch spans processor. This span processor exports span in batches. +BatchSpanProcessor batchSpansProcessor = + BatchSpanProcessor.builder(exporter) + .setMaxExportBatchSize(512) // set the maximum batch size to use + .setMaxQueueSize(2048) // set the queue size. This must be >= the export batch size + .setExporterTimeout( + 30, TimeUnit.SECONDS) // set the max amount of time an export can run before getting + // interrupted + .setScheduleDelay(5, TimeUnit.SECONDS) // set time between two different exports + .build(); +OpenTelemetrySdk.builder() + .setTracerProvider( + SdkTracerProvider.builder().addSpanProcessor(batchSpansProcessor).build()) + .build(); +``` + +1. When using the method 'init()' of the class "EventMeshHTTPServer", the class "AbstractHTTPServer” will get the tracer + +```java +super.openTelemetryTraceFactory = new OpenTelemetryTraceFactory(eventMeshHttpConfiguration); +super.tracer = openTelemetryTraceFactory.getTracer(this.getClass().toString()); +super.textMapPropagator = openTelemetryTraceFactory.getTextMapPropagator(); +``` + +2. then the trace in class "AbstractHTTPServer” will work. + +## Problems + +### How to set different exporter in class 'OpenTelemetryTraceFactory'? (Solved) + +After I get the exporter type from properties, how to deal with it. + +The 'logExporter' only needs to new it. + +But the 'zipkinExporter' needs to new and use the "getZipkinExporter()" method. + +## Solutions + +### Solution of different exporter + +Use reflection to get an exporter. + +First of all, different exporter must implement the interface 'EventMeshExporter'. + +Then we get the exporter name from the configuration and reflect to the class. 
+ +```java +//different spanExporter +String exporterName = configuration.eventMeshTraceExporterType; +//use reflection to get spanExporter +String className = String.format("org.apache.eventmesh.runtime.exporter.%sExporter",exporterName); +EventMeshExporter eventMeshExporter = (EventMeshExporter) Class.forName(className).newInstance(); +spanExporter = eventMeshExporter.getSpanExporter(configuration); +``` + +Additional, this will surround with try catch.If the specified exporter cannot be obtained successfully, the default exporter log will be used instead + +#### Improvement of different exporter + +SPI (To be completed) + +## Appendix + +### References + + + + diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/03-prometheus.md b/versioned_docs/version-v1.10.0/design-document/02-observability/03-prometheus.md new file mode 100644 index 0000000000..1bc6e0e554 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/02-observability/03-prometheus.md @@ -0,0 +1,24 @@ +# Observe Metrics with Prometheus + +## Prometheus + +[Prometheus](https://prometheus.io/docs/introduction/overview/) is an open-source system monitoring and alerting toolkit that collects and stores the metrics as time-series data. EventMesh exposes a collection of metrics data that could be scraped and analyzed by Prometheus. Please follow [the "First steps with Prometheus" tutorial](https://prometheus.io/docs/introduction/first_steps/) to download and install the latest release of Prometheus. + +## Edit Prometheus Configuration + +The `eventmesh-runtime/conf/prometheus.yml` configuration file specifies the port of the metrics HTTP endpoint. The default metrics port is `19090`. + +```properties +eventMesh.metrics.prometheus.port=19090 +``` + +Please refer to [the Prometheus configuration guide](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config) to add the EventMesh metrics as a scrape target in the configuration file. Here's the minimum configuration that creates a job with the name `eventmesh` and the endpoint `http://localhost:19090`: + +```yaml +scrape_configs: + - job_name: "eventmesh" + static_configs: + - targets: ["localhost:19090"] +``` + +Please navigate to the Prometheus dashboard (e.g. `http://localhost:9090`) to view the list of metrics exported by EventMesh, which are prefixed with `eventmesh_`. diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/04-zipkin.md b/versioned_docs/version-v1.10.0/design-document/02-observability/04-zipkin.md new file mode 100644 index 0000000000..f4d07252d7 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/02-observability/04-zipkin.md @@ -0,0 +1,38 @@ +# Collect Trace with Zipkin + +## Zipkin + +Distributed tracing is a method used to profile and monitor applications built with microservices architecture. Distributed tracing helps pinpoint where failures occur and what causes poor performance. + +[Zipkin](https://zipkin.io) is a distributed tracing system that helps collect timing data needed to troubleshoot latency problems in service architectures. EventMesh exposes a collection of trace data that could be collected and analyzed by Zipkin. Please follow [the "Zipkin Quickstart" tutorial](https://zipkin.io/pages/quickstart.html) to download and install the latest release of Zipkin. + +## Configuration + +To enable the trace exporter of EventMesh Runtime, set the `eventMesh.server.trace.enabled` field in the `conf/eventmesh.properties` file to `true`. 
+ +```conf +# Trace plugin +eventMesh.server.trace.enabled=true +eventMesh.trace.plugin=zipkin +``` + +To customize the behavior of the trace exporter such as timeout or export interval, edit the `exporter.properties` file. + +```conf +# Set the maximum batch size to use +eventmesh.trace.max.export.size=512 +# Set the queue size. This must be >= the export batch size +eventmesh.trace.max.queue.size=2048 +# Set the max amount of time an export can run before getting(TimeUnit=SECONDS) +eventmesh.trace.export.timeout=30 +# Set time between two different exports (TimeUnit=SECONDS) +eventmesh.trace.export.interval=5 +``` + +To send the exported trace data to Zipkin, edit the `eventmesh.trace.zipkin.ip` and `eventmesh.trace.zipkin.port` fields in the `conf/zipkin.properties` file to match the configuration of the Zipkin server. + +```conf +# Zipkin's IP and Port +eventmesh.trace.zipkin.ip=localhost +eventmesh.trace.zipkin.port=9411 +``` diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/05-jaeger.md b/versioned_docs/version-v1.10.0/design-document/02-observability/05-jaeger.md new file mode 100644 index 0000000000..e9452c3554 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/02-observability/05-jaeger.md @@ -0,0 +1,44 @@ +# Collect Trace with Jaeger + +## Jaeger + +[Jaeger](https://www.jaegertracing.io/), inspired by [Dapper](https://research.google.com/pubs/pub36356.html) and [OpenZipkin](https://zipkin.io/), is a distributed tracing platform created by [Uber Technologies](https://uber.github.io/) and donated to [Cloud Native Computing Foundation](https://cncf.io/). It can be used for monitoring microservices-based distributed systems. + +For the installation of Jaeger, you can refer to the [official documentation](https://www.jaegertracing.io/docs/latest/getting-started/) of Jaeger. It is recommended to use docker image `jaegertracing/all-in-one` to quickly build the environment for testing. + +## Configuration + +To enable the trace exporter of EventMesh Runtime, set the `eventMesh.server.trace.enabled` field in the `conf/eventmesh.properties` file to `true`. + +```conf +# Trace plugin +eventMesh.server.trace.enabled=true +eventMesh.trace.plugin=jaeger +``` + +To customize the behavior of the trace exporter such as timeout or export interval, edit the `exporter.properties` file. + +```conf +# Set the maximum batch size to use +eventmesh.trace.max.export.size=512 +# Set the queue size. This must be >= the export batch size +eventmesh.trace.max.queue.size=2048 +# Set the max amount of time an export can run before getting(TimeUnit=SECONDS) +eventmesh.trace.export.timeout=30 +# Set time between two different exports (TimeUnit=SECONDS) +eventmesh.trace.export.interval=5 +``` + +To send the exported trace data to Jaeger, edit the `eventmesh.trace.jaeger.ip` and `eventmesh.trace.jaeger.port` fields in the `conf/jaeger.properties` file to match the configuration of the Jaeger server. + +```conf +# Jaeger's IP and Port +eventmesh.trace.jaeger.ip=localhost +eventmesh.trace.jaeger.port=14250 +``` + +## Migrating from Zipkin + +Collector service exposes Zipkin compatible REST API `/api/v1/spans` which accepts both Thrift and JSON. Also there is `/api/v2/spans` for JSON and Proto. + +So you can also use the `eventmesh-trace-zipkin` plugin to collect trace with Jaeger. Please refer to the `eventmesh-trace-zipkin` documentation for the specific configuration. By default this feature in Jaeger is disabled. 
It can be enabled with `--collector.zipkin.host-port=:9411`.
\ No newline at end of file
diff --git a/versioned_docs/version-v1.10.0/design-document/02-observability/_category_.json b/versioned_docs/version-v1.10.0/design-document/02-observability/_category_.json
new file mode 100644
index 0000000000..9a251b1a4d
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/02-observability/_category_.json
@@ -0,0 +1,4 @@
{
  "label": "Observability",
  "collapsed": false
}
diff --git a/versioned_docs/version-v1.10.0/design-document/02-stream.md b/versioned_docs/version-v1.10.0/design-document/02-stream.md
new file mode 100644
index 0000000000..7f2534a5d4
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/02-stream.md
@@ -0,0 +1,118 @@
# EventMesh Stream

## Overview of Event Streaming

Event Streaming is an implementation of the Pub/Sub architecture pattern. It consists of:

- Message or Event: a change of state.

- Topic: a partition in the messaging middleware broker.

- Consumer: can subscribe to read events from a broker topic.

- Producer: generates events.

Event streaming is a continuous flow of events. To maintain the order between events, the flow should move in a specific direction, that is, from producers to consumers.

## Requirements

### Functional Requirements

| Requirement ID | Requirement Description | Comments |
| -------------- | ----------------------- | -------- |
| F-1 | EventMesh users should be able to achieve Event Streaming functionality in EventMesh | Functionality |
| F-2 | EventMesh users can apply dynamic, user-specific logic for routing, filtering, transformation, etc. | Functionality |

## Design Details

We introduce the EventMesh Stream component, which allows us to use the programming model and binder abstractions from Spring Cloud Stream natively within Apache Camel.

[Spring Cloud Stream](https://spring.io/projects/spring-cloud-stream) is a framework for building highly scalable event-driven microservices connected with shared messaging systems.

[Apache Camel](https://camel.apache.org/) is an open-source integration framework that empowers you to quickly and easily integrate various systems consuming or producing data.

## Architecture

![Stream Architecture](/images/design-document/stream-architecture.png)

## Design

### EventMesh-Stream Component

- Event
- Event Channel
- Event EndPoint
- Event Pipes & Filters
- Event Routes
- Event Converter

#### Event

> An event is the smallest unit for transmitting data in the system. Its structure is divided into headers, body, and attachments.

#### Event Channel

> An event channel is a logical channel in the system. It is implemented with the Spring Cloud Stream programming model, which provides an abstraction over messaging channels (currently Spring `MessageChannel`).

#### Event EndPoint

> An event endpoint is the interface between an application and a messaging system. We can define two types of endpoint:

- Consumer endpoint - Appears at the start of a route and reads incoming events from an incoming channel.
- Producer endpoint - Appears at the end of a route and writes events to an outgoing channel.

#### Event Pipes & Filters

> We can construct a route by creating a chain of filters (Apache Camel `Processor`), where the output of one filter is fed into the input of the next filter in the pipeline.
> The main advantage of the pipeline is that you can create complex event processing logic.
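
To make the pipes-and-filters idea more concrete, here is a minimal sketch written directly against the Apache Camel API. It is not code from the EventMesh Stream module; the endpoint URIs and the two filters are hypothetical and only show how the output of one `Processor` feeds the input of the next.

```java
import org.apache.camel.Processor;
import org.apache.camel.builder.RouteBuilder;

// Minimal pipes-and-filters sketch (hypothetical endpoints and filter logic).
public class EventPipelineRoute extends RouteBuilder {

    @Override
    public void configure() {
        // Filter 1: normalize the event body.
        Processor normalize = exchange ->
            exchange.getMessage().setBody(exchange.getMessage().getBody(String.class).trim());

        // Filter 2: enrich the event with a header.
        Processor enrich = exchange ->
            exchange.getMessage().setHeader("processedAt", System.currentTimeMillis());

        from("direct:events")      // consumer endpoint at the start of the route
            .process(normalize)    // output of this filter ...
            .process(enrich)       // ... becomes the input of the next one
            .to("log:processed");  // producer endpoint at the end of the route
    }
}
```
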

#### Event Routes

> An event router is a type of filter that consumes events and redirects them to the appropriate target endpoint based on decision criteria.

#### Event Converter

> An event converter modifies the contents of an event, translating it to a different format (i.e. CloudEvents -> Event (Camel) -> Binder Message (Spring Message), and vice versa).

## EventMesh-Stream Component Interfaces

### Component

The Component interface is the primary entry point; you can use a Component object as a factory to create EndPoint objects.

![Stream Component Interface](/images/design-document/stream-component-interface.png)

### EndPoint

An EndPoint acts as a factory for creating Consumer, Producer, and Event objects.

- `createConsumer()` — Creates a consumer endpoint, which represents the source endpoint at the beginning of a route.
- `createProducer()` — Creates a producer endpoint, which represents the target endpoint at the end of a route.

![Stream Component Routes](/images/design-document/stream-component-routes.png)

#### Producer

Users can create the following types of producer:

> Synchronous Producer - the processing thread blocks until the producer has finished processing the event.

![Stream Sync Producer](/images/design-document/stream-sync-producer.png)

Future producer types:

- Asynchronous Producer - the producer processes the event in a sub-thread.

#### Consumer

Users can create the following types of consumer:

> Event-driven consumer - the processing of an incoming request is initiated when the message binder calls a method in the consumer.

![Stream Event-Driven Consumer](/images/design-document/stream-event-driven-consumer.png)

In the future:

- Scheduled poll consumer
- Custom polling consumer
diff --git a/versioned_docs/version-v1.10.0/design-document/03-schema-registry.md b/versioned_docs/version-v1.10.0/design-document/03-schema-registry.md
new file mode 100644
index 0000000000..7c5d228260
--- /dev/null
+++ b/versioned_docs/version-v1.10.0/design-document/03-schema-registry.md
@@ -0,0 +1,134 @@
# EventMesh Schema Registry (OpenSchema)

## Overview of Schema and Schema Registry

### Schema

A Schema is a description of serialization instances (string/stream/file/...) and has two properties. First, it is itself written in the format of the serialization type. Second, it defines what requirements such serialized instances should satisfy.

Besides describing a serialization instance, a Schema may also be used for validating whether an instance is legitimate, because it defines the `type` (and other properties) of a serialized instance and of its inner keys. Taking JSON Schema as an example, it can not only be referred to when describing a JSON string, but can also be used for validating whether a string satisfies the properties defined in the schema[[1]](#References).

Common schemas include JSON Schema, Protobuf Schema, and Avro Schema.

### Schema Registry

A Schema Registry is a server that provides RESTful interfaces. It can receive and store schemas from clients, as well as provide interfaces for other clients to retrieve schemas from it.

It can be applied to both the validation process and the (de-)serialization process.

### Comparison of Schema Registry in Different Projects

Project | Application
:---: | :---
EMQ[[2]](#References) | Mainly in the (de-)serialization process. Uses "Schema Registry" and "Rule Matching" to transfer a message from one serialization format to another.
Pulsar[[3]](#References) | Mainly in the validation process.
Uses "Schema Registry" to validate a message.
Confluentinc[[4]](#References) | In both the validation and the (de-)serialization process.

## Overview of OpenSchema

OpenSchema[[5]](#References) proposes a specification for data schemas used when exchanging messages and events in modern cloud-native applications. It defines a RESTful interface for storing and retrieving schemas such as Avro, JSON Schema, and Protobuf3 schemas, organized around three aspects (subject/schema/compatibility).

## Requirements (Goals)

| Requirement ID | Requirement Description | Comments |
| :------------- | ------------------------------------------------------------ | ------------- |
| F-1 | In transmission, no message needs to contain schema information, which brings efficiency. | Functionality |
| F-2 | The message content from the producer can be validated against the schema to check that it is serialized correctly. | Functionality |

## Design Details

### Architecture

![OpenSchema](/images/design-document/schema-registry-architecture.png)

### Process of Transferring Messages under Schema Registry

![Process](/images/design-document/schema-registry-process.jpg)

The high-level process of message transmission consists of the following 10 steps:

- 1: Consumer subscribes to "TOPIC" messages from EventMesh.
- 2: Producer registers a schema with EventMesh.
- 3: EventMesh registers the schema with the Schema Registry.
- 4: Schema Registry returns the id of the newly created schema; EventMesh caches the id and the schema.
- 5: EventMesh returns the id of the schema to the Producer.
- 6: Producer patches the id in front of its messages and sends the messages to EventMesh.
- 7: EventMesh validates the messages at the entry port and sends them to the EventStore; EventMesh retrieves messages from the EventStore.
- 8: EventMesh unpatches the id and sends it to the Schema Registry (if such `` does not exist in the local cache).
- 9: Schema Registry returns the schema and EventMesh caches it.
- 10: EventMesh patches the schema in front of the messages and pushes them to the consumer.

## Current Progress

### Status

**Current state**: Developing

**Discussion thread**: ISSUE #339

### Proposed Changes

The proposal has two aspects.

The first is a separate Open Schema Registry, which includes storage and compatibility checks for schemas. This proposal is under development.

The second is the integration of Open Schema into EventMesh, which includes validation against the schema. This proposal is to be developed.

As for the first proposal, the development status is as follows.

#### Status Code and Exception Code

No. | Status Code | Exception Code | Description | status
--- | :---: | :---: | :---: | :---:
1 | 401 | 40101 | Unauthorized Exception | ✔
2 | 404 | 40401 | Schema Non-exist Exception | ✔
3 | ^ | 40402 | Subject Non-exist Exception | ✔
4 | ^ | 40403 | Version Non-exist Exception | ✔
5 | 409 | 40901 | Compatibility Exception | ✔
6 | 422 | 42201 | Schema Format Exception | ✔
7 | ^ | 42202 | Subject Format Exception | ✔
8 | ^ | 42203 | Version Format Exception | ✔
9 | ^ | 42204 | Compatibility Format Exception | ✔
10 | 500 | 50001 | Storage Service Exception | ✔
11 | ^ | 50002 | Timeout Exception | ✔

#### API Development Status

No. 
| Type | URL | response | exception | code | test +--- | --- | --- | --- | --- | --- | --- +1 | GET | /schemas/ids/{string: id} | `Schema.class` | 40101\40401\50001 | ✔ | ❌ +2 | GET | /schemas/ids/{string: id}/subjects | `SubjectAndVersion.class` | 40101\40401\50001 | ✔ | ❌ +3 | GET | /subjects | `List\` | 40101\50001 | ✔ | ❌ +4 | GET | /subjects/{string: subject}/versions | `List\` | 40101\40402\50001 | ✔ | ❌ +5 | DELETE | /subjects/(string: subject) | `List\` | 40101\40402\50001 | ✔ | ❌ +6 | GET | /subjects/(string: subject) | `Subject.class` | 40101\40402\50001 | ✔ | ❌ +7 | GET | /subjects/(string: subject)/versions/(version: version)/schema | `SubjectWithSchema.class` | 40101\40402\40403\50001 | ✔ | ❌ +8 | POST | /subjects/(string: subject)/versions | `SchemaIdResponse.class` | 40101\40901\42201\50001\50002 | - | ❌ +9 | POST | /subjects/(string: subject)/ | `Subject.class` | 40101\40901\42202\50001\50002 | ✔ | ❌ +10 | DELETE | /subjects/(string: subject)/versions/(version: version) | `int` | 40101\40402\40403\40901\50001| - | ❌ +11 | POST | /compatibility/subjects/(string: subject)/versions/(version: version) | `CompatibilityResultResponse.class` | 40101\40402\40403\42201\42203\50001| - | ❌ +12 | GET | /compatibility/(string: subject) | `Compatibility.class` | 40101\40402\50001 | ✔ | ❌ +13 | PUT | /compatibility/(string: subject) | `Compatibility.class` | 40101\40402\40901\42204\50001 | - | ❌ + +#### Overall Project Structure + +```SchemaController.java```+```SchemaService.java``` : ```OpenSchema 7.1.1~7.1.2 (API 1~2)``` + +```SubjectController.java```+```SubjectService.java``` : ```OpenSchema 7.2.1~7.2.8 (API 3~10)``` + +```CompatibilityController.java```+```CompatibilityService.java``` : ```OpenSchema 7.3.1~7.3.3 (API 11~13)``` + ```Check for Compatibility``` + +![Project Structure](/images/design-document/schema-registry-project-structure.png) + +## References + +[1] [schema validator (github.com)](https://github.com/search?q=schema+validator) + +[2] [EMQ: Schema Registry](https://www.jianshu.com/p/33e0655c642b) + +[3] [Pulsar: Schema Registry](https://mp.weixin.qq.com/s/PaB66-Si00cX80py5ig5Mw) + +[4] [confluentinc/schema-registry](https://github.com/confluentinc/schema-registry) + +[5] [openmessaging/openschema](https://github.com/openmessaging/openschema) diff --git a/versioned_docs/version-v1.10.0/design-document/_category_.json b/versioned_docs/version-v1.10.0/design-document/_category_.json new file mode 100644 index 0000000000..f9283e2f00 --- /dev/null +++ b/versioned_docs/version-v1.10.0/design-document/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 6, + "label": "Design Document", + "collapsed": false +} diff --git a/versioned_docs/version-v1.10.0/instruction/00-eclipse.md b/versioned_docs/version-v1.10.0/instruction/00-eclipse.md new file mode 100644 index 0000000000..7ef27d5358 --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/00-eclipse.md @@ -0,0 +1,42 @@ +# Import EventMesh into eclipse + +We recommend using `Intellij IDEA` for development, if you wish to use `Eclipse`, you can refer to the following steps to import the project. + +### 1. Dependencies + +``` +64-bit JDK 1.8+. +Gradle is at least 7.0, 7.0.* recommended; +eclipse installed gradle plugin or eclipse comes with gradle plugin; +``` + +### 2. Download source code + +```shell +git@github.com:apache/eventmesh.git +``` + +### 3. Project compile eclipse environment + +Open a command line terminal and run `gradlew cleanEclipse eclipse` + +### 4. 
Configuration changes + +Modify the project name to match the `settings.gradle` configuration file parameter `rootProject.name`. + +### 5. Modify `eclipse.init` configuration file, configure lombok to 1.18.8 version for example + +``` +-javaagent:lombok-1.18.8.jar +-XBootclasspath/a:lombok-1.18.8.jar +``` + +### 6. 202106 version, `eclipse.init` add configuration parameters + +``` +--illegal-access=permit +``` + +### 7. Import projetc + +Open eclipse, import gradle project to IDE. \ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/instruction/01-store.md b/versioned_docs/version-v1.10.0/instruction/01-store.md new file mode 100644 index 0000000000..0abf341598 --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/01-store.md @@ -0,0 +1,46 @@ +# EventMesh Store + +## 1 Dependencies + +```text +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.* +4g+ available disk to deploy eventmesh-store +If you choose standalone mode, you could skip this file and go to the next step: Start Eventmesh-Runtime; if not, you could choose RocketMQ as the store layer. +``` + +## 2 Download + +Download the Binary code (recommended: 4.9.*) from [RocketMQ Official](https://rocketmq.apache.org/download/). Here we take 4.9.4 as an example. + +``` +unzip rocketmq-all-4.9.4-bin-release.zip +cd rocketmq-4.9.4/ +``` + +![rocketmq_1](/images/install/rocketmq_1.png) + +### 3 Start + +Start Name Server: + +```console +nohup sh bin/mqnamesrv & +tail -f ~/logs/rocketmqlogs/namesrv.log +``` + +![rocketmq_2](/images/install/rocketmq_2.png) + +Start Broker: + +```console +nohup sh bin/mqbroker -n localhost:9876 & +tail -f ~/logs/rocketmqlogs/broker.log +``` + +The deployment of eventmesh-store has finished, please go to the next step: [Start Eventmesh-Runtime](03-runtime.md) + +## Reference + +For more details about RocketMQ, please refer to \ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/instruction/02-store-with-docker.md b/versioned_docs/version-v1.10.0/instruction/02-store-with-docker.md new file mode 100644 index 0000000000..b728b3cca0 --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/02-store-with-docker.md @@ -0,0 +1,73 @@ +# EventMesh Store with Docker + +If you choose standalone mode, you could skip this file and go to the next step: Start Eventmesh-Runtime; if not, you could choose RocketMQ as the store layer. + +## 1. Dependencies + +``` +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.* +4g+ available disk to deploy eventmesh-store +``` + +## 2. Deploy + +### 2.1 Pull Images + +Pull RocketMQ image from Docker Hub: + +```shell +#Pull rocketmq image +sudo docker pull apache/rocketmq:4.9.4 +``` + +You can list and view existing local mirrors with the following command: + +```shell +sudo docker images +``` + +If the terminal displays the image information as shown below, the EventMesh image has been successfully downloaded locally. 
+ +```shell +REPOSITORY TAG IMAGE ID CREATED SIZE +apache/rocketmq 4.9.4 a2a50ca263c3 13 months ago 548MB +``` + +![rocketmq_docker_1](/images/install/rocketmq_docker_1.png) + +### 2.2 Run Docker + +Run namerv container: + +```shell + +sudo docker run -d -p 9876:9876 \ + -v `pwd`/data/namesrv/logs:/root/logs \ + -v `pwd`/data/namesrv/store:/root/store \ + --name rmqnamesrv \ + apache/rocketmq:4.9.4 \ + sh mqnamesrv + +``` + +Run broker container: + +```shell +sudo docker run -d -p 10911:10911 -p 10909:10909 \ + -v `pwd`/data/broker/logs:/root/logs \ + -v `pwd`/data/broker/store:/root/store \ + --name rmqbroker \ + --link rmqnamesrv:namesrv \ + -e "NAMESRV_ADDR=namesrv:9876" \ + apache/rocketmq:4.9.4 \ + sh mqbroker -c ../conf/broker.conf + +``` + +![rocketmq_docker_2](/images/install/rocketmq_docker_2.png) + +Please note that the **rocketmq-broker ip** is **pod ip**. If you want to modify this ip, you can set it your custom value in **broker.conf**。 + +By now, the deployment of eventmesh-store has finished, please go to the next step: [Start Eventmesh-Runtime Using Docker](04-runtime-with-docker.md) diff --git a/versioned_docs/version-v1.10.0/instruction/03-runtime.md b/versioned_docs/version-v1.10.0/instruction/03-runtime.md new file mode 100644 index 0000000000..0b0e455731 --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/03-runtime.md @@ -0,0 +1,209 @@ +# EventMesh Runtime + +The EventMesh Runtime is a stateful mesh node in an EventMesh cluster that is responsible for event transfer between the Source Connector and the Sink Connector, and can use EventMesh Storage as a storage queue for events. + +## 1 Run on your local machine + +### 1.1 Run from source code + +#### 1.1.1 Dependencies + +``` +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.*; +``` +#### 1.1.2 Download source code + +Download and extract the source code of the latest release from [EventMesh download](https://eventmesh.apache.org/download). For example, with the current latest version, you will get `apache-eventmesh-1.9.0-source.tar.gz`. + +#### 1.1.3 Run form local + +**1.1.3.1 Description of the project structure:** + +- eventmesh-common : eventmesh public classes and methods module +- eventmesh-connector-api : eventmesh connector plugin interface definition module +- eventmesh-connector-plugin : eventmesh connector plugin module +- eventmesh-runtime : eventmesh runtime module +- eventmesh-sdk-java : eventmesh java client sdk +- eventmesh-starter : eventmesh local startup and runtime project portal +- eventmesh-spi : eventmesh SPI loader module + +> Note: Plugin modules follow the SPI specification defined by eventmesh, custom SPI interfaces need to be marked with the annotation @EventMeshSPI. +> Plugin instances need to be configured in the corresponding module under /main/resources/META-INF/eventmesh with a mapping file of the relevant interfaces to their implementation classes, with the name of the file being the full class name of the SPI interface. +> The content of the file is the mapping from the plugin instance name to the plugin instance, see eventmesh-connector-rocketmq plugin module for details. 
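
As a rough illustration, such a mapping file might look like the sketch below. The interface and implementation class names here are placeholders rather than the actual EventMesh class names; see the eventmesh-connector-rocketmq module for the real file.

```properties
# File name = fully qualified name of the SPI interface, e.g.
# src/main/resources/META-INF/eventmesh/org.example.spi.ConnectorService   (hypothetical)
# Content   = plugin instance name -> implementation class
rocketmq=org.example.connector.rocketmq.RocketMQConnectorService
```
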
+ +**1.1.3.2 Plugin Description** + +***1.1.3.2.1 Installing the plugin*** + +There are two ways to install the plugin + +- classpath loading: Local developers can install the plugin by declaring it in the eventmesh-starter module build.gradle, e.g., declare that it uses the rocketmq plugin + +```gradle + implementation project(":eventmesh-connectors:eventmesh-connector-rocketmq") +``` + +- File loading: By installing the plugin to the plugin directory, EventMesh will automatically load the plugins in the plugin directory according to the conditions at runtime, you can install the plugin by executing the following command + +```shell +. /gradlew clean jar dist && . /gradlew installPlugin +``` + +***1.1.3.2.2 Using Plugins *** + +EventMesh will load plugins in the dist/plugin directory by default, you can change the plugin directory with `-DeventMeshPluginDir=your_plugin_directory`. Examples of plugins to be used at runtime can be found in the +`confPath` directory under `eventmesh.properties`. For example declare the use of the rocketmq plugin at runtime with the following settings. + +```properties +#connector plugin +eventMesh.connector.plugin.type=rocketmq +``` + +**1.1.3.3 Configuring the VM startup parameters** + +```properties +-Dlog4j.configurationFile=eventmesh-runtime/conf/log4j2.xml +-Deventmesh.log.home=eventmesh-runtime/logs +-Deventmesh.home=eventmesh-runtime +-DconfPath=eventmesh-runtime/conf +``` + +> Note: If your operating system is Windows, you may need to replace the file separator with '\'. + +**1.1.3.4 Getting up and running** + +``` +Run org.apache.eventmesh.starter. +``` + +### 1.2 Run form local binary + +#### 1.1.1 Dependencies + +``` +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.*; +``` + +Gradle is the build automation tool used by Apache EventMesh. Please refer to the [offical guide](https://docs.gradle.org/current/userguide/installation.html) to install the latest release of Gradle. + +### 1.1.2 Download Source Code + +Download and extract the source code of the latest release from [EventMesh download](https://eventmesh.apache.org/download). For example, with the current latest version, you will get `apache-eventmesh-1.9.0-source.tar.gz`. + +```console +tar -xvzf apache-eventmesh-1.9.0-source.tar.gz +cd apache-eventmesh-1.9.0-src/ +``` + +Build the source code with Gradle. + +```console +gradle clean dist +``` + +![runtime_2](/images/install/runtime_2.png) + +Edit the `eventmesh.properties` to change the configuration (e.g. TCP port, client blacklist) of EventMesh Runtime. + +```console +cd dist +vim conf/eventmesh.properties +``` + +### 1.1.3 Build and Load Plugins + +Apache EventMesh introduces the SPI (Service Provider Interface) mechanism, which enables EventMesh to discover and load the plugins at runtime. The plugins could be installed with these methods: + +- Gradle Dependencies: Declare the plugins as the build dependencies in `eventmesh-starter/build.gradle`. + +```gradle +dependencies { + implementation project(":eventmesh-runtime") + + // Example: Load the RocketMQ plugin + implementation project(":eventmesh-connectors:eventmesh-connector-rocketmq") +} +``` + +- Plugin directory: EventMesh loads the plugins in the `dist/plugin` directory based on `eventmesh.properties`. The `installPlugin` task of Gradle builds and moves the plugins into the `dist/plugin` directory. + +```console +gradle installPlugin +``` +### 1.1.4 启动Runtime + +Execute the `start.sh` script to start the EventMesh Runtime server. 
+ +```console +bash bin/start.sh +``` + +![runtime_4](/images/install/runtime_4.png) + +View the output log: + +```console +tail -f logs/eventmesh.out +``` +![runtime_3](/images/install/runtime_3.png) + +## 2 Remote deployment + +### 2.1 Dependencies + +``` +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.*; +4g+ available disk to deploy eventmesh-store; +``` + +### 2.2 Download + +Download and extract the executable binaries of the latest release from [EventMesh download](https://eventmesh.apache.org/download).For example, with the current latest version, you will get `apache-eventmesh-1.9.0.tar.gz`. + + +```console +tar -xvzf apache-eventmesh-1.9.0-bin.tar.gz +cd apache-eventmesh-1.9.0 +``` + +### 2.3 Deploy + +Edit the `eventmesh.properties` to change the configuration (e.g. TCP port, client blacklist) of EventMesh Runtime. The executable binaries contain all plugins in the bundle, thus there's no need to build them from source code. + +```console +vim conf/eventmesh.properties +``` + +Execute the `start.sh` script to start the EventMesh Runtime server. + +```console +bash bin/start.sh +``` + +If you see "EventMeshTCPServer[port=10000] started...." , then the setup was successful. + +![runtime_6](/images/install/runtime_6.png) + + +View the output log: + +```console +cd /root/apache-eventmesh-1.9.0/logs +tail -f eventmesh.out +``` +![runtime_7](/images/install/runtime_7.png) + +You can stop the run with the following command: + +```console +bash bin/stop.sh +``` + +![runtime_8](/images/install/runtime_8.png) +![runtime_9](/images/install/runtime_9.png) diff --git a/versioned_docs/version-v1.10.0/instruction/04-runtime-with-docker.md b/versioned_docs/version-v1.10.0/instruction/04-runtime-with-docker.md new file mode 100644 index 0000000000..7d2c1d1a5a --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/04-runtime-with-docker.md @@ -0,0 +1,130 @@ +# EventMesh Runtime with Docker + +The documentation introduces the steps to install the latest release of EventMesh Runtime with Docker and connect to Apache RocketMQ. It's recommended to use a Linux-based system with [Docker Engine](https://docs.docker.com/engine/install/). Please follow the [Docker tutorial](https://docs.docker.com/get-started/) to get familiar with the basic concepts (registry, volume, etc.) and commands of Docker. + +## 1. Dependencies + +``` +64-bit OS,we recommend Linux/Unix; +64-bit JDK 1.8+; +Gradle 7.0+, we recommend 7.0.* +4g+ available disk to deploy eventmesh-store +If you choose standalone mode, you could skip this file and go to the next step: Start Eventmesh-Runtime; if not, you could choose RocketMQ as the store layer. +``` + +## 2. Pull EventMesh Image + +Download the pre-built image of [`eventmesh`](https://hub.docker.com/r/eventmesh/eventmesh) from Docker Hub with `docker pull`: + +```console +sudo docker pull eventmesh/eventmesh:v1.4.0 +``` + +To verify that the `eventmesh/eventmesh` image is successfully installed, list the downloaded images with `docker images`: + +```console +$ sudo docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +eventmesh/eventmesh v1.4.0 6e2964599c78 16 months ago 937MB +``` + +![runtime_docker_1](/images/install/runtime_docker_1.png) + +## 3. Edit Configuration + +Edit the `eventmesh.properties` to change the configuration (e.g. TCP port, client blacklist) of EventMesh Runtime. To integrate RocketMQ as a connector, these two configuration files should be created: `eventmesh.properties` and `rocketmq-client.properties`. 
+ +```shell +sudo mkdir -p /data/eventmesh/rocketmq/conf +cd /data/eventmesh/rocketmq/conf +sudo touch eventmesh.properties +sudo touch rocketmq-client.properties +``` + +![runtime_docker_2](/images/install/runtime_docker_2.png) + +### 4. Configure `eventmesh.properties` + +The `eventmesh.properties` file contains the properties of EventMesh runtime environment and integrated plugins. Please refer to the [default configuration file](https://github.com/apache/eventmesh/blob/master/eventmesh-runtime/conf/eventmesh.properties) for the available configuration keys. + +```shell +sudo vim eventmesh.properties +``` + +| Configuration Key | Default Value | Description | +|-|-|-| +| `eventMesh.server.http.port` | 10105 | EventMesh HTTP server port | +| `eventMesh.server.tcp.port` | 10000 | EventMesh TCP server port | +| `eventMesh.server.grpc.port` | 10205 | EventMesh gRPC server port | + +### 5. Configure `rocketmq-client.properties` + +The `rocketmq-client.properties` file contains the properties of the Apache RocketMQ nameserver. + +```shell +sudo vim rocketmq-client.properties +``` + +Please refer to the [default configuration file](https://github.com/apache/eventmesh/blob/1.3.0/eventmesh-runtime/conf/rocketmq-client.properties) and change the value of `eventMesh.server.rocketmq.namesrvAddr` to the nameserver address of RocketMQ. + +| Configuration Key | Default Value | Description | +|-|-|-| +| `eventMesh.server.rocketmq.namesrvAddr` | `127.0.0.1:9876;127.0.0.1:9876` | The address of RocketMQ nameserver | + +## 6. Run and Manage EventMesh Container + +Run an EventMesh container from the `eventmesh/eventmesh` image with the `docker run` command. The `-p` option of the command binds the container port with the host machine port. The `-v` option of the command mounts the configuration files from files in the host machine. + +```shell +sudo docker run -d -p 10000:10000 -p 10105:10105 \ +-v `pwd`/data/eventmesh/rocketmq/conf/eventmesh.properties:/data/app/eventmesh/conf/eventmesh.properties \ +-v `pwd`/data/eventmesh/rocketmq/conf/rocketmq-client.properties:/data/app/eventmesh/conf/rocketmq-client.properties \ +eventmesh/eventmesh:v1.4.0 +``` + +The `docker ps` command lists the details (id, name, status, etc.) of the running containers. The container id is the unique identifier of the container. + +```shell +$ sudo docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +5bb6b6092672 eventmesh/eventmesh:v1.4.0 "/bin/sh -c 'sh star…" 5 seconds ago Up 3 seconds 0.0.0.0:10000->10000/tcp, :::10000->10000/tcp, 0.0.0.0:10105->10105/tcp, :::10105->10105/tcp eager_driscoll +``` + +![runtime_docker_3](/images/install/runtime_docker_3.png) + +As you can see from this message, the ```container id``` is ``5bb6b6092672``, and the ```name`` is ``eager_driscoll``, and they can both be used to uniquely identify this container. **Note**: On your computer, their values may be different from the ones here. + +## 7. Managing EventMesh Containers + +After successfully running an EventMesh container, you can manage the container by entering it, viewing logs, deleting it, and so on. 
+ + +To connect to the EventMesh container: + +```shell +sudo docker exec -it [container id or name] /bin/bash +``` + +To read the log of the EventMesh container: + +```shell +tail -f ../logs/eventmesh.out +``` + +![runtime_docker_4](/images/install/runtime_docker_4.png) + +To stop or remove the container: + +```shell +sudo docker stop [container id or name] + +sudo docker rm -f [container id or name] +``` + +![runtime_docker_5](/images/install/runtime_docker_5.png) + +## 8. Explore more + +Now that EventMesh is running through a container, you can refer to the [``eventmesh-examples`` module](https://github.com/apache/eventmesh/tree/master/eventmesh-examples) to write and test your own code. + +I hope you enjoy the process and get more out of it! \ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/instruction/05-demo.md b/versioned_docs/version-v1.10.0/instruction/05-demo.md new file mode 100644 index 0000000000..0b270e32be --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/05-demo.md @@ -0,0 +1,208 @@ +# Run eventmesh-sdk-java demo + +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java/badge.svg)](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java) + +> EventMesh-sdk-java as the client,and comminucate with eventmesh-runtime,to finish the message sub and pub +> +> EventMesh-sdk-java support both async and broadcast. +> +> EventMesh-sdk-java support HTTP, TCP and gRPC. + +The test demos of TCP, HTTP 和 GRPC are in the module **eventmesh-examples**: + +## 1. TCP DEMO + +### 1.1 ASYNC + +- Start consumer to subscribe the topic (we have created the TEST-TOPIC-TCP-ASYNC by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.tcp.demo.sub.eventmeshmessage.AsyncSubscribe +``` + +- Start producer to publish async message + +``` +Run the main method of org.apache.eventmesh.tcp.demo.pub.eventmeshmessage.AsyncPublish +``` + +### 1.2 BROADCAST + +- Start subscriber to subscribe the topic (we have created the TEST-TOPIC-TCP-BROADCAST by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.tcp.demo.sub.eventmeshmessage.AsyncSubscribeBroadcast +``` + +- Start publisher to publish async message + +``` +Run the main method of org.apache.eventmesh.tcp.demo.pub.eventmeshmessage.AsyncPublishBroadcast +``` + +More information about EventMesh-TCP, please refer to [EventMesh TCP](../sdk-java/03-tcp.md) + + +## 2 HTTP DEMO + +### 2.1 ASYNC + +- The subscriber is a SpringBoot demo, so run this demo to start subscriber (we have created the topic TEST-TOPIC-HTTP-ASYNC by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.http.demo.sub.SpringBootDemoApplication +``` + +- Start publisher to publish message + +``` +Run the main method of org.apache.eventmesh.http.demo.pub.eventmeshmessage.AsyncPublishInstance +``` +More information about EventMesh-HTTP, please refer to [EventMesh HTTP](../sdk-java/02-http.md) + +## 3 GRPC DEMO + +### 3.1 ASYNC PUBLISH & WEBHOOK SUBSCRIBE + +- Start publisher to publish message (we have created the topic TEST-TOPIC-GRPC-ASYNC by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.grpc.pub.eventmeshmessage.AsyncPublishInstance +``` + +- Start webhook subscriber + +``` +Run the main method of org.apache.eventmesh.grpc.sub.app.SpringBootDemoApplication +``` + +### 3.2 SYNC 
PUBLISH & STREAM SUBSCRIBE + +- Start Request-Reply publisher to publish message (we have created the topic TEST-TOPIC-GRPC-RR by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.grpc.pub.eventmeshmessage.RequestReplyInstance +``` + +- Start stream subscriber + +``` +Run the main method of org.apache.eventmesh.grpc.sub.EventmeshAsyncSubscribe +``` + +### 3.3 PUBLISH BATCH MESSAGE + +- Start publisher to publish batch message (we have created the TEST-TOPIC-GRPC-ASYNC by default, you can also create other topic to test) + +``` +Run the main method of org.apache.eventmesh.grpc.pub.eventmeshmessage.BatchPublishInstance +``` + +More information about EventMesh-gRPC, please refer to [EventMesh gRPC](../sdk-java/04-grpc.md) + +## 4. Run these demos by yourself + +Please refer to [EventMesh Store](./01-store.md) and [EventMesh Runtime](./03-runtime.md) to finish the necessary deployment before try our demo + +After finishing the deployment of store and runtime, you can run our demos in module `eventmesh-examples`: + +gradle: + +```shell +cd apache-eventmesh-1.9.0-src/eventmesh-examples +gradle clean dist + +cd ./dist/bin +``` + +![demo_1](/images/install/demo_1.png) + +### 4.1 TCP +TCP Sub + +```shell +bash tcp_eventmeshmessage_sub.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_pub.out +``` +![demo_2](/images/install/demo_2.png) + +TCP Pub + +```shell +bash tcp_pub_eventmeshmessage.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_sub.out +``` + +![demo_3](/images/install/demo_3.png) + +### 4.2 TCP Broadcast + +TCP Sub Broadcast + +```shell +sh tcp_sub_eventmeshmessage_broadcast.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_sub_broadcast.out +``` + +![demo_4](/images/install/demo_4.png) + +TCP Pub Broadcast + +```shell +sh tcp_pub_eventmeshmessage_broadcast.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_tcp_pub_broadcast.out +``` + +![demo_5](/images/install/demo_5.png) + +### 4.3 HTTP + +HTTP Sub + +```shell +sh http_sub.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_http_sub.out +``` + +![demo_6](/images/install/demo_6.png) + +HTTP Pub + +```shell +sh http_pub_eventmeshmessage.sh +``` + +Open the corresponding log file to view the log: +``` +cd /root/apache-eventmesh-1.9.0-src/eventmesh-examples/dist/logs +tail -f demo_http_pub.out +``` + +![demo_7](/images/install/demo_7.png) + +You can see the run logs for the different modes under the `/logs` directory. 
\ No newline at end of file diff --git a/versioned_docs/version-v1.10.0/instruction/_category_.json b/versioned_docs/version-v1.10.0/instruction/_category_.json new file mode 100644 index 0000000000..690c6eb204 --- /dev/null +++ b/versioned_docs/version-v1.10.0/instruction/_category_.json @@ -0,0 +1,5 @@ +{ + "position": 2, + "label": "Installation and Deployment", + "collapsed": false +} diff --git a/versioned_docs/version-v1.10.0/introduction.md b/versioned_docs/version-v1.10.0/introduction.md new file mode 100644 index 0000000000..27503ffa44 --- /dev/null +++ b/versioned_docs/version-v1.10.0/introduction.md @@ -0,0 +1,29 @@ +--- +sidebar_position: 0 +--- + +# Introduction to EventMesh + +[![CI status](https://img.shields.io/github/actions/workflow/status/apache/eventmesh/ci.yml?logo=github&style=for-the-badge)](https://github.com/apache/eventmesh/actions/workflows/ci.yml) +[![CodeCov](https://img.shields.io/codecov/c/gh/apache/eventmesh/master?logo=codecov&style=for-the-badge)](https://codecov.io/gh/apache/eventmesh) +[![License](https://img.shields.io/github/license/apache/eventmesh?style=for-the-badge)](https://www.apache.org/licenses/LICENSE-2.0.html) +[![GitHub Release](https://img.shields.io/github/v/release/apache/eventmesh?style=for-the-badge)](https://github.com/apache/eventmesh/releases) +[![Slack Status](https://img.shields.io/badge/slack-join_chat-blue.svg?logo=slack&style=for-the-badge)](https://join.slack.com/t/the-asf/shared_invite/zt-1y375qcox-UW1898e4kZE_pqrNsrBM2g) + +**Apache EventMesh** is a fully serverless platform used to build distributed [event-driven](https://en.wikipedia.org/wiki/Event-driven_architecture) applications. + +## Features + +Apache EventMesh has a vast amount of features to help users achieve their goals. Let us share with you some of the key features EventMesh has to offer: + +- Built around the [CloudEvents](https://cloudevents.io) specification. +- Rapidly extensible language sdk around [gRPC](https://grpc.io) protocols. +- Rapidly extensible middleware by connectors such as [Apache RocketMQ](https://rocketmq.apache.org), [Apache Kafka](https://kafka.apache.org), [Apache Pulsar](https://pulsar.apache.org), [RabbitMQ](https://rabbitmq.com), [Redis](https://redis.io), [Pravega](https://cncf.pravega.io), and [RDMS](https://en.wikipedia.org/wiki/Relational_database)(in progress) using [JDBC](https://en.wikipedia.org/wiki/Java_Database_Connectivity). +- Rapidly extensible controller such as [Consul](https://consulproject.org/en/), [Nacos](https://nacos.io), [ETCD](https://etcd.io) and [Zookeeper](https://zookeeper.apache.org/). +- Guaranteed at-least-once delivery. +- Deliver events between multiple EventMesh deployments. +- Event schema management by catalog service. +- Powerful event orchestration by [Serverless workflow](https://serverlessworkflow.io/) engine. +- Powerful event filtering and transformation. +- Rapid, seamless scalability. +- Easy Function develop and framework integration. diff --git a/versioned_docs/version-v1.10.0/roadmap.md b/versioned_docs/version-v1.10.0/roadmap.md new file mode 100644 index 0000000000..b3b922294f --- /dev/null +++ b/versioned_docs/version-v1.10.0/roadmap.md @@ -0,0 +1,51 @@ +--- +sidebar_position: 1 +--- + +# Development Roadmap + +The development roadmap of Apache EventMesh is an overview of the planned features and milestones involved in the next several releases. The recent features and bug fixes are documented in the [release notes](https://eventmesh.apache.org/events/release-notes/v1.10.0/). 
The order of the features listed below doesn't correspond to their priorities. + +## List of Features and Milestones + +| Status | Description | Reference | +|-------------------------------------------|---------------------------------| --- | +| **Implemented in 1.0.0** | Support HTTP | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.0.0** | Support TCP | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.0.0** | Support Pub/Sub Event | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.1.1** | Provide Java SDK | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.1.1** | Support HTTPS | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.2.0** | Support RocketMQ as EventStore | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.2.0** | Support Heartbeat | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.3.0** | Integrate with OpenSchema | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.3.0** | Integrate with OpenTelemetry | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.3.0** | Support CloudEvents | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.4.0** | Support gRPC | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.5.0** | Provide Golang SDK | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.5.0** | Support Nacos Registry | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.5.0** | Support Mesh Bridge | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.5.0** | Support Federal Government | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.5.0** | Support Mesh Bridge | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.6.0** | Integrate with Consul | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.6.0** | Support Webhook | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.6.0** | Support etcd | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.7.0** | Support Knative Eventing Infrastructure | [GitHub Issue](https://github.com/apache/issues/790), [GSoC '22](https://issues.apache.org/jira/browse/COMDEV-463) | +| **Implemented in 1.7.0** | Support Pravega as EventStore | [GitHub Issue](https://github.com/apache/issues/270) | +| **Implemented in 1.7.0** | Support Kafka as EventStore | [GitHub Issue](https://github.com/apache/issues/676) | +| **Implemented in 1.7.0** | Support Pulsar as EventStore | [GitHub Issue](https://github.com/apache/issues/676) | +| **Implemented in 1.7.0** | Support CNCF Serverless Workflow| [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.7.0** | Support Redis | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.7.0** | Provide Rust SDK | [GitHub Issue](https://github.com/apache/issues/815) | +| **Implemented in 1.7.0** | Support Zookeeper | [GitHub Issue](https://github.com/apache/issues/417) | +| **Implemented in 1.7.0** | Support RabbitMQ as EventStore | [GitHub Issue](https://github.com/apache/issues/1553) | +| **Implemented in 1.8.0** | Provide Dashboard | [GitHub Issue](https://github.com/apache/issues/700), [GSoC '22](https://issues.apache.org/jira/browse/COMDEV-465) +| **In Progress** | Source/Sink 
Connector | [GitHub Issue](https://github.com/apache/eventmesh/issues/3492) | +| **In Progress** | K8s integration | [GitHub Issue](https://github.com/apache/eventmesh/issues/3327) | +| **In Progress** | OpenFunction integration | [GitHub Issue](https://github.com/apache/eventmesh/issues/2040) | +| **In Progress** | OpenSergo integration | [GitHub Issue](https://github.com/apache/eventmesh/issues/2805) | +| Planned | Transaction Event | [GitHub Issue](https://github.com/apache/issues/697) | +| Planned | Provide NodeJS SDK | [GitHub Issue](https://github.com/apache/eventmesh/) | +| Planned | Provide PHP SDK | [GitHub Issue](https://github.com/apache/eventmesh/3) | +| Planned | Event Query Language (EQL) | [GitHub Issue](https://github.com/apache/eventmesh/) | +| Planned | WebAssembly Runtime | [GitHub Issue](https://github.com/apache/eventmesh/) | + diff --git a/versioned_docs/version-v1.10.0/sdk-java/01-intro.md b/versioned_docs/version-v1.10.0/sdk-java/01-intro.md new file mode 100644 index 0000000000..2c1a74bbef --- /dev/null +++ b/versioned_docs/version-v1.10.0/sdk-java/01-intro.md @@ -0,0 +1,41 @@ +# Installation + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +[![Maven Central](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java/badge.svg?style=for-the-badge)](https://maven-badges.herokuapp.com/maven-central/org.apache.eventmesh/eventmesh-sdk-java) + +EventMesh SDK for Java is a collection of Java libraries to integrate EventMesh in a Java application. The SDK supports sending and receiving synchronous messages, asynchronous messages, and broadcast messages in TCP, HTTP, and gRPC protocols. The SDK implements EventMesh Message, CloudEvents, and OpenMessaging formats. The demo project is available in the [`eventmesh-example`](https://github.com/apache/eventmesh/tree/master/eventmesh-examples) module. + + + + +​ To install EventMesh SDK for Java with Gradle, declare `org.apache.eventmesh:eventmesh-sdk-java` as `implementation` in the dependencies block of the module's `build.gradle` file. + +```groovy +dependencies { + implementation 'org.apache.eventmesh:eventmesh-sdk-java:1.4.0' +} +``` + + + + + + +To install EventMesh SDK for Java with Maven, declare `org.apache.eventmesh:eventmesh-sdk-java` as a dependency in the dependencies block of the project's `pom.xml` file. + +```xml + + + org.apache.eventmesh + eventmesh-sdk-java + 1.4.0 + + +``` + + + + + diff --git a/versioned_docs/version-v1.10.0/sdk-java/02-http.md b/versioned_docs/version-v1.10.0/sdk-java/02-http.md new file mode 100644 index 0000000000..5e5ae8b4db --- /dev/null +++ b/versioned_docs/version-v1.10.0/sdk-java/02-http.md @@ -0,0 +1,115 @@ +# HTTP Protocol + +EventMesh SDK for Java implements the HTTP producer and consumer of asynchronous messages. Both the producer and consumer require an instance of `EventMeshHttpClientConfig` class that specifies the configuration of EventMesh HTTP client. The `liteEventMeshAddr`, `userName`, and `password` fields should match the `eventmesh.properties` file of EventMesh runtime. 
+ +```java +import org.apache.eventmesh.client.http.conf.EventMeshHttpClientConfig; +import org.apache.eventmesh.common.utils.IPUtils; +import org.apache.eventmesh.common.utils.ThreadUtils; + +public class HTTP { + public static void main(String[] args) throws Exception { + EventMeshHttpClientConfig eventMeshClientConfig = EventMeshHttpClientConfig.builder() + .liteEventMeshAddr("localhost:10105") + .producerGroup("TEST_PRODUCER_GROUP") + .env("env") + .idc("idc") + .ip(IPUtils.getLocalAddress()) + .sys("1234") + .pid(String.valueOf(ThreadUtils.getPID())) + .userName("eventmesh") + .password("password") + .build(); + /* ... */ + } +} +``` + +## HTTP Consumer + +The `EventMeshHttpConsumer` class implements the `heartbeat`, `subscribe`, and `unsubscribe` methods. The `subscribe` method accepts a list of `SubscriptionItem` that defines the topics to be subscribed and a callback URL. + +```java +import org.apache.eventmesh.client.http.consumer.EventMeshHttpConsumer; +import org.apache.eventmesh.common.protocol.SubscriptionItem; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; +import com.google.common.collect.Lists; + +public class HTTP { + final String url = "http://localhost:8080/callback"; + final List topicList = Lists.newArrayList( + new SubscriptionItem("eventmesh-async-topic", SubscriptionMode.CLUSTERING, SubscriptionType.ASYNC) + ); + + public static void main(String[] args) throws Exception { + /* ... */ + eventMeshHttpConsumer = new EventMeshHttpConsumer(eventMeshClientConfig); + eventMeshHttpConsumer.heartBeat(topicList, url); + eventMeshHttpConsumer.subscribe(topicList, url); + /* ... */ + eventMeshHttpConsumer.unsubscribe(topicList, url); + } +} +``` + +The EventMesh runtime will send a POST request that contains the message in the [CloudEvents format](https://github.com/cloudevents/spec) to the callback URL. The [`SubController.java` file](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/http/demo/sub/controller/SubController.java) implements a Spring Boot controller that receives and parses the callback messages. + +## HTTP Producer + +The `EventMeshHttpProducer` class implements the `publish` method. The `publish` method accepts the message to be published and an optional timeout value. The message should be an instance of either of these classes: + +- `org.apache.eventmesh.common.EventMeshMessage` +- `io.cloudevents.CloudEvent` +- `io.openmessaging.api.Message` + +```java +import org.apache.eventmesh.client.http.producer.EventMeshHttpProducer; +import org.apache.eventmesh.client.tcp.common.EventMeshCommon; +import org.apache.eventmesh.common.Constants; +import org.apache.eventmesh.common.utils.JsonUtils; + +import io.cloudevents.CloudEvent; +import io.cloudevents.core.builder.CloudEventBuilder; + +public class HTTP { + public static void main(String[] args) throws Exception { + /* ... 
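           the omitted setup builds the eventMeshClientConfig used below (see the EventMeshHttpClientConfig snippet at the top of this page)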
*/ + EventMeshHttpProducer eventMeshHttpProducer = new EventMeshHttpProducer(eventMeshClientConfig); + Map content = new HashMap<>(); + content.put("content", "testAsyncMessage"); + + CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject("eventmesh-async-topic") + .withSource(URI.create("/")) + .withDataContentType("application/cloudevents+json") + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); + eventMeshHttpProducer.publish(event); + } +} +``` + +## Using Curl Command + +You can also publish/subscribe event without eventmesh SDK. + +### Publish + +```shell +curl -H "Content-Type:application/json" -X POST -d '{"name": "admin", "pass":"12345678"}' http://127.0.0.1:10105/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC +``` + +After you start the eventmesh runtime server, you can use the curl command publish the event to the specific topic with the HTTP POST method and the package body must be in JSON format. The publish url like (http://127.0.0.1:10105/eventmesh/publish/TEST-TOPIC-HTTP-ASYNC), and you will get the publish successful result. + +### Subscribe + +```shell +curl -H "Content-Type:application/json" -X POST -d '{"url": "http://127.0.0.1:8088/sub/test", "consumerGroup":"TEST-GROUP", "topic":[{"mode":"CLUSTERING","topic":"TEST-TOPIC-HTTP-ASYNC","type":"ASYNC"}]}' http://127.0.0.1:10105/eventmesh/subscribe/local +``` + +After you start the eventmesh runtime server, you can use the curl command to subscribe the specific topic list with the HTTP POST method, and the package body must be in JSON format. The subscribe url like (http://127.0.0.1:10105/eventmesh/subscribe/local), and you will get the subscribe successful result. You should pay attention to the `url` field in the package body, which means you need to set up an HTTP service at the specified URL, you can see the example in the `eventmesh-examples` module. + diff --git a/versioned_docs/version-v1.10.0/sdk-java/03-tcp.md b/versioned_docs/version-v1.10.0/sdk-java/03-tcp.md new file mode 100644 index 0000000000..87a682dc15 --- /dev/null +++ b/versioned_docs/version-v1.10.0/sdk-java/03-tcp.md @@ -0,0 +1,118 @@ +# TCP Protocol + +EventMesh SDK for Java implements the TCP producer and consumer of synchronous, asynchronous, and broadcast messages. Both the producer and consumer require an instance of `EventMeshTCPClientConfig` class that specifies the configuration of EventMesh TCP client. The `host` and `port` fields should match the `eventmesh.properties` file of EventMesh runtime. + +```java +import org.apache.eventmesh.client.tcp.conf.EventMeshTCPClientConfig; +import org.apache.eventmesh.client.tcp.common.ReceiveMsgHook; +import io.cloudevents.CloudEvent; + +public class AsyncSubscribe implements ReceiveMsgHook { + public static void main(String[] args) throws InterruptedException { + EventMeshTCPClientConfig eventMeshTcpClientConfig = EventMeshTCPClientConfig.builder() + .host(eventMeshIp) + .port(eventMeshTcpPort) + .userAgent(userAgent) + .build(); + /* ... */ + } +} +``` + +## TCP Consumer + +The consumer should implement the `ReceiveMsgHook` class, which is defined in [`ReceiveMsgHook.java`](https://github.com/apache/eventmesh/blob/master/eventmesh-sdk-java/src/main/java/org/apache/eventmesh/client/tcp/common/ReceiveMsgHook.java). 
+ +```java +public interface ReceiveMsgHook { + Optional handle(ProtocolMessage msg); +} +``` + +The `EventMeshTCPClient` class implements the `subscribe` method. The `subscribe` method accepts the topic, the `SubscriptionMode`, and the `SubscriptionType`. The `handle` method will be invoked when the consumer receives a message from the topic it subscribes. If the `SubscriptionType` is `SYNC`, the return value of `handle` will be sent back to the producer. + +```java +import org.apache.eventmesh.client.tcp.EventMeshTCPClient; +import org.apache.eventmesh.client.tcp.EventMeshTCPClientFactory; +import org.apache.eventmesh.client.tcp.common.ReceiveMsgHook; +import org.apache.eventmesh.common.protocol.SubscriptionMode; +import org.apache.eventmesh.common.protocol.SubscriptionType; +import io.cloudevents.CloudEvent; + +public class TCPConsumer implements ReceiveMsgHook { + public static TCPConsumer handler = new TCPConsumer(); + private static EventMeshTCPClient client; + + public static void main(String[] args) throws Exception { + client = EventMeshTCPClientFactory.createEventMeshTCPClient( + eventMeshTcpClientConfig, + CloudEvent.class + ); + client.init(); + + client.subscribe( + "eventmesh-sync-topic", + SubscriptionMode.CLUSTERING, + SubscriptionType.SYNC + ); + + client.registerSubBusiHandler(handler); + client.listen(); + } + + @Override + public Optional handle(CloudEvent message) { + log.info("Messaged received: {}", message); + return Optional.of(message); + } +} +``` + +## TCP Producer + +### Asynchronous Producer + +The `EventMeshTCPClient` class implements the `publish` method. The `publish` method accepts the message to be published and an optional timeout value and returns the response message from the consumer. + +```java +/* ... */ +client = EventMeshTCPClientFactory.createEventMeshTCPClient(eventMeshTcpClientConfig, CloudEvent.class); +client.init(); + +CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); +client.publish(event, 1000); +``` + +### Synchronous Producer + +The `EventMeshTCPClient` class implements the `rr` method. The `rr` method accepts the message to be published and an optional timeout value and returns the response message from the consumer. + +```java +/* ... 
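   the omitted setup builds the eventMeshTcpClientConfig used below (see the EventMeshTCPClientConfig snippet at the top of this page)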
*/ +client = EventMeshTCPClientFactory.createEventMeshTCPClient(eventMeshTcpClientConfig, CloudEvent.class); +client.init(); + +CloudEvent event = CloudEventBuilder.v1() + .withId(UUID.randomUUID().toString()) + .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC) + .withSource(URI.create("/")) + .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE) + .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME) + .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8)) + .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000)) + .build(); + +Package response = client.rr(event, 1000); +CloudEvent replyEvent = EventFormatProvider + .getInstance() + .resolveFormat(JsonFormat.CONTENT_TYPE) + .deserialize(response.getBody().toString().getBytes(StandardCharsets.UTF_8)); +``` diff --git a/versioned_docs/version-v1.10.0/sdk-java/04-grpc.md b/versioned_docs/version-v1.10.0/sdk-java/04-grpc.md new file mode 100644 index 0000000000..79d0d4ae57 --- /dev/null +++ b/versioned_docs/version-v1.10.0/sdk-java/04-grpc.md @@ -0,0 +1,174 @@ +# gRPC Protocol + +EventMesh SDK for Java implements the gRPC producer and consumer of synchronous, asynchronous, and broadcast messages. Both the producer and consumer require an instance of `EventMeshGrpcClientConfig` class that specifies the configuration of EventMesh gRPC client. The `liteEventMeshAddr`, `userName`, and `password` fields should match the `eventmesh.properties` file of EventMesh runtime. + +```java +import org.apache.eventmesh.client.grpc.config.EventMeshGrpcClientConfig; +import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook; +import io.cloudevents.CloudEvent; + +public class CloudEventsAsyncSubscribe implements ReceiveMsgHook { + public static void main(String[] args) throws InterruptedException { + EventMeshGrpcClientConfig eventMeshClientConfig = EventMeshGrpcClientConfig.builder() + .serverAddr("localhost") + .serverPort(10205) + .consumerGroup(ExampleConstants.DEFAULT_EVENTMESH_TEST_CONSUMER_GROUP) + .env("env").idc("idc") + .sys("1234").build(); + /* ... */ + } +} +``` + +## gRPC Consumer + +### Stream Consumer + +The EventMesh runtime sends the message from producers to the stream consumer as a series of event streams. The consumer should implement the `ReceiveMsgHook` class, which is defined in [`ReceiveMsgHook.java`](https://github.com/apache/eventmesh/blob/master/eventmesh-sdk-java/src/main/java/org/apache/eventmesh/client/grpc/consumer/ReceiveMsgHook.java). + +```java +public interface ReceiveMsgHook { + Optional handle(T msg) throws Throwable; + String getProtocolType(); +} +``` + +The `EventMeshGrpcConsumer` class implements the `registerListener`, `subscribe`, and `unsubscribe` methods. The `subscribe` method accepts a list of `SubscriptionItem` that defines the topics to be subscribed to. The `registerListener` accepts an instance of a class that implements the `ReceiveMsgHook`. The `handle` method will be invoked when the consumer receives a message from the topic it subscribes. If the `SubscriptionType` is `SYNC`, the return value of `handle` will be sent back to the producer. 
+

```java
import org.apache.eventmesh.client.grpc.consumer.EventMeshGrpcConsumer;
import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook;
import org.apache.eventmesh.client.tcp.common.EventMeshCommon;
import org.apache.eventmesh.common.protocol.SubscriptionItem;
import org.apache.eventmesh.common.protocol.SubscriptionMode;
import org.apache.eventmesh.common.protocol.SubscriptionType;
import io.cloudevents.CloudEvent;

import java.util.Collections;
import java.util.Optional;

public class CloudEventsAsyncSubscribe implements ReceiveMsgHook<CloudEvent> {
    public static CloudEventsAsyncSubscribe handler = new CloudEventsAsyncSubscribe();
    public static void main(String[] args) throws InterruptedException {
        /* ... */
        SubscriptionItem subscriptionItem = new SubscriptionItem(
            "eventmesh-async-topic",
            SubscriptionMode.CLUSTERING,
            SubscriptionType.ASYNC
        );
        EventMeshGrpcConsumer eventMeshGrpcConsumer = new EventMeshGrpcConsumer(eventMeshClientConfig);

        eventMeshGrpcConsumer.init();
        eventMeshGrpcConsumer.registerListener(handler);
        eventMeshGrpcConsumer.subscribe(Collections.singletonList(subscriptionItem));
        /* ... */
        eventMeshGrpcConsumer.unsubscribe(Collections.singletonList(subscriptionItem));
    }

    @Override
    public Optional<CloudEvent> handle(CloudEvent message) {
        log.info("Message received: {}", message);
        return Optional.empty();
    }

    @Override
    public String getProtocolType() {
        return EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME;
    }
}
```

### Webhook Consumer

The `subscribe` method of the `EventMeshGrpcConsumer` class accepts a list of `SubscriptionItem` that defines the topics to be subscribed to and an optional callback URL. If the callback URL is provided, the EventMesh runtime will send a POST request that contains the message in the [CloudEvents format](https://github.com/cloudevents/spec) to the callback URL. The [`SubController.java` file](https://github.com/apache/eventmesh/blob/master/eventmesh-examples/src/main/java/org/apache/eventmesh/grpc/sub/app/controller/SubController.java) implements a Spring Boot controller that receives and parses the callback messages.

```java
import org.apache.eventmesh.client.grpc.consumer.EventMeshGrpcConsumer;
import org.apache.eventmesh.client.grpc.consumer.ReceiveMsgHook;
import org.apache.eventmesh.client.tcp.common.EventMeshCommon;
import org.apache.eventmesh.common.protocol.SubscriptionItem;
import org.apache.eventmesh.common.protocol.SubscriptionMode;
import org.apache.eventmesh.common.protocol.SubscriptionType;

@Component
public class SubService implements InitializingBean {
    final String url = "http://localhost:8080/callback";

    public void afterPropertiesSet() throws Exception {
        /* ... */
        eventMeshGrpcConsumer = new EventMeshGrpcConsumer(eventMeshClientConfig);
        eventMeshGrpcConsumer.init();

        SubscriptionItem subscriptionItem = new SubscriptionItem(
            "eventmesh-async-topic",
            SubscriptionMode.CLUSTERING,
            SubscriptionType.ASYNC
        );

        eventMeshGrpcConsumer.subscribe(Collections.singletonList(subscriptionItem), url);
        /* ... */
        eventMeshGrpcConsumer.unsubscribe(Collections.singletonList(subscriptionItem), url);
    }
}
```

## gRPC Producer

### Asynchronous Producer

The `EventMeshGrpcProducer` class implements the `publish` method. The `publish` method accepts the message to be published and an optional timeout value. The message should be an instance of either of these classes:

- `org.apache.eventmesh.common.EventMeshMessage`
- `io.cloudevents.CloudEvent`

```java
/* ...
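   Setup elided in this excerpt: it is assumed that `eventMeshClientConfig` is the
   EventMeshGrpcClientConfig instance built in the configuration example at the top of this page.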
*/
EventMeshGrpcProducer eventMeshGrpcProducer = new EventMeshGrpcProducer(eventMeshClientConfig);
eventMeshGrpcProducer.init();

Map<String, String> content = new HashMap<>();
content.put("content", "testAsyncMessage");

CloudEvent event = CloudEventBuilder.v1()
    .withId(UUID.randomUUID().toString())
    .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC)
    .withSource(URI.create("/"))
    .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE)
    .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME)
    .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8))
    .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000))
    .build();
eventMeshGrpcProducer.publish(event);
```

### Synchronous Producer

The `EventMeshGrpcProducer` class implements the `requestReply` method. The `requestReply` method accepts the message to be published and an optional timeout value, and returns the response message from the consumer. The message should be an instance of either of these classes:

- `org.apache.eventmesh.common.EventMeshMessage`
- `io.cloudevents.CloudEvent`

### Batch Producer

The `EventMeshGrpcProducer` class overloads the `publish` method, which accepts a list of messages to be published and an optional timeout value. The messages in the list should be instances of either of these classes:

- `org.apache.eventmesh.common.EventMeshMessage`
- `io.cloudevents.CloudEvent`

```java
/* ... */
// `content` is the payload map defined in the asynchronous example above
List<CloudEvent> cloudEventList = new ArrayList<>();
for (int i = 0; i < 5; i++) {
    CloudEvent event = CloudEventBuilder.v1()
        .withId(UUID.randomUUID().toString())
        .withSubject(ExampleConstants.EVENTMESH_GRPC_ASYNC_TEST_TOPIC)
        .withSource(URI.create("/"))
        .withDataContentType(ExampleConstants.CLOUDEVENT_CONTENT_TYPE)
        .withType(EventMeshCommon.CLOUD_EVENTS_PROTOCOL_NAME)
        .withData(JsonUtils.serialize(content).getBytes(StandardCharsets.UTF_8))
        .withExtension(Constants.EVENTMESH_MESSAGE_CONST_TTL, String.valueOf(4 * 1000))
        .build();

    cloudEventList.add(event);
}

eventMeshGrpcProducer.publish(cloudEventList);
/* ... */
``` diff --git a/versioned_docs/version-v1.10.0/sdk-java/_category_.json b/versioned_docs/version-v1.10.0/sdk-java/_category_.json new file mode 100644 index 0000000000..e93d4cdd8a --- /dev/null +++ b/versioned_docs/version-v1.10.0/sdk-java/_category_.json @@ -0,0 +1,5 @@
+{
+  "position": 3,
+  "label": "EventMesh SDK for Java",
+  "collapsed": false
+}
diff --git a/versioned_docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md b/versioned_docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md new file mode 100644 index 0000000000..e300cdf57e --- /dev/null +++ b/versioned_docs/version-v1.10.0/upgrade-guide/01-upgrade-guide.md @@ -0,0 +1,15 @@

# EventMesh Upgrade Guide

> This article briefly introduces the precautions for upgrading EventMesh from version 1.2.0 to the latest version.

## 1. Precautions

**If you are using EventMesh for the first time, you can ignore this chapter.**

## 2. Service upgrade installation

The upgrade and startup of the EventMesh runtime module can be done in accordance with the [deployment guide](https://eventmesh.apache.org/docs/instruction/runtime).

For differences and changes between versions, please refer to the [release notes](https://eventmesh.apache.org/events/release-notes) of different versions. Different versions remain compatible with each other.
+

If you need to use the latest features, upgrade to the corresponding version by following its release notes. The plug-in modules can be packaged and configured separately; refer to the corresponding [feature design documents and guidelines](https://eventmesh.apache.org/docs/design-document/).
diff --git a/versioned_docs/version-v1.10.0/upgrade-guide/_category_.json b/versioned_docs/version-v1.10.0/upgrade-guide/_category_.json new file mode 100644 index 0000000000..af764643e9 --- /dev/null +++ b/versioned_docs/version-v1.10.0/upgrade-guide/_category_.json @@ -0,0 +1,5 @@
+{
+  "position": 7,
+  "label": "Upgrade Guide",
+  "collapsed": false
+}
diff --git a/versioned_sidebars/version-v1.10.0-sidebars.json b/versioned_sidebars/version-v1.10.0-sidebars.json new file mode 100644 index 0000000000..c4f2c9c665 --- /dev/null +++ b/versioned_sidebars/version-v1.10.0-sidebars.json @@ -0,0 +1,77 @@
+{
+  "tutorialSidebar": [
+    "introduction",
+    "roadmap",
+    {
+      "type": "category",
+      "label": "Installation and Deployment",
+      "collapsible": true,
+      "collapsed": false,
+      "items": [
+        {
+          "type": "autogenerated",
+          "dirName": "instruction"
+        }
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Design Document",
+      "collapsible": true,
+      "collapsed": false,
+      "items": [
+        {
+          "type": "category",
+          "label": "Event Handling and Integration",
+          "collapsible": true,
+          "collapsed": false,
+          "items": [
+            {
+              "type": "autogenerated",
+              "dirName": "design-document/01-event-handling-and-integration"
+            }
+          ]
+        },
+        {
+          "type": "category",
+          "label": "Observability",
+          "collapsible": true,
+          "collapsed": false,
+          "items": [
+            {
+              "type": "autogenerated",
+              "dirName": "design-document/02-observability"
+            }
+          ]
+        },
+        "design-document/schema-registry",
+        "design-document/spi",
+        "design-document/stream"
+      ]
+    },
+    {
+      "type": "category",
+      "label": "EventMesh SDK for Java",
+      "collapsible": true,
+      "collapsed": false,
+      "items": [
+        {
+          "type": "autogenerated",
+          "dirName": "sdk-java"
+        }
+      ]
+    },
+    {
+      "type": "category",
+      "label": "Upgrade Guide",
+      "collapsible": true,
+      "collapsed": false,
+      "items": [
+        {
+          "type": "autogenerated",
+          "dirName": "upgrade-guide"
+        }
+      ]
+    }
+  ]
+}
diff --git a/versions.json b/versions.json index d0e522b67f..ec6a8b5820 100644 --- a/versions.json +++ b/versions.json @@ -1,4 +1,5 @@ [ + "v1.10.0", "v1.9.0", "v1.8.0", "v1.7.0",