From 2930de7f629a24f30d4b9109f59576879fa1b804 Mon Sep 17 00:00:00 2001 From: Dan Park Date: Wed, 11 Dec 2024 20:46:45 +0900 Subject: [PATCH] Deploy website - based on 7ab8c7277d4a6b7bbb6572266d98b5afa4888229 --- 404.html | 4 ++-- assets/js/{935f2afb.f8a0c813.js => 935f2afb.b89c384f.js} | 2 +- assets/js/d8004e22.d6e4be0e.js | 1 + assets/js/d8004e22.fe7f4d1a.js | 1 - ...{runtime~main.6262096e.js => runtime~main.8200397e.js} | 2 +- developers.html | 4 ++-- developers/build-your-contract.html | 4 ++-- developers/build-your-contract/deploy-your-contract.html | 4 ++-- developers/build-your-contract/developer-tools.html | 4 ++-- developers/client-apis.html | 4 ++-- developers/deployed-contracts.html | 4 ++-- developers/differences-from-ethereum.html | 4 ++-- developers/how-can-i-restore-my-account.html | 4 ++-- index.html | 4 ++-- learn/consensus/overview.html | 4 ++-- learn/consensus/requirements.html | 4 ++-- learn/consensus/rewards-and-penalties.html | 4 ++-- learn/consensus/validator-cycle.html | 4 ++-- learn/design-principles.html | 4 ++-- learn/layered-architecture/ethanos.html | 4 ++-- learn/layered-architecture/overview.html | 4 ++-- learn/tokenomics/distribution.html | 8 ++++---- learn/tokenomics/fee.html | 4 ++-- learn/tokenomics/feedback.html | 4 ++-- learn/tokenomics/overview.html | 4 ++-- operators.html | 4 ++-- operators/CLI-options/chronos.html | 4 ++-- operators/CLI-options/kairos.html | 4 ++-- operators/advanced-guides/run-with-docker.html | 4 ++-- operators/faqs.html | 4 ++-- operators/operate-restoration-client.html | 4 ++-- operators/operate-validators.html | 4 ++-- operators/run-a-node.html | 4 ++-- operators/system-requirements.html | 4 ++-- 34 files changed, 65 insertions(+), 65 deletions(-) rename assets/js/{935f2afb.f8a0c813.js => 935f2afb.b89c384f.js} (71%) create mode 100644 assets/js/d8004e22.d6e4be0e.js delete mode 100644 assets/js/d8004e22.fe7f4d1a.js rename assets/js/{runtime~main.6262096e.js => runtime~main.8200397e.js} (97%) diff --git 
a/404.html b/404.html index 6f634fe..b8e4e3c 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | OverProtocol Docs - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/935f2afb.f8a0c813.js b/assets/js/935f2afb.b89c384f.js similarity index 71% rename from assets/js/935f2afb.f8a0c813.js rename to assets/js/935f2afb.b89c384f.js index 8069e49..46f2f15 100644 --- a/assets/js/935f2afb.f8a0c813.js +++ b/assets/js/935f2afb.b89c384f.js @@ -1 +1 @@ -"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"v1.0.0","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"learnSidebar":[{"type":"html","value":"Learn","className":"sidebar-title"},{"type":"link","label":"Welcome to OverProtocol","href":"/","docId":"learn/index"},{"type":"link","label":"Design Principles","href":"/learn/design-principles","docId":"learn/design-principles"},{"type":"category","label":"Layered Architecture","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Ethanos","href":"/learn/layered-architecture/ethanos","docId":"learn/layered-architecture/ethanos"}],"href":"/learn/layered-architecture/overview"},{"type":"category","label":"Consensus Mechanism","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Validator Requirements","href":"/learn/consensus/requirements","docId":"learn/consensus/requirements"},{"type":"link","label":"Validator Cycle","href":"/learn/consensus/validator-cycle","docId":"learn/consensus/validator-cycle"},{"type":"link","label":"Rewards and Penalties","href":"/learn/consensus/rewards-and-penalties","docId":"learn/consensus/rewards-and-penalties"}],"href":"/learn/consensus/overview"},{"type":"category","label":"Tokenomics","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Token Distribution","href":"/learn/tokenomics/distribution","docId":"learn/tokenomics/distribution"},{"type":"link","label":"Deposit and 
Yield","href":"/learn/tokenomics/feedback","docId":"learn/tokenomics/feedback"},{"type":"link","label":"Fees","href":"/learn/tokenomics/fee","docId":"learn/tokenomics/fee"}],"href":"/learn/tokenomics/overview"}],"operatorsSidebar":[{"type":"html","value":"Operators","className":"sidebar-title"},{"type":"link","label":"Getting Started","href":"/operators/","docId":"operators/index"},{"type":"link","label":"System Requirements","href":"/operators/system-requirements","docId":"operators/system-requirements"},{"type":"link","label":"Run a Node","href":"/operators/run-a-node","docId":"operators/run-a-node"},{"type":"link","label":"Operate Validators","href":"/operators/operate-validators","docId":"operators/operate-validators"},{"type":"category","label":"Command Line Options","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Kairos","href":"/operators/CLI-options/kairos","docId":"operators/CLI-options/kairos"},{"type":"link","label":"Chronos","href":"/operators/CLI-options/chronos","docId":"operators/CLI-options/chronos"}]},{"type":"category","label":"Advanced Guides","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Run with Docker","href":"/operators/advanced-guides/run-with-docker","docId":"operators/advanced-guides/run-with-docker"}]},{"type":"link","label":"FAQs","href":"/operators/faqs","docId":"operators/faqs"}],"developersSidebar":[{"type":"html","value":"Developers","className":"sidebar-title"},{"type":"link","label":"Getting Started","href":"/developers/","docId":"developers/index"},{"type":"category","label":"Build Your Contract","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Deploy Your Contract","href":"/developers/build-your-contract/deploy-your-contract","docId":"developers/build-your-contract/deploy-your-contract"},{"type":"link","label":"Developer 
Tools","href":"/developers/build-your-contract/developer-tools","docId":"developers/build-your-contract/developer-tools"}],"href":"/developers/build-your-contract/"},{"type":"link","label":"Client APIs","href":"/developers/client-apis","docId":"developers/client-apis"},{"type":"link","label":"Deployed Contracts","href":"/developers/deployed-contracts","docId":"developers/deployed-contracts"}]},"docs":{"developers/build-your-contract/deploy-your-contract":{"id":"developers/build-your-contract/deploy-your-contract","title":"Deploy Your Contract","description":"Step-by-step guides for developers who wants to utilize Foundry, Hardhat or Remix.","sidebar":"developersSidebar"},"developers/build-your-contract/developer-tools":{"id":"developers/build-your-contract/developer-tools","title":"Developer Tools","description":"A list of developer tools that dApp developers on OverProtocol can utilize.","sidebar":"developersSidebar"},"developers/build-your-contract/index":{"id":"developers/build-your-contract/index","title":"Build Your Contract","description":"An overview of building smart contracts on OverProtocol.","sidebar":"developersSidebar"},"developers/client-apis":{"id":"developers/client-apis","title":"Client APIs","description":"A list of client APIs that developers can utilize to interact with OverProtocol.","sidebar":"developersSidebar"},"developers/deployed-contracts":{"id":"developers/deployed-contracts","title":"Deployed Contracts","description":"A list of contracts officially confirmed by foundation to quickly integrate and interact with established functionalities on the network.","sidebar":"developersSidebar"},"developers/differences-from-ethereum":{"id":"developers/differences-from-ethereum","title":"Differences from Ethereum","description":"A list of differences from Ethereum that can significantly impact how applications are built and function on this 
platform."},"developers/how-can-i-restore-my-account":{"id":"developers/how-can-i-restore-my-account","title":"How can I restore my Account?","description":"Guide to restoring an expired account"},"developers/index":{"id":"developers/index","title":"Welcome, Developers! \ud83d\ude80","description":"Get started with OverProtocol development! Learn how to set up nodes, access RPC endpoints, configure the Dolphin Testnet, and prepare your developer account with OVER tokens. Build scalable, decentralized applications with ease on OverProtocol\u2019s developer-friendly ecosystem.","sidebar":"developersSidebar"},"learn/consensus/overview":{"id":"learn/consensus/overview","title":"Over PoS Overview","description":"Discover how OverProtocol\'s adoption of Proof of Stake and the Gasper mechanism enhances scalability, inclusivity, and sustainability for its blockchain network.","sidebar":"learnSidebar"},"learn/consensus/requirements":{"id":"learn/consensus/requirements","title":"Validator Requirements","description":"Learn the key requirements for becoming a validator in OverProtocol, including staking minimums and uptime expectations.","sidebar":"learnSidebar"},"learn/consensus/rewards-and-penalties":{"id":"learn/consensus/rewards-and-penalties","title":"Rewards and Penalties","description":"Understand the rewards and penalties mechanism in OverProtocol\'s PoS system, designed to incentivize honest participation and ensure network stability.","sidebar":"learnSidebar"},"learn/consensus/validator-cycle":{"id":"learn/consensus/validator-cycle","title":"Validator Cycle","description":"Understand the lifecycle of a validator in OverProtocol, including activation, participation, exit, and withdrawal stages.","sidebar":"learnSidebar"},"learn/design-principles":{"id":"learn/design-principles","title":"Design Principles of OverProtocol","description":"Discover the foundational design principles of OverProtocol, a blockchain network built for accessibility, inclusivity, and 
sustainability. Learn how these principles guide its architecture to lower barriers, foster collaboration, and ensure long-term health.","sidebar":"learnSidebar"},"learn/index":{"id":"learn/index","title":"Welcome to OverProtocol \ud83c\udf10","description":"Welcome to OverProtocol, the blockchain network designed to break down barriers and empower participation. Our mission is to create an inclusive and sustainable ecosystem where anyone\u2014regardless of resources or technical expertise\u2014can contribute, innovate, and thrive. Explore the possibilities and join the movement shaping the next generation of blockchain technology.","sidebar":"learnSidebar"},"learn/layered-architecture/ethanos":{"id":"learn/layered-architecture/ethanos","title":"Ethanos","description":"Explore the Ethanos Algorithm, the core mechanism behind OverProtocol\'s layered architecture, enabling scalable and sustainable blockchain participation.","sidebar":"learnSidebar"},"learn/layered-architecture/overview":{"id":"learn/layered-architecture/overview","title":"Layered Architecture","description":"OverProtocol adopts a layered architecture for blockchain data management, balancing efficiency and accessibility.","sidebar":"learnSidebar"},"learn/tokenomics/distribution":{"id":"learn/tokenomics/distribution","title":"Token Distribution","description":"An introduction for distribution information of OVER","sidebar":"learnSidebar"},"learn/tokenomics/fee":{"id":"learn/tokenomics/fee","title":"Fees","description":"A description of OverProtocol\'s fee mechanisms.","sidebar":"learnSidebar"},"learn/tokenomics/feedback":{"id":"learn/tokenomics/feedback","title":"Deposit and Yield","description":"Description of the role and significance of this deposit and yield in a PoS blockchain, focusing on OverProtocol\'s system design.","sidebar":"learnSidebar"},"learn/tokenomics/overview":{"id":"learn/tokenomics/overview","title":"Tokenomics Overview","description":"Learn about the economic structure of 
OverProtocol, including the role of the native token OVER, its allocation, and the mechanisms supporting network growth and sustainability.","sidebar":"learnSidebar"},"operators/advanced-guides/run-with-docker":{"id":"operators/advanced-guides/run-with-docker","title":"Run with Docker","description":"Installation guide using Docker","sidebar":"operatorsSidebar"},"operators/CLI-options/chronos":{"id":"operators/CLI-options/chronos","title":"Command Line Options","description":"Geth command line options and sub-commands.","sidebar":"operatorsSidebar"},"operators/CLI-options/kairos":{"id":"operators/CLI-options/kairos","title":"Command Line Options","description":"Geth command line options and sub-commands.","sidebar":"operatorsSidebar"},"operators/faqs":{"id":"operators/faqs","title":"OverProtocol Validator FAQs","description":"Frequently asked questions about operating full nodes and validators of OverProtocol.","sidebar":"operatorsSidebar"},"operators/index":{"id":"operators/index","title":"Ready to Run Your Own Node?","description":"Discover how to become a key part of OverProtocol\u2019s decentralized ecosystem. This guide walks you through setting up a node, validating transactions, and contributing to the network\u2019s growth. Join the movement and power the future of blockchain today!","sidebar":"operatorsSidebar"},"operators/operate-restoration-client":{"id":"operators/operate-restoration-client","title":"Operate Restoration Client","description":"Step-by-step guides of how to operate restoration client."},"operators/operate-validators":{"id":"operators/operate-validators","title":"Operate Validators","description":"Learn how to register and operate validators in the OverProtocol network. 
This guide covers setup, staking, and best practices to help you actively contribute to network security, earn rewards, and enhance blockchain decentralization.","sidebar":"operatorsSidebar"},"operators/run-a-node":{"id":"operators/run-a-node","title":"Run a Node","description":"Setting up an OverProtocol node allows you to directly contribute to the network\u2019s decentralization and security. This guide provides clear steps for users of all experience levels, ensuring that anyone can start running a node with confidence.","sidebar":"operatorsSidebar"},"operators/system-requirements":{"id":"operators/system-requirements","title":"System Requirements","description":"This document outlines the hardware and network requirements for running an OverProtocol node. The information is tailored to ensure clarity and accessibility, even for those new to blockchain technology.","sidebar":"operatorsSidebar"}}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"v1.0.0","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"learnSidebar":[{"type":"html","value":"Learn","className":"sidebar-title"},{"type":"link","label":"Welcome to OverProtocol","href":"/","docId":"learn/index"},{"type":"link","label":"Design Principles","href":"/learn/design-principles","docId":"learn/design-principles"},{"type":"category","label":"Layered Architecture","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Ethanos","href":"/learn/layered-architecture/ethanos","docId":"learn/layered-architecture/ethanos"}],"href":"/learn/layered-architecture/overview"},{"type":"category","label":"Consensus Mechanism","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Validator 
Requirements","href":"/learn/consensus/requirements","docId":"learn/consensus/requirements"},{"type":"link","label":"Validator Cycle","href":"/learn/consensus/validator-cycle","docId":"learn/consensus/validator-cycle"},{"type":"link","label":"Rewards and Penalties","href":"/learn/consensus/rewards-and-penalties","docId":"learn/consensus/rewards-and-penalties"}],"href":"/learn/consensus/overview"},{"type":"category","label":"Tokenomics","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Token Distribution","href":"/learn/tokenomics/distribution","docId":"learn/tokenomics/distribution"},{"type":"link","label":"Deposit and Yield","href":"/learn/tokenomics/feedback","docId":"learn/tokenomics/feedback"},{"type":"link","label":"Fees","href":"/learn/tokenomics/fee","docId":"learn/tokenomics/fee"}],"href":"/learn/tokenomics/overview"}],"operatorsSidebar":[{"type":"html","value":"Operators","className":"sidebar-title"},{"type":"link","label":"Getting Started","href":"/operators/","docId":"operators/index"},{"type":"link","label":"System Requirements","href":"/operators/system-requirements","docId":"operators/system-requirements"},{"type":"link","label":"Run a Node","href":"/operators/run-a-node","docId":"operators/run-a-node"},{"type":"link","label":"Operate Validators","href":"/operators/operate-validators","docId":"operators/operate-validators"},{"type":"category","label":"Command Line Options","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Kairos","href":"/operators/CLI-options/kairos","docId":"operators/CLI-options/kairos"},{"type":"link","label":"Chronos","href":"/operators/CLI-options/chronos","docId":"operators/CLI-options/chronos"}]},{"type":"category","label":"Advanced Guides","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Run with 
Docker","href":"/operators/advanced-guides/run-with-docker","docId":"operators/advanced-guides/run-with-docker"}]},{"type":"link","label":"FAQs","href":"/operators/faqs","docId":"operators/faqs"}],"developersSidebar":[{"type":"html","value":"Developers","className":"sidebar-title"},{"type":"link","label":"Getting Started","href":"/developers/","docId":"developers/index"},{"type":"category","label":"Build Your Contract","collapsible":true,"collapsed":true,"items":[{"type":"link","label":"Deploy Your Contract","href":"/developers/build-your-contract/deploy-your-contract","docId":"developers/build-your-contract/deploy-your-contract"},{"type":"link","label":"Developer Tools","href":"/developers/build-your-contract/developer-tools","docId":"developers/build-your-contract/developer-tools"}],"href":"/developers/build-your-contract/"},{"type":"link","label":"Client APIs","href":"/developers/client-apis","docId":"developers/client-apis"},{"type":"link","label":"Deployed Contracts","href":"/developers/deployed-contracts","docId":"developers/deployed-contracts"}]},"docs":{"developers/build-your-contract/deploy-your-contract":{"id":"developers/build-your-contract/deploy-your-contract","title":"Deploy Your Contract","description":"Step-by-step guides for developers who wants to utilize Foundry, Hardhat or Remix.","sidebar":"developersSidebar"},"developers/build-your-contract/developer-tools":{"id":"developers/build-your-contract/developer-tools","title":"Developer Tools","description":"A list of developer tools that dApp developers on OverProtocol can utilize.","sidebar":"developersSidebar"},"developers/build-your-contract/index":{"id":"developers/build-your-contract/index","title":"Build Your Contract","description":"An overview of building smart contracts on OverProtocol.","sidebar":"developersSidebar"},"developers/client-apis":{"id":"developers/client-apis","title":"Client APIs","description":"A list of client APIs that developers can utilize to interact with 
OverProtocol.","sidebar":"developersSidebar"},"developers/deployed-contracts":{"id":"developers/deployed-contracts","title":"Deployed Contracts","description":"A list of contracts officially confirmed by foundation to quickly integrate and interact with established functionalities on the network.","sidebar":"developersSidebar"},"developers/differences-from-ethereum":{"id":"developers/differences-from-ethereum","title":"Differences from Ethereum","description":"A list of differences from Ethereum that can significantly impact how applications are built and function on this platform."},"developers/how-can-i-restore-my-account":{"id":"developers/how-can-i-restore-my-account","title":"How can I restore my Account?","description":"Guide to restoring an expired account"},"developers/index":{"id":"developers/index","title":"Welcome, Developers! \ud83d\ude80","description":"Get started with OverProtocol development! Learn how to set up nodes, access RPC endpoints, configure the Dolphin Testnet, and prepare your developer account with OVER tokens. 
Build scalable, decentralized applications with ease on OverProtocol\u2019s developer-friendly ecosystem.","sidebar":"developersSidebar"},"learn/consensus/overview":{"id":"learn/consensus/overview","title":"Over PoS Overview","description":"Discover how OverProtocol\'s adoption of Proof of Stake and the Gasper mechanism enhances scalability, inclusivity, and sustainability for its blockchain network.","sidebar":"learnSidebar"},"learn/consensus/requirements":{"id":"learn/consensus/requirements","title":"Validator Requirements","description":"Learn the key requirements for becoming a validator in OverProtocol, including staking minimums and uptime expectations.","sidebar":"learnSidebar"},"learn/consensus/rewards-and-penalties":{"id":"learn/consensus/rewards-and-penalties","title":"Rewards and Penalties","description":"Understand the rewards and penalties mechanism in OverProtocol\'s PoS system, designed to incentivize honest participation and ensure network stability.","sidebar":"learnSidebar"},"learn/consensus/validator-cycle":{"id":"learn/consensus/validator-cycle","title":"Validator Cycle","description":"Understand the lifecycle of a validator in OverProtocol, including activation, participation, exit, and withdrawal stages.","sidebar":"learnSidebar"},"learn/design-principles":{"id":"learn/design-principles","title":"Design Principles of OverProtocol","description":"Discover the foundational design principles of OverProtocol, a blockchain network built for accessibility, inclusivity, and sustainability. Learn how these principles guide its architecture to lower barriers, foster collaboration, and ensure long-term health.","sidebar":"learnSidebar"},"learn/index":{"id":"learn/index","title":"Welcome to OverProtocol \ud83c\udf10","description":"Welcome to OverProtocol, the blockchain network designed to break down barriers and empower participation. 
Our mission is to create an inclusive and sustainable ecosystem where anyone\u2014regardless of resources or technical expertise\u2014can contribute, innovate, and thrive. Explore the possibilities and join the movement shaping the next generation of blockchain technology.","sidebar":"learnSidebar"},"learn/layered-architecture/ethanos":{"id":"learn/layered-architecture/ethanos","title":"Ethanos","description":"Explore the Ethanos Algorithm, the core mechanism behind OverProtocol\'s layered architecture, enabling scalable and sustainable blockchain participation.","sidebar":"learnSidebar"},"learn/layered-architecture/overview":{"id":"learn/layered-architecture/overview","title":"Layered Architecture","description":"OverProtocol adopts a layered architecture for blockchain data management, balancing efficiency and accessibility.","sidebar":"learnSidebar"},"learn/tokenomics/distribution":{"id":"learn/tokenomics/distribution","title":"Token Distribution","description":"Learn about the allocation and initial supply dynamics of OVER tokens in OverProtocol, designed to ensure a stable and sustainable network.","sidebar":"learnSidebar"},"learn/tokenomics/fee":{"id":"learn/tokenomics/fee","title":"Fees","description":"A description of OverProtocol\'s fee mechanisms.","sidebar":"learnSidebar"},"learn/tokenomics/feedback":{"id":"learn/tokenomics/feedback","title":"Deposit and Yield","description":"Description of the role and significance of this deposit and yield in a PoS blockchain, focusing on OverProtocol\'s system design.","sidebar":"learnSidebar"},"learn/tokenomics/overview":{"id":"learn/tokenomics/overview","title":"Tokenomics Overview","description":"Learn about the economic structure of OverProtocol, including the role of the native token OVER, its allocation, and the mechanisms supporting network growth and sustainability.","sidebar":"learnSidebar"},"operators/advanced-guides/run-with-docker":{"id":"operators/advanced-guides/run-with-docker","title":"Run with 
Docker","description":"Installation guide using Docker","sidebar":"operatorsSidebar"},"operators/CLI-options/chronos":{"id":"operators/CLI-options/chronos","title":"Command Line Options","description":"Geth command line options and sub-commands.","sidebar":"operatorsSidebar"},"operators/CLI-options/kairos":{"id":"operators/CLI-options/kairos","title":"Command Line Options","description":"Geth command line options and sub-commands.","sidebar":"operatorsSidebar"},"operators/faqs":{"id":"operators/faqs","title":"OverProtocol Validator FAQs","description":"Frequently asked questions about operating full nodes and validators of OverProtocol.","sidebar":"operatorsSidebar"},"operators/index":{"id":"operators/index","title":"Ready to Run Your Own Node?","description":"Discover how to become a key part of OverProtocol\u2019s decentralized ecosystem. This guide walks you through setting up a node, validating transactions, and contributing to the network\u2019s growth. Join the movement and power the future of blockchain today!","sidebar":"operatorsSidebar"},"operators/operate-restoration-client":{"id":"operators/operate-restoration-client","title":"Operate Restoration Client","description":"Step-by-step guides of how to operate restoration client."},"operators/operate-validators":{"id":"operators/operate-validators","title":"Operate Validators","description":"Learn how to register and operate validators in the OverProtocol network. This guide covers setup, staking, and best practices to help you actively contribute to network security, earn rewards, and enhance blockchain decentralization.","sidebar":"operatorsSidebar"},"operators/run-a-node":{"id":"operators/run-a-node","title":"Run a Node","description":"Setting up an OverProtocol node allows you to directly contribute to the network\u2019s decentralization and security. 
This guide provides clear steps for users of all experience levels, ensuring that anyone can start running a node with confidence.","sidebar":"operatorsSidebar"},"operators/system-requirements":{"id":"operators/system-requirements","title":"System Requirements","description":"This document outlines the hardware and network requirements for running an OverProtocol node. The information is tailored to ensure clarity and accessibility, even for those new to blockchain technology.","sidebar":"operatorsSidebar"}}}')}}]); \ No newline at end of file diff --git a/assets/js/d8004e22.d6e4be0e.js b/assets/js/d8004e22.d6e4be0e.js new file mode 100644 index 0000000..b07e4d2 --- /dev/null +++ b/assets/js/d8004e22.d6e4be0e.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[926],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>k});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function l(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var s=n.createContext({}),u=function(e){var t=n.useContext(s),a=t;return e&&(a="function"==typeof e?e(t):l(l({},t),e)),a},d=function(e){var t=u(e.components);return n.createElement(s.Provider,{value:t},e.children)},p="mdxType",c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,i=e.originalType,s=e.parentName,d=o(e,["components","mdxType","originalType","parentName"]),p=u(a),m=r,k=p["".concat(s,".").concat(m)]||p[m]||c[m]||i;return 
a?n.createElement(k,l(l({ref:t},d),{},{components:a})):n.createElement(k,l({ref:t},d))}));function k(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var i=a.length,l=new Array(i);l[0]=m;var o={};for(var s in t)hasOwnProperty.call(t,s)&&(o[s]=t[s]);o.originalType=e,o[p]="string"==typeof e?e:r,l[1]=o;for(var u=2;u{a.r(t),a.d(t,{assets:()=>s,contentTitle:()=>l,default:()=>c,frontMatter:()=>i,metadata:()=>o,toc:()=>u});var n=a(7462),r=(a(7294),a(3905));const i={title:"Token Distribution",description:"Learn about the allocation and initial supply dynamics of OVER tokens in OverProtocol, designed to ensure a stable and sustainable network.",lang:"en"},l=void 0,o={unversionedId:"learn/tokenomics/distribution",id:"learn/tokenomics/distribution",title:"Token Distribution",description:"Learn about the allocation and initial supply dynamics of OVER tokens in OverProtocol, designed to ensure a stable and sustainable network.",source:"@site/docs/learn/tokenomics/distribution.md",sourceDirName:"learn/tokenomics",slug:"/learn/tokenomics/distribution",permalink:"/learn/tokenomics/distribution",draft:!1,editUrl:"https://github.com/overprotocol/overprotocol.github.io/edit/develop/docs/learn/tokenomics/distribution.md",tags:[],version:"current",frontMatter:{title:"Token Distribution",description:"Learn about the allocation and initial supply dynamics of OVER tokens in OverProtocol, designed to ensure a stable and sustainable network.",lang:"en"},sidebar:"learnSidebar",previous:{title:"Tokenomics Overview",permalink:"/learn/tokenomics/overview"},next:{title:"Deposit and Yield",permalink:"/learn/tokenomics/feedback"}},s={},u=[{value:"Token Allocation",id:"token-allocation",level:2},{value:"1. Staking Rewards",id:"1-staking-rewards",level:3},{value:"2. DAO Treasury",id:"treasury",level:3},{value:"3. Over Community Access Program(OCAP)",id:"3-over-community-access-programocap",level:3},{value:"4. 
Development and Strategic Investments",id:"4-development-and-strategic-investments",level:3},{value:"Initial Supply",id:"initial-supply",level:2},{value:"Circulating Supply at Genesis",id:"circulating-supply-at-genesis",level:3},{value:"Purpose of Initial Circulating Supply",id:"purpose-of-initial-circulating-supply",level:3},{value:"Initial Supply Summary",id:"initial-supply-summary",level:3},{value:"Staking Rewards",id:"staking-rewards",level:2},{value:"Minimum Guaranteed Rewards",id:"minimum-guaranteed-rewards",level:3},{value:"Adjustable Rewards",id:"adjustable-rewards",level:3}],d={toc:u},p="wrapper";function c(e){let{components:t,...a}=e;return(0,r.kt)(p,(0,n.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("p",null,"OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued."),(0,r.kt)("hr",null),(0,r.kt)("h2",{id:"token-allocation"},"Token Allocation"),(0,r.kt)("h3",{id:"1-staking-rewards"},"1. Staking Rewards"),(0,r.kt)("p",null,"Staking rewards are structured to ensure the network\u2019s stability and security. These rewards include a minimum guaranteed allocation of 200M OVER and an adjustable allocation of 100M OVER. Each year, 20M OVER is distributed as a fixed reward to participants. In addition, the network dynamically adjusts extra rewards through a feedback mechanism. Further details are described ",(0,r.kt)("a",{parentName:"p",href:"#staking-rewards"},"below"),"."),(0,r.kt)("h3",{id:"treasury"},"2. DAO Treasury"),(0,r.kt)("p",null,"The DAO Treasury serves as a funding resource for ecosystem development, distributed linearly over the first 10 years. The community decides how to allocate these funds through voting, and directing resources to initiatives such as new dApp development, network improvements, and user education campaigns. 
Additionally, transaction fees and other network-generated revenue are continuously added to the DAO Treasury, providing a steady stream of resources to support ongoing ecosystem growth."),(0,r.kt)("h3",{id:"3-over-community-access-programocap"},"3. Over Community Access Program(OCAP)"),(0,r.kt)("p",null,"Designed to boost engagement and adoption, this program supports activities like liquidity provision and airdrops. It encourages network participation from small-scale contributors and new users, helping to establish a strong initial user base for OverProtocol."),(0,r.kt)("h3",{id:"4-development-and-strategic-investments"},"4. Development and Strategic Investments"),(0,r.kt)("p",null,"Development and strategic investments are allocated to ensure the stable establishment and sustainable growth of the network during its early stages. These funds are distributed to the development team and early investors through a 2-year schedule, which includes a 6-month cliff and 18 months of linear vesting."),(0,r.kt)("img",{src:"/img/alloc_chart.png",style:{width:500},alt:"alloc_chart"}),(0,r.kt)("hr",null),(0,r.kt)("h2",{id:"initial-supply"},"Initial Supply"),(0,r.kt)("p",null,"At the genesis of OverProtocol, ",(0,r.kt)("strong",{parentName:"p"},"190 million OVER tokens")," (19% of the total supply) will be circulating to support the network's stability and early operation."),(0,r.kt)("h3",{id:"circulating-supply-at-genesis"},"Circulating Supply at Genesis"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},(0,r.kt)("strong",{parentName:"p"},"OCAP")," "),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"A total of ",(0,r.kt)("strong",{parentName:"li"},"150 million OVER")," from OCAP is circulating at genesis. 
"),(0,r.kt)("li",{parentName:"ul"},"These tokens are allocated for airdrops, liquidity provision, and user engagement to drive initial adoption and ecosystem participation."))),(0,r.kt)("li",{parentName:"ol"},(0,r.kt)("p",{parentName:"li"},(0,r.kt)("strong",{parentName:"p"},"Over Foundation(OF) and Over Technologies(OT)")," "),(0,r.kt)("ul",{parentName:"li"},(0,r.kt)("li",{parentName:"ul"},"Both categories are subject to a 2-year vesting schedule with a 6-month cliff. "),(0,r.kt)("li",{parentName:"ul"},"However, ",(0,r.kt)("strong",{parentName:"li"},"20% of their allocation")," (20 million OVER each) is unlocked at genesis. "),(0,r.kt)("li",{parentName:"ul"},"These unlocked tokens are deployed as ",(0,r.kt)("strong",{parentName:"li"},"validator staking resources"),", ensuring network security and stability in the early phases.")))),(0,r.kt)("h3",{id:"purpose-of-initial-circulating-supply"},"Purpose of Initial Circulating Supply"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Validator Staking"),": Tokens from OF and OT are used to stabilize the blockchain by operating validators during the early phase."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Ecosystem Growth"),": OCAP provides resources for airdrops, liquidity incentives, and user rewards to encourage network adoption."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Controlled Inflation"),": The careful management of initial supply prevents excessive inflation while maintaining security and decentralization.")),(0,r.kt)("h3",{id:"initial-supply-summary"},"Initial Supply Summary"),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"th"},"Category")),(0,r.kt)("th",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"th"},"Initial Circulating Supply 
(OVER)")),(0,r.kt)("th",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"th"},"Purpose")))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"td"},"OCAP")),(0,r.kt)("td",{parentName:"tr",align:null},"150M"),(0,r.kt)("td",{parentName:"tr",align:null},"Airdrops, liquidity, and user incentives.")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"td"},"OF")),(0,r.kt)("td",{parentName:"tr",align:null},"20M"),(0,r.kt)("td",{parentName:"tr",align:null},"Validator staking.")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"td"},"OT")),(0,r.kt)("td",{parentName:"tr",align:null},"20M"),(0,r.kt)("td",{parentName:"tr",align:null},"Validator staking.")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},(0,r.kt)("strong",{parentName:"td"},"Total")),(0,r.kt)("td",{parentName:"tr",align:null},"190M"),(0,r.kt)("td",{parentName:"tr",align:null})))),(0,r.kt)("hr",null),(0,r.kt)("h2",{id:"staking-rewards"},"Staking Rewards"),(0,r.kt)("h3",{id:"minimum-guaranteed-rewards"},"Minimum Guaranteed Rewards"),(0,r.kt)("p",null,"OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch."),(0,r.kt)("h3",{id:"adjustable-rewards"},"Adjustable Rewards"),(0,r.kt)("p",null,"The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. 
These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to ",(0,r.kt)("a",{parentName:"p",href:"/learn/tokenomics/feedback"},"this page")," for a comprehensive overview of the feedback mechanism."),(0,r.kt)("p",null,"After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers."),(0,r.kt)("table",null,(0,r.kt)("thead",{parentName:"table"},(0,r.kt)("tr",{parentName:"thead"},(0,r.kt)("th",{parentName:"tr",align:null},"Year"),(0,r.kt)("th",{parentName:"tr",align:null},"Minimum Issuance"),(0,r.kt)("th",{parentName:"tr",align:null},"Maximum Issuance"))),(0,r.kt)("tbody",{parentName:"table"},(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Year 1 ~ 10"),(0,r.kt)("td",{parentName:"tr",align:null},"20M OVER"),(0,r.kt)("td",{parentName:"tr",align:null},"30M OVER")),(0,r.kt)("tr",{parentName:"tbody"},(0,r.kt)("td",{parentName:"tr",align:null},"Year 11 ~"),(0,r.kt)("td",{parentName:"tr",align:null},"0 OVER"),(0,r.kt)("td",{parentName:"tr",align:null},"10M OVER")))))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/d8004e22.fe7f4d1a.js b/assets/js/d8004e22.fe7f4d1a.js deleted file mode 100644 index 37ad22f..0000000 --- a/assets/js/d8004e22.fe7f4d1a.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkover_docs=self.webpackChunkover_docs||[]).push([[926],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>k});var r=a(7294);function n(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return 
Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,r)}return a}function o(e){for(var t=1;t=0||(n[a]=e[a]);return n}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(n[a]=e[a])}return n}var l=r.createContext({}),u=function(e){var t=r.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):o(o({},t),e)),a},d=function(e){var t=u(e.components);return r.createElement(l.Provider,{value:t},e.children)},c="mdxType",p={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var a=e.components,n=e.mdxType,i=e.originalType,l=e.parentName,d=s(e,["components","mdxType","originalType","parentName"]),c=u(a),m=n,k=c["".concat(l,".").concat(m)]||c[m]||p[m]||i;return a?r.createElement(k,o(o({ref:t},d),{},{components:a})):r.createElement(k,o({ref:t},d))}));function k(e,t){var a=arguments,n=t&&t.mdxType;if("string"==typeof e||n){var i=a.length,o=new Array(i);o[0]=m;var s={};for(var l in t)hasOwnProperty.call(t,l)&&(s[l]=t[l]);s.originalType=e,s[c]="string"==typeof e?e:n,o[1]=s;for(var u=2;u{a.r(t),a.d(t,{assets:()=>l,contentTitle:()=>o,default:()=>p,frontMatter:()=>i,metadata:()=>s,toc:()=>u});var r=a(7462),n=(a(7294),a(3905));const i={title:"Token Distribution",description:"An introduction for distribution information of OVER",lang:"en"},o=void 0,s={unversionedId:"learn/tokenomics/distribution",id:"learn/tokenomics/distribution",title:"Token Distribution",description:"An introduction for distribution information of OVER",source:"@site/docs/learn/tokenomics/distribution.md",sourceDirName:"learn/tokenomics",slug:"/learn/tokenomics/distribution",permalink:"/learn/tokenomics/distribution",draft:!1,editUrl:"https://github.com/overprotocol/overprotocol.github.io/edit/develop/docs/learn/tokenomics/distribution.md",tags:[],version:"current",frontMatter:{title:"Token Distribution",description:"An introduction for 
distribution information of OVER",lang:"en"},sidebar:"learnSidebar",previous:{title:"Tokenomics Overview",permalink:"/learn/tokenomics/overview"},next:{title:"Deposit and Yield",permalink:"/learn/tokenomics/feedback"}},l={},u=[{value:"Token Allocation",id:"token-allocation",level:2},{value:"1. Staking Rewards",id:"1-staking-rewards",level:3},{value:"2. DAO Treasury",id:"treasury",level:3},{value:"3. Over Community Access Program(OCAP)",id:"3-over-community-access-programocap",level:3},{value:"4. Development and Strategic Investments",id:"4-development-and-strategic-investments",level:3},{value:"Initial Supply",id:"initial-supply",level:2},{value:"Circulating Supply at Genesis",id:"circulating-supply-at-genesis",level:3},{value:"Purpose of Initial Circulating Supply",id:"purpose-of-initial-circulating-supply",level:3},{value:"Staking Rewards",id:"staking-rewards",level:2},{value:"Minimum Guaranteed Rewards",id:"minimum-guaranteed-rewards",level:3},{value:"Adjustable Rewards",id:"adjustable-rewards",level:3}],d={toc:u},c="wrapper";function p(e){let{components:t,...a}=e;return(0,n.kt)(c,(0,r.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,n.kt)("p",null,"OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued."),(0,n.kt)("hr",null),(0,n.kt)("h2",{id:"token-allocation"},"Token Allocation"),(0,n.kt)("h3",{id:"1-staking-rewards"},"1. Staking Rewards"),(0,n.kt)("p",null,"Staking rewards are structured to ensure the network\u2019s stability and security. These rewards include a minimum guaranteed allocation of 200M OVER and an adjustable allocation of 100M OVER. Each year, 20M OVER is distributed as a fixed reward to participants. In addition, the network dynamically adjusts extra rewards through a feedback mechanism. Further details are described ",(0,n.kt)("a",{parentName:"p",href:"#staking-rewards"},"below"),"."),(0,n.kt)("h3",{id:"treasury"},"2. 
DAO Treasury"),(0,n.kt)("p",null,"The DAO Treasury serves as a funding resource for ecosystem development, distributed linearly over the first 10 years. The community decides how to allocate these funds through voting, and directing resources to initiatives such as new dApp development, network improvements, and user education campaigns. Additionally, transaction fees and other network-generated revenue are continuously added to the DAO Treasury, providing a steady stream of resources to support ongoing ecosystem growth."),(0,n.kt)("h3",{id:"3-over-community-access-programocap"},"3. Over Community Access Program(OCAP)"),(0,n.kt)("p",null,"Designed to boost engagement and adoption, this program supports activities like liquidity provision and airdrops. It encourages network participation from small-scale contributors and new users, helping to establish a strong initial user base for OverProtocol."),(0,n.kt)("h3",{id:"4-development-and-strategic-investments"},"4. Development and Strategic Investments"),(0,n.kt)("p",null,"Development and strategic investments are allocated to ensure the stable establishment and sustainable growth of the network during its early stages. 
These funds are distributed to the development team and early investors through a 2-year schedule, which includes a 6-month cliff and 18 months of linear vesting."),(0,n.kt)("img",{src:"/img/alloc_chart.png",style:{width:500},alt:"alloc_chart"}),(0,n.kt)("hr",null),(0,n.kt)("h2",{id:"initial-supply"},"Initial Supply"),(0,n.kt)("p",null,"At the genesis of OverProtocol, a portion of the token supply will be available in circulation to support the network's stability and early operation."),(0,n.kt)("h3",{id:"circulating-supply-at-genesis"},"Circulating Supply at Genesis"),(0,n.kt)("ol",null,(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"OCAP (Core Contributors)"),": "),(0,n.kt)("ul",{parentName:"li"},(0,n.kt)("li",{parentName:"ul"},"Entire allocation is ",(0,n.kt)("strong",{parentName:"li"},"circulating")," from the start."),(0,n.kt)("li",{parentName:"ul"},"These tokens represent contributors\u2019 rewards for establishing the protocol and community and are immediately accessible to support network growth."))),(0,n.kt)("li",{parentName:"ol"},(0,n.kt)("p",{parentName:"li"},(0,n.kt)("strong",{parentName:"p"},"OT (Investors) and OF (Foundation Operations)"),": "),(0,n.kt)("ul",{parentName:"li"},(0,n.kt)("li",{parentName:"ul"},"Both categories are subject to a ",(0,n.kt)("strong",{parentName:"li"},"2-year vesting schedule")," with a ",(0,n.kt)("strong",{parentName:"li"},"6-month cliff"),"."),(0,n.kt)("li",{parentName:"ul"},"However, ",(0,n.kt)("strong",{parentName:"li"},"20% of their allocation")," is unlocked at genesis and used for ",(0,n.kt)("strong",{parentName:"li"},"validator staking")," to stabilize the network during its early stages.")))),(0,n.kt)("h3",{id:"purpose-of-initial-circulating-supply"},"Purpose of Initial Circulating Supply"),(0,n.kt)("ul",null,(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("strong",{parentName:"li"},"Validator Staking"),": The unlocked tokens from OT and OF are deployed as staking 
resources to secure the blockchain and ensure robust consensus in the network\u2019s early phase."),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("strong",{parentName:"li"},"Ecosystem Stability"),": OCAP\u2019s immediate liquidity facilitates the rapid establishment of key ecosystem participants, such as developers and early adopters."),(0,n.kt)("li",{parentName:"ul"},(0,n.kt)("strong",{parentName:"li"},"Controlled Inflation"),": By carefully managing the initial supply, OverProtocol balances security and decentralization without overwhelming the token economy.")),(0,n.kt)("hr",null),(0,n.kt)("h2",{id:"staking-rewards"},"Staking Rewards"),(0,n.kt)("h3",{id:"minimum-guaranteed-rewards"},"Minimum Guaranteed Rewards"),(0,n.kt)("p",null,"OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch."),(0,n.kt)("h3",{id:"adjustable-rewards"},"Adjustable Rewards"),(0,n.kt)("p",null,"The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging between a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. 
Refer to ",(0,n.kt)("a",{parentName:"p",href:"/learn/tokenomics/feedback"},"this page")," for a comprehensive overview of the feedback mechanism."),(0,n.kt)("p",null,"After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers."),(0,n.kt)("table",null,(0,n.kt)("thead",{parentName:"table"},(0,n.kt)("tr",{parentName:"thead"},(0,n.kt)("th",{parentName:"tr",align:null},"Year"),(0,n.kt)("th",{parentName:"tr",align:null},"Minimum Issuance"),(0,n.kt)("th",{parentName:"tr",align:null},"Maximum Issuance"))),(0,n.kt)("tbody",{parentName:"table"},(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Year 1 ~ 10"),(0,n.kt)("td",{parentName:"tr",align:null},"20M OVER"),(0,n.kt)("td",{parentName:"tr",align:null},"30M OVER")),(0,n.kt)("tr",{parentName:"tbody"},(0,n.kt)("td",{parentName:"tr",align:null},"Year 11 ~"),(0,n.kt)("td",{parentName:"tr",align:null},"0 OVER"),(0,n.kt)("td",{parentName:"tr",align:null},"10M OVER")))))}p.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.6262096e.js b/assets/js/runtime~main.8200397e.js similarity index 97% rename from assets/js/runtime~main.6262096e.js rename to assets/js/runtime~main.8200397e.js index 6c69df6..8cd55c8 100644 --- a/assets/js/runtime~main.6262096e.js +++ b/assets/js/runtime~main.8200397e.js @@ -1 +1 @@ -(()=>{"use strict";var e,t,r,a,f,o={},d={};function n(e){var t=d[e];if(void 0!==t)return t.exports;var r=d[e]={exports:{}};return o[e].call(r.exports,r,r.exports,n),r.exports}n.m=o,e=[],n.O=(t,r,a,f)=>{if(!r){var o=1/0;for(i=0;i=f)&&Object.keys(n.O).every((e=>n.O[e](r[c])))?r.splice(c--,1):(d=!1,f0&&e[i-1][2]>f;i--)e[i]=e[i-1];e[i]=[r,a,f]},n.n=e=>{var t=e&&e.__esModule?()=>e.default:()=>e;return n.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return 
e;if(16&a&&"function"==typeof e.then)return e}var f=Object.create(null);n.r(f);var o={};t=t||[null,r({}),r([]),r(r)];for(var d=2&a&&e;"object"==typeof d&&!~t.indexOf(d);d=r(d))Object.getOwnPropertyNames(d).forEach((t=>o[t]=()=>e[t]));return o.default=()=>e,n.d(f,o),f},n.d=(e,t)=>{for(var r in t)n.o(t,r)&&!n.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},n.f={},n.e=e=>Promise.all(Object.keys(n.f).reduce(((t,r)=>(n.f[r](e,t),t)),[])),n.u=e=>"assets/js/"+({26:"93ff49c2",30:"24f0ede5",47:"8c2d38e9",53:"935f2afb",73:"b3c68a2c",108:"b9aeffe2",143:"5f02ec4e",169:"6571d487",206:"a5d2c59e",382:"8def2dd5",416:"3b6b5936",433:"b97ac028",434:"caac22b6",454:"eff5f417",465:"fa393df1",497:"e87f3a09",514:"1be78505",558:"abafa56d",587:"1bdd8635",633:"0879876d",648:"371a04a1",652:"2b8c0123",703:"fe9593e9",737:"75d63a0e",803:"7e4ee331",822:"63925da8",835:"cf4a6135",859:"669e6444",918:"17896441",926:"d8004e22",974:"72939e70",981:"15afe019",983:"ee1a2190"}[e]||e)+"."+{26:"0f671128",30:"8c25c975",47:"0d570d64",53:"f8a0c813",73:"6b25ffb8",108:"4f71667b",143:"24df96d6",169:"5d941ebb",206:"e5b529de",382:"23f8631a",416:"c87b9371",433:"fd99bae2",434:"4bdba21f",454:"8d65e67e",465:"4fb82f23",497:"f3cec2c8",514:"50d11fd2",558:"1c7e2d9f",587:"aef42469",633:"1ad11d36",648:"52ebc867",652:"e290c038",703:"6d24a5e6",737:"8b8edfef",803:"bad231d3",822:"d0d44fc6",835:"4405a29f",859:"2dbe5462",918:"a2ce1891",926:"fe7f4d1a",972:"6e80e6a5",974:"2cf4831e",981:"a9633514",983:"48efb849"}[e]+".js",n.miniCssF=e=>{},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),a={},f="over-docs:",n.l=(e,t,r,o)=>{if(a[e])a[e].push(t);else{var d,c;if(void 0!==r)for(var b=document.getElementsByTagName("script"),i=0;i{d.onerror=d.onload=null,clearTimeout(s);var f=a[e];if(delete 
a[e],d.parentNode&&d.parentNode.removeChild(d),f&&f.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:d}),12e4);d.onerror=l.bind(null,d.onerror),d.onload=l.bind(null,d.onload),c&&document.head.appendChild(d)}},n.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.p="/",n.gca=function(e){return e={17896441:"918","93ff49c2":"26","24f0ede5":"30","8c2d38e9":"47","935f2afb":"53",b3c68a2c:"73",b9aeffe2:"108","5f02ec4e":"143","6571d487":"169",a5d2c59e:"206","8def2dd5":"382","3b6b5936":"416",b97ac028:"433",caac22b6:"434",eff5f417:"454",fa393df1:"465",e87f3a09:"497","1be78505":"514",abafa56d:"558","1bdd8635":"587","0879876d":"633","371a04a1":"648","2b8c0123":"652",fe9593e9:"703","75d63a0e":"737","7e4ee331":"803","63925da8":"822",cf4a6135:"835","669e6444":"859",d8004e22:"926","72939e70":"974","15afe019":"981",ee1a2190:"983"}[e]||e,n.p+n.u(e)},(()=>{var e={303:0,532:0};n.f.j=(t,r)=>{var a=n.o(e,t)?e[t]:void 0;if(0!==a)if(a)r.push(a[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var f=new Promise(((r,f)=>a=e[t]=[r,f]));r.push(a[2]=f);var o=n.p+n.u(t),d=new Error;n.l(o,(r=>{if(n.o(e,t)&&(0!==(a=e[t])&&(e[t]=void 0),a)){var f=r&&("load"===r.type?"missing":r.type),o=r&&r.target&&r.target.src;d.message="Loading chunk "+t+" failed.\n("+f+": "+o+")",d.name="ChunkLoadError",d.type=f,d.request=o,a[1](d)}}),"chunk-"+t,t)}},n.O.j=t=>0===e[t];var t=(t,r)=>{var a,f,o=r[0],d=r[1],c=r[2],b=0;if(o.some((t=>0!==e[t]))){for(a in d)n.o(d,a)&&(n.m[a]=d[a]);if(c)var i=c(n)}for(t&&t(r);b{"use strict";var e,t,r,a,f,o={},d={};function n(e){var t=d[e];if(void 0!==t)return t.exports;var r=d[e]={exports:{}};return o[e].call(r.exports,r,r.exports,n),r.exports}n.m=o,e=[],n.O=(t,r,a,f)=>{if(!r){var o=1/0;for(i=0;i=f)&&Object.keys(n.O).every((e=>n.O[e](r[c])))?r.splice(c--,1):(d=!1,f0&&e[i-1][2]>f;i--)e[i]=e[i-1];e[i]=[r,a,f]},n.n=e=>{var 
t=e&&e.__esModule?()=>e.default:()=>e;return n.d(t,{a:t}),t},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var f=Object.create(null);n.r(f);var o={};t=t||[null,r({}),r([]),r(r)];for(var d=2&a&&e;"object"==typeof d&&!~t.indexOf(d);d=r(d))Object.getOwnPropertyNames(d).forEach((t=>o[t]=()=>e[t]));return o.default=()=>e,n.d(f,o),f},n.d=(e,t)=>{for(var r in t)n.o(t,r)&&!n.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},n.f={},n.e=e=>Promise.all(Object.keys(n.f).reduce(((t,r)=>(n.f[r](e,t),t)),[])),n.u=e=>"assets/js/"+({26:"93ff49c2",30:"24f0ede5",47:"8c2d38e9",53:"935f2afb",73:"b3c68a2c",108:"b9aeffe2",143:"5f02ec4e",169:"6571d487",206:"a5d2c59e",382:"8def2dd5",416:"3b6b5936",433:"b97ac028",434:"caac22b6",454:"eff5f417",465:"fa393df1",497:"e87f3a09",514:"1be78505",558:"abafa56d",587:"1bdd8635",633:"0879876d",648:"371a04a1",652:"2b8c0123",703:"fe9593e9",737:"75d63a0e",803:"7e4ee331",822:"63925da8",835:"cf4a6135",859:"669e6444",918:"17896441",926:"d8004e22",974:"72939e70",981:"15afe019",983:"ee1a2190"}[e]||e)+"."+{26:"0f671128",30:"8c25c975",47:"0d570d64",53:"b89c384f",73:"6b25ffb8",108:"4f71667b",143:"24df96d6",169:"5d941ebb",206:"e5b529de",382:"23f8631a",416:"c87b9371",433:"fd99bae2",434:"4bdba21f",454:"8d65e67e",465:"4fb82f23",497:"f3cec2c8",514:"50d11fd2",558:"1c7e2d9f",587:"aef42469",633:"1ad11d36",648:"52ebc867",652:"e290c038",703:"6d24a5e6",737:"8b8edfef",803:"bad231d3",822:"d0d44fc6",835:"4405a29f",859:"2dbe5462",918:"a2ce1891",926:"d6e4be0e",972:"6e80e6a5",974:"2cf4831e",981:"a9633514",983:"48efb849"}[e]+".js",n.miniCssF=e=>{},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return 
window}}(),n.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),a={},f="over-docs:",n.l=(e,t,r,o)=>{if(a[e])a[e].push(t);else{var d,c;if(void 0!==r)for(var b=document.getElementsByTagName("script"),i=0;i{d.onerror=d.onload=null,clearTimeout(s);var f=a[e];if(delete a[e],d.parentNode&&d.parentNode.removeChild(d),f&&f.forEach((e=>e(r))),t)return t(r)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:d}),12e4);d.onerror=l.bind(null,d.onerror),d.onload=l.bind(null,d.onload),c&&document.head.appendChild(d)}},n.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.p="/",n.gca=function(e){return e={17896441:"918","93ff49c2":"26","24f0ede5":"30","8c2d38e9":"47","935f2afb":"53",b3c68a2c:"73",b9aeffe2:"108","5f02ec4e":"143","6571d487":"169",a5d2c59e:"206","8def2dd5":"382","3b6b5936":"416",b97ac028:"433",caac22b6:"434",eff5f417:"454",fa393df1:"465",e87f3a09:"497","1be78505":"514",abafa56d:"558","1bdd8635":"587","0879876d":"633","371a04a1":"648","2b8c0123":"652",fe9593e9:"703","75d63a0e":"737","7e4ee331":"803","63925da8":"822",cf4a6135:"835","669e6444":"859",d8004e22:"926","72939e70":"974","15afe019":"981",ee1a2190:"983"}[e]||e,n.p+n.u(e)},(()=>{var e={303:0,532:0};n.f.j=(t,r)=>{var a=n.o(e,t)?e[t]:void 0;if(0!==a)if(a)r.push(a[2]);else if(/^(303|532)$/.test(t))e[t]=0;else{var f=new Promise(((r,f)=>a=e[t]=[r,f]));r.push(a[2]=f);var o=n.p+n.u(t),d=new Error;n.l(o,(r=>{if(n.o(e,t)&&(0!==(a=e[t])&&(e[t]=void 0),a)){var f=r&&("load"===r.type?"missing":r.type),o=r&&r.target&&r.target.src;d.message="Loading chunk "+t+" failed.\n("+f+": "+o+")",d.name="ChunkLoadError",d.type=f,d.request=o,a[1](d)}}),"chunk-"+t,t)}},n.O.j=t=>0===e[t];var t=(t,r)=>{var a,f,o=r[0],d=r[1],c=r[2],b=0;if(o.some((t=>0!==e[t]))){for(a in d)n.o(d,a)&&(n.m[a]=d[a]);if(c)var i=c(n)}for(t&&t(r);b Welcome, Developers! 🚀 | OverProtocol Docs - +

Welcome, Developers! 🚀

Welcome to the OverProtocol developer documentation—a space where innovation meets blockchain. Whether you’re new to blockchain development or an experienced builder, this guide will equip you with everything you need to start creating applications on OverProtocol.

OverProtocol empowers developers with a robust, scalable, and inclusive blockchain ecosystem. From setting up your first node to deploying smart contracts, let’s get you started on your journey to building the decentralized future.

Setting Up a Node with RPC Access

To interact with the OverProtocol network, you'll need access to a node capable of handling Remote Procedure Calls (RPC). This will enable you to query and interact with the network, deploy contracts, and perform transactions programmatically.

Options for Setting Up a Node:

  • Running Your Own Node: Setting up and maintaining your own node gives you full control over network interactions. This can be done by following the setup instructions. Running your own node is beneficial for extensive development work that requires high levels of data integrity and privacy.
  • Using Public Nodes: If you prefer not to manage your own node, you can use publicly available RPC endpoints. These are provided by various services and can be accessed easily, though they might come with limitations on the rate of requests and reduced control over the node configuration.

Network Configurations

KeyValue
NetworkOverProtocol Mainnet
RPC URLhttps://rpc.overprotocol.com/
Chain ID54176
Currency symbolOVER
Block Explorer URLhttps://scan.over.network/

Preparing an Account with OVER Tokens

Developing on OverProtocol requires interacting with the network, which includes paying transaction fees or testing token transactions. Therefore, it's essential to have an account loaded with OVER tokens.

Setting Up Your Developer Account:

  • Acquire OVER Tokens: If you are working on the main network, you'll need to acquire OVER tokens, which can be done through exchanges or from other token holders.
  • Testnet Tokens: For testing purposes, you can use the OverProtocol testnet, where tokens can often be acquired for free from a faucet that distributes small amounts of tokens for development use. Join the developer community and feel free to ask for some tokens!
  • Secure Your Account: Ensure that your account is secure, especially if you are working with real tokens. Utilize hardware wallets or secure key management solutions to protect your private keys and account credentials.
- + \ No newline at end of file diff --git a/developers/build-your-contract.html b/developers/build-your-contract.html index e4eb0a5..081e18b 100644 --- a/developers/build-your-contract.html +++ b/developers/build-your-contract.html @@ -4,13 +4,13 @@ Build Your Contract | OverProtocol Docs - +

Build Your Contract

To build and deploy your decentralized application (dApp) on OverProtocol, you can use Ethereum-compatible development environments like Hardhat, Foundry or Remix. Each tool has its own setup and configuration process, but generally, you'll need to make adjustments to your project’s network configuration to connect with the OverProtocol network.

- + \ No newline at end of file diff --git a/developers/build-your-contract/deploy-your-contract.html b/developers/build-your-contract/deploy-your-contract.html index 53ed0b8..a5d5f00 100644 --- a/developers/build-your-contract/deploy-your-contract.html +++ b/developers/build-your-contract/deploy-your-contract.html @@ -4,13 +4,13 @@ Deploy Your Contract | OverProtocol Docs - +

Deploy Your Contract

OverProtocol's compatibility with the Ethereum Virtual Machine (EVM) allows you to leverage various Ethereum development environments to build and deploy your smart contracts. This guide outlines how to use popular tools like Foundry, Hardhat and Remix for developing on OverProtocol. Detailed steps and tips will ensure you understand the nuances of deploying effectively in each environment.

Build With Foundry

Foundry is a fast, portable, and modular toolkit for Ethereum application development. For detailed information and further utilization of Foundry, please refer to the official documentation.

Installation

Install Foundry by following the instructions on Foundry's GitHub repository and the installation guide.

Foundry consists of:

  • Forge: Ethereum testing framework (like Truffle, Hardhat and DappTools).
  • Cast: Swiss army knife for interacting with EVM smart contracts, sending transactions and getting chain data.
  • Anvil: Local Ethereum node, akin to Ganache, Hardhat Network.
  • Chisel: Fast, utilitarian, and verbose solidity REPL.

Creating a New Project

To start a new project with Foundry, use forge init

$ forge init hello_foundry

Let's check out what forge generated for us:

$ cd hello_foundry
$ ls

The default template comes with one dependency installed: Forge Standard Library. This is the preferred testing library used for Foundry projects. Additionally, the template also comes with an empty starter contract and a simple test.

We can build the project with forge build:

$ forge build
Compiling 27 files with 0.8.19
Solc 0.8.19 finished in 1.16s
Compiler run successful!

And run the tests with forge test:

$ forge test
No files changed, compilation skipped

Ran 2 tests for test/Counter.t.sol:CounterTest
[PASS] testFuzz_SetNumber(uint256) (runs: 256, μ: 30454, ~: 31310)
[PASS] test_Increment() (gas: 31325)
Suite result: ok. 2 passed; 0 failed; 0 skipped; finished in 9.15ms (8.89ms CPU time)

Ran 1 test suite in 13.66ms (9.15ms CPU time): 2 tests passed, 0 failed, 0 skipped (2 total tests)

You’ll notice that two new directories have popped up: out and cache.

The out directory contains your contract artifact, such as the ABI, while the cache is used by forge to only recompile what is necessary.

Deploying Your Contracts

You can find out the official Foundry's documentation here.

Forge can deploy smart contracts to a given network with the forge create command.

To deploy a contract, you must provide an RPC URL (env: ETH_RPC_URL) and the private key of the account that will deploy the contract.

To deploy MyContract to a network:

$ forge create --rpc-url <your_rpc_url> --private-key <your_private_key> src/MyContract.sol:MyContract
Compiling...
No files changed, compilation skipped
Deployer: 0x079E40B71d...
Deployed to: 0x92e9a5A338...
Transaction hash: 0x2c13f01a69...

Solidity files may contain multiple contracts. :MyContract above specifies which contract to deploy from the src/MyContract.sol file.

Use the --constructor-args flag to pass arguments to the constructor:

// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;

import {ERC20} from "solmate/tokens/ERC20.sol";

contract MyToken is ERC20 {
constructor(
string memory name,
string memory symbol,
uint8 decimals,
uint256 initialSupply
) ERC20(name, symbol, decimals) {
_mint(msg.sender, initialSupply);
}
}

Build With Hardhat

Hardhat is a development environment for Ethereum software. It consists of different components for editing, compiling, debugging and deploying your smart contracts and dApps, all of which work together to create a complete development environment. For detailed information and further utilization of Hardhat, please refer to the official documentation.

Installation

Install Hardhat in your project by following the instructions on Hardhat's Installation Guide.

Creating a New Project

To create the sample project, run npx hardhat init in your project folder:

$ npx hardhat init
888 888 888 888 888
888 888 888 888 888
888 888 888 888 888
8888888888 8888b. 888d888 .d88888 88888b. 8888b. 888888
888 888 "88b 888P" d88" 888 888 "88b "88b 888
888 888 .d888888 888 888 888 888 888 .d888888 888
888 888 888 888 888 Y88b 888 888 888 888 888 Y88b.
888 888 "Y888888 888 "Y88888 888 888 "Y888888 "Y888

👷 Welcome to Hardhat v2.22.3 👷‍

? What do you want to do? …
❯ Create a JavaScript project
Create a TypeScript project
Create a TypeScript project (with Viem)
Create an empty hardhat.config.js
Quit

Compiling Your Contracts

Next, if you take a look in the contracts/ folder, you'll see Lock.sol:

// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.24;

// Uncomment this line to use console.log
// import "hardhat/console.sol";

contract Lock {
uint public unlockTime;
address payable public owner;

event Withdrawal(uint amount, uint when);

constructor(uint _unlockTime) payable {
require(
block.timestamp < _unlockTime,
"Unlock time should be in the future"
);

unlockTime = _unlockTime;
owner = payable(msg.sender);
}

function withdraw() public {
// Uncomment this line, and the import of "hardhat/console.sol", to print a log in your terminal
// console.log("Unlock time is %o and block timestamp is %o", unlockTime, block.timestamp);

require(block.timestamp >= unlockTime, "You can't withdraw yet");
require(msg.sender == owner, "You aren't the owner");

emit Withdrawal(address(this).balance, block.timestamp);

owner.transfer(address(this).balance);
}
}

To compile it, simply run:

npx hardhat compile

Testing Your Contracts

Your project comes with tests that use Mocha, Chai, Ethers.js and Hardhat Ignition.

If you take a look in the test/ folder, you'll see a test file.

You can run your tests with npx hardhat test.

Deploying Your Contracts

Next, to deploy the contract we will use a Hardhat Ignition module.

Before running the module, we have to update hardhat.config.js.

// hardhat.config.js
require("@nomicfoundation/hardhat-toolbox");
require("dotenv").config();

/** @type import('hardhat/config').HardhatUserConfig */
module.exports = {
solidity: "0.8.24",
networks: {
over: {
url: process.env.OVER_RPC_URL,
accounts: [process.env.PRIVATE_KEY],
},
},
};

And run the following command to deploy your contract:

$ npx hardhat ignition deploy ignition/modules/Lock.js --network over
Deploying [ LockModule ]

Batch #1
Executing LockModule#Lock...
Batch #1
Executed LockModule#Lock

[ LockModule ] successfully deployed 🚀

Deployed Addresses

LockModule#Lock - 0x194B734f7f...

Build With Remix

Remix IDE is an open-source web and desktop application for creating and deploying Smart Contracts. For comprehensive guidance and advanced features of Remix, please refer to the official documentation.

Using Remix with OverProtocol

Access

Open Remix IDE in your web browser to begin. You can access it at https://remix.ethereum.org.

Connect

  • Configure MetaMask (or its alternatives): Ensure MetaMask or a similar compatible browser extension is installed in your browser and configured for the OverProtocol network.
  • Connect to OverProtocol: In the "Deploy & Run Transactions" plugin within Remix, select "Injected Web3" to connect Remix with the OverProtocol node through MetaMask.

Load Contracts

  • Write or Import Contracts: You can either write new smart contracts directly in the Remix editor or import existing files into the Remix environment.

Compile

  • Compile Contracts: Use Remix's Solidity compiler to compile your contracts. Make sure to select the appropriate compiler version that matches your contract's pragma statement.

Deploy

  • Deploy Contracts: Once compiled, deploy your contracts to OverProtocol by clicking on the "Deploy" button. Ensure that the correct environment (OverProtocol) and account are selected.

By following these steps, you can efficiently develop, test, and deploy smart contracts on OverProtocol, leveraging the powerful features of Remix IDE to enhance your development workflow.

- + \ No newline at end of file diff --git a/developers/build-your-contract/developer-tools.html b/developers/build-your-contract/developer-tools.html index f893e03..aa472c5 100644 --- a/developers/build-your-contract/developer-tools.html +++ b/developers/build-your-contract/developer-tools.html @@ -4,13 +4,13 @@ Developer Tools | OverProtocol Docs - +

Developer Tools

This section offers an overview of the developer tools available for OverProtocol. Since OverProtocol is EVM-compatible, developers familiar with creating DApps on other EVM chains will find a seamless transition to building on OverProtocol.

Additionally, we are in the process of developing OverProtocol-specific, developer-friendly tools aimed at further lowering the entry barrier for application builders. Stay tuned for updates!

Smart Contract Programming Languages

Development Environments

Frontend Libraries

Wallets

- + \ No newline at end of file diff --git a/developers/client-apis.html b/developers/client-apis.html index 723dc09..12a572e 100644 --- a/developers/client-apis.html +++ b/developers/client-apis.html @@ -4,7 +4,7 @@ Client APIs | OverProtocol Docs - + @@ -31,7 +31,7 @@ See eth_getFilterChanges

Example

// Request
curl -X POST --data '{"jsonrpc":"2.0","method":"eth_getLogs","params":[{"topics":["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b"]}],"id":74}'

Result: see eth_getFilterChanges

Usage Example

Deploying a contract using JSON_RPC

This section includes a demonstration of how to deploy a contract using only the RPC interface. There are alternative routes to deploying contracts where this complexity is abstracted away—for example, using libraries built on top of the RPC interface such as web3.js and web3.py. These abstractions are generally easier to understand and less error-prone, but it is still helpful to understand what is happening under the hood.

contract Multiply7 {
event Print(uint);
function multiply(uint input) returns (uint) {
Print(input * 7);
return input * 7;
}
}

The first thing to do is make sure the HTTP RPC interface is enabled. This means we supply Geth with the --http flag on startup. In this example we use the Geth node on a private development chain. Using this approach we don't need ether on the real network.

geth --http --dev console 2>>geth.log

This will start the HTTP RPC interface on http://RPC_URL.

We can verify that the interface is running by retrieving the Coinbase address and balance using curl. Please note that data in these examples will differ on your local node. If you want to try these commands, replace the request params in the second curl request with the result returned from the first.

curl --data '{"jsonrpc":"2.0","method":"eth_coinbase", "id":1}' -H "Content-Type: application/json" RPC_URL
{"id":1,"jsonrpc":"2.0","result":["0x9b1d35635cc34752ca54713bb99d38614f63c955"]}

curl --data '{"jsonrpc":"2.0","method":"eth_getBalance", "params": ["0x9b1d35635cc34752ca54713bb99d38614f63c955", "latest"], "id":2}' -H "Content-Type: application/json" RPC_URL
{"id":2,"jsonrpc":"2.0","result":"0x1639e49bba16280000"}

Because numbers are hex encoded, the balance is returned in wei as a hex string. If we want to have the balance in ether as a number we can use web3 from the Geth console.

web3.fromWei("0x1639e49bba16280000", "ether")
// "410"

Now that there is some ether on our private development chain, we can deploy the contract. The first step is to compile the Multiply7 contract to byte code that can be sent to the EVM. To install solc, the Solidity compiler, follow the Solidity documentation. (You might want to use an older solc release to match the version of compiler used for our example.)

The next step is to compile the Multiply7 contract to byte code that can be sent to the EVM.

echo 'pragma solidity ^0.4.16; contract Multiply7 { event Print(uint); function multiply(uint input) public returns (uint) { Print(input * 7); return input * 7; } }' | solc --bin

======= <stdin>:Multiply7 =======
Binary:
6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029

Now that we have the compiled code we need to determine how much gas it costs to deploy it. The RPC interface has an eth_estimateGas method that will give us an estimate.

curl --data '{"jsonrpc":"2.0","method": "eth_estimateGas", "params": [{"from": "0x9b1d35635cc34752ca54713bb99d38614f63c955", "data": "0x6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029"}], "id": 5}' -H "Content-Type: application/json" RPC_URL
{"jsonrpc":"2.0","id":5,"result":"0x1c31e"}

And finally deploy the contract.

curl --data '{"jsonrpc":"2.0","method": "eth_sendTransaction", "params": [{"from": "0x9b1d35635cc34752ca54713bb99d38614f63c955", "gas": "0x1c31e", "data": "0x6060604052341561000f57600080fd5b60eb8061001d6000396000f300606060405260043610603f576000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063c6888fa1146044575b600080fd5b3415604e57600080fd5b606260048080359060200190919050506078565b6040518082815260200191505060405180910390f35b60007f24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da600783026040518082815260200191505060405180910390a16007820290509190505600a165627a7a7230582040383f19d9f65246752244189b02f56e8d0980ed44e7a56c0b200458caad20bb0029"}], "id": 6}' -H "Content-Type: application/json" RPC_URL
{"id":6,"jsonrpc":"2.0","result":"0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf"}

The transaction is accepted by the node and a transaction hash is returned. This hash can be used to track the transaction. The next step is to determine the address where our contract is deployed. Each executed transaction will create a receipt. This receipt contains various information about the transaction such as in which block the transaction was included and how much gas was used by the EVM. If a transaction creates a contract it will also contain the contract address. We can retrieve the receipt with the eth_getTransactionReceipt RPC method.

curl --data '{"jsonrpc":"2.0","method": "eth_getTransactionReceipt", "params": ["0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf"], "id": 7}' -H "Content-Type: application/json" RPC_URL
{"jsonrpc":"2.0","id":7,"result":{"blockHash":"0x77b1a4f6872b9066312de3744f60020cbd8102af68b1f6512a05b7619d527a4f","blockNumber":"0x1","contractAddress":"0x4d03d617d700cf81935d7f797f4e2ae719648262","cumulativeGasUsed":"0x1c31e","from":"0x9b1d35635cc34752ca54713bb99d38614f63c955","gasUsed":"0x1c31e","logs":[],"logsBloom":"0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000","status":"0x1","to":null,"transactionHash":"0xe1f3095770633ab2b18081658bad475439f6a08c902d0915903bafff06e6febf","transactionIndex":"0x0"}}

Our contract was created on 0x4d03d617d700cf81935d7f797f4e2ae719648262. A null result instead of a receipt means the transaction has not been included in a block yet. Wait for a moment and check if your miner is running and retry it.

Interacting with smart contracts

In this example we will be sending a transaction using eth_sendTransaction to the multiply method of the contract.

eth_sendTransaction requires several arguments, specifically from, to and data. From is the public address of our account, and to is the contract address. The data argument contains a payload that defines which method must be called and with which arguments. This is where the ABI (application binary interface) comes into play. The ABI is a JSON file that defines how to define and encode data for the EVM.

The bytes of the payload define which method in the contract is called. They are the first 4 bytes of the Keccak hash over the function name and its argument types, hex encoded. The multiply function accepts an uint which is an alias for uint256. This leaves us with:

web3.sha3("multiply(uint256)").substring(0, 10)
// "0xc6888fa1"

The next step is to encode the arguments. There is only one uint256, say, the value 6. The ABI has a section which specifies how to encode uint256 types.

int<M>: enc(X) is the big-endian two’s complement encoding of X, padded on the higher-order (left) side with 0xff for negative X and with zero bytes for positive X such that the length is a multiple of 32 bytes.

This encodes to 0000000000000000000000000000000000000000000000000000000000000006.

Combining the function selector and the encoded argument our data will be 0xc6888fa10000000000000000000000000000000000000000000000000000000000000006.

This can now be sent to the node:

curl --data '{"jsonrpc":"2.0","method": "eth_sendTransaction", "params": [{"from": "0xeb85a5557e5bdc18ee1934a89d8bb402398ee26a", "to": "0x6ff93b4b46b41c0c3c9baee01c255d3b4675963d", "data": "0xc6888fa10000000000000000000000000000000000000000000000000000000000000006"}], "id": 8}' -H "Content-Type: application/json" RPC_URL
{"id":8,"jsonrpc":"2.0","result":"0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74"}

Since a transaction was sent, a transaction hash was returned. Retrieving the receipt gives:

{
blockHash: "0xbf0a347307b8c63dd8c1d3d7cbdc0b463e6e7c9bf0a35be40393588242f01d55",
blockNumber: 268,
contractAddress: null,
cumulativeGasUsed: 22631,
gasUsed: 22631,
logs: [{
address: "0x6ff93b4b46b41c0c3c9baee01c255d3b4675963d",
blockHash: "0xbf0a347307b8c63dd8c1d3d7cbdc0b463e6e7c9bf0a35be40393588242f01d55",
blockNumber: 268,
data: "0x000000000000000000000000000000000000000000000000000000000000002a",
logIndex: 0,
topics: ["0x24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da"],
transactionHash: "0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74",
transactionIndex: 0
}],
transactionHash: "0x759cf065cbc22e9d779748dc53763854e5376eea07409e590c990eafc0869d74",
transactionIndex: 0
}

The receipt contains a log. This log was generated by the EVM on transaction execution and included in the receipt. The multiply function shows that the Print event was raised with the input times 7. Since the argument for the Print event was a uint256 we can decode it according to the ABI rules which will leave us with the expected decimal 42. Apart from the data it is worth noting that topics can be used to determine which event created the log:

web3.sha3("Print(uint256)")
// "0x24abdb5865df5079dcc5ac590ff6f01d5c16edbc5fab4e195d9febd1114503da"

This was just a brief introduction into some of the most common tasks, demonstrating direct usage of the JSON-RPC.

- + \ No newline at end of file diff --git a/developers/deployed-contracts.html b/developers/deployed-contracts.html index e802705..b75b18c 100644 --- a/developers/deployed-contracts.html +++ b/developers/deployed-contracts.html @@ -4,13 +4,13 @@ Deployed Contracts | OverProtocol Docs - +

Deployed Contracts

To avoid unnecessary redeployment and to streamline your development process, we highly recommend utilizing our already deployed and verified contracts. This approach not only saves time and resources but also ensures that you are integrating with trusted and stable contract implementations.

Here, you can access comprehensive information for each contract with the explorer link. Using these verified contracts allows you to quickly integrate and interact with established functionalities on the network.

- + \ No newline at end of file diff --git a/developers/differences-from-ethereum.html b/developers/differences-from-ethereum.html index fabccd5..bcfd912 100644 --- a/developers/differences-from-ethereum.html +++ b/developers/differences-from-ethereum.html @@ -4,13 +4,13 @@ Differences from Ethereum | OverProtocol Docs - +

Differences from Ethereum

OverProtocol is an independent Layer 1 protocol that inherits the Ethereum Virtual Machine (EVM), ensuring compatibility with Ethereum's established ecosystem. This compatibility enables developers familiar with Ethereum to transition smoothly and leverage their existing skills. However, there are key distinctions between OverProtocol and Ethereum that developers must understand, as these differences can significantly impact how applications are built and function on this platform. Here are the crucial aspects to consider and the actions to take:

Contracts Getting Swiped

OverProtocol currently doesn’t support restoration of contract accounts. This means that once a contract address becomes inactive, all the OVER the contract was holding is permanently lost, so please take care when deploying and managing contracts.

One important case to note is prefunded contracts going inactive. If a prefunded contract (a contract address that has received OVER before the actual contract is deployed) is not used for more than 2 epochs and goes inactive, it is still restorable while the contract is not deployed since it is technically the same as an EOA. However, it will not be restorable once the contract is deployed, so make sure to restore the address before deploying the contract if there is any chance of it being prefunded.

You Can't Use the Same Contract Address in Ethereum

tip

While the same Externally Owned Account (EOA) address can be used across various EVM-compatible chains with the same private key, this does not apply to contract addresses.

Due to the state expiry feature in OverProtocol—even though the feature is currently disabled—the contract creation operation always incorporates the caller account's epochCoverage value to mitigate the risk of an expired contract address being reused by a newly created contract. This inclusion alters the outcome of the CREATE operation, making the resulting addresses differ from those on other EVM chains.

As a result, even though the CREATE2 operation allows for deterministic address prediction and usage, it is not possible to reuse the same address across different chains as you would with EOA addresses.

Actions to Take

  • Be aware that contract addresses on OverProtocol will differ from those on Ethereum and other EVM-compatible chains due to the inclusion of the epochCoverage value.
  • When deploying contracts, account for the different address derivation method and plan your deployment strategy accordingly.
// Create creates a new contract using code as deployment code.
func (evm *EVM) Create(caller ContractRef, code []byte, gas uint64, value *big.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
contractAddr = crypto.CreateAddress(caller.Address(), evm.StateDB.GetTxNonce(caller.Address())) // GetTxNonce = epochCoverage || Nonce
return evm.create(caller, &codeAndHash{code: code}, gas, value, contractAddr, CREATE)
}

// Create2 creates a new contract using code as deployment code.
//
// The difference between Create2 and Create is that Create2 uses keccak256(0xff ++ msg.sender ++ salt ++ keccak256(init_code))[12:]
// instead of the usual sender-and-nonce-hash as the address where the contract is initialized at.
func (evm *EVM) Create2(caller ContractRef, code []byte, gas uint64, endowment *big.Int, salt *uint256.Int) (ret []byte, contractAddr common.Address, leftOverGas uint64, err error) {
codeAndHash := &codeAndHash{code: code}
contractAddr = crypto.CreateAddress2(caller.Address(), salt.Bytes32(), codeAndHash.Hash().Bytes())
return evm.create(caller, codeAndHash, gas, endowment, contractAddr, CREATE2)
}

Transaction has an epochCoverage Field

In traditional blockchain architectures, the nonce primarily tracks the number of transactions sent from a given account, ensuring transaction order and preventing double-spending. However, due to the expiration feature in OverProtocol, distinguishing explicitly between expired accounts and newly created accounts becomes challenging, raising the possibility of nonce overlap. To address this issue, OverProtocol introduces the epochCoverage as a crucial component.

epochCoverage

The combination of the nonce and the epochCoverage value ensures uniqueness for each account. This system allows OverProtocol to maintain the integrity and distinction of account states over time, even through periods of account inactivity and expiration.

For a more detailed explanation, please refer to the documentation.

nonce Field in Transaction

The existing 64-bit nonce field is split into two parts, with the first 32 bits representing the epochCoverage and the remaining 32 bits functioning as the traditional nonce. This adaptation allows developers to leverage existing Ethereum development environments while accommodating the unique features of OverProtocol.

Actions to Take

  • Learn how epochCoverage functions and its interaction with the nonce to ensure each account's uniqueness.
  • Use RPC requests like eth_getTransactionCount when making transactions. The response will include the correct nonce value, considering both nonce and epochCoverage.

Misc

SELFDESTRUCT Operation

The SELFDESTRUCT opcode, updated in accordance with EIP-6780, is implemented in such a way that while it does not actually destroy the contract account, it does process refunds. Contracts that are not used will naturally expire over time as the Ethanos epoch progresses.

The rationale behind incorporating EIP-6780 into OverProtocol differs significantly from its application in Ethereum. OverProtocol's implementation is specifically designed to avoid scenarios where a self-destructed contract account becomes indistinguishable from an Externally Owned Account (EOA). This distinction is crucial for maintaining clarity and integrity in the network's account management, ensuring that the lifecycle of contract accounts is handled in a better way.

- + \ No newline at end of file diff --git a/developers/how-can-i-restore-my-account.html b/developers/how-can-i-restore-my-account.html index da83251..83af637 100644 --- a/developers/how-can-i-restore-my-account.html +++ b/developers/how-can-i-restore-my-account.html @@ -4,13 +4,13 @@ How can I restore my Account? | OverProtocol Docs - +

How can I restore my Account?

To restore your account which was swept away to the nether layer, you need to send a restore data to the restoration client.

Restore Data

A valid restore data should include the following fields.

  • ChainID - To prevent attacks across different networks
  • Target - Address of the target account to restore
  • SourceEpoch - The current epochCoverage of the target account. This field should be set to the default epochCoverage which is current epoch -1 if the account is not currently existent.
  • TargetEpoch - Target epoch to restore the account.
  • Fee - Fee to pay the fee recipient.
  • FeeRecipient - Account that receives the restore fee, typically the owner account of the restoration client

In order to construct your restore data, you should retrieve the proper Fee and FeeRecipient by sending an HTTP request to the restoration client you will use. Typically the restoration client will expose endpoints named something like minimumFee for fee information and feeRecipient for the fee recipient. Your requests will look something like this.

curl -X GET "http://hostAddress:hostPort/minimumFee"
curl -X GET "http://hostAddress:hostPort/feeRecipient"

Now before sending this restore data, you have to sign it. There are multiple reasons for this procedure, but the most important one is so the restoration client can’t manipulate the restoration fee. You can sign the restore data by using the SignRestoreData function in the types package. Any account, including the owner of the expired account, can be used to sign the restore data. Note that the one who signs the restore data—that is, the owner of the private key used to sign it—will be the one who pays the restoration fee.

After making a valid restore data and signing it, you can send the restore data through the requestRestoration HTTP POST method. The request should look something like this.

curl -H 'Content-Type: application/json' \
-X POST "http://hostAddress:hostPort/requestRestoration" \
--data '{"chainId": "0x84442",
"target": "0x1923f626bb8dc025849e00f99c25fe2b2f7fb0db",
"sourceEpoch": "0x10",
"targetEpoch": "0x5",
"fee": "0x100",
"feeRecipient": "0x07a565b7ed7d7a678680a4c162885bedbb695fe0",
"v": "0x26",
"r": "0x223a7c9bcf5531c99be5ea7082183816eb20cfe0bbc322e97cc5c7f71ab8b20e",
"s": "0x2aadee6b34b45bb15bc42d9c09de4a6754e7000908da72d48cc7704971491663"
}'

After the restoration client validates the restore data and checks that the fee is profitable, it will create the corresponding restoration proof and send a restoration transaction to restore your account.

We are currently working on a more user friendly interface for signing and sending restore data. Until then, please use the solution above.

- + \ No newline at end of file diff --git a/index.html b/index.html index 95061b9..2a22fc1 100644 --- a/index.html +++ b/index.html @@ -4,13 +4,13 @@ Welcome to OverProtocol 🌐 | OverProtocol Docs - +

Welcome to OverProtocol 🌐

Break Barriers, Join the Cycle, Thrive Together

Blockchain thrives on a virtuous cycle: participation drives growth, and growth fosters participation. OverProtocol removes the technical and economic barriers, creating a network where you can actively participate and contribute. Be part of the network’s growth cycle—your journey starts here.


How Does OverProtocol Work?

🌍 Inclusive Ecosystem

Whether you’re a developer, a student, or a business owner, OverProtocol makes blockchain participation simple and accessible for everyone. No need for expensive infrastructure—just everyday resources.

⚙️ Advanced Features

  • Lightweight Nodes: Operate seamlessly on low-spec devices with OverProtocol’s Ethanos-based data hierarchy, lowering technical barriers and keeping the network accessible for all.
  • Inclusive Consensus: OverProtocol’s Proof-of-Stake (PoS) mechanism is designed to welcome all participants, not just those with large stakes.
  • Decentralized Governance: Empower the community through a transparent DAO Treasury, where participants vote on resource allocation and shape the network’s future.
  • Seamless Compatibility: Easily migrate Ethereum-based apps with full EVM compatibility, supporting standards like ERC-20 and ERC-721.

🔗 Grow with the Network

Every new participant strengthens the OverProtocol ecosystem. Join a network that grows with you, unlocking endless opportunities for collaboration and innovation.


Start Your Journey Today

Still exploring? Here’s where you can dive deeper:

Let’s build the future of blockchain—together! 🚀

- + \ No newline at end of file diff --git a/learn/consensus/overview.html b/learn/consensus/overview.html index f36cf46..56544c7 100644 --- a/learn/consensus/overview.html +++ b/learn/consensus/overview.html @@ -4,13 +4,13 @@ Over PoS Overview | OverProtocol Docs - +

Over PoS Overview

OverProtocol utilizes a Proof of Stake (PoS) consensus mechanism, powered by the Ethereum's Gasper Protocol to achieve a blockchain network that is scalable, inclusive, and sustainable. This approach aligns with OverProtocol’s core design principles, ensuring accessibility for all participants while maintaining network security and efficiency.


Inclusivity Through Accessibility

By replacing energy-intensive mining with staking, OverProtocol lowers the barriers to participation. Small-scale participants can join the network as validators without requiring expensive hardware or large capital investments.

  • Low Entry Threshold: Over PoS allows validators to contribute with minimal staking requirements.
  • Dynamic Adaptability: Over PoS’s staking model adjusts seamlessly to a growing validator base, supporting sustainable network expansion.

Enhanced Security and Robust Decentralization

Over PoS strengthens network security while fostering decentralization, ensuring that all participants, regardless of their scale, contribute to a resilient and trustworthy blockchain ecosystem.

  • Validator Incentive Alignment: Over PoS economically motivates validators to act in the network’s best interest, ensuring consistent and secure operations.
  • Robust Decentralization: By encouraging a diverse validator base, Over PoS minimizes reliance on centralized entities and supports true decentralization.

Sustainability and Environmental Responsibility

PoS eliminates the excessive energy consumption associated with Proof of Work (PoW), aligning OverProtocol with sustainable blockchain practices.

  • Eco-Friendly Operations: Over PoS reduces the environmental footprint by requiring only minimal computational power.
  • Optimized Resource Use: Over PoS ensures that network operations remain lightweight and efficient, even with increasing activity.

Looking Forward

As OverProtocol evolves, Over PoS will continue to play a central role in supporting its mission to build a blockchain that is not only scalable but also inclusive and environmentally responsible.

Learn More About OverProtocol’s Validator Requirements →

- + \ No newline at end of file diff --git a/learn/consensus/requirements.html b/learn/consensus/requirements.html index 2dbc057..908af43 100644 --- a/learn/consensus/requirements.html +++ b/learn/consensus/requirements.html @@ -4,13 +4,13 @@ Validator Requirements | OverProtocol Docs - +

Validator Requirements

Becoming a validator in OverProtocol requires participants to meet certain criteria to ensure the network’s security, efficiency, and decentralization. These requirements are designed to encourage responsible participation while maintaining OverProtocol's accessibility and sustainability.


Key Requirements

1. Minimum Stake: 256 OVER

To join as a validator, participants must stake at least 256 OVER, OverProtocol’s native token. This minimum staking requirement serves as a security deposit to deter malicious behavior and ensure validators have a vested interest in the network's stability and success.

  • Security Deposit: The staked OVER acts as collateral, which can be penalized if a validator acts maliciously or fails to meet network obligations. Misconduct, such as double-signing or prolonged inactivity, can lead to penalties, reducing the staked amount.
  • Economic Alignment: By staking OVER, validators demonstrate their commitment to the network, ensuring they are financially incentivized to act honestly and maintain network integrity.

Learn more about penalties and rewards →


2. Maintain High Uptime

Validators are required to maintain maximum possible uptime to support network reliability. Consistent availability ensures seamless block validation and enhances overall network performance.

  • Why It Matters: High uptime minimizes disruptions and secures validators’ rewards.
  • Best Practices: Ensure robust hardware and reliable network connectivity to avoid penalties for downtime.

Looking Ahead

OverProtocol’s validator requirements are designed to balance accessibility with responsibility, ensuring that participants of all scales can contribute to a secure and efficient network. By meeting these criteria, validators not only help maintain the integrity of OverProtocol but also earn rewards for their active participation.

- + \ No newline at end of file diff --git a/learn/consensus/rewards-and-penalties.html b/learn/consensus/rewards-and-penalties.html index cf3a494..f9d4df1 100644 --- a/learn/consensus/rewards-and-penalties.html +++ b/learn/consensus/rewards-and-penalties.html @@ -4,13 +4,13 @@ Rewards and Penalties | OverProtocol Docs - +

Rewards and Penalties

OverProtocol’s Proof of Stake (PoS) consensus mechanism employs a well-defined system of rewards and penalties to encourage responsible participation and ensure the network remains secure, efficient, and decentralized.


Rewards: Encouraging Active Participation

Validators in OverProtocol earn rewards for contributing to the network's security and efficiency. Rewards are distributed at the end of each epoch based on a predefined weighting system that incentivizes key contributions to the consensus process.


Reward Components and Weights

Rewards are divided into the following components, each with a specific weight that reflects its importance in the consensus process:

  1. Timely Source Weight (18.75%)

    • Rewards validators for successfully voting to finalize source checkpoints.
    • Source Voting ensures that a blockchain state is finalized, making it immutable and secure.
    • This component strengthens the network by locking in key states, reducing the risk of forks.
  2. Timely Target Weight (37.5%)

    • The largest share of rewards, given for timely and accurate voting to justify target checkpoints.
    • Target Voting prepares a checkpoint for finalization in the next round, ensuring consistency and alignment among validators.
    • By prioritizing justified states, the network achieves seamless progression toward finalization.
  3. Timely Head Weight (18.75%)

    • Rewards validators for identifying and confirming the correct head of the blockchain.
    • This encourages validators to maintain up-to-date and accurate views of the blockchain’s state.
    • Ensures continuous block proposal and validation accuracy.
  4. Proposer Weight (12.5%)

    • Rewards are allocated to block proposers for generating valid and timely blocks.
    • This recognizes the critical role of proposers in driving transaction throughput and network efficiency.
  5. Light Layer Weight (12.5%)

    • OverProtocol introduces the concept of a Light Layer, a flexible framework that supports innovative participation across the network.
    • The Light Layer allows both validators and non-validators to contribute to the network's growth in diverse ways.
    • Designed to accommodate new roles in the future, the Light Layer ensures scalability and adaptability, encouraging inclusive and innovative participation.

Summary of Reward Weights

Component     | Weight (%) | Purpose
Timely Source | 18.75      | Finalize checkpoints, locking states as immutable.
Timely Target | 37.5       | Justify checkpoints for future finalization.
Timely Head   | 18.75      | Confirm the blockchain's correct head state.
Proposer      | 12.5       | Generate and propose valid blocks.
Light Layer   | 12.5       | Foster diverse and innovative network participation.

By aligning rewards with these components, OverProtocol ensures validators focus on maintaining network security, consistency, and accessibility. Validators and participants leveraging the Light Layer framework maximize their contributions while fostering an inclusive and adaptable ecosystem.


Penalties: Ensuring Network Integrity

OverProtocol enforces a robust penalty system throughout the validator lifecycle to deter malicious or negligent behavior and maintain the network’s security and reliability. Validators are held accountable during their active participation and through the exit process to ensure seamless network operations.


Types of Penalties

  1. Inactivity Penalty

    • Validators accumulate an Inactivity Score if they fail to perform their duties, such as missing votes or attestations.
    • Once the score exceeds a certain threshold, penalties are applied incrementally to the validator's staked funds.
    • Purpose: To encourage active participation and ensure consistent network performance.
  2. Slashing

    • Immediate 10% stake reduction occurs in any of the following scenarios:

      1. Making two differing attestations for the same target checkpoint.
      2. Submitting an attestation whose source and target votes surround those in another attestation from the same validator.
      3. Proposing more than one distinct block at the same height or attesting to different head blocks with the same source and target checkpoints.
    • Purpose: To punish malicious actions that compromise network security or consensus integrity.


Validator Lifecycle and Penalty Application

A validator in OverProtocol goes through the following stages during its lifecycle. Penalties are applied during specific phases to ensure validators uphold their responsibilities:

  1. Pending Activation

    • After submitting a request to activate, the validator enters the pending state.
    • The validator must wait until the next activation window to begin duties. No penalties apply in this stage.
  2. Active

    • Once activated, the validator participates in attestation, block proposals, and other duties.
    • Penalties Applied:
      • Inactivity Penalty: For failing to perform assigned duties (e.g., missing attestations).
      • Slashing: For malicious actions, such as double-signing or surrounding votes.
  3. Exiting

    • Validators that voluntarily request exit or are forced to exit due to penalties enter the exiting state.
    • This transition is not immediate; the validator continues performing duties until the exit becomes effective in the next epoch.
    • Penalties Apply: Validators in the exiting state are still subject to penalties, including Inactivity Penalties and Slashing, if they fail to fulfill their remaining responsibilities during this period.
  4. Exited

    • Once the exit is finalized, the validator no longer participates in consensus and is free to withdraw their remaining stake.
    • No penalties apply after the validator has fully exited the system.

Special Cases

Bailout

  • Validators whose penalties exceed 2% of their original stake are automatically exited from the network.
  • Purpose: To quickly remove problematic validators and prevent excessive losses to their stake while maintaining network health.

Inactivity Leak

  • Triggered when the chain fails to finalize for 4 consecutive epochs, indicating that more than one-third of the validators are experiencing issues.
  • A forced recovery protocol is initiated to restore the liveness of the validator set:
    • Validators with high Inactivity Scores are automatically exited.
    • This mechanism ensures the remaining validators can stabilize the network and resume normal operations.
  • Purpose: To address severe liveness issues and protect the network from prolonged downtime.

Why Penalties Matter

OverProtocol’s penalty system ensures:

  • Network Security: Malicious actions are swiftly penalized, reducing threats to consensus integrity.
  • Validator Accountability: Validators are incentivized to perform their duties diligently.
  • Stability and Liveness: Special cases like Bailout and Inactivity Leak prevent prolonged disruptions, ensuring a robust and reliable network.

Balancing Rewards and Penalties

OverProtocol’s system ensures a balanced approach:

  1. Fair Incentives: Validators are rewarded generously for honest and consistent behavior.
  2. Proportional Penalties: Misbehavior is penalized in proportion to its impact on the network.
  3. Transparency: All rewards and penalties are governed by clear, pre-defined rules to foster trust and predictability.
- + \ No newline at end of file diff --git a/learn/consensus/validator-cycle.html b/learn/consensus/validator-cycle.html index fae44ad..d5bad66 100644 --- a/learn/consensus/validator-cycle.html +++ b/learn/consensus/validator-cycle.html @@ -4,13 +4,13 @@ Validator Cycle | OverProtocol Docs - +

Validator Cycle

Validators in OverProtocol progress through a well-defined lifecycle, ensuring the network operates securely and efficiently. Each stage of the cycle plays a critical role in maintaining network stability, accountability, and scalability.


Stages of the Validator Cycle

1. Pending Activation

  • Description: Validators who have submitted their activation request and staked the required amount of OVER enter the pending state.
  • Key Details:
    • Validators wait for the next activation epoch to join the active set.
    • No duties or penalties are applied during this stage.
  • Purpose: Ensures orderly onboarding of new validators.

2. Active

  • Description: Once activated, validators begin participating in the network by performing duties such as attestation and block proposals.
  • Key Details:
    • Validators are rewarded based on their performance in attestation and block validation.
    • Penalties, such as inactivity penalties or slashing, apply for failing to meet responsibilities or for malicious actions.
  • Purpose: Maintains network security and liveness.

3. Exiting

  • Description: Validators who request to exit or are forced to exit due to penalties enter the exiting state.
  • Key Details:
    • Validators continue their duties until the exit becomes effective in the next epoch.
    • Penalties still apply during this period, encouraging responsible behavior until the end.
    • The Bailout mechanism protects validators with excessive penalties by expediting their exit.
  • Purpose: Provides a controlled and accountable transition out of the active set.

4. Exited

  • Description: Once the exit is finalized, validators leave the active set and can no longer participate in network operations.
  • Key Details:
    • Exited validators retain their stake but are no longer subject to penalties or rewards.
    • They must wait for the withdrawal epoch to reclaim their remaining funds.
  • Purpose: Marks the validator as inactive while preserving their remaining assets.

5. Withdrawable

  • Description: Validators in the exited state become withdrawable once the withdrawal epoch is reached.
  • Key Details:
    • The staked funds (minus any penalties) are available for withdrawal.
    • Validators no longer have any ties or responsibilities to the network.
  • Purpose: Completes the validator’s lifecycle by returning their remaining stake.

6. Slashed

  • Description: Validators who commit severe misbehavior, such as double-signing or submitting conflicting attestations, are slashed and removed from the active set.
  • Key Details:
    • A 10% immediate reduction in staked funds is applied.
    • Slashed validators are automatically exited and must wait to withdraw their remaining stake after the long withdrawal epoch (8192 epochs).
    • This state is permanent, and slashed validators cannot rejoin the network.
  • Purpose: Protects the network from malicious actors by enforcing severe penalties.

Validator State Transitions

State              | Next State   | Condition for Transition
Pending Activation | Active       | Validator is included in the next activation epoch.
Active             | Exiting      | Validator requests exit or is forced to exit due to penalties.
Exiting            | Exited       | Exit becomes effective in the next epoch.
Exited             | Withdrawable | Withdrawal epoch is reached.
Withdrawable       | N/A          | Validator withdraws remaining funds.
Active/Exiting     | Slashed      | Validator engages in severe misbehavior (e.g., double-signing).
- + \ No newline at end of file diff --git a/learn/design-principles.html b/learn/design-principles.html index 20ed274..40b2cc2 100644 --- a/learn/design-principles.html +++ b/learn/design-principles.html @@ -4,7 +4,7 @@ Design Principles of OverProtocol | OverProtocol Docs - + @@ -15,7 +15,7 @@ OverProtocol focuses on efficient architecture to maintain stability and prevent rising costs.

  • Designed to grow without becoming resource-intensive.
  • Ensures a thriving ecosystem for years to come.

🛠 Simplify Network Access

"Blockchain should be easy to use."
Intuitive interfaces and streamlined processes make OverProtocol accessible for both developers and users.

  • User-friendly design for seamless interactions.
  • Simplified tools for developers to build applications.

🔍 Foster Clarity and Trust

"Transparency builds trust."
OverProtocol prioritizes integrity and ensures that all participants understand how the network operates.

  • Transparent processes encourage engagement.
  • Clear communication fosters trust and collaboration.

Why These Principles Matter

These design principles are more than just guidelines—they shape OverProtocol into a network for real people:

  • A small business can integrate blockchain without high costs.
  • A student with a basic laptop can run a node.
  • Developers can build applications without expensive infrastructure.

By embracing these principles, OverProtocol achieves its mission of making blockchain truly inclusive, accessible, and sustainable.


What's Next?

Explore how OverProtocol's core mechanisms bring these principles to life by implementing innovative technologies like lightweight nodes and advanced consensus models.

- + \ No newline at end of file diff --git a/learn/layered-architecture/ethanos.html b/learn/layered-architecture/ethanos.html index e49431a..8b484c0 100644 --- a/learn/layered-architecture/ethanos.html +++ b/learn/layered-architecture/ethanos.html @@ -4,13 +4,13 @@ Ethanos | OverProtocol Docs - +

Ethanos

info

The Ethanos Algorithm has been designed and implemented to optimize blockchain data management, ensuring sustainability and scalability. However, Ethanos introduces additional overhead when the state size is relatively small. To maximize efficiency, Ethanos will remain disabled until the state size within OverProtocol grows to a sufficient level.

In the meantime, the OverProtocol team is diligently working on further enhancing Ethanos alongside other core mechanisms to lighten the protocol. These ongoing efforts focus on improving contract storage efficiency and refining the algorithm's performance, ensuring it evolves seamlessly to meet the network's growing demands.

Ethanos is an effective mechanism for managing blockchain's state and history. It periodically resets the state, expiring old data and referencing previous cycles to manage a bounded state size. This approach lowers entry barriers, promotes decentralization, and fosters an inclusive blockchain system.

What is the Problem?

It is essential to address the ever-increasing data size issue in blockchain systems. The account-based blockchain system, which records the global state of accounts and balances separately from transactions, offers a simpler and more intuitive framework for developing smart contracts. These tiny Turing-complete programs execute specific tasks using account states when triggered by transactions, and their integrity is verified by every node in the blockchain.

Typically, as time progresses, the number of accounts and transactions in any blockchain grows, leading to an infinite increase in state and history data. This growth in data size results in higher memory usage, more disk operations, and significant performance burdens. Consequently, this also creates substantial barriers for new participants attempting to synchronize and engage with the blockchain system.

How Ethanos Works

Differentiating States

Ethanos segments the state into three tiers: active, staged, and inactive. Both active and staged states are maintained within the Over Layer, while inactive states are transferred to the Nether Layer.

Ethanos manages its operations through what are called sweep epochs, which are defined time cycles in the system, each composed of several blocks. At the start of each sweep epoch, Ethanos constructs a new empty state trie for the active states. It also references the entire set of the states from the previous epoch's last block, known as the "superblock", now designated as staged states for the current epoch. Both of these states are housed in the Over Layer.

During each transaction within an epoch, the current state trie is updated as follows: If a transaction involves a specific account, the system first checks the current epoch’s state trie. If the account is not found there, it then searches the previous epoch’s state trie. If located, the account details are seamlessly transferred to the current state trie. If the account is absent from both state tries, it indicates that the account has either been inactive in past epochs or is a completely new account. In both scenarios, Ethanos treats these as new accounts.

If an account from the last epoch’s trie is not involved in any transaction during the current epoch, it is then classified as inactive in the subsequent epoch and considered expired. These accounts enter a dormant status and are categorized as dormant accounts within the Nether Layer. However, being expired does not mean the account is permanently lost; a dormant account can be reactivated through restoration from the Nether Layer.

From the account's perspective, each interaction or read operation restores two life points. As each sweep epoch passes, all accounts lose one life point. If an account goes through two consecutive sweep epochs without any life point recovery, it can no longer reside in the Over Layer.

Distinguishing History

Ethanos employs the weak subjectivity point to purge data corresponding to the block body. This approach is straightforward but requires a mechanism to ensure the availability of the purged history. This aspect is still under research and development, with plans to leverage a light layer to facilitate this process.

Restoration Process

To restore a dormant account, proof is required of the last epoch in which the account was active. This is crucial to prevent attempts to recover an account to a state before assets were transferred out in subsequent epochs. The trie structure in which the state is stored can efficiently prove whether a specific account was present within a state, as long as there is a valid root value, using a Merkle proof.

Restoration involves providing both an existence proof for the state of the last active epoch's superblock and non-existence proofs for the epochs during which the account was inactive. Combining these proofs allows for the restoration of the account's state to its condition in the current epoch. This process ensures that restoration is both secure and accurate, preventing unauthorized manipulations of account histories.

Dealing with Crumb Accounts; Restored Epoch

As mentioned, Ethanos does not differentiate between expired accounts and accounts that never existed. In the current epoch, if an empty account receives funds and its value is initialized, the holder of the account's private key can begin to send transactions and engage in activities using this account. An account that was previously expired but has been reinitialized and put back into use is referred to as a "crumb account." The existence of crumb accounts adds complexity to the restoration process.

While we could have eliminated crumb accounts by requiring restoration to go back to the genesis epoch before activating any account, we chose not to adopt this approach for UX reasons. One significant issue with crumb accounts is that they undermine the purpose of the nonce, which exists to record the number of transactions an account has made, thereby preventing any transaction from being executed multiple times.

If nonces are reset to zero every time an account is initialized in each epoch, it could allow for the reuse of previously utilized nonces in transactions involving crumb accounts. This situation would make the network vulnerable to specific types of replay attacks. To mitigate such risks while maintaining the efficiency of the restoration process and the simplicity of nonce values, we decided to add a field called "restored epoch" to each account.

The "restored epoch" value for an account created in a specific epoch is set to max(0, current epoch number - 1). This signifies that the account did not exist in the state of one epoch prior, relative to the current epoch. The "restored epoch" value remains constant as long as the account remains active. For example, if an account is initialized with a "restored epoch" value of 2 in Epoch 3 and continues to be active until Epoch 9, the "restored epoch" value would still be 2. This constant value helps in tracking the initial restoration point of an account throughout its active period, providing a clear reference for any processes or checks that rely on the historical status of the account.

Restoration Process with Restored Epoch

The restoration process unfolds as follows: For the account to be restored, verification starts with a non-existence proof for the state of two epochs prior to the current one and proceeds in sequence until the last active state where an existence proof is available. A key consideration here is that the account being activated could be a crumb account. For such crumb accounts, there is no need to verify beyond the restored epoch value. Instead, proofs should be sequentially submitted up to the restored epoch minus one.

The restoration completes with the merging of the results after proofs are verified. For balances, this involves performing a sum operation, and similarly for nonces. The Restored Epoch value is determined by taking the minimum value, which indicates that the account's balance and nonce have been verified up to that particular epoch. For instance, accounts with a restored epoch value of 0 at any point signify that their balance and nonce have been consistently preserved from the genesis to the present.

Restoration can occur in parts. For example, an account that became active in epoch 6 does not necessarily need to be restored back to epoch 0. It can continue to operate with a restored epoch value of 5, thereby simplifying the restoration process and reducing unnecessary computational effort.

Specification

Sweep

A configuration known as SWEEP_EPOCH has been introduced to determine the frequency at which inactive accounts are expired. SWEEP_EPOCH defines the interval for performing sweeps, with sweeps occurring every epoch as designated by this setting.

The state trie captures the activities of each account in every epoch. At each superblock, the final block of the epoch, the current state trie is frozen and a new empty state trie is created. This frozen trie is referred to as a checkpoint trie.

In the following epochs, whenever an account's state needs updating and the account is not found in the current trie, a process is initiated to retrieve the account information from the previous checkpoint trie and integrate it into the current trie. If the account is already present in the current trie, the update is performed immediately.

Restored Epoch

When an account expires, its state values are reset to empty.

A restored_epoch field is added to each account to record the epoch during which it was last restored. This field is crucial for determining if an account has undergone restoration previously. The initial value for restored_epoch is set to max(0, current epoch number - 1).

The restored_epoch serves a function similar to that of the nonce during the restoration process by making it possible to selectively determine the point of restoration. This significantly reduces the complexity of verification as it eliminates the need to validate the state starting from the genesis block.

Furthermore, restored_epoch plays a vital role in contract creation. It helps ensure the uniqueness of contract addresses by preventing the regeneration of an address that has been previously used. This feature maintains the integrity and uniqueness of contract deployments on the blockchain.

Restoration Data

The format for restoration data is crucial for facilitating the recovery of accounts. The required data fields for initiating a recovery transaction include:

[chain_id, expire_epoch, target, target_epoch, fee, fee_recipient, signature_y_parity, signature_r, signature_s]

  • chain_id: Identifies the specific blockchain network where the recovery transaction will occur.
  • expire_epoch: Specifies the epoch limit for which this recovery data is valid.
  • target: The account address to be recovered.
  • target_epoch: The earliest epoch from which the account's data needs to be restored.
  • fee: The fee to be paid for the recovery to fee_recipient.
  • fee_recipient: The address designated to receive the fee for facilitating the recovery.
  • signature_y_parity, signature_r, signature_s: Components of the signature that authenticate the recovery request. This signature must be generated by the account responsible for paying the fee.

This data structure incentivizes data providers to provide the necessary historical data for recovery while also allowing an alternative account to cover the recovery fee. This mechanism ensures that recovery transactions are both secure and financially supported.

Restoration Transaction

For efficient recovery mechanisms within blockchain systems, it is essential to integrate recovery data within transactional frameworks. To facilitate this, we propose a new transaction type under EIP-2718 specifically designed for account restoration.

This new type extends the existing structure of EIP-1559 transactions by adding a restore_data field. The complete field structure is as follows:

[chain_id, nonce, max_priority_fee_per_gas, max_fee_per_gas, gas_limit, destination, amount, data, access_list, restore_data, signature_y_parity, signature_r, signature_s]

Restoration Process (Pseudocode)

The restoration process consists of the following steps:

  1. Collect the account's state proofs for each required epoch
  2. Construct and send a restoration transaction with the collected proofs
  3. Upon receiving the restoration transaction:
    • For each epoch's proof:
      1. Verify the proof
      2. Get the state of the epoch.
      3. Apply the state
def restore_account(account, proofs):
    """Merge a dormant account's historical state back into the current epoch.

    Walks the supplied proofs backward one epoch at a time, starting from the
    account's current restored_epoch. An existence (non-void) proof contributes
    the proven balance and nonce, merged by summation as described above; a
    void proof attests the account was absent for that epoch, stepping the
    verification back one epoch.

    Raises:
        Exception: if any proof fails verification against the checkpoint root.
    """
    restored_epoch = account.restored_epoch

    for proof in proofs:
        # Each proof is verified against the state root of the checkpoint
        # (superblock) of the epoch currently being examined.
        root_hash = get_last_checkpoint_block_by_epoch(restored_epoch).state_root

        if is_accurate_merkle_proof(root_hash, account, proof):  # non-void proof
            # Fix: the original called extract_account(merkle_proof), but
            # `merkle_proof` is undefined — the proof being processed is `proof`.
            restored_account = extract_account(proof)
            # Balances and nonces merge by summation; restored_epoch takes the
            # earlier value carried by the proven historical state.
            account.restored_epoch = restored_account.restored_epoch
            account.balance += restored_account.balance
            account.nonce += restored_account.nonce
            # NOTE(review): the loop-local `restored_epoch` is not advanced
            # here, so any subsequent proof would target the same epoch —
            # presumably an existence proof is the last proof in a batch
            # (restoration "can occur in parts"); confirm against the client
            # implementation.
        elif is_accurate_void_proof(root_hash, account, proof):  # void proof
            # Account proven absent for this epoch; step back one epoch.
            restored_epoch -= 1
        else:  # invalid proof
            raise Exception("Inaccurate proof")

Restoration Cost Breakdown

The recovery process entails several operations such as reading or verifying data and performing decoding tasks. Each task contributes to the total cost, which is determined by the number of epochs involved in the recovery and the amount of data processed.

Here is a breakdown of the costs associated with different operations during the recovery process:

Operation          | Gas
read epochCoverage | 20
read nonce         | 20
read balance       | 20
Keccak256          | 100
Ecrecover          | 3000
CallValueTransfer  | 9000
CallNewAccount     | 25000
read header        | 800 per epoch
RLP decoding       | 1 per word
verifyProof        | 100 per epoch, 2 per word

Cost Per Epoch

For each epoch involved in the recovery process, the incurred costs include a verifyProof operation and a read header operation, each contributing to an approximate total of 900 gas per epoch. If the proof is an existence proof, an additional RLP decoding operation is also necessary.

Variable costs are determined by the length of the input data, involving one RLP decoding and one verifyProof operation, both of which scale with the size of the input. These contribute an additional cost of 3 gas per word.

The formula for calculating the total recovery cost is structured as follows:

$$\text{Total Restoration Cost} = 37000 + 900 \times \text{Epochs} + 3 \times \text{Words} + \text{Memory Cost}$$

Here, 37000 gas covers the initial operations such as account creation and transaction verification, 900 gas for each epoch reflects the fixed costs per epoch, 3 gas per word accounts for variable decoding and proof verification costs, and additional memory costs.

- + \ No newline at end of file diff --git a/learn/layered-architecture/overview.html b/learn/layered-architecture/overview.html index f2dd476..39a1a6f 100644 --- a/learn/layered-architecture/overview.html +++ b/learn/layered-architecture/overview.html @@ -4,13 +4,13 @@ Layered Architecture | OverProtocol Docs - +

Layered Architecture

OverProtocol introduces an innovative layered approach to blockchain data, designed to optimize both accessibility and sustainability. By segmenting data into essential and non-essential components, the system ensures that blockchain participation remains lightweight and scalable.


🌟 Key Concepts

data_hierarchy_diagram

  • Essential States: Accounts that are frequently accessed or have been recently used.
  • Non-essential States: Accounts that are rarely or never accessed.
  • Essential History: Data from newer blocks, crucial for immediate blockchain operations.
  • Non-essential History: Data from preceding blocks, used less frequently but important for overall network integrity.

⚙️ Layers of Blockchain Data

Over Layer

The Over Layer contains:

  • Active States: Vital for ongoing transactions and operations.
  • Recent History: Recent block data for verification.
  • Block Header Information: Metadata essential for network synchronization.

Nether Layer

The Nether Layer stores:

  • Inactive States: Accounts with low activity.
  • Older History: Archived block data for historical validation.

This separation ensures that only the most essential data is actively maintained, reducing resource demands on participating nodes.


🚀 How Ethanos Powers the Layers

The Ethanos Algorithm underpins this layered structure by:

  1. Defining clear criteria for categorizing data into Over and Nether Layers.
  2. Providing mechanisms to restore inactive or historical data back into the Over Layer when needed.

By enforcing a bounded Over Layer size, Ethanos ensures the blockchain remains:

  • Sustainable: Reducing unnecessary resource consumption.
  • Scalable: Allowing more participants to join with minimal technical overhead.

Why It Matters

This layered architecture represents a breakthrough in blockchain design. By prioritizing active data while archiving less essential components, OverProtocol achieves:

  • Efficiency: Lower hardware and storage requirements.
  • Accessibility: Inclusive participation, even for devices with limited capacity.
  • Scalability: Sustainable growth as the network expands.

For a deep dive into the Ethanos Algorithm, explore the next section: Ethanos Algorithm →

- + \ No newline at end of file diff --git a/learn/tokenomics/distribution.html b/learn/tokenomics/distribution.html index 05083c1..e8eca41 100644 --- a/learn/tokenomics/distribution.html +++ b/learn/tokenomics/distribution.html @@ -3,14 +3,14 @@ -Token Distribution | OverProtocol Docs - +Token Distribution | OverProtocol Docs +
-

Token Distribution

OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued.


Token Allocation

1. Staking Rewards

Staking rewards are structured to ensure the network’s stability and security. These rewards include a minimum guaranteed allocation of 200M OVER and an adjustable allocation of 100M OVER. Each year, 20M OVER is distributed as a fixed reward to participants. In addition, the network dynamically adjusts extra rewards through a feedback mechanism. Further details are described below.

2. DAO Treasury

The DAO Treasury serves as a funding resource for ecosystem development, distributed linearly over the first 10 years. The community decides how to allocate these funds through voting, directing resources to initiatives such as new dApp development, network improvements, and user education campaigns. Additionally, transaction fees and other network-generated revenue are continuously added to the DAO Treasury, providing a steady stream of resources to support ongoing ecosystem growth.

3. Over Community Access Program (OCAP)

Designed to boost engagement and adoption, this program supports activities like liquidity provision and airdrops. It encourages network participation from small-scale contributors and new users, helping to establish a strong initial user base for OverProtocol.

4. Development and Strategic Investments

Development and strategic investments are allocated to ensure the stable establishment and sustainable growth of the network during its early stages. These funds are distributed to the development team and early investors through a 2-year schedule, which includes a 6-month cliff and 18 months of linear vesting.

alloc_chart

Initial Supply

At the genesis of OverProtocol, a portion of the token supply will be available in circulation to support the network's stability and early operation.

Circulating Supply at Genesis

  1. OCAP (Core Contributors):

    • Entire allocation is circulating from the start.
    • These tokens represent contributors’ rewards for establishing the protocol and community and are immediately accessible to support network growth.
  2. OT (Investors) and OF (Foundation Operations):

    • Both categories are subject to a 2-year vesting schedule with a 6-month cliff.
    • However, 20% of their allocation is unlocked at genesis and used for validator staking to stabilize the network during its early stages.

Purpose of Initial Circulating Supply

  • Validator Staking: The unlocked tokens from OT and OF are deployed as staking resources to secure the blockchain and ensure robust consensus in the network’s early phase.
  • Ecosystem Stability: OCAP’s immediate liquidity facilitates the rapid establishment of key ecosystem participants, such as developers and early adopters.
  • Controlled Inflation: By carefully managing the initial supply, OverProtocol balances security and decentralization without overwhelming the token economy.

Staking Rewards

Minimum Guaranteed Rewards

OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch.

Adjustable Rewards

The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging from a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to this page for a comprehensive overview of the feedback mechanism.

After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers.

YearMinimum IssuanceMaximum Issuance
Year 1 ~ 1020M OVER30M OVER
Year 11 ~0 OVER10M OVER
- +

Token Distribution

OverProtocol has a supply schedule that releases 1 billion OVER over 10 years. Upon entering the 11th year, no additional tokens will be issued.


Token Allocation

1. Staking Rewards

Staking rewards are structured to ensure the network’s stability and security. These rewards include a minimum guaranteed allocation of 200M OVER and an adjustable allocation of 100M OVER. Each year, 20M OVER is distributed as a fixed reward to participants. In addition, the network dynamically adjusts extra rewards through a feedback mechanism. Further details are described below.

2. DAO Treasury

The DAO Treasury serves as a funding resource for ecosystem development, distributed linearly over the first 10 years. The community decides how to allocate these funds through voting, directing resources to initiatives such as new dApp development, network improvements, and user education campaigns. Additionally, transaction fees and other network-generated revenue are continuously added to the DAO Treasury, providing a steady stream of resources to support ongoing ecosystem growth.

3. Over Community Access Program (OCAP)

Designed to boost engagement and adoption, this program supports activities like liquidity provision and airdrops. It encourages network participation from small-scale contributors and new users, helping to establish a strong initial user base for OverProtocol.

4. Development and Strategic Investments

Development and strategic investments are allocated to ensure the stable establishment and sustainable growth of the network during its early stages. These funds are distributed to the development team and early investors through a 2-year schedule, which includes a 6-month cliff and 18 months of linear vesting.

alloc_chart

Initial Supply

At the genesis of OverProtocol, 190 million OVER tokens (19% of the total supply) will be circulating to support the network's stability and early operation.

Circulating Supply at Genesis

  1. OCAP

    • A total of 150 million OVER from OCAP is circulating at genesis.
    • These tokens are allocated for airdrops, liquidity provision, and user engagement to drive initial adoption and ecosystem participation.
  2. Over Foundation (OF) and Over Technologies (OT)

    • Both categories are subject to a 2-year vesting schedule with a 6-month cliff.
    • However, 20% of their allocation (20 million OVER each) is unlocked at genesis.
    • These unlocked tokens are deployed as validator staking resources, ensuring network security and stability in the early phases.

Purpose of Initial Circulating Supply

  • Validator Staking: Tokens from OF and OT are used to stabilize the blockchain by operating validators during the early phase.
  • Ecosystem Growth: OCAP provides resources for airdrops, liquidity incentives, and user rewards to encourage network adoption.
  • Controlled Inflation: The careful management of initial supply prevents excessive inflation while maintaining security and decentralization.

Initial Supply Summary

CategoryInitial Circulating Supply (OVER)Purpose
OCAP150MAirdrops, liquidity, and user incentives.
OF20MValidator staking.
OT20MValidator staking.
Total190M

Staking Rewards

Minimum Guaranteed Rewards

OverProtocol's token issuance plan allocates 200 million OVER as the minimum guaranteed reward, distributed in equal annual amounts to stakers. These rewards are proportionally distributed to validators based on their staking balance and required participation rate every epoch.

Adjustable Rewards

The actual release of staking rewards is adjusted by the protocol's predefined feedback mechanism. This mechanism acts as a reserve system to modulate the issuance levels: it reduces actual issuance when staking rewards are sufficient and increases issuance when additional rewards are needed. These adjustments are made based on the current staking rate, ranging from a minimum guaranteed reward of 20 million OVER per year to a maximum of 30 million OVER per year. Importantly, this reserve system is not managed by external entities but is governed by an internal feedback mechanism within the protocol. Refer to this page for a comprehensive overview of the feedback mechanism.

After the 10-year issuance period is over, any remaining reserve for the adjustable rewards will continue to be distributed to stakers.

YearMinimum IssuanceMaximum Issuance
Year 1 ~ 1020M OVER30M OVER
Year 11 ~0 OVER10M OVER
+ \ No newline at end of file diff --git a/learn/tokenomics/fee.html b/learn/tokenomics/fee.html index 62a4b6e..2d253ac 100644 --- a/learn/tokenomics/fee.html +++ b/learn/tokenomics/fee.html @@ -4,13 +4,13 @@ Fees | OverProtocol Docs - +

Fees

Currently Effective

Transaction fee

The Transaction Fee is a charge applied to each transaction within the OverProtocol's on-chain activity. This fee serves to reduce the total circulating supply of OVER tokens.

There are two primary objectives that we aim to achieve through the transaction fee design. Firstly, we seek to align user gas usage with an appropriate gas target, ensuring efficient network operation. Secondly, we aim to induce deflationary pressure through the application of base fees, thereby promoting a balanced economic environment within the network. For this purpose, we use the well-known EIP-1559 mechanism, and plan to adjust its design through several future updates.

In the protocol's initial stages, the base fee is collected and directed to the DAO Treasury, supporting various ecosystem development initiatives. As the protocol matures, the collection strategy evolves: instead of accruing in the treasury, the base fee is directly burned from each transaction. This nuanced approach balances the initial growth needs with a longer-term strategy of reducing token supply, thereby sustaining the protocol’s economic health.

Future Plans

Storage Rent Fee

The Storage rent fee is a charge on the contract accounts levied every certain period. It charges the use of storing data on the blockchain and reduces the total circulating supply of the OVER tokens.

Storage rent is a proposed economic mechanism designed to address the inefficiency of the 'pay once, use forever' model for state storage. In traditional blockchain models, once a user pays a fee to store data or execute a transaction, the associated data remains on the blockchain indefinitely, leading to an ever-growing state. This growth poses significant scalability and efficiency challenges.

With the storage rent fee, a blockchain storage user would consistently pay the rent to compensate for the ongoing use of the storage space. This fee incentivizes users to only retain necessary and actively used data, thereby managing the size and efficiency of the blockchain's state. That is, we can expect users to be more judicious about the data they store on the blockchain and to potentially clean up or remove data that is no longer needed.

Such a fee is levied on every Ethanos epoch, and the amount depends on both the quantity of data stored and the duration for which it was stored. The storage fee is collected and directed to the DAO Treasury, supporting various ecosystem development initiatives.

Why it was hard to collect Storage Rent Fees

Implementing a storage rent fee design in conventional blockchain architectures is challenging due to the immense size of the state. For the protocol to levy storage rent, it must navigate through all state accounts to determine the appropriate charges and identify the account holders responsible for these fees. Additionally, the protocol needs to decide on the timing for such traversals. This process, under typical blockchain designs, presents significant complexities and operational inefficiencies, making the implementation of a storage rent fee system difficult. Consequently, in many cases, once a fee is paid for storing new values in the state, the space is occupied indefinitely, bypassing ongoing storage costs.

OverProtocol's Approach

Through its innovative Ethanos technique, the OverProtocol effectively manages state size and introduces periodic intervals, streamlining the process of imposing storage rent fees. This approach allows for a straightforward determination of when and which contract accounts should be charged. The whitepaper, OverProtocol: Towards True Decentralization, elaborates on Ethanos, but here we present its core principles.

OverProtocol distinguishes between active and inactive accounts by resetting states at regular intervals, leveraging the consistency of activity across these cycles. Active accounts, identified by their continuous operation through cycles, are seamlessly transferred from the finalized state of the previous cycle to the current cycle's state. This transfer occurs at the first transactional interaction in the new cycle.

At this juncture, storage rent is levied on contract accounts, employing the efficiency of the Ethanos technique without necessitating external state traversal. This efficiency is further enhanced by the protocol's managed state size. Additionally, accounts in OverProtocol are equipped with metadata that assesses their storage size, creating a system where larger storage spaces incur higher rent. This design facilitates a fair and usage-based charging model.

The storage rent design is still under development, with the goal of establishing a user-friendly framework that simultaneously fosters a robust storage economy.

- + \ No newline at end of file diff --git a/learn/tokenomics/feedback.html b/learn/tokenomics/feedback.html index d18f0f7..7ac8c13 100644 --- a/learn/tokenomics/feedback.html +++ b/learn/tokenomics/feedback.html @@ -4,13 +4,13 @@ Deposit and Yield | OverProtocol Docs - +

Deposit and Yield

OverProtocol employs a proof-of-stake mechanism, requiring validators to deposit OVER tokens to participate in the network's block creation process. The yield, which is the reward given to validators, is the key to attracting deposits. Let's delve into the role and significance of this deposit and yield in a PoS blockchain, focusing on OverProtocol's system design.

Deposit

The deposit in OverProtocol serves as an economic safeguard, deterring actions that could undermine the blockchain's integrity. To gain significant control over the chain, an attacker would need to acquire more than two-thirds of the total deposited tokens. Additionally, owning a third or more of these tokens could disrupt the consensus algorithm's finalization process. Therefore, the network's economic security is strengthened by increasing both the number of participants and the volume of deposited tokens.

While a higher token deposit undoubtedly enhances the chain's safety, the utility of OVER tokens goes beyond security. These tokens play a crucial role in the network, including paying network fees, acting as intermediaries in exchanges, and supporting liquidity in the OverProtocol's economic activities. Consequently, an excessively high deposit requirement could impede the ecosystem's growth and dynamism by limiting the availability of tokens for these essential functions.

Therefore, it is crucial to maintain an optimal deposit amount for the OverProtocol. This balance ensures that the deposit is not so low as to compromise the network's security, nor so high as to diminish the monetary value and utility of the OVER token. Striking this balance is key to preserving both the integrity of the blockchain and the dynamic functionality of the token within the ecosystem.

Target Deposit Ratio

The target deposit ratio is defined as the desired proportion of staked OVER tokens relative to the total circulating supply, as illustrated in the figure below:

Target Deposit Ratio

Initially, the OverProtocol sets a high target deposit ratio. This approach is adopted because, at the outset, the mainnet token often has a low market price, undermining its ability to secure the chain. A higher target deposit ratio compensates for this low value, ensuring adequate security.

However, reducing the deposit ratio can also be beneficial. The tokens not staked are crucial for on-chain activities, enhancing the monetary value of the OVER token. Therefore, once the deposit level is sufficient to assure security, it is advantageous to lower the target deposit ratio.

As the chain matures and expands its utility, gaining monetary value, a lower target deposit ratio becomes adequate for maintaining security. This gradual adjustment in the target deposit ratio is strategic, aiming to strike a harmonious balance between encouraging broad participation and efficiently managing the network's operational demands.

Target Deposit Amount

The target deposit amount represents the target aggregate quantity of OVER tokens staked within the system, as depicted in the picture below:

Target Deposit

This figure is crucial as it indicates the volume of tokens committed to securing the Proof of Stake (PoS) system. Therefore, setting an appropriate target deposit amount is a key strategic decision.

In the early stages, to align with the high target deposit ratio, it is essential to rapidly increase the total amount of tokens staked in the system. As the system evolves and stabilizes, the necessity to accumulate large new deposits diminishes. Eventually, a saturation point is reached where the accumulated deposits are sufficient to ensure system security. Following this, similar to the rationale behind reducing the target deposit ratio, the target deposit amount is capped. This cap, the max target deposit, is implemented to enhance the monetary utility of the non-staked OVER tokens, thereby supporting broader economic activities within the ecosystem.

Yield

Yield is the interest rate that measures the proportion of newly issued staking rewards given to the stakers in comparison to their original stake. Validators, responsible for executing assigned duties, earn these rewards. When the reward is minimal, approaching zero, it discourages contributions to the network, while a higher reward increases participation demand. This concept of yield plays a crucial role in maintaining a specific deposit ratio.

Base Yield

OverProtocol establishes a predetermined base yield as the foundational yield rate for staking. This base yield, applied to the maximum target deposit amount, determines the total allocation of OVER tokens as rewards for each epoch, thus forming the reward pool. In this system, the reward pool amount remains constant for each epoch, regardless of the time period. Consequently, the actual yield for each deposit is influenced by the total amount of stake deposits.

In the early stages, when a smaller amount of deposits shares the fixed reward pool, the reward distributed per token deposit is high, leading to a higher yield. However, as the total deposit volume increases, the yield per deposit decreases proportionately. Ultimately, when the deposit amount reaches the maximum target, the yield stabilizes, aligning with the predetermined base yield level.

The Feedback Mechanism

The base yield and the reward pool establish the foundational yield for validators at each specific moment within our protocol. However, the yield is dynamically adjusted from this baseline to assist the system in reaching the target deposit amount. If the actual deposit is below the target, the yield is increased; conversely, if it exceeds the target, the yield is decreased. This crucial adjustment process is known as the feedback mechanism.

The Need for Feedback Mechanism

The feedback mechanism is essential in managing fluctuations in yield demand, particularly when the actual deposit deviates from our target deposit assumptions. For example, consider a scenario where the actual demand for yields is lower than expected, resulting in a deposit ratio below our projections, as depicted in the picture below. The converse situation is also plausible:

Discrepancy

Such discrepancies arise from changes in yield demand. A notable instance occurs when the US Federal Reserve raises interest rates, increasing the opportunity cost of staking in OverProtocol. Under these circumstances, the same capital might yield higher returns in alternative financial instruments. Therefore, even if OverProtocol maintains a consistent yield level, validators might prefer investing in other assets, leading to a decrease in deposits within OverProtocol. This situation illustrates how actual yield demand can diverge from our initial assumptions.

Irrespective of external factors, maintaining a specific deposit ratio is crucial for the security of OverProtocol. To ensure this, we have implemented a feedback mechanism that dynamically adjusts our yields. Modifying the yield either upwards or downwards serves as an incentive or disincentive for participation, thereby influencing deposit levels.

Feedback Mechanism

The feedback mechanism functions by assessing the discrepancy between the target and actual deposit ratios, subsequently fine-tuning the yields. The *Validator Pending Queue* comprises validators attempting to enter the system, while the *Validator Exit Queue* includes those trying to exit. The net demand difference, derived from the size disparity between these two queues, indicates the overall interest in becoming an OVER validator. By adding the original number of validators for the current epoch, we can calculate the number of validators for the next epoch as follows:

$$\text{Validator}_{\text{next}} = \text{Validator}_{\text{current}} + \text{Pending Queue}_{\text{size}} - \text{Exit Queue}_{\text{size}}$$

If the total deposit amount, inferred from $\text{Validator}_{\text{next}}$, exceeds the target deposit amount for that timeframe, the yield decreases, and vice versa. To safeguard against attacks and prevent excessively high or low yield levels, we propose implementing upper and lower yield bounds. Additionally, the speed of feedback adjustment is a critical aspect. Denoting the feedback adjustment as $f(t)$, its speed is defined as:

$$\frac{df(t)}{dt} = k \cdot \text{Maturity Factor} \cdot \text{Scaling Factor}$$

The *Maturity Factor* is introduced to steer the system towards a more stable and mature state. The system evaluates the current deposit level against the maximum target deposit amount, increasing the rate of feedback change proportionally to the discrepancy. In essence, the farther the system is from the maximum target, the faster the adjustments occur. The *Scaling Factor* enables the system to rapidly align with the target, where a larger discrepancy between the actual and target deposit amounts accelerates the feedback change rate.

To implement such a feedback mechanism, the system requires two key components: the *Adaptive Validator Churn Limit* and the *Issuance Reserve*. The explanation follows.

The Issuance Reserve

The primary function of the issuance reserve is to manage the allocation of additional rewards when needed. Specifically, the reserve is pre-allocated 100 million OVER and serves as a resource for the feedback system to augment rewards when necessary. If the system determines that more rewards should be distributed, the additional amount is provided from this reserve. However, no more than this pre-allocated amount will be issued. The management of the issuance reserve is handled at the protocol level, protecting it from risks such as account hacking and ensuring its use is strictly limited to the dynamic adjustment of yields.

- + \ No newline at end of file diff --git a/learn/tokenomics/overview.html b/learn/tokenomics/overview.html index c86c4a5..cca8445 100644 --- a/learn/tokenomics/overview.html +++ b/learn/tokenomics/overview.html @@ -4,13 +4,13 @@ Tokenomics Overview | OverProtocol Docs - +

Tokenomics Overview

The OVER token is the native cryptocurrency of OverProtocol, serving as the backbone of its economic ecosystem. Designed for sustainability, inclusivity, and growth, the tokenomics of OverProtocol ensures that the network remains secure, decentralized, and accessible.


Core Roles of OVER

The OVER token is designed to support multiple key functions within the OverProtocol ecosystem:

  1. Network Security

    • Validators stake OVER to participate in the Proof of Stake (PoS) consensus mechanism.
    • Staking aligns validators’ incentives with the network’s security and reliability.
  2. Transaction Fees

    • OVER is used to pay for transaction and smart contract execution fees.
    • Fees collected are partially allocated to the DAO Treasury, contributing to long-term sustainability.
  3. Incentives and Rewards

    • Validators and active participants are rewarded in OVER for their contributions to network security and scalability.
    • Reward mechanisms encourage consistent participation and maintain a decentralized validator set.

OverProtocol’s tokenomics is designed to create a self-sustaining, inclusive, and scalable blockchain economy. By aligning economic incentives with network goals, the OVER token plays a critical role in fostering innovation, security, and global adoption.


Why Tokenomics Matters

The tokenomics of OverProtocol ensures:

  • Alignment of Incentives: Economic rewards and penalties are designed to encourage responsible participation.
  • Sustainability: OVER's role in staking and transaction fees supports long-term network health.
  • Security: Staking mechanisms protect the network from malicious activity by ensuring validators have a vested interest in the system.

For more details on staking and validator rewards, refer to the Validator Requirements Section →.

- + \ No newline at end of file diff --git a/operators.html b/operators.html index 88f0ee0..e34b89c 100644 --- a/operators.html +++ b/operators.html @@ -4,13 +4,13 @@ Ready to Run Your Own Node? | OverProtocol Docs - +

Ready to Run Your Own Node?

You’ve arrived at the gateway to becoming a key player in the OverProtocol ecosystem. Running a node isn’t just a task—it’s a journey, and you’re the hero. Ahead lies a path filled with challenges and triumphs, and at the end, the reward of powering a decentralized future.


Ready to Get Started?

Take the first step toward becoming an OverProtocol node operator. Here's what to do next:


Your Mission: Power the Network

As a node operator, you’ll take on a vital role in ensuring OverProtocol’s success:

  • Validate Transactions 🔒: Help secure the network by processing and validating transactions.
  • Maintain Decentralization 🌍: Ensure OverProtocol remains resilient and accessible for all.
  • Strengthen the Ecosystem ⚡: Every node adds power, stability, and trust to the network.

Together, we can build a stronger, more inclusive blockchain. Your contribution makes all the difference.


Let's Get Started!

Your journey as an OverProtocol node operator begins here. Ready to power the network and shape the future of blockchain?

Together, we’re building something extraordinary. Let’s make it happen! 🌟

- + \ No newline at end of file diff --git a/operators/CLI-options/chronos.html b/operators/CLI-options/chronos.html index 5c0b3f2..11d185d 100644 --- a/operators/CLI-options/chronos.html +++ b/operators/CLI-options/chronos.html @@ -4,13 +4,13 @@ Command Line Options | OverProtocol Docs - +

Command Line Options

beacon-chain

The Chronos beacon-chain binary is the node client responsible for the consensus layer in the Over Protocol. The beacon-chain allows users to modify various settings according to their needs, and a description of these settings can be displayed using the help command as shown below.

beacon-chain help
NAME:
beacon-chain - this is a beacon chain implementation for Over Protocol
USAGE:
beacon-chain [options] command [command options] [arguments...]

AUTHOR:


GLOBAL OPTIONS:
db Defines commands for interacting with the Over Protocol Beacon Node database
generate-auth-secret creates a random, 32 byte hex string in a plaintext file to be used for authenticating JSON-RPC requests. If no --output-file flag is defined, the file will be created in the current working directory
help, h Shows a list of commands or help for one command

cmd OPTIONS:
--accept-terms-of-use Accepts Terms and Conditions (for non-interactive environments). (default: false)
--api-timeout value Specifies the timeout value for API requests in seconds. (default: 10s)
--bootstrap-node value [ --bootstrap-node value ] The address of bootstrap node. Beacon node will connect for peer discovery via DHT. Multiple nodes can be passed by using the flag multiple times but not comma-separated. You can also pass YAML files containing multiple nodes.
--chain-config-file value Path to a YAML file with chain config values.
--clear-db Prompt for clearing any previously stored data at the data directory. (default: false)
--config-file value Filepath to a yaml file with flag values.
--datadir value Data directory for the databases. (default: "/Users/syjn99/Library/Over")
--disable-monitoring Disables monitoring service. (default: false)
--e2e-config Enables the E2E testing config, only for use within end-to-end testing. (default: false)
--enable-tracing Enables request tracing. (default: false)
--force-clear-db Clears any previously stored data at the data directory. (default: false)
--grpc-max-msg-size value Integer to define max receive message call size (in bytes).
If serving a public gRPC server, set this to a more reasonable size to avoid
resource exhaustion from large messages.
Validators with as many as 10000 keys can be run with a max message size of less than
50Mb. The default here is set to a very high value for local users. (default: 2147483647)
--max-goroutines value Specifies the upper limit of goroutines running before a status check fails (default: 5000)
--minimal-config Uses minimal config with parameters as defined in the spec. (default: false)
--monitor-indices value [ --monitor-indices value ] List of validator indices to track performance
--monitoring-host value Host used for listening and responding metrics for prometheus. (default: "127.0.0.1")
--monitoring-port value Port used to listening and respond metrics for Prometheus. (default: 8080)
--no-discovery Enable only local network p2p and do not connect to cloud bootstrap nodes (default: false)
--p2p-quic-port value The QUIC port used by libp2p. (default: 13000)
--p2p-tcp-port value The TCP port used by libp2p. (default: 13000)
--p2p-udp-port value The UDP port used by the discovery service discv5. (default: 12000)
--relay-node value The address of relay node. The beacon node will connect to the relay node and advertise their address via the relay node to other peers
--restore-source-file value Filepath to the backed-up database file which will be used to restore the database
--restore-target-dir value Target directory of the restored database (default: "/Users/syjn99/Library/Over")
--rpc-max-page-size value Max number of items returned per page in RPC responses for paginated endpoints. (default: 0)
--trace-sample-fraction value Indicates what fraction of p2p messages are sampled for tracing. (default: 0.2)
--tracing-endpoint value Tracing endpoint defines where beacon chain traces are exposed to Jaeger. (default: "http://127.0.0.1:14268/api/traces")
--tracing-process-name process_name Name to apply to tracing tag process_name.
--verbosity value Logging verbosity. (trace, debug, info, warn, error, fatal, panic) (default: "info")

debug OPTIONS:
--blockprofilerate value Turns on block profiling with the given rate. (default: 0)
--cpuprofile value Writes CPU profile to the given file.
--memprofilerate value Turns on memory profiling with the given rate. (default: 524288)
--mutexprofilefraction value Turns on mutex profiling with the given rate. (default: 0)
--pprof Enables the pprof HTTP server. (default: false)
--pprofaddr value pprof HTTP server listening interface. (default: "127.0.0.1")
--pprofport value pprof HTTP server listening port. (default: 6060)
--trace value Writes execution trace to the given file.

beacon-chain OPTIONS:
--auth-token-file value Path to auth token file used for OverScape API. Set this flag to enable the close API for OverScape.
--backfill-batch-size value Number of blocks per backfill batch. A larger number will request more blocks at once from peers, but also consume more system memory to hold batches in memory during processing. This has a multiplicative effect with backfill-worker-count. (default: 32)
--backfill-oldest-slot value Specifies the oldest slot that backfill should download. If this value is greater than current_slot - MIN_EPOCHS_FOR_BLOCK_REQUESTS, it will be ignored with a warning log. (default: 0)
--backfill-worker-count value Number of concurrent backfill batch requests. A larger number will better utilize network resources, up to a system-dependent limit, but will also consume more system memory to hold batches in memory during processing. Multiply by backfill-batch-size and average block size (~2MB before deneb) to find the right number for your system. This has a multiplicative effect with backfill-batch-size. (default: 2)
--blob-batch-limit value The amount of blobs the local peer is bounded to request and respond to in a batch. (default: 64)
--blob-batch-limit-burst-factor value The factor by which blob batch limit may increase on burst. (default: 2)
--blob-path value Location for blob storage. Default location will be a 'blobs' directory next to the beacon db.
--blob-retention-epochs value, --extend-blob-retention-epoch value Override the default blob retention period (measured in epochs). The node will exit with an error at startup if the value is less than the default of 4096 epochs. (default: 4096)
--block-batch-limit value The amount of blocks the local peer is bounded to request and respond to in a batch. Maximum 128 (default: 64)
--block-batch-limit-burst-factor value The factor by which block batch limit may increase on burst. (default: 2)
--chain-id value Sets the chain id of the beacon chain (default: 0)
--checkpoint-block value Rather than syncing from genesis, you can start processing from a ssz-serialized BeaconState+Block. This flag allows you to specify a local file containing the checkpoint Block to load.
--checkpoint-state value Rather than syncing from genesis, you can start processing from a ssz-serialized BeaconState+Block. This flag allows you to specify a local file containing the checkpoint BeaconState to load.
--checkpoint-sync-url value URL of a synced beacon node to trust in obtaining checkpoint sync data. As an additional safety measure, it is strongly recommended to only use this option in conjunction with --weak-subjectivity-checkpoint flag
--contract-deployment-block value The eth1 block in which the deposit contract was deployed. (default: 11184524)
--deposit-contract value Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate. (default: "000000000000000000000000000000000beac017")
--disable-debug-rpc-endpoints Disables the debug Beacon API namespace. (default: false)
--enable-experimental-backfill Backfill is still experimental at this time. It will only be enabled if this flag is specified and the node was started using checkpoint sync. (default: false)
--enable-over-node-rpc-endpoints Enables the OverNode rpc service, containing utility endpoints for OverNode. auth-token-file flag must be set to enable close API. (default: false)
--engine-endpoint-timeout-seconds value Sets the execution engine timeout (seconds) for execution payload semantics (forkchoiceUpdated, newPayload) (default: 0)
--eth1-header-req-limit value Sets the maximum number of headers that a deposit log query can fetch. (default: 1000)
--execution-endpoint value An execution client http endpoint. Can contain auth header as well in the format (default: "http://localhost:8551")
--execution-headers value A comma separated list of key value pairs to pass as HTTP headers for all execution client calls. Example: --execution-headers=key1=value1,key2=value2
--gc-percent value The percentage of freshly allocated data to live data on which the gc will be run again. (default: 100)
--genesis-beacon-api-url value URL of a synced beacon node to trust for obtaining genesis state. As an additional safety measure, it is strongly recommended to only use this option in conjunction with --weak-subjectivity-checkpoint flag
--genesis-state value Load a genesis state from ssz file. Testnet genesis files can be found in the eth2-clients/eth2-testnets repository on github.
--historical-slasher-node Enables required flags for serving historical data to a slasher client. Results in additional storage usage (default: false)
--http-cors-domain value, --grpc-gateway-corsdomain value Comma separated list of domains from which to accept cross origin requests. (default: "http://localhost:3000, http://0.0.0.0:3000, http://127.0.0.1:3000, http://localhost:4200, http://127.0.0.1:4200, http://0.0.0.0:4200, http://localhost:7500, http://127.0.0.1:7500, http://0.0.0.0:7500")
--http-host value, --grpc-gateway-host value Host on which the HTTP server runs on. (default: "127.0.0.1")
--http-modules prysm,eth Comma-separated list of API module names. Possible values: prysm,eth. (default: "prysm,eth")
--http-port value, --grpc-gateway-port value Port on which the HTTP server runs on. (default: 3500)
--interop-eth1data-votes Enable mocking of eth1 data votes for proposers to package into blocks (default: false)
--jwt-id value JWT claims id. Could be used to identify the client
--jwt-secret value REQUIRED if connecting to an execution node via HTTP. Provides a path to a file containing a hex-encoded string representing a 32 byte secret used for authentication with an execution node via HTTP. If this is not set, all requests to execution nodes via HTTP for consensus-related calls will fail, which will prevent your validators from performing their duties. This is not required if using an IPC connection.
--local-block-value-boost value A percentage boost for local block construction as a Uint64. This is used to prioritize local block construction over relay/builder block construction. Boost is an additional percentage by which to multiply the local block value. Use builder block if: builder_bid_value * 100 > local_block_value * (local-block-value-boost + 100) (default: 10)
--max-builder-consecutive-missed-slots value Number of consecutive skip slot to fallback from using relay/builder to local execution engine for block construction (default: 3)
--max-builder-epoch-missed-slots value Number of total skip slot to fallback from using relay/builder to local execution engine for block construction in last epoch rolling window. The values are on the basis of the networks and the default value for mainnet is 5. (default: 0)
--max-concurrent-dials value Sets the maximum number of peers that a node will attempt to dial with from discovery. By default we will dial as many peers as possible. (default: 0)
--min-builder-bid value An absolute value in Gwei that the builder bid has to have in order for this beacon node to use the builder's block. Anything less than this value and the beacon will revert to local building. (default: 0)
--min-builder-to-local-difference value An absolute value in Gwei that the builder bid has to have in order for this beacon node to use the builder's block. Anything less than this value and the beacon will revert to local building. (default: 0)
--minimum-peers-per-subnet value Sets the minimum number of peers that a node will attempt to peer with that are subscribed to a subnet. (default: 6)
--network-id value Sets the network id of the beacon chain. (default: 0)
--rpc-host value Host on which the RPC server should listen (default: "127.0.0.1")
--rpc-port value RPC port exposed by a beacon node (default: 4000)
--slasher-datadir value Directory for the slasher database (default: "/Users/syjn99/Library/Over")
--slots-per-archive-point value The slot durations of when an archived state gets saved in the beaconDB. (default: 2048)
--subscribe-all-subnets Subscribe to all possible attestation and sync subnets. (default: false)
--tls-cert value Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.
--tls-key value Key for secure gRPC. Pass this and the tls-cert flag in order to use gRPC securely.
--weak-subjectivity-checkpoint block_root:epoch_number Input in block_root:epoch_number format. This guarantees that syncing leads to the given Weak Subjectivity Checkpoint along the canonical chain. If such a sync is not possible, the node will treat it as a critical and irrecoverable failure

merge OPTIONS:
--suggested-fee-recipient value Post bellatrix, this address will receive the transaction fees produced by any blocks from this node. Default to junk whilst bellatrix is in development state. Validator client can override this value through the preparebeaconproposer api. (default: "0x0000000000000000000000000000000000000000")
--terminal-block-hash-epoch-override value Sets the block hash epoch to manually override the default TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash activation epoch. Incorrect usage will result in your node experiencing consensus failure. (default: 0)
--terminal-block-hash-override value Sets the block hash to manually override the default TERMINAL_BLOCK_HASH value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash. Incorrect usage will result in your node experiencing consensus failure.
--terminal-total-difficulty-override value Sets the total difficulty to manually override the default TERMINAL_TOTAL_DIFFICULTY value. WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal difficulty. Incorrect usage will result in your node experiencing consensus failure.

p2p OPTIONS:
--enable-upnp Enable the service (Beacon chain or Validator) to use UPnP when possible. (default: false)
--min-sync-peers value The required number of valid peers to connect with before syncing. (default: 3)
--p2p-allowlist value The CIDR subnet for allowing only certain peer connections. Using "public" would allow only public subnets. Example: 192.168.0.0/16 would permit connections to peers on your local network only. The default is to accept all connections.
--p2p-colocation-limit value The maximum number of peers we can see from a single ip or ipv6 subnet. (default: 5)
--p2p-colocation-whitelist value [ --p2p-colocation-whitelist value ] Whitelist of CIDR subnets that not scoring peer with IP-colocation factor.
--p2p-denylist value [ --p2p-denylist value ] The CIDR subnets for denying certain peer connections. Using "private" would deny all private subnets. Example: 192.168.0.0/16 would deny connections from peers on your local network only. The default is to accept all connections.
--p2p-host-dns value The DNS address advertised by libp2p. This may be used to advertise an external DNS.
--p2p-host-ip value The IP address advertised by libp2p. This may be used to advertise an external IP.
--p2p-ip-tracker-ban-time value The interval in minutes to prune the ip tracker, default is 120m (default: 2h0m0s)
--p2p-local-ip value The local ip address to listen for incoming data.
--p2p-max-peers value The max number of p2p peers to maintain. (default: 70)
--p2p-metadata value The file containing the metadata to communicate with other peers.
--p2p-priv-key value The file containing the private key to use in communications with other peers.
--p2p-static-id Enables the peer id of the node to be fixed by saving the generated network key to the default key path. (default: false)
--peer value [ --peer value ] Connect with this peer, this flag may be used multiple times. This peer is recognized as a trusted peer.
--pubsub-queue-size value The size of the pubsub validation and outbound queue for the node. (default: 1000)

log OPTIONS:
--log-compress Compress the log files (default: false)
--log-file value Specifies log file name, relative or absolute.
--log-format value Specifies log formatting. Supports: text, json, fluentd, journald. (default: "text")
--log-maxage value Maximum number of days to retain a log file (default: 30)
--log-maxbackups value Maximum number of log files to retain (default: 10)
--log-maxsize value Maximum size in MBs of a single log file (default: 100)
--log-rotate Enables log file rotation (default: false)

features OPTIONS:
--blob-save-fsync Forces new blob files to be fsync'd before continuing, ensuring durable blob writes. (default: false)
--dev Enables experimental features still in development. These features may not be stable. (default: false)
--disable-broadcast-slashings Disables broadcasting slashings submitted to the beacon node. (default: false)
--disable-committee-aware-packing Changes the attestation packing algorithm to one that is not aware of attesting committees. (default: false)
--disable-experimental-state Turns off the latest and greatest changes to the beacon state. Disabling this is safe to do after the feature has been enabled. (default: false)
--disable-grpc-connection-logging Disables displaying logs for newly connected grpc clients. (default: false)
--disable-peer-scorer (Danger): Disables P2P peer scorer. Do NOT use this in production! (default: false)
--disable-registration-cache Temporary flag for disabling the validator registration cache instead of using the DB. Note: registrations do not clear on restart while using the DB. (default: false)
--disable-resource-manager Disables running the libp2p resource manager. (default: false)
--disable-staking-contract-check Disables checking of staking contract deposits when proposing blocks, useful for devnets. (default: false)
--disable-verbose-sig-verification Disables identifying invalid signatures if batch verification fails when processing block. (default: false)
--dolphin Runs Chronos configured for the Dolphin test network. (default: false)
--enable-discovery-reboot Experimental: Enables the discovery listener to be rebooted in the event of connectivity issues. (default: false)
--enable-full-ssz-data-logging Enables displaying logs for full ssz data on rejected gossip messages. (default: false)
--enable-historical-state-representation Enables the beacon chain to save historical states in a space efficient manner. (Warning): Once enabled, this feature migrates your database in to a new schema and there is no going back. At worst, your entire database might get corrupted. (default: false)
--enable-quic Enables connection using the QUIC protocol for peers which support it. (default: false)
--interop-write-ssz-state-transitions Writes SSZ states to disk after attempted state transition. (default: false)
--mainnet Runs on Over Protocol main network. This is the default and can be omitted. (default: true)
--prepare-all-payloads Informs the engine to prepare all local payloads. Useful for relayers and builders. (default: false)
--save-full-execution-payloads Saves beacon blocks with full execution payloads instead of execution payload headers in the database. (default: false)
--save-invalid-blob-temp Writes invalid blobs to temp directory. (default: false)
--save-invalid-block-temp Writes invalid blocks to temp directory. (default: false)
--slasher Enables a slasher in the beacon node for detecting slashable offenses. (default: false)

interop OPTIONS:
--genesis-state value Load a genesis state from ssz file. Testnet genesis files can be found in the eth2-clients/eth2-testnets repository on github.
--interop-genesis-time value Specify the genesis time for interop genesis state generation. Must be used with --interop-num-validators (default: 0)
--interop-num-validators value Specify number of genesis validators to generate for interop. Must be used with --interop-genesis-time (default: 0)

deprecated OPTIONS:
--db-backup-output-dir value Output directory for db backups.

Validator

validator help
NAME:
validator - Launches an Over Protocol validator client that interacts with a beacon chain, starts proposer and attester services, p2p connections, and more.

USAGE:
validator [options] command [command options] [arguments...]

VERSION:
Chronos/v1.5.4_juntmp/8434f9a98982c439d39e9ef563e9458681f36198. Built at: 2024-12-07 11:16:40+00:00

global OPTIONS:
wallet Defines commands for interacting with Over Protocol validator wallets.
accounts Defines commands for interacting with Over Protocol validator accounts.
slashing-protection-history Defines commands for interacting with your validator's slashing protection history.
db Defines commands for interacting with the Chronos validator database.
help, h Shows a list of commands or help for one command

cmd OPTIONS:
--accept-terms-of-use Accepts Terms and Conditions (for non-interactive environments). (default: false)
--api-timeout value Specifies the timeout value for API requests in seconds. (default: 10s)
--chain-config-file value Path to a YAML file with chain config values.
--clear-db Prompt for clearing any previously stored data at the data directory. (default: false)
--config-file value Filepath to a yaml file with flag values.
--datadir value Data directory for the databases. (default: "/Users/syjn99/Library/Over")
--db-backup-output-dir value Output directory for db backups.
--disable-monitoring Disables monitoring service. (default: false)
--e2e-config Enables the E2E testing config, only for use within end-to-end testing. (default: false)
--enable-db-backup-webhook Serves HTTP handler to initiate database backups.
The handler is served on the monitoring port at path /db/backup. (default: false)
--enable-tracing Enables request tracing. (default: false)
--force-clear-db Clears any previously stored data at the data directory. (default: false)
--grpc-max-msg-size value Integer to define max receive message call size (in bytes).
If serving a public gRPC server, set this to a more reasonable size to avoid
resource exhaustion from large messages.
Validators with as many as 10000 keys can be run with a max message size of less than
50Mb. The default here is set to a very high value for local users. (default: 2147483647)
--log-compress Compress the log files (default: false)
--log-file value Specifies log file name, relative or absolute.
--log-format value Specifies log formatting. Supports: text, json, fluentd, journald. (default: "text")
--log-maxage value Maximum number of days to retain a log file (default: 30)
--log-maxbackups value Maximum number of log files to retain (default: 10)
--log-maxsize value Maximum size in MBs of a single log file (default: 100)
--log-rotate Enables log file rotation (default: false)
--minimal-config Uses minimal config with parameters as defined in the spec. (default: false)
--monitoring-host value Host used for listening and responding metrics for prometheus. (default: "127.0.0.1")
--monitoring-port value Port used to listen and respond with metrics for Prometheus. (default: 8081)
--trace-sample-fraction value Indicates what fraction of p2p messages are sampled for tracing. (default: 0.2)
--tracing-endpoint value Tracing endpoint defines where beacon chain traces are exposed to Jaeger. (default: "http://127.0.0.1:14268/api/traces")
--tracing-process-name process_name Name to apply to tracing tag process_name.
--verbosity value Logging verbosity. (trace, debug, info, warn, error, fatal, panic) (default: "info")
--wallet-dir value Path to a wallet directory on-disk for Chronos validator accounts. (default: "/Users/syjn99/Library/OverValidators/chronos-wallet-v2")
--wallet-password-file value Path to a plain-text, .txt file containing your wallet password.

debug OPTIONS:
--blockprofilerate value Turns on block profiling with the given rate. (default: 0)
--cpuprofile value Writes CPU profile to the given file.
--memprofilerate value Turns on memory profiling with the given rate. (default: 524288)
--mutexprofilefraction value Turns on mutex profiling with the given rate. (default: 0)
--pprof Enables the pprof HTTP server. (default: false)
--pprofaddr value pprof HTTP server listening interface. (default: "127.0.0.1")
--pprofport value pprof HTTP server listening port. (default: 6060)
--trace value Writes execution trace to the given file.

rpc OPTIONS:
--beacon-rest-api-provider value Beacon node REST API provider endpoint. (default: "http://127.0.0.1:3500")
--beacon-rpc-provider value Beacon node RPC provider endpoint. (default: "127.0.0.1:4000")
--grpc-headers value Comma separated list of key value pairs to pass as gRPC headers for all gRPC calls.
Example: --grpc-headers=key=value
--grpc-retries value Number of attempts to retry gRPC requests. (default: 5)
--grpc-retry-delay value Amount of time between gRPC retry requests. (default: 1s)
--http-cors-domain value, --grpc-gateway-corsdomain value Comma separated list of domains from which to accept cross origin requests (browser enforced). (default: "http://localhost:7500,http://127.0.0.1:7500,http://0.0.0.0:7500,http://localhost:4242,http://127.0.0.1:4242,http://localhost:4200,http://0.0.0.0:4242,http://127.0.0.1:4200,http://0.0.0.0:4200,http://localhost:3000,http://0.0.0.0:3000,http://127.0.0.1:3000")
--http-host value, --grpc-gateway-host value Host on which the HTTP server runs on. (default: "127.0.0.1")
--http-port value, --grpc-gateway-port value Port on which the HTTP server runs on. (default: 7500)
--rpc Enables the RPC server for the validator client (without Web UI). (default: false)
--rpc-host value Host on which the RPC server should listen. (default: "127.0.0.1")
--rpc-port value RPC port exposed by a validator client. (default: 7000)
--tls-cert value Certificate for secure gRPC. Pass this and the tls-key flag in order to use gRPC securely.

proposer OPTIONS:
--enable-builder, --enable-validator-registration Enables builder validator registration APIs for the validator client to update settings
such as fee recipient and gas limit. This flag is not required if using proposer
settings config file. (default: false)
--graffiti value String to include in proposed blocks.
--graffiti-file value Path to a YAML file with graffiti values.
--proposer-settings-file value Sets path to a YAML or JSON file containing validator settings used when proposing blocks such as
fee recipient and gas limit. File format found in docs.
--proposer-settings-url value Sets URL to a REST endpoint containing validator settings used when proposing blocks such as
fee recipient and gas limit. File format found in docs
--suggested-fee-recipient value Sets ALL validators' mapping to a suggested eth address to receive gas fees when proposing a block.
Note that this is only a suggestion when integrating with a Builder API, which may choose to specify
a different fee recipient as payment for the blocks it builds. For additional setting overrides use the
--proposer-settings-file or --proposer-settings-url flags. (default: "0x0000000000000000000000000000000000000000")
--suggested-gas-limit value Sets gas limit for the builder to use for constructing a payload for all the validators. (default: "30000000")
--validators-registration-batch-size value Sets the maximum size for one batch of validator registrations. Use a non-positive value to disable batching. (default: 0)

remote signer OPTIONS:
--validators-external-signer-key-file value, --remote-signer-keys-file value A file path used to load remote public validator keys and persist them through restarts.
--validators-external-signer-public-keys value, --remote-signer-keys value [ --validators-external-signer-public-keys value, --remote-signer-keys value ] Comma separated list of public keys OR an external url endpoint for the validator to retrieve public keys from for usage with web3signer.
--validators-external-signer-url value, --remote-signer-url value URL for consensys' web3signer software to use with the Chronos validator client.

slasher OPTIONS:
--slasher-rpc-provider value Slasher node RPC provider endpoint. (default: "127.0.0.1:4002")
--slasher-tls-cert value Certificate for secure slasher gRPC. Pass this and the tls-key flag in order to use gRPC securely.

misc OPTIONS:
--disable-account-metrics Disables prometheus metrics for validator accounts. Operators with high volumes
of validating keys may wish to disable granular prometheus metrics as it increases
the data cardinality. (default: false)
--disable-rewards-penalties-logging Disables reward/penalty logging during cluster deployment. (default: false)
--distributed To enable the use of Chronos validator client in Distributed Validator Cluster (default: false)
--keymanager-token-file value, --validator-api-bearer-file value Path to auth token file used for validator apis. (default: "/Users/syjn99/Library/OverValidators/chronos-wallet-v2/auth-token")
--over-node Enables OverNode APIs for the validator client. (default: false)

features OPTIONS:
--attest-timely Fixes validator can attest timely after current block processes. See #8185 for more details. (default: false)
--dolphin Runs Chronos configured for the Dolphin test network. (default: false)
--dynamic-key-reload-debounce-interval value (Advanced): Specifies the time duration the validator waits to reload new keys if they have changed on disk.
Can be any type of duration such as 1.5s, 1000ms, 1m. (default: 1s)
--enable-beacon-rest-api (Experimental): Enables use of the beacon REST API when querying a beacon node. (default: false)
--enable-doppelganger Enables the validator to perform a doppelganger check.
This is not a foolproof method to find duplicate instances in the network.
Your validator will still be vulnerable if it is being run in unsafe configurations. (default: false)
--enable-minimal-slashing-protection (Experimental): Enables the minimal slashing protection. See EIP-3076 for more details. (default: false)
--enable-slashing-protection-history-pruning Enables the pruning of the validator client's slashing protection database. (default: false)
--mainnet Runs on Over Protocol main network. This is the default and can be omitted. (default: true)

interop OPTIONS:
--interop-num-validators value Number of validators to deterministically generate.
Example: --interop-start-index=5 --interop-num-validators=3 would generate keys from index 5 to 7. (default: 0)
--interop-start-index value Start index to deterministically generate validator keys when used in combination with --interop-num-validators.
Example: --interop-start-index=5 --interop-num-validators=3 would generate keys from index 5 to 7. (default: 0)

- + \ No newline at end of file diff --git a/operators/CLI-options/kairos.html b/operators/CLI-options/kairos.html index dd919ae..b1f4362 100644 --- a/operators/CLI-options/kairos.html +++ b/operators/CLI-options/kairos.html @@ -4,13 +4,13 @@ Command Line Options | OverProtocol Docs - +

Command Line Options

geth

As the Kairos project is a fork of the Geth project, it provides most of the command line options available in geth. You can obtain information and descriptions of these options by using the command line help in the Geth binary.

geth --help                                                                                                            
NAME:
geth - the kairos command line interface

USAGE:
geth [global options] command [command options] [arguments...]

VERSION:
1.4.3-stable-d7024590

COMMANDS:
account Manage accounts
attach Start an interactive JavaScript environment (connect to node)
console Start an interactive JavaScript environment
db Low level database operations
dump Dump a specific block from storage
dumpconfig Export configuration values in a TOML format
dumpgenesis Dumps genesis block JSON configuration to stdout
export Export blockchain into file
export-history Export blockchain history to Era archives
import Import a blockchain file
import-history Import an Era archive
import-preimages Import the preimage database from an RLP stream
init Bootstrap and initialize a new genesis block
js (DEPRECATED) Execute the specified JavaScript files
license Display license information
removedb Remove blockchain and state databases
show-deprecated-flags Show flags that have been deprecated
snapshot A set of commands based on the snapshot
verkle A set of experimental verkle tree management commands
version Print version numbers
version-check Checks (online) for known Geth security vulnerabilities
wallet Manage Ethereum presale wallets
help, h Shows a list of commands or help for one command

GLOBAL OPTIONS:
ACCOUNT


--allow-insecure-unlock (default: false) ($GETH_ALLOW_INSECURE_UNLOCK)
Allow insecure account unlocking when account-related RPCs are exposed by http

--keystore value ($GETH_KEYSTORE)
Directory for the keystore (default = inside the datadir)

--lightkdf (default: false) ($GETH_LIGHTKDF)
Reduce key-derivation RAM & CPU usage at some expense of KDF strength

--password value ($GETH_PASSWORD)
Password file to use for non-interactive password input

--pcscdpath value ($GETH_PCSCDPATH)
Path to the smartcard daemon (pcscd) socket file

--signer value ($GETH_SIGNER)
External signer (url or path to ipc file)

--unlock value ($GETH_UNLOCK)
Comma separated list of accounts to unlock

--usb (default: false) ($GETH_USB)
Enable monitoring and management of USB hardware wallets

ALIASED (deprecated)


--cache.trie.journal value ($GETH_CACHE_TRIE_JOURNAL)
Disk journal directory for trie cache to survive node restarts

--cache.trie.rejournal value (default: 0s) ($GETH_CACHE_TRIE_REJOURNAL)
Time interval to regenerate the trie cache journal

--light.egress value (default: 0) ($GETH_LIGHT_EGRESS)
Outgoing bandwidth limit for serving light clients (deprecated)

--light.ingress value (default: 0) ($GETH_LIGHT_INGRESS)
Incoming bandwidth limit for serving light clients (deprecated)

--light.maxpeers value (default: 0) ($GETH_LIGHT_MAXPEERS)
Maximum number of light clients to serve, or light servers to attach to
(deprecated)

--light.nopruning (default: false) ($GETH_LIGHT_NOPRUNING)
Disable ancient light chain data pruning (deprecated)

--light.nosyncserve (default: false) ($GETH_LIGHT_NOSYNCSERVE)
Enables serving light clients before syncing (deprecated)

--light.serve value (default: 0) ($GETH_LIGHT_SERVE)
Maximum percentage of time allowed for serving LES requests (deprecated)

--log.backtrace value ($GETH_LOG_BACKTRACE)
Request a stack trace at a specific logging statement (deprecated)

--log.debug (default: false) ($GETH_LOG_DEBUG)
Prepends log messages with call-site location (deprecated)

--metrics.expensive (default: false) ($GETH_METRICS_EXPENSIVE)
Enable expensive metrics collection and reporting (deprecated)

--mine (default: false) ($GETH_MINE)
Enable mining (deprecated)

--miner.etherbase value ($GETH_MINER_ETHERBASE)
0x prefixed public address for block mining rewards (deprecated)

--miner.newpayload-timeout value (default: 2s) ($GETH_MINER_NEWPAYLOAD_TIMEOUT)
Specify the maximum time allowance for creating a new payload (deprecated)

--nousb (default: false) ($GETH_NOUSB)
Disables monitoring for and managing USB hardware wallets (deprecated)

--txlookuplimit value (default: 2350000) ($GETH_TXLOOKUPLIMIT)
Number of recent blocks to maintain transactions index for (default = about one
year, 0 = entire chain) (deprecated, use history.transactions instead)

--v5disc (default: false) ($GETH_V5DISC)
Enables the experimental RLPx V5 (Topic Discovery) mechanism (deprecated, use
--discv5 instead)

--whitelist value ($GETH_WHITELIST)
Comma separated block number-to-hash mappings to enforce (<number>=<hash>)
(deprecated in favor of --eth.requiredblocks)

API AND CONSOLE


--authrpc.addr value (default: "localhost") ($GETH_AUTHRPC_ADDR)
Listening address for authenticated APIs

--authrpc.jwtsecret value ($GETH_AUTHRPC_JWTSECRET)
Path to a JWT secret to use for authenticated RPC endpoints

--authrpc.port value (default: 8551) ($GETH_AUTHRPC_PORT)
Listening port for authenticated APIs

--authrpc.vhosts value (default: "localhost") ($GETH_AUTHRPC_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--exec value ($GETH_EXEC)
Execute JavaScript statement

--graphql (default: false) ($GETH_GRAPHQL)
Enable GraphQL on the HTTP-RPC server. Note that GraphQL can only be started if
an HTTP server is started as well.

--graphql.corsdomain value ($GETH_GRAPHQL_CORSDOMAIN)
Comma separated list of domains from which to accept cross origin requests
(browser enforced)

--graphql.vhosts value (default: "localhost") ($GETH_GRAPHQL_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--header value, -H value ($GETH_HEADER)
Pass custom headers to the RPC server when using --remotedb or the geth attach
console. This flag can be given multiple times.

--http (default: false) ($GETH_HTTP)
Enable the HTTP-RPC server

--http.addr value (default: "localhost") ($GETH_HTTP_ADDR)
HTTP-RPC server listening interface

--http.api value ($GETH_HTTP_API)
API's offered over the HTTP-RPC interface

--http.corsdomain value ($GETH_HTTP_CORSDOMAIN)
Comma separated list of domains from which to accept cross origin requests
(browser enforced)

--http.port value (default: 8545) ($GETH_HTTP_PORT)
HTTP-RPC server listening port

--http.rpcprefix value ($GETH_HTTP_RPCPREFIX)
HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.

--http.vhosts value (default: "localhost") ($GETH_HTTP_VHOSTS)
Comma separated list of virtual hostnames from which to accept requests (server
enforced). Accepts '*' wildcard.

--ipcdisable (default: false) ($GETH_IPCDISABLE)
Disable the IPC-RPC server

--ipcpath value ($GETH_IPCPATH)
Filename for IPC socket/pipe within the datadir (explicit paths escape it)

--jspath value (default: .) ($GETH_JSPATH)
JavaScript root path for `loadScript`

--preload value ($GETH_PRELOAD)
Comma separated list of JavaScript files to preload into the console

--rpc.allow-unprotected-txs (default: false) ($GETH_RPC_ALLOW_UNPROTECTED_TXS)
Allow for unprotected (non EIP155 signed) transactions to be submitted via RPC

--rpc.batch-request-limit value (default: 1000) ($GETH_RPC_BATCH_REQUEST_LIMIT)
Maximum number of requests in a batch

--rpc.batch-response-max-size value (default: 25000000) ($GETH_RPC_BATCH_RESPONSE_MAX_SIZE)
Maximum number of bytes returned from a batched call

--rpc.enabledeprecatedpersonal (default: false) ($GETH_RPC_ENABLEDEPRECATEDPERSONAL)
Enables the (deprecated) personal namespace

--rpc.evmtimeout value (default: 5s) ($GETH_RPC_EVMTIMEOUT)
Sets a timeout used for eth_call (0=infinite)

--rpc.gascap value (default: 50000000) ($GETH_RPC_GASCAP)
Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite)

--rpc.txfeecap value (default: 1) ($GETH_RPC_TXFEECAP)
Sets a cap on transaction fee (in ether) that can be sent via the RPC APIs (0 =
no cap)

--ws (default: false) ($GETH_WS)
Enable the WS-RPC server

--ws.addr value (default: "localhost") ($GETH_WS_ADDR)
WS-RPC server listening interface

--ws.api value ($GETH_WS_API)
API's offered over the WS-RPC interface

--ws.origins value ($GETH_WS_ORIGINS)
Origins from which to accept websockets requests

--ws.port value (default: 8546) ($GETH_WS_PORT)
WS-RPC server listening port

--ws.rpcprefix value ($GETH_WS_RPCPREFIX)
HTTP path prefix on which JSON-RPC is served. Use '/' to serve on all paths.

BEACON CHAIN


--beacon.api value ($GETH_BEACON_API)
Beacon node (CL) light client API URL. This flag can be given multiple times.

--beacon.api.header value ($GETH_BEACON_API_HEADER)
Pass custom HTTP header fields to the remote beacon node API in "key:value"
format. This flag can be given multiple times.

--beacon.checkpoint value ($GETH_BEACON_CHECKPOINT)
Beacon chain weak subjectivity checkpoint block hash

--beacon.config value ($GETH_BEACON_CONFIG)
Beacon chain config YAML file

--beacon.genesis.gvroot value ($GETH_BEACON_GENESIS_GVROOT)
Beacon chain genesis validators root

--beacon.genesis.time value (default: 0) ($GETH_BEACON_GENESIS_TIME)
Beacon chain genesis time

--beacon.nofilter (default: false) ($GETH_BEACON_NOFILTER)
Disable future slot signature filter

--beacon.threshold value (default: 342) ($GETH_BEACON_THRESHOLD)
Beacon sync committee participation threshold

DEVELOPER CHAIN


--dev (default: false) ($GETH_DEV)
Ephemeral proof-of-authority network with a pre-funded developer account, mining
enabled

--dev.gaslimit value (default: 11500000) ($GETH_DEV_GASLIMIT)
Initial block gas limit

--dev.period value (default: 0) ($GETH_DEV_PERIOD)
Block period to use in developer mode (0 = mine only if transaction pending)

ETHEREUM


--bloomfilter.size value (default: 2048) ($GETH_BLOOMFILTER_SIZE)
Megabytes of memory allocated to bloom-filter for pruning

--config value ($GETH_CONFIG)
TOML configuration file

--datadir value (default: /Users/syjn99/Library/OverProtocol) ($GETH_DATADIR)
Data directory for the databases and keystore

--datadir.ancient value ($GETH_DATADIR_ANCIENT)
Root directory for ancient data (default = inside chaindata)

--datadir.minfreedisk value ($GETH_DATADIR_MINFREEDISK)
Minimum free disk space in MB, once reached triggers auto shut down (default =
--cache.gc converted to MB, 0 = disabled)

--db.engine value ($GETH_DB_ENGINE)
Backing database implementation to use ('pebble' or 'leveldb')

--dolphin (default: false) ($GETH_DOLPHIN)
Dolphin test network

--eth.requiredblocks value ($GETH_ETH_REQUIREDBLOCKS)
Comma separated block number-to-hash mappings to require for peering
(<number>=<hash>)

--exitwhensynced (default: false) ($GETH_EXITWHENSYNCED)
Exits after block synchronisation completes

--mainnet (default: false) ($GETH_MAINNET)
Overprotocol mainnet

--networkid value (default: 0) ($GETH_NETWORKID)
Explicitly set network id (integer)(For testnets: use --dolphin instead)

--override.cancun value (default: 0) ($GETH_OVERRIDE_CANCUN)
Manually specify the Cancun fork timestamp, overriding the bundled setting

--override.verkle value (default: 0) ($GETH_OVERRIDE_VERKLE)
Manually specify the Verkle fork timestamp, overriding the bundled setting

--snapshot (default: true) ($GETH_SNAPSHOT)
Enables snapshot-database mode (default = enable)

GAS PRICE ORACLE


--gpo.blocks value (default: 20) ($GETH_GPO_BLOCKS)
Number of recent blocks to check for gas prices

--gpo.ignoreprice value (default: 2) ($GETH_GPO_IGNOREPRICE)
Gas price below which gpo will ignore transactions

--gpo.maxprice value (default: 500000000000) ($GETH_GPO_MAXPRICE)
Maximum transaction priority fee (or gasprice before London fork) to be
recommended by gpo

--gpo.percentile value (default: 60) ($GETH_GPO_PERCENTILE)
Suggested gas price is the given percentile of a set of recent transaction gas
prices

LOGGING AND DEBUGGING


--log.compress (default: false) ($GETH_LOG_COMPRESS)
Compress the log files

--log.file value ($GETH_LOG_FILE)
Write logs to a file

--log.format value ($GETH_LOG_FORMAT)
Log format to use (json|logfmt|terminal)

--log.maxage value (default: 30) ($GETH_LOG_MAXAGE)
Maximum number of days to retain a log file

--log.maxbackups value (default: 10) ($GETH_LOG_MAXBACKUPS)
Maximum number of log files to retain

--log.maxsize value (default: 100) ($GETH_LOG_MAXSIZE)
Maximum size in MBs of a single log file

--log.rotate (default: false) ($GETH_LOG_ROTATE)
Enables log file rotation

--log.vmodule value ($GETH_LOG_VMODULE)
Per-module verbosity: comma-separated list of <pattern>=<level> (e.g.
eth/*=5,p2p=4)

--nocompaction (default: false) ($GETH_NOCOMPACTION)
Disables db compaction after import

--pprof (default: false) ($GETH_PPROF)
Enable the pprof HTTP server

--pprof.addr value (default: "127.0.0.1") ($GETH_PPROF_ADDR)
pprof HTTP server listening interface

--pprof.blockprofilerate value (default: 0) ($GETH_PPROF_BLOCKPROFILERATE)
Turn on block profiling with the given rate

--pprof.cpuprofile value ($GETH_PPROF_CPUPROFILE)
Write CPU profile to the given file

--pprof.memprofilerate value (default: 524288) ($GETH_PPROF_MEMPROFILERATE)
Turn on memory profiling with the given rate

--pprof.port value (default: 6060) ($GETH_PPROF_PORT)
pprof HTTP server listening port

--remotedb value ($GETH_REMOTEDB)
URL for remote database

--trace value ($GETH_TRACE)
Write execution trace to the given file

--verbosity value (default: 3) ($GETH_VERBOSITY)
Logging verbosity: 0=silent, 1=error, 2=warn, 3=info, 4=debug, 5=detail

METRICS AND STATS


--ethstats value ($GETH_ETHSTATS)
Reporting URL of a ethstats service (nodename:secret@host:port)

--metrics (default: false) ($GETH_METRICS)
Enable metrics collection and reporting

--metrics.addr value ($GETH_METRICS_ADDR)
Enable stand-alone metrics HTTP server listening interface.

--metrics.influxdb (default: false) ($GETH_METRICS_INFLUXDB)
Enable metrics export/push to an external InfluxDB database

--metrics.influxdb.bucket value (default: "geth") ($GETH_METRICS_INFLUXDB_BUCKET)
InfluxDB bucket name to push reported metrics to (v2 only)

--metrics.influxdb.database value (default: "geth") ($GETH_METRICS_INFLUXDB_DATABASE)
InfluxDB database name to push reported metrics to

--metrics.influxdb.endpoint value (default: "http://localhost:8086") ($GETH_METRICS_INFLUXDB_ENDPOINT)
InfluxDB API endpoint to report metrics to

--metrics.influxdb.organization value (default: "geth") ($GETH_METRICS_INFLUXDB_ORGANIZATION)
InfluxDB organization name (v2 only)

--metrics.influxdb.password value (default: "test") ($GETH_METRICS_INFLUXDB_PASSWORD)
Password to authorize access to the database

--metrics.influxdb.tags value (default: "host=localhost") ($GETH_METRICS_INFLUXDB_TAGS)
Comma-separated InfluxDB tags (key/values) attached to all measurements

--metrics.influxdb.token value (default: "test") ($GETH_METRICS_INFLUXDB_TOKEN)
Token to authorize access to the database (v2 only)

--metrics.influxdb.username value (default: "test") ($GETH_METRICS_INFLUXDB_USERNAME)
Username to authorize access to the database

--metrics.influxdbv2 (default: false) ($GETH_METRICS_INFLUXDBV2)
Enable metrics export/push to an external InfluxDB v2 database

--metrics.port value (default: 6060) ($GETH_METRICS_PORT)
Metrics HTTP server listening port.
Please note that --metrics.addr must be set
to start the server.

MINER


--miner.extradata value ($GETH_MINER_EXTRADATA)
Block extra data set by the miner (default = client version)

--miner.gaslimit value (default: 30000000) ($GETH_MINER_GASLIMIT)
Target gas ceiling for mined blocks

--miner.gasprice value (default: 1000000) ($GETH_MINER_GASPRICE)
Minimum gas price for mining a transaction

--miner.pending.feeRecipient value ($GETH_MINER_PENDING_FEERECIPIENT)
0x prefixed public address for the pending block producer (not used for actual
block production)

--miner.recommit value (default: 2s) ($GETH_MINER_RECOMMIT)
Time interval to recreate the block being mined

MISC


--help, -h (default: false)
show help

--synctarget value ($GETH_SYNCTARGET)
Hash of the block to full sync to (dev testing feature)

--version, -v (default: false)
print the version

NETWORKING


--bootnodes value ($GETH_BOOTNODES)
Comma separated enode URLs for P2P discovery bootstrap

--discovery.dns value ($GETH_DISCOVERY_DNS)
Sets DNS discovery entry points (use "" to disable DNS)

--discovery.port value (default: 30303) ($GETH_DISCOVERY_PORT)
Use a custom UDP port for P2P discovery

--discovery.v4, --discv4 (default: false) ($GETH_DISCOVERY_V4)
Enables the V4 discovery mechanism

--discovery.v5, --discv5 (default: true) ($GETH_DISCOVERY_V5)
Enables the V5 discovery mechanism

--identity value ($GETH_IDENTITY)
Custom node name

--maxpeers value (default: 50) ($GETH_MAXPEERS)
Maximum number of network peers (network disabled if set to 0)

--maxpendpeers value (default: 0) ($GETH_MAXPENDPEERS)
Maximum number of pending connection attempts (defaults used if set to 0)

--nat value (default: "any") ($GETH_NAT)
NAT port mapping mechanism (any|none|upnp|pmp|pmp:<IP>|extip:<IP>)

--netrestrict value ($GETH_NETRESTRICT)
Restricts network communication to the given IP networks (CIDR masks)

--nodekey value ($GETH_NODEKEY)
P2P node key file

--nodekeyhex value ($GETH_NODEKEYHEX)
P2P node key as hex (for testing)

--nodiscover (default: false) ($GETH_NODISCOVER)
Disables the peer discovery mechanism (manual peer addition)

--port value (default: 30303) ($GETH_PORT)
Network listening port

PERFORMANCE TUNING


--cache value (default: 1024) ($GETH_CACHE)
Megabytes of memory allocated to internal caching (default = 4096 mainnet full
node, 128 light mode)

--cache.blocklogs value (default: 32) ($GETH_CACHE_BLOCKLOGS)
Size (in number of blocks) of the log cache for filtering

--cache.database value (default: 50) ($GETH_CACHE_DATABASE)
Percentage of cache memory allowance to use for database io

--cache.gc value (default: 25) ($GETH_CACHE_GC)
Percentage of cache memory allowance to use for trie pruning (default = 25% full
mode, 0% archive mode)

--cache.noprefetch (default: false) ($GETH_CACHE_NOPREFETCH)
Disable heuristic state prefetch during block import (less CPU and disk IO, more
time waiting for data)

--cache.preimages (default: false) ($GETH_CACHE_PREIMAGES)
Enable recording the SHA3/keccak preimages of trie keys

--cache.snapshot value (default: 10) ($GETH_CACHE_SNAPSHOT)
Percentage of cache memory allowance to use for snapshot caching (default = 10%
full mode, 20% archive mode)

--cache.trie value (default: 15) ($GETH_CACHE_TRIE)
Percentage of cache memory allowance to use for trie caching (default = 15% full
mode, 30% archive mode)

--crypto.kzg value (default: "gokzg") ($GETH_CRYPTO_KZG)
KZG library implementation to use; gokzg (recommended) or ckzg

--fdlimit value (default: 0) ($GETH_FDLIMIT)
Raise the open file descriptor resource limit (default = system fd limit)

STATE HISTORY MANAGEMENT


--gcmode value (default: "full") ($GETH_GCMODE)
Blockchain garbage collection mode, only relevant in state.scheme=hash ("full",
"archive")

--history.state value (default: 90000) ($GETH_HISTORY_STATE)
Number of recent blocks to retain state history for (default = 90,000 blocks, 0
= entire chain)

--history.transactions value (default: 2350000) ($GETH_HISTORY_TRANSACTIONS)
Number of recent blocks to maintain transactions index for (default = about one
year, 0 = entire chain)

--state.scheme value ($GETH_STATE_SCHEME)
Scheme to use for storing ethereum state ('hash' or 'path')

--syncmode value (default: snap) ($GETH_SYNCMODE)
Blockchain sync mode ("snap" or "full")

TRANSACTION POOL (BLOB)


--blobpool.datacap value (default: 2684354560) ($GETH_BLOBPOOL_DATACAP)
Disk space to allocate for pending blob transactions (soft limit)

--blobpool.datadir value (default: "blobpool") ($GETH_BLOBPOOL_DATADIR)
Data directory to store blob transactions in

--blobpool.pricebump value (default: 100) ($GETH_BLOBPOOL_PRICEBUMP)
Price bump percentage to replace an already existing blob transaction

TRANSACTION POOL (EVM)


--txpool.accountqueue value (default: 64) ($GETH_TXPOOL_ACCOUNTQUEUE)
Maximum number of non-executable transaction slots permitted per account

--txpool.accountslots value (default: 16) ($GETH_TXPOOL_ACCOUNTSLOTS)
Minimum number of executable transaction slots guaranteed per account

--txpool.globalqueue value (default: 1024) ($GETH_TXPOOL_GLOBALQUEUE)
Maximum number of non-executable transaction slots for all accounts

--txpool.globalslots value (default: 5120) ($GETH_TXPOOL_GLOBALSLOTS)
Maximum number of executable transaction slots for all accounts

--txpool.journal value (default: "transactions.rlp") ($GETH_TXPOOL_JOURNAL)
Disk journal for local transaction to survive node restarts

--txpool.lifetime value (default: 3h0m0s) ($GETH_TXPOOL_LIFETIME)
Maximum amount of time non-executable transaction are queued

--txpool.locals value ($GETH_TXPOOL_LOCALS)
Comma separated accounts to treat as locals (no flush, priority inclusion)

--txpool.nolocals (default: false) ($GETH_TXPOOL_NOLOCALS)
Disables price exemptions for locally submitted transactions

--txpool.pricebump value (default: 10) ($GETH_TXPOOL_PRICEBUMP)
Price bump percentage to replace an already existing transaction

--txpool.pricelimit value (default: 1) ($GETH_TXPOOL_PRICELIMIT)
Minimum gas price tip to enforce for acceptance into the pool

--txpool.rejournal value (default: 1h0m0s) ($GETH_TXPOOL_REJOURNAL)
Time interval to regenerate the local transaction journal

VIRTUAL MACHINE


--vmdebug (default: false) ($GETH_VMDEBUG)
Record information useful for VM and contract debugging

--vmtrace value ($GETH_VMTRACE)
Name of tracer which should record internal VM operations (costly)

--vmtrace.jsonconfig value (default: "{}") ($GETH_VMTRACE_JSONCONFIG)
Tracer configuration (JSON)


COPYRIGHT:
Copyright 2013-2024 The go-ethereum Authors

- + \ No newline at end of file diff --git a/operators/advanced-guides/run-with-docker.html b/operators/advanced-guides/run-with-docker.html index 8708d2f..ed000d8 100644 --- a/operators/advanced-guides/run-with-docker.html +++ b/operators/advanced-guides/run-with-docker.html @@ -4,7 +4,7 @@ Run with Docker | OverProtocol Docs - + @@ -12,7 +12,7 @@

Run with Docker

As both clients must communicate with each other via the Engine API, we highly recommend running with Docker Compose as it eliminates the need to manage container networking manually.

We provide an official repository with useful scripts: overprotocol/docker-scripts. Here is a brief guide to run a full Over Protocol node. For running a validator node, please refer to the repository.

  1. Clone docker-scripts repo
git clone https://github.com/overprotocol/docker-scripts.git
mv docker-scripts overprotocol
cd overprotocol
  1. Check if Docker is available
docker -v
  1. Export your public IP for discovery
export PUBLIC_IP=$(curl -s ifconfig.me)
echo $PUBLIC_IP
  1. Initialize data directory and JWT token
make init
  1. Run a full node
docker compose -f mainnet.yml up -d
  1. Health check your node
curl 127.0.0.1:3500/eth/v1/node/syncing | jq

This will fetch the current status of the consensus client. If sync_distance is equal to 0, this means your node is well synchronized.

{
"data": {
"head_slot": "22916",
"sync_distance": "0",
"is_syncing": false,
"is_optimistic": false,
"el_offline": false
}
}

For debugging purposes, you can check the logs with the following commands:

docker logs kairos -f  # To inspect the execution client
docker logs chronos -f # To inspect the consensus client

Congratulations! You’re now running an OverProtocol node.

- + \ No newline at end of file diff --git a/operators/faqs.html b/operators/faqs.html index af2bb7f..75eb1ad 100644 --- a/operators/faqs.html +++ b/operators/faqs.html @@ -4,14 +4,14 @@ OverProtocol Validator FAQs | OverProtocol Docs - +

OverProtocol Validator FAQs

Welcome to the OverProtocol Validator FAQ! 🙌

Welcome! This FAQ is your go-to resource for understanding the ins and outs of operating validators on OverProtocol. Whether you’re just getting started or managing multiple validators, we’re here to guide you through every step with clarity and confidence.

Validators are critical to OverProtocol’s security and decentralization, and we appreciate your dedication to the network. Let’s dive into your most pressing questions!


1. What is the Role of a Validator?

Validators are the backbone of OverProtocol. Here’s what they do:

  • Validate Blocks: Ensure that new blocks and transactions meet the network’s consensus rules.
  • Propose Blocks: Occasionally create new blocks, playing a crucial role in the Proof of Stake (PoS) mechanism.
  • Earn Rewards: Gain rewards for honest participation, proportional to your stake.

2. How Do I Become a Validator?

  • Stake a minimum of 256 OVER to register a validator.
  • Use the Staking menu in OverScape or manually register your validator keys.
  • Keep your validator online to participate in the consensus process and earn rewards.

Refer to our Validator Setup Guide for detailed instructions.

3. How Are Rewards Earned?

Rewards are earned for:

  • Block Validation: Validating transactions and adding them to the blockchain.
  • Proposing Blocks: Proposing valid blocks as part of the consensus mechanism.

Reward Frequency: Every 6 minutes (1 epoch). Rewards are sent automatically to the withdrawal address.

4. What Hardware Do I Need for a Node?

Minimum Requirements:

  • CPU: Dual-core processor.
  • Memory: 8GB RAM.
  • Storage: 50GB SSD.

Recommended for Optimal Performance:

  • CPU: Quad-core or higher.
  • Memory: 16GB RAM or more.
  • Storage: 128GB SSD or more.

5. Does My Internet Connection Matter?

Yes! A stable and reliable connection is crucial.

Minimum Speed: 8 Mbps download. Recommended Speed: 25 Mbps or higher for validators or high-load nodes.

6. How Do I Monitor My Node?

Use OverScape to monitor your node’s performance or access Chronos metrics directly via command-line tools. Keep an eye on:

  • Node synchronization status.
  • Peer connections.
  • Resource usage (CPU, memory, storage).

8. What Are Validator States?

Validators in OverProtocol have the following states:

  • Pending: Waiting for activation after staking.
  • Active: Participating in consensus and earning rewards.
  • Exited: No longer participating.
  • Slashed: Penalized for misbehavior and removed from the network.

9. Why is My Validator Inactive?

Inactive validators may result from:

  • Network Downtime: Ensure your node is online and synced.
  • Software Issues: Confirm you’re using the latest version of OverScape or validator clients.

If you run into any trouble, join a community channel and feel free to ask for help!

10. What Happens If My Node Goes Offline?

  • Validators will stop earning rewards during downtime.
  • Prolonged inactivity may lead to penalties and eventually force the validator to exit the network.
  • Use OverScape or monitoring tools to receive alerts and address issues promptly.

11. Can I Operate Validators on Multiple Devices?

DON'T DO THAT.

Running the same validator on multiple devices can lead to slashing penalties. If you need to move a validator to a new device:

  • Backup your recovery phrase.
  • Stop the validator on the old device.
  • Wait at least 13 minutes before starting it on the new device.

12. Can I Add More OVER After Staking?

Yes, you can add more OVER to an active validator.

Additional deposits must be a minimum of 32 OVER.

13. When Can I Withdraw My Rewards?

Validator rewards are processed automatically:

  • Accumulated rewards are sent to the withdrawal address every 1–5 days.
  • No manual claims are necessary.

14. Can I Partially Withdraw Staked OVER?

Currently, validators must withdraw their full staked amount when stopping activity.

Partial withdrawals are not yet supported but will be available in a future update.

15. How Do I Stop Validating and Withdraw My OVER?

Initiating a Voluntary Exit (Manually)

For users who decide to cease staking and wish to withdraw their entire balance, a "voluntary exit" process must be initiated. This involves signing and broadcasting a voluntary exit message using your validator keys. The process is facilitated by your validator client and must be submitted to your beacon node. Importantly, this action does not require any gas fees, as it is a part of the consensus layer's functionality. You can use a command like the following:

$ prysmctl validator exit --wallet-dir=<path/to/your/wallet/directory> --beacon-rpc-provider=<127.0.0.1:4000>

Alternatively, you can use Bazel to initiate a voluntary exit from the source as follows:

$ bazel run //cmd/prysmctl -- validator exit --wallet-dir=<path/to/your/wallet/directory> --beacon-rpc-provider=<127.0.0.1:4000> 

16. What Should I Know About Withdrawals?

Withdrawals are only possible under the following conditions:

  • 256 Epoch Rule: Validators can only request withdrawals after 256 epochs (~24 hours) of activation.
  • Full Withdrawals Only: Partial withdrawals are not yet supported.
- + \ No newline at end of file diff --git a/operators/operate-restoration-client.html b/operators/operate-restoration-client.html index 8684534..c68f075 100644 --- a/operators/operate-restoration-client.html +++ b/operators/operate-restoration-client.html @@ -4,13 +4,13 @@ Operate Restoration Client | OverProtocol Docs - +

Operate Restoration Client

To restore an expired account, you need to retrieve the proof of historical state. This requires running an execution client that stores historical state data. By operating both the execution client and the restoration client, you can help users restore expired accounts and receive additional rewards.

How to run a restoration client

Restoration client is controlled using the command line. Here’s how to set it up:

restoration --help                                                                                                             
Usage of restoration:
-corsdomain string
Comma separated list of domains from which to accept cross origin requests (browser enforced) (default "*")
-ipc string
The ipc endpoint of a local geth node
-keystore string
Directory for the keystore (default = inside the datadir)
-minimum-reward string
Minimum reward for sending restoration transaction (default "1000000000000000000")
-passphrase string
Passphrase file for unlocking signer account
-port string
Server listening port (default ":32311")
-rpc string
The rpc endpoint of a local or remote geth node
-signer string
Signer address for signing restoration transaction and receiving reward
caution

The execution client must be synced with full sync mode and store an unlimited number of epochs.

$ geth --syncmode full --epochLimit 0
- + \ No newline at end of file diff --git a/operators/operate-validators.html b/operators/operate-validators.html index a912886..264b64e 100644 --- a/operators/operate-validators.html +++ b/operators/operate-validators.html @@ -4,7 +4,7 @@ Operate Validators | OverProtocol Docs - + @@ -14,7 +14,7 @@ The execution layer's account needs 256 OVER per validator account it tries to enroll.

Then you should run code like the following on your machine to send deposit transactions with the validator keys generated in step 2. The deposit contract's address is set to 0x000000000000000000000000000000000beac017 and the deposit contract ABI is set as the following link: DepositContract.abi.json.

// ethers.js v5
const { ethers } = require("ethers");

// JSON-RPC endpoint of the local Kairos (execution) client.
const provider = new ethers.providers.JsonRpcProvider(
  "http://127.0.0.1:22000"
);

const depositContractAddress = '0x000000000000000000000000000000000beac017';
const depositContractABI = require('./DepositContract.abi.json');

// Sends one deposit transaction per entry in deposit_data.json,
// staking 256 OVER for each validator key generated in step 2.
async function stake(privateKey) {
  const signer = new ethers.Wallet(privateKey, provider);

  const depositContract = new ethers.Contract(
    depositContractAddress,
    depositContractABI,
    signer
  );

  // Each validator registration stakes exactly 256 OVER.
  const stakeAmount = ethers.utils.parseEther("256");

  // The deposit data you've generated from step 2.
  const deposits = require("./deposit_data.json");

  for (let i = 0; i < deposits.length; i++) {
    const entry = deposits[i];

    const tx = await depositContract.deposit(
      "0x" + entry.pubkey,
      "0x" + entry.withdrawal_credentials,
      "0x" + entry.signature,
      "0x" + entry.deposit_data_root,
      {
        value: stakeAmount,
        gasLimit: 2000000,
      }
    );

    try {
      const receipt = await tx.wait();
      console.log(`Transaction ${i + 1}:`);
      console.log(`Transaction Hash: ${receipt.transactionHash}`);
    } catch (error) {
      console.error(`Error in transaction ${i + 1}: ${error.message}`);
    }
  }
}

stake(YOUR_PRIVATE_KEY_WITH_0x_PREFIX)

If you've succeeded in registering your validator to the blockchain, you should now run your validator software. Follow steps 4 and 5.

Run Your Validator

Transfer Validator Keys

Run the validator client to import the validator keys with a command similar to the following:

$ validator accounts import --keys-dir=<path/to/your/validator/keys> --wallet-dir=<path/to/your/wallet/directory>

If you successfully imported validator keys, the result will be:

Importing accounts, this may take a while...
Importing accounts... 100% [==========================================================] [0s:0s]
[2024-06-04 15:41:33] INFO local-keymanager: Successfully imported validator key(s) publicKeys=<YOUR_VALIDATOR_PUBKEYS>
[2024-06-04 15:41:33] INFO accounts: Imported accounts <YOUR_VALIDATOR_PUBKEYS>, view all of them by running `accounts list`

Run Your Validator Client

Run the validator client to start the validator on your node, like the following:

$ validator --wallet-dir=<path/to/your/wallet/directory> --suggested-fee-recipient=<YOUR_WALLET_ADDRESS>

--suggested-fee-recipient will allow you to earn block priority fees. If --suggested-fee-recipient is set on neither the validator client nor the beacon node, the corresponding fees will be sent to the burn address and lost forever.

- + \ No newline at end of file diff --git a/operators/run-a-node.html b/operators/run-a-node.html index 85f706e..654568e 100644 --- a/operators/run-a-node.html +++ b/operators/run-a-node.html @@ -4,13 +4,13 @@ Run a Node | OverProtocol Docs - +

Run a Node

Join the Network Revolution 🚀

Running an OverProtocol node isn’t just about contributing to a blockchain—it’s about becoming part of a decentralized movement that’s reshaping the future of technology. Whether you’re a beginner or a seasoned tech enthusiast, this guide will help you set up your node and start making an impact.


Ready, Set, Node! 🖥️

Running an OverProtocol node is your gateway to joining a decentralized, secure, and inclusive blockchain network. Choose the setup method that works best for you, whether you're a beginner looking for simplicity or an expert seeking full control.

Start with OverScape (Beginner-Friendly 🌟)

OverScape is designed to make node setup as easy as possible—perfect for first-timers or those who prefer a no-fuss experience.

  1. Download OverScape

    Head over to the OverScape website to get the installation package.

  2. Install the Software

    Follow the on-screen instructions to install OverScape. This includes agreeing to terms, selecting an installation directory, and setting up your firewall to allow OverScape to connect to the network.

  3. Launch and Sync

    Open OverScape, and it will automatically start syncing with the OverProtocol blockchain, downloading necessary data.

  4. Configure Your Node

    Adjust basic settings through the intuitive interface. Advanced settings are also available for more customization if needed.

Build from Source (Advanced 🛠️)

info

If you're familiar with Docker and want to set up your own node using it, simply visit this page.

For experienced users, building the node software from source provides maximum flexibility and customization.

  1. Clone the Repositories

    Access the official OverProtocol GitHub repository and clone the following:

    • Kairos: Execution client for processing transactions and maintaining the blockchain state.
    • Chronos: Consensus client for achieving network consensus.
  2. Compile the Source Code

    Navigate to the cloned directory in your command line tool and run the build commands specified in the build documentation.

  3. Configure Your Node

    After building, configure your node’s settings, including network options and security measures. This may involve editing configuration files manually.

    info

    If you are planning to run a validator, it is strongly advised to use the --suggested-fee-recipient=<WALLET ADDRESS> option. When your validator proposes a block, it will allow you to earn block priority fees, also sometimes called "tips".

  4. Run the Node

    Execute the node software. You might need to use command line options to start it with specific parameters tailored to your needs.

Run with Binaries (Intermediate ⚙️)

Precompiled binaries provide a balance between ease of use and flexibility, allowing you to set up your node quickly without compiling from source.

  1. Prepare Your Environment

    Create a directory named overprotocol with two subfolders: execution and consensus.

    overprotocol
    ├── consensus
    └── execution
  2. Download Binaries

    Select the execution client and the consensus client binary zip files for your operating system from the links below, download them to your local machine, and extract them into the corresponding directories above.

    Operating SystemKairos (Execution Client)Chronos (Consensus Client)
    Linux x64DownloadDownload
    MacOS X (Apple)DownloadDownload
    MacOS X (Intel)DownloadDownload
    WindowsDownloadDownload

    Then your binary directory structure should look like this:

    overprotocol
    ├── consensus
    │   ├── beacon-chain
    │   ├── prysmctl
    │   └── validator
    └── execution
    ├── bootnode
    └── geth
  3. Run the Execution Client

    Navigate to the execution folder and start the client:

    mkdir data
    ./geth --datadir=./data
  4. Run the Consensus Client

    There are two main ways to sync a consensus node: from genesis, and from a checkpoint. It is considerably faster to sync from a checkpoint. In the following examples, we'll use the checkpoint sync endpoint provided by Over Foundation.

    In the consensus folder, start the client using a checkpoint sync for faster setup:

    mkdir data
    ./beacon-chain --datadir=./data --jwt-secret ../execution/data/geth/jwtsecret --checkpoint-sync-url="https://mainnet-checkpoint.over.network"

    Syncing from a checkpoint usually takes a couple of minutes.

    If you wish to sync from genesis, you need to remove --checkpoint-sync-url flag from the previous command. Syncing from genesis usually takes a couple of hours, but it can take longer depending on your network and hardware specs.

    info

    If you are planning to run a validator, it is strongly advised to use the --suggested-fee-recipient=<WALLET ADDRESS> option. When your validator proposes a block, it will allow you to earn block priority fees, also sometimes called "tips".

    Congratulations! You’re now running a full OverProtocol node.


Choose the Right Method for You

  • New to Nodes? 👉 Start with OverScape for a simple, guided experience.
  • Want Control? 👉 Use binaries to customize your setup while saving time.
  • Advanced User? 👉 Build from source to fully optimize and personalize your node.

Once your node is running and synced, check out how to stake and validate to take the next step in securing the OverProtocol network.


Network Configurations 🛜

KeyValue
NetworkOverProtocol Mainnet
RPC URLhttps://rpc.overprotocol.com/
Chain ID54176
Currency symbolOVER
Block Explorer URLhttps://scan.over.network/

Port and Firewall Configurations 🧱

Proper port and firewall configurations are essential to ensure your OverProtocol node operates smoothly and maintains reliable connections with peers. If you’ve followed the default setup instructions, your node will use the following network settings. Incorrect firewall settings are a common cause of connection issues, so double-check these configurations for optimal performance.

Port/protocolFirewall ruleReason/caveats
8545/TCPBlock all traffic.This is the JSON-RPC port for your execution node's Query API. You (and apps) can use this port to check execution node status, query execution-layer chain data, and even submit transactions. This port generally shouldn't be exposed to the outside world.
3500/TCPBlock all traffic.This is the JSON-RPC port for your beacon node's Query API. You (and apps) can use this port to check beacon node status and query consensus-layer chain data. This port generally shouldn't be exposed to the outside world.
8551/TCPBlock all traffic.Your beacon node connects to your execution node's Engine API using this port. Inbound and outbound traffic should be allowed through this port only if your local beacon node is connecting to a remote execution node.
4000/TCPBlock all traffic.Your validator uses this port to connect to your beacon node via gRPC. Inbound and outbound traffic should be allowed through this port only if your local validator is connecting to a remote beacon node.
*/UDP+TCPAllow outbound traffic.To discover peers, Chronos' beacon node dials out through random ports. Allowing outbound TCP/UDP traffic from any port will help Chronos find peers.
13000/TCPAllow inbound and outbound traffic.After we discover peers, we dial them through this port to establish an ongoing connection for libp2p and through which all gossip/p2p request and responses will flow.
12000/UDPAllow inbound and outbound traffic.Your beacon node exposes this UDP port so that other Over nodes can discover your node, request chain data, and provide chain data.
30303/TCP+UDPAllow inbound and outbound traffic.30303/TCP is your execution node's listener port, while 30303/UDP is its discovery port. This rule lets your execution node connect to other peers.

By following these guidelines, you’ll ensure your OverProtocol node runs securely and efficiently. If you encounter any issues, check the firewall configuration first or reach out to the OverProtocol community for support.


Node Types in OverProtocol

OverProtocol offers multiple node types, each serving specific roles within the network. Understanding these types will help you decide how to best participate based on your goals and resources.

1. Full Nodes

Full nodes are the backbone of the network, used primarily for querying data and interacting with the blockchain. They:

  • Maintain essential blockchain data to validate new transactions and blocks.
  • Ensure the network remains decentralized and accessible.
  • Are the default setup for most users when creating a node.

Ideal for:

  • Users looking for a straightforward way to support the network.
  • Those interested in accessing blockchain data or running decentralized applications (dApps).

2. Archive Nodes

Archive nodes store the entire state of the blockchain from its genesis, preserving every transaction and historical state. These nodes:

  • Require significant disk space and computational power.
  • Are typically used for advanced use cases like deep blockchain analytics and historical data retrieval.

Ideal for:

  • Developers and researchers needing access to full historical data.
  • Enterprises requiring comprehensive blockchain records.

3. Validator Nodes

Validator nodes are critical for securing the network and maintaining its integrity. They:

  • Propose and vote on blocks as part of OverProtocol’s consensus mechanism.
  • Require staking of OVER tokens to participate in validation and earn rewards.
  • Play a pivotal role in decentralization and trust within the network.

Ideal for:

  • Users committed to actively securing the network.
  • Those with resources to stake OVER and operate a high-uptime node.

Choosing the Right Node Type

Each node type contributes to OverProtocol in unique ways and comes with different levels of responsibility and resource requirements. Consider:

  • Full Nodes: For general network participation and dApp interaction.
  • Archive Nodes: For advanced analytics and historical data needs.
  • Validator Nodes: For securing the blockchain and earning rewards.

By selecting the node type that aligns with your goals, you’ll play an essential role in supporting and engaging with the OverProtocol ecosystem.

Synchronization Modes

The synchronization process is crucial for ensuring that your OverProtocol node stays up-to-date with the latest blockchain state. This involves downloading data from peers, verifying its integrity, and building a local blockchain database. Given OverProtocol’s layered architecture, each layer employs distinct strategies to manage data effectively.

These modes offer different trade-offs between speed, disk usage, bandwidth, and security. The choice of sync mode depends on your hardware, network capacity, and operational goals.


Execution Layer Sync Modes

In the execution layer, there are two primary synchronization modes to become a full node: Full Sync and Snap Sync. OverScape users can easily select the execution sync modes, upon blockchain data download.

Full Sync:

  • This mode involves downloading all blocks, including headers, transactions, and receipts, from the genesis block onward.
  • It generates the state of the blockchain incrementally by executing every transaction.
  • This method minimizes trust as it verifies every transaction independently, providing the highest level of security.
  • Due to the comprehensive nature of the data processing involved, this sync can take days, depending on the number of transactions in the blockchain’s history.
$ geth --syncmode full

Snap Sync:

  • This mode starts from a more recent "trusted" checkpoint rather than the genesis block.
  • This mode leverages periodic snapshots of the blockchain state, allowing the node to regenerate necessary state data on demand rather than maintaining a complete historical state database.
  • It is the fastest synchronization strategy and is the default setting on networks.
  • This mode significantly reduces disk usage and network bandwidth requirements.
$ geth --syncmode snap

Becoming an Archive Node:

There is an option to become an archive node. Currently, OverScape users cannot become archive nodes. Those running the client software directly can become an archive node by running the execution client with the following flag:

$ geth --gcmode archive

If the combination is geth --syncmode full --gcmode archive, then all blockchain data from the genesis block is written to the database. If the combination is geth --syncmode snap --gcmode archive, then only the blockchain data from the trusted checkpoint onward is written to the database.

Consensus Layer Sync Modes

There are two ways to sync the consensus layer: full sync and checkpoint sync. OverScape users can only sync the consensus layer through checkpoint sync, as it is set by default. Those running the Chronos client software directly can choose between the two sync modes.

Full sync:

  • This sync mode downloads beacon chain data whenever the node's head is behind that of its peers.
  • When bootstrapping a node, it has no beacon chain data, so it downloads all the beacon chain data starting from the genesis.

Checkpoint sync (Full sync from checkpoint):

  • This mode enhances the user experience by allowing the consensus layer to sync from a recent weak subjectivity checkpoint instead of from the genesis block.
  • This approach drastically reduces the full sync time.
  • The source of the checkpoint data is crucial and should be chosen with care, as the node will inherently trust the third party providing the data.
  • Append the following flag to enable checkpoint sync:
$ beacon-chain --checkpoint-sync-url value

Choosing the Right Mode

Sync ModeSpeedDisk UsageSecurityRecommended For
Full Sync (Execution)SlowHighHighestSecurity-focused users.
Snap Sync (Execution)FastModerateHighGeneral users and OverScape.
Full Sync (Consensus)SlowHighHighestAdvanced users with specific needs.
Checkpoint Sync (Consensus)FastLowModerateDefault for quick setup.

What's Next

Once your node is up, running, and synced with the OverProtocol network, the next step is to take an active role in securing the blockchain by registering and operating validators. Validators play a critical role in the consensus process, contributing to the network’s security and stability.

Becoming a validator lets you directly contribute to OverProtocol’s decentralization while earning staking rewards. Let’s build the future of blockchain—together! 🌟

- + \ No newline at end of file diff --git a/operators/system-requirements.html b/operators/system-requirements.html index 98d137c..ce9e71b 100644 --- a/operators/system-requirements.html +++ b/operators/system-requirements.html @@ -4,13 +4,13 @@ System Requirements | OverProtocol Docs - +

System Requirements

This guide will help you select the right hardware and prepare the ideal environment for running an OverProtocol node. Whether you’re using existing equipment or planning to purchase something new, this resource simplifies the process.


1. Check Your Network Environment 📡

Before choosing hardware, ensure your network environment is ready to support a node:

  • Stable Connection: A consistent internet connection is crucial. Frequent disruptions can cause your node to fall behind in syncing with the blockchain.
  • Bandwidth: While OverProtocol doesn’t demand extremely high bandwidth, your connection should handle frequent data transfers seamlessly.
    • Recommended: 8 Mbps download speed or higher.
    • Ideal: 25 Mbps or higher for performant validators or heavy use cases.

Tip: A wired (Ethernet) connection is preferable over Wi-Fi for stability.

2. Minimum Hardware Requirements ✅

To participate as an OverProtocol node operator, your hardware should meet these baseline requirements. This ensures smooth operation and minimal interruptions:

  • CPU: Dual-core processor or higher
  • Memory: 8GB RAM
  • Storage: SSD with at least 50GB free space

This setup is sufficient for most node operators. However, if you plan to validate or run multiple workloads, a more powerful system will deliver better results.

3. What Happens With Better Hardware? 🌟

While the minimum requirements are enough to run a node, upgrading your hardware improves performance, reliability, and long-term stability. Here’s what better hardware brings:

  • Faster CPU: Reduces processing time for transactions and blockchain updates.
  • More RAM: Handles complex tasks and large datasets more efficiently.
  • Larger SSD: Allows you to store and manage more metadata and historical data.

Example of Recommended Specs:

  • CPU: Quad-core processor or higher
  • Memory: 16GB RAM or more
  • Storage: SSD with 128GB or more free space

4. Preparing Your Node Environment 🌐

After ensuring your hardware meets the requirements, optimize your setup for long-term performance:

  • Dedicated Machine: If possible, use a machine specifically for your node to avoid interference from other applications.
  • Uninterruptible Power Supply (UPS): Protect against power outages to ensure your node stays online.
  • Cooling: Keep your hardware in a well-ventilated area to prevent overheating during 24/7 operation.
  • Energy Efficiency: Choose power-efficient components to reduce operational costs and environmental impact.

5. Considering a Cloud Server? ☁️

If you prefer remote management or lack physical space, a cloud server can be a great option. Choose a service that offers:

  • Customizable plans: So you can upgrade resources as needed.
  • Geographical diversity: To align with decentralization goals.
  • Reliability: Ensure the provider has a strong uptime record.

🌟 Popular Cloud Options:

  • AWS, Google Cloud, or Azure for advanced control.
  • Smaller providers for cost-efficient solutions (e.g., DigitalOcean, Vultr).

Get Started Today!

Choosing the right computer is the first step to becoming an OverProtocol node operator. Once your setup is ready:

Running a node is easier than you think—with the right computer, you’re ready to make an impact! 🌟

- + \ No newline at end of file