diff --git a/404.html b/404.html index 548cdc73..96b35b17 100644 --- a/404.html +++ b/404.html @@ -5,13 +5,13 @@ Page Not Found | GraphOps Docs - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/assets/js/8ef6d289.4519e42a.js b/assets/js/8ef6d289.4519e42a.js deleted file mode 100644 index 25ec4a9c..00000000 --- a/assets/js/8ef6d289.4519e42a.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[4939],{3905:(e,t,a)=>{a.d(t,{Zo:()=>l,kt:()=>m});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),c=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},l=function(e){var t=c(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,l=s(e,["components","mdxType","originalType","parentName"]),u=c(a),h=r,m=u["".concat(p,".").concat(h)]||u[h]||d[h]||o;return a?n.createElement(m,i(i({ref:t},l),{},{components:a})):n.createElement(m,i({ref:t},l))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=h;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s[u]="string"==typeof e?e:r,i[1]=s;for(var c=2;c{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>s,toc:()=>c});var n=a(7462),r=(a(7294),a(3905));const o={sidebar_position:1},i="Launchpad Documentation",s={unversionedId:"launchpad/docs-map",id:"launchpad/docs-map",title:"Launchpad Documentation",description:"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults. Suitable for environments ranging from a single node cluster to large scale multi-region clusters. Launchpad is also comprised of an opinionated set of tools that run on your local machine, that are layered to offer a declarative workflow for managing your deployment stack. 
Key components of Launchpad include the Launchpad Starter (graphops/launchpad-starter), which serves as the initial setup point for new deployments; Launchpad Charts (graphops/launchpad-charts), a collection of Helm Charts for blockchains and web3 applications; and Launchpad Namespaces (graphops/launchpad-namespaces), which are preconfigured Kubernetes Namespaces that utilize Helmfile for enhanced management.",source:"@site/docs/launchpad/docs-map.md",sourceDirName:"launchpad",slug:"/launchpad/docs-map",permalink:"/launchpad/docs-map",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/docs-map.md",tags:[],version:"current",sidebarPosition:1,frontMatter:{sidebar_position:1},sidebar:"launchpadSidebar",next:{title:"Introduction",permalink:"/launchpad/intro"}},p={},c=[{value:"First steps",id:"first-steps",level:2},{value:"Getting help",id:"getting-help",level:2},{value:"Getting Involved",id:"getting-involved",level:2}],l={toc:c},u="wrapper";function d(e){let{components:t,...a}=e;return(0,r.kt)(u,(0,n.Z)({},l,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"launchpad-documentation"},"Launchpad Documentation"),(0,r.kt)("p",null,"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults. Suitable for environments ranging from a single node cluster to large scale multi-region clusters. Launchpad is also comprised of an opinionated set of tools that run on your local machine, that are layered to offer a declarative workflow for managing your deployment stack. Key components of Launchpad include the Launchpad Starter (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-starter")),"), which serves as the initial setup point for new deployments; Launchpad Charts (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts")),"), a collection of Helm Charts for blockchains and web3 applications; and Launchpad Namespaces (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")),"), which are preconfigured Kubernetes Namespaces that utilize Helmfile for enhanced management."),(0,r.kt)("p",null,"Here's a guide to help you navigate this documentation based on the information you're seeking:"),(0,r.kt)("h2",{id:"first-steps"},"First steps"),(0,r.kt)("p",null,"Are you new to Launchpad or to Kubernetes? 
Here's a high-level overview of how this documentation is organised, to help you know where to look for the information you need:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"From Scratch")," - this includes overview of the project, details on the project modularity and how you can leverage features of Launchpad as best suits your needs: ",(0,r.kt)("a",{parentName:"li",href:"intro"},"Launchpad Introduction")," | ",(0,r.kt)("a",{parentName:"li",href:"modularity"},"Opt in features")," | ",(0,r.kt)("a",{parentName:"li",href:"client-side-tooling"},"Installation")," "),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Project Maintenance")," - find out how often new versions of the project are released and how GraphOps decides what namespaces and chains to support: ",(0,r.kt)("a",{parentName:"li",href:"release-channels"},"Release Channels")," | ",(0,r.kt)("a",{parentName:"li",href:"supported-namespaces"},"Supported Namespaces")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Tutorials")," - guides to get you started with Launchpad, everything you need to deploy different blockchain nodes and guides on what you need for highly available postgresql and monitoring: ",(0,r.kt)("a",{parentName:"li",href:"quick-start"},"Getting Started")," | ",(0,r.kt)("a",{parentName:"li",href:"tutorials/arbitrum-archive-kubernetes-guide"},"Arbitrum-One Guide")," | ",(0,r.kt)("a",{parentName:"li",href:"tutorials/celo-archive-kubernetes-guide"},"Celo-Mainnet Guide")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/tutorials/postgresql_ha"},"PostgreSQL HA")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/tutorials/monitoring-stack-with-HA"},"Monitoring Stack HA")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Advanced Tutorials")," - want to start with Launchpad but you don't yet have a Kubernetes Cluster? Find out how to create one: ",(0,r.kt)("a",{parentName:"li",href:"advanced-tutorials/kubernetes-create-cluster-with-kubeadm"},"Creating a Kubernetes Cluster with kubeadm")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/advanced-tutorials/advanced-kubernetes"},"Creating a Kubernetes Cluster with FCOS")," ")),(0,r.kt)("h2",{id:"getting-help"},"Getting help"),(0,r.kt)("p",null,"Having trouble? We'd like to help!"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"The ",(0,r.kt)("a",{parentName:"li",href:"faq"},"FAQ")," page has answers to many common questions."),(0,r.kt)("li",{parentName:"ul"},"Not found something you need? See ",(0,r.kt)("a",{parentName:"li",href:"faq#need-more-help"},"FAQ: Need more help")," for information on getting support and asking questions."),(0,r.kt)("li",{parentName:"ul"},"Request new features, check out existing bugs or report new ones in ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/issues"},"Launchpad Charts")," and ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces/issues"},"Launchpad Namespaces"))),(0,r.kt)("h2",{id:"getting-involved"},"Getting Involved"),(0,r.kt)("p",null,"Launchpad is a collaborative effort to create the best UX for Graph Protocol Indexers on Kubernetes. As such contributors are highly appreciated and welcome. 
Visit the github repos' guidance to contribute code to ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-charts/blob/main/CONTRIBUTING.md"},"Launchpad Charts")," or ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces/blob/main/CONTRIBUTING.md"},"Launchpad Namespaces")),(0,r.kt)("p",null,"You can also get involved by simply attending our biweekly ",(0,r.kt)("a",{parentName:"p",href:"https://discord.com/events/438038660412342282/1229824398127534130"},"Launchpad Office Hours(LOH)")," community call on discord. You can access previous LOH recordings ",(0,r.kt)("a",{parentName:"p",href:"https://www.youtube.com/watch?v=qC5KbhD3urc&list=PLpbkfkwg_V6Ceidcn06VSP9BoU0g14voq"},"here"),"."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/8ef6d289.8028956f.js b/assets/js/8ef6d289.8028956f.js new file mode 100644 index 00000000..4a633e2b --- /dev/null +++ b/assets/js/8ef6d289.8028956f.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[4939],{3905:(e,t,a)=>{a.d(t,{Zo:()=>c,kt:()=>m});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var p=n.createContext({}),l=function(e){var t=n.useContext(p),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=l(e.components);return n.createElement(p.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,p=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),u=l(a),h=r,m=u["".concat(p,".").concat(h)]||u[h]||d[h]||o;return a?n.createElement(m,i(i({ref:t},c),{},{components:a})):n.createElement(m,i({ref:t},c))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=h;var s={};for(var p in t)hasOwnProperty.call(t,p)&&(s[p]=t[p]);s.originalType=e,s[u]="string"==typeof e?e:r,i[1]=s;for(var l=2;l{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>s,toc:()=>l});var n=a(7462),r=(a(7294),a(3905));const o={sidebar_position:1},i="Launchpad Documentation",s={unversionedId:"launchpad/docs-map",id:"launchpad/docs-map",title:"Launchpad Documentation",description:"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults.",source:"@site/docs/launchpad/docs-map.md",sourceDirName:"launchpad",slug:"/launchpad/docs-map",permalink:"/launchpad/docs-map",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/docs-map.md",tags:[],version:"current",sidebarPosition:1,frontMatter:{sidebar_position:1},sidebar:"launchpadSidebar",next:{title:"Introduction",permalink:"/launchpad/intro"}},p={},l=[{value:"First steps",id:"first-steps",level:2},{value:"Getting 
help",id:"getting-help",level:2},{value:"Getting Involved",id:"getting-involved",level:2}],c={toc:l},u="wrapper";function d(e){let{components:t,...a}=e;return(0,r.kt)(u,(0,n.Z)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"launchpad-documentation"},"Launchpad Documentation"),(0,r.kt)("p",null,"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults."),(0,r.kt)("p",null,"Launchpad is suitable for environments ranging from a single node cluster to large scale multi-region clusters. Launchpad is also comprised of an opinionated set of tools that run on your local machine, that are layered to offer a declarative workflow for managing your deployment stack."),(0,r.kt)("p",null,"Key components of Launchpad include the Launchpad Starter (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-starter")),"), which serves as the initial setup point for new deployments; Launchpad Charts (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts")),"), a collection of Helm Charts for blockchains and web3 applications; Launchpad Namespaces (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")),"), which are preconfigured Kubernetes Namespaces that utilize Helmfile for enhanced management; and Launchpad Taskfiles (",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-taskfiles"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-taskfiles")),"), a collection of Tasks defined with Taskfile."),(0,r.kt)("p",null,"Here's a guide to help you navigate this documentation based on the information you're seeking:"),(0,r.kt)("h2",{id:"first-steps"},"First steps"),(0,r.kt)("p",null,"Are you new to Launchpad or to Kubernetes? 
Here's a high-level overview of how this documentation is organised, to help you know where to look for the information you need:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"From Scratch")," - this includes an overview of the project, details on the project modularity and how you can leverage features of Launchpad as best suits your needs: ",(0,r.kt)("a",{parentName:"li",href:"intro"},"Launchpad Introduction")," | ",(0,r.kt)("a",{parentName:"li",href:"modularity"},"Opt in features")," | ",(0,r.kt)("a",{parentName:"li",href:"client-side-tooling"},"Installation")," "),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Project Maintenance")," - find out how often new versions of the project are released and how GraphOps decides what namespaces and chains to support: ",(0,r.kt)("a",{parentName:"li",href:"release-channels"},"Release Channels")," | ",(0,r.kt)("a",{parentName:"li",href:"supported-namespaces"},"Supported Namespaces")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Tutorials")," - guides to get you started with Launchpad, everything you need to deploy different blockchain nodes and guides on what you need for highly available PostgreSQL and monitoring: ",(0,r.kt)("a",{parentName:"li",href:"quick-start"},"Getting Started")," | ",(0,r.kt)("a",{parentName:"li",href:"tutorials/arbitrum-archive-kubernetes-guide"},"Arbitrum-One Guide")," | ",(0,r.kt)("a",{parentName:"li",href:"tutorials/celo-archive-kubernetes-guide"},"Celo-Mainnet Guide")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/tutorials/postgresql_ha"},"PostgreSQL HA")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/tutorials/monitoring-stack-with-HA"},"Monitoring Stack HA")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("strong",{parentName:"li"},"Advanced Tutorials")," - want to start with Launchpad but you don't yet have a Kubernetes Cluster? Find out how to create one: ",(0,r.kt)("a",{parentName:"li",href:"advanced-tutorials/kubernetes-create-cluster-with-kubeadm"},"Creating a Kubernetes Cluster with kubeadm")," | ",(0,r.kt)("a",{parentName:"li",href:"/launchpad/advanced-tutorials/advanced-kubernetes"},"Creating a Kubernetes Cluster with FCOS")," ")),(0,r.kt)("h2",{id:"getting-help"},"Getting help"),(0,r.kt)("p",null,"Having trouble? We'd like to help!"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"The ",(0,r.kt)("a",{parentName:"li",href:"faq"},"FAQ")," page has answers to many common questions."),(0,r.kt)("li",{parentName:"ul"},"Not found something you need? See ",(0,r.kt)("a",{parentName:"li",href:"faq#need-more-help"},"FAQ: Need more help")," for information on getting support and asking questions."),(0,r.kt)("li",{parentName:"ul"},"Request new features, check out existing bugs or report new ones in ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/issues"},"Launchpad Charts")," and ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces/issues"},"Launchpad Namespaces"))),(0,r.kt)("h2",{id:"getting-involved"},"Getting Involved"),(0,r.kt)("p",null,"Launchpad is a collaborative effort to create the best UX for Graph Protocol Indexers on Kubernetes. As such contributors are highly appreciated and welcome. 
Visit the GitHub repos' guidance to contribute code to ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-charts/blob/main/CONTRIBUTING.md"},"Launchpad Charts")," or ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces/blob/main/CONTRIBUTING.md"},"Launchpad Namespaces")),(0,r.kt)("p",null,"You can also get involved by simply attending our biweekly ",(0,r.kt)("a",{parentName:"p",href:"https://discord.com/events/438038660412342282/1229824398127534130"},"Launchpad Office Hours (LOH)")," community call on Discord. You can access previous LOH recordings ",(0,r.kt)("a",{parentName:"p",href:"https://www.youtube.com/watch?v=qC5KbhD3urc&list=PLpbkfkwg_V6Ceidcn06VSP9BoU0g14voq"},"here"),"."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/935f2afb.21b3d6bd.js b/assets/js/935f2afb.e3d5fd60.js similarity index 72% rename from assets/js/935f2afb.21b3d6bd.js rename to assets/js/935f2afb.e3d5fd60.js index ca22e95b..24589cd9 100644 --- a/assets/js/935f2afb.21b3d6bd.js +++ b/assets/js/935f2afb.e3d5fd60.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"launchpadSidebar":[{"type":"link","label":"Launchpad Documentation","href":"/launchpad/docs-map","docId":"launchpad/docs-map"},{"type":"link","label":"Introduction","href":"/launchpad/intro","docId":"launchpad/intro"},{"type":"link","label":"Modularity","href":"/launchpad/modularity","docId":"launchpad/modularity"},{"type":"link","label":"Prerequisites","href":"/launchpad/prerequisites","docId":"launchpad/prerequisites"},{"type":"link","label":"Quick Start","href":"/launchpad/quick-start","docId":"launchpad/quick-start"},{"type":"link","label":"Release Channels","href":"/launchpad/release-channels","docId":"launchpad/release-channels"},{"type":"link","label":"Supported Namespaces","href":"/launchpad/supported-namespaces","docId":"launchpad/supported-namespaces"},{"type":"link","label":"Design Principles","href":"/launchpad/design-principles","docId":"launchpad/design-principles"},{"type":"link","label":"Client Side Tooling","href":"/launchpad/client-side-tooling","docId":"launchpad/client-side-tooling"},{"type":"link","label":"Frequently Asked Questions (FAQs)","href":"/launchpad/faq","docId":"launchpad/faq"},{"type":"link","label":"Other Resources","href":"/launchpad/other-resources","docId":"launchpad/other-resources"},{"type":"category","label":"Guides","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Arbitrum Archive Mainnet Node Guide","href":"/launchpad/tutorials/arbitrum-archive-kubernetes-guide","docId":"launchpad/tutorials/arbitrum-archive-kubernetes-guide"},{"type":"link","label":"Celo Archive Mainnet Node Guide","href":"/launchpad/tutorials/celo-archive-kubernetes-guide","docId":"launchpad/tutorials/celo-archive-kubernetes-guide"},{"type":"link","label":"Deploying a Monitoring stack with HA","href":"/launchpad/tutorials/monitoring-stack-with-HA","docId":"launchpad/tutorials/monitoring-stack-with-HA"},{"type":"link","label":"Overview of High Availability in PostgreSQL","href":"/launchpad/tutorials/postgresql_ha","docId":"launchpad/tutorials/postgresql_ha"}]},{"type":"category","label":"Advanced Topics","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Considerations for Kubernetes 
installation using FCOS","href":"/launchpad/advanced-tutorials/advanced-kubernetes","docId":"launchpad/advanced-tutorials/advanced-kubernetes"},{"type":"link","label":"FCOS Installation","href":"/launchpad/advanced-tutorials/install-fcos","docId":"launchpad/advanced-tutorials/install-fcos"},{"type":"link","label":"Upgrading Kubernetes ClusterConfig with kubeadm","href":"/launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config","docId":"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config"},{"type":"link","label":"Upgrading Kubernetes with kubeadm","href":"/launchpad/advanced-tutorials/kubeadm-upgrade-nodes","docId":"launchpad/advanced-tutorials/kubeadm-upgrade-nodes"},{"type":"link","label":"Kubernetes Guide - Bootstrapping with Kubeadm","href":"/launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm","docId":"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm"}]}],"gnSidebar":[{"type":"link","label":"Introduction","href":"/graphcast/intro","docId":"graphcast/intro"},{"type":"link","label":"Design Principles","href":"/graphcast/design-principles","docId":"graphcast/design-principles"},{"type":"category","label":"Graphcast SDK","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Introduction","href":"/graphcast/sdk/intro","docId":"graphcast/sdk/intro"},{"type":"link","label":"Radio Development","href":"/graphcast/sdk/radio-dev","docId":"graphcast/sdk/radio-dev"},{"type":"link","label":"Registry Contract","href":"/graphcast/sdk/registry","docId":"graphcast/sdk/registry"}]},{"type":"category","label":"Radios","collapsible":true,"collapsed":false,"items":[{"type":"category","label":"Subgraph Radio","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Introduction","href":"/graphcast/radios/subgraph-radio/intro","docId":"graphcast/radios/subgraph-radio/intro"},{"type":"link","label":"POI Cross-checking","href":"/graphcast/radios/subgraph-radio/poi-cross-checking","docId":"graphcast/radios/subgraph-radio/poi-cross-checking"},{"type":"link","label":"Subgraph Upgrade Pre-syncing","href":"/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing","docId":"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing"},{"type":"link","label":"Notifications and Monitoring","href":"/graphcast/radios/subgraph-radio/monitoring","docId":"graphcast/radios/subgraph-radio/monitoring"},{"type":"link","label":"Advanced Configuration","href":"/graphcast/radios/subgraph-radio/advanced-configuration","docId":"graphcast/radios/subgraph-radio/advanced-configuration"},{"type":"link","label":"HTTP Server","href":"/graphcast/radios/subgraph-radio/http-server","docId":"graphcast/radios/subgraph-radio/http-server"}]},{"type":"link","label":"Graphcast CLI","href":"/graphcast/radios/graphcast-cli","docId":"graphcast/radios/graphcast-cli"},{"type":"link","label":"Listener Radio","href":"/graphcast/radios/listener-radio","docId":"graphcast/radios/listener-radio"}]}],"mipsSidebar":[{"type":"link","label":"Introduction","href":"/mips-resources/intro","docId":"mips-resources/intro"},{"type":"link","label":"MIPs FAQs","href":"/mips-resources/mips-faq","docId":"mips-resources/mips-faq"}]},"docs":{"graphcast/design-principles":{"id":"graphcast/design-principles","title":"Design Principles","description":"There are two main components of Graphcast","sidebar":"gnSidebar"},"graphcast/intro":{"id":"graphcast/intro","title":"Introduction","description":"Why Graphcast 
1","sidebar":"gnSidebar"},"graphcast/radios/graphcast-cli":{"id":"graphcast/radios/graphcast-cli","title":"Graphcast CLI","description":"The source code for the Graphcast CLI is available on GitHub.","sidebar":"gnSidebar"},"graphcast/radios/listener-radio":{"id":"graphcast/radios/listener-radio","title":"Listener Radio","description":"The source code for Listener Radio is available on GitHub and Docker builds are automatically published as GitHub Packages.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/advanced-configuration":{"id":"graphcast/radios/subgraph-radio/advanced-configuration","title":"Advanced Configuration","description":"In the configuration table below is the full list of environment variables you can set, along with example values.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/http-server":{"id":"graphcast/radios/subgraph-radio/http-server","title":"HTTP Server","description":"The Radio spins up an HTTP server with a GraphQL API when SERVERHOST and SERVERPORT environment variables are set. The supported routes are:","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/intro":{"id":"graphcast/radios/subgraph-radio/intro","title":"Introduction","description":"Subgraph Radio","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/monitoring":{"id":"graphcast/radios/subgraph-radio/monitoring","title":"Notifications and Monitoring","description":"Notifications","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/poi-cross-checking":{"id":"graphcast/radios/subgraph-radio/poi-cross-checking","title":"POI Cross-checking","description":"An essential aspect of earning indexing rewards as an Indexer is the generation of valid Proof of Indexing hashes (POIs). These POIs provide evidence of the Indexer\'s possession of correct data. Submitting invalid POIs could lead to a Dispute and possible slashing by the protocol. With Subgraph Radio\'s POI feature, Indexers gain confidence knowing that their POIs are continually cross-verified against those of other participating Indexers. Should there be a discrepancy in POIs, Subgraph Radio functions as an early warning system, alerting the Indexer within minutes.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing":{"id":"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing","title":"Subgraph Upgrade Pre-syncing","description":"The Subgraph Upgrade Pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. Subgraph Developers can use the Graphcast CLI to send a message to all Indexers, interested in the given subgraph.","sidebar":"gnSidebar"},"graphcast/sdk/intro":{"id":"graphcast/sdk/intro","title":"Introduction","description":"Graphcast SDK is a decentralized, distributed peer-to-peer (P2P) communication tool that enables users across the network to exchange information in real-time. It is designed to overcome the high cost of signaling or coordination between blockchain participants by enabling off-chain communication (gossip/cheap talk). This is particularly useful for applications where real-time communication is essential but the cost of on-chain transactions is prohibitive.","sidebar":"gnSidebar"},"graphcast/sdk/radio-dev":{"id":"graphcast/sdk/radio-dev","title":"Radio Development","description":"Do you want to build robust, peer-to-peer messaging apps that automatically exchange valuable data with other Indexers in real time? 
Do you have an idea for what data could be useful to share that could lead to greater communication efficiency in The Graph network as a whole? Then you want to build a Radio on top of the Graphcast network.","sidebar":"gnSidebar"},"graphcast/sdk/registry":{"id":"graphcast/sdk/registry","title":"Registry Contract","description":"The Graphcast Registry contracts allow an address to set a GraphcastID by calling setGraphcastID(indexeraddress, graphcastIDaddress) as either an Indexer or an Indexer operator, or calling setGraphcastID(graphcastID_address) as the Indexer address. The relationship between an Indexer address to its GraphcastID is limited to 1:1, and cannot be set to itself. This restriction provides consistency and security for the Indexer identity to operate on Graphcast as one GraphcastID operating across Radio applications. To learn more about the registry, you can check out the Github repository.","sidebar":"gnSidebar"},"launchpad/advanced-tutorials/advanced-kubernetes":{"id":"launchpad/advanced-tutorials/advanced-kubernetes","title":"Considerations for Kubernetes installation using FCOS","description":"This guide provides a general walkthrough for installing Kubernetes using Fedora CoreOS (FCOS) as the base operating system.","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/install-fcos":{"id":"launchpad/advanced-tutorials/install-fcos","title":"FCOS Installation","description":"Fedora CoreOS (FCOS) is an open-source container-focused operating system that is:","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config":{"id":"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config","title":"Upgrading Kubernetes ClusterConfig with kubeadm","description":"When managing a Kubernetes cluster with kubeadm, there could be scenarios where you need to update the ClusterConfiguration independently of performing version upgrades. This guide walks you through those steps.","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubeadm-upgrade-nodes":{"id":"launchpad/advanced-tutorials/kubeadm-upgrade-nodes","title":"Upgrading Kubernetes with kubeadm","description":"In this guide we will use as an example upgrading from kubernetes v1.28.1 to v1.28.3","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm":{"id":"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm","title":"Kubernetes Guide - Bootstrapping with Kubeadm","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/client-side-tooling":{"id":"launchpad/client-side-tooling","title":"Client Side Tooling","description":"Launchpad comes with an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your cluster software stack.","sidebar":"launchpadSidebar"},"launchpad/design-principles":{"id":"launchpad/design-principles","title":"Design Principles","description":"Design Principles","sidebar":"launchpadSidebar"},"launchpad/docs-map":{"id":"launchpad/docs-map","title":"Launchpad Documentation","description":"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults. Suitable for environments ranging from a single node cluster to large scale multi-region clusters. 
Launchpad is also comprised of an opinionated set of tools that run on your local machine, that are layered to offer a declarative workflow for managing your deployment stack. Key components of Launchpad include the Launchpad Starter (graphops/launchpad-starter), which serves as the initial setup point for new deployments; Launchpad Charts (graphops/launchpad-charts), a collection of Helm Charts for blockchains and web3 applications; and Launchpad Namespaces (graphops/launchpad-namespaces), which are preconfigured Kubernetes Namespaces that utilize Helmfile for enhanced management.","sidebar":"launchpadSidebar"},"launchpad/faq":{"id":"launchpad/faq","title":"Frequently Asked Questions (FAQs)","description":"Here are answers to some commonly asked questions. If you have a question that is not covered here, feel free to ask.","sidebar":"launchpadSidebar"},"launchpad/intro":{"id":"launchpad/intro","title":"Introduction","description":"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack.","sidebar":"launchpadSidebar"},"launchpad/modularity":{"id":"launchpad/modularity","title":"Modularity","description":"The full Launchpad stack contains:","sidebar":"launchpadSidebar"},"launchpad/other-resources":{"id":"launchpad/other-resources","title":"Other Resources","description":"Kubernetes","sidebar":"launchpadSidebar"},"launchpad/prerequisites":{"id":"launchpad/prerequisites","title":"Prerequisites","description":"You will need some things to use Launchpad for your infrastructure:","sidebar":"launchpadSidebar"},"launchpad/quick-start":{"id":"launchpad/quick-start","title":"Quick Start","description":"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. Checkout this page for more info about the modularity of Launchpad.","sidebar":"launchpadSidebar"},"launchpad/release-channels":{"id":"launchpad/release-channels","title":"Release Channels","description":"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.","sidebar":"launchpadSidebar"},"launchpad/supported-namespaces":{"id":"launchpad/supported-namespaces","title":"Supported Namespaces","description":"Launchpad includes a number of prepackaged Kubernetes namespaces (see Launchpad Namespaces repo), which in turn reference Helm Charts in the Launchpad Charts repository, as well as third-party Charts. 
GraphOps maintains support for these namespaces, meaning that we:","sidebar":"launchpadSidebar"},"launchpad/tutorials/arbitrum-archive-kubernetes-guide":{"id":"launchpad/tutorials/arbitrum-archive-kubernetes-guide","title":"Arbitrum Archive Mainnet Node Guide","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/tutorials/celo-archive-kubernetes-guide":{"id":"launchpad/tutorials/celo-archive-kubernetes-guide","title":"Celo Archive Mainnet Node Guide","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/tutorials/monitoring-stack-with-HA":{"id":"launchpad/tutorials/monitoring-stack-with-HA","title":"Deploying a Monitoring stack with HA","description":"Prerequisites","sidebar":"launchpadSidebar"},"launchpad/tutorials/postgresql_ha":{"id":"launchpad/tutorials/postgresql_ha","title":"Overview of High Availability in PostgreSQL","description":"One of the prerequisites of running an indexer stack is currently using PostgreSQL as a database for storing indexer metadata and subgraph data. To ensure redundancy of data and operations and enable systems to continue functioning despite individual component failures we want to account for the following areas as they relate to running PostgreSQL:","sidebar":"launchpadSidebar"},"mips-resources/intro":{"id":"mips-resources/intro","title":"Introduction","description":"It\'s an exciting time to be participating in The Graph ecosystem! During Graph Day 2022 Yaniv Tal announced the sunsetting of the hosted service, a moment The Graph ecosystem has been working towards for many years.","sidebar":"mipsSidebar"},"mips-resources/mips-faq":{"id":"mips-resources/mips-faq","title":"MIPs FAQs","description":"desc","sidebar":"mipsSidebar"}}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"launchpadSidebar":[{"type":"link","label":"Launchpad Documentation","href":"/launchpad/docs-map","docId":"launchpad/docs-map"},{"type":"link","label":"Introduction","href":"/launchpad/intro","docId":"launchpad/intro"},{"type":"link","label":"Modularity","href":"/launchpad/modularity","docId":"launchpad/modularity"},{"type":"link","label":"Prerequisites","href":"/launchpad/prerequisites","docId":"launchpad/prerequisites"},{"type":"link","label":"Quick Start","href":"/launchpad/quick-start","docId":"launchpad/quick-start"},{"type":"link","label":"Release Channels","href":"/launchpad/release-channels","docId":"launchpad/release-channels"},{"type":"link","label":"Supported Namespaces","href":"/launchpad/supported-namespaces","docId":"launchpad/supported-namespaces"},{"type":"link","label":"Design Principles","href":"/launchpad/design-principles","docId":"launchpad/design-principles"},{"type":"link","label":"Client Side Tooling","href":"/launchpad/client-side-tooling","docId":"launchpad/client-side-tooling"},{"type":"link","label":"Frequently Asked Questions (FAQs)","href":"/launchpad/faq","docId":"launchpad/faq"},{"type":"link","label":"Other Resources","href":"/launchpad/other-resources","docId":"launchpad/other-resources"},{"type":"category","label":"Guides","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Arbitrum Archive Mainnet Node 
Guide","href":"/launchpad/tutorials/arbitrum-archive-kubernetes-guide","docId":"launchpad/tutorials/arbitrum-archive-kubernetes-guide"},{"type":"link","label":"Celo Archive Mainnet Node Guide","href":"/launchpad/tutorials/celo-archive-kubernetes-guide","docId":"launchpad/tutorials/celo-archive-kubernetes-guide"},{"type":"link","label":"Deploying a Monitoring stack with HA","href":"/launchpad/tutorials/monitoring-stack-with-HA","docId":"launchpad/tutorials/monitoring-stack-with-HA"},{"type":"link","label":"Overview of High Availability in PostgreSQL","href":"/launchpad/tutorials/postgresql_ha","docId":"launchpad/tutorials/postgresql_ha"}]},{"type":"category","label":"Advanced Topics","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Considerations for Kubernetes installation using FCOS","href":"/launchpad/advanced-tutorials/advanced-kubernetes","docId":"launchpad/advanced-tutorials/advanced-kubernetes"},{"type":"link","label":"FCOS Installation","href":"/launchpad/advanced-tutorials/install-fcos","docId":"launchpad/advanced-tutorials/install-fcos"},{"type":"link","label":"Upgrading Kubernetes ClusterConfig with kubeadm","href":"/launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config","docId":"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config"},{"type":"link","label":"Upgrading Kubernetes with kubeadm","href":"/launchpad/advanced-tutorials/kubeadm-upgrade-nodes","docId":"launchpad/advanced-tutorials/kubeadm-upgrade-nodes"},{"type":"link","label":"Kubernetes Guide - Bootstrapping with Kubeadm","href":"/launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm","docId":"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm"}]}],"gnSidebar":[{"type":"link","label":"Introduction","href":"/graphcast/intro","docId":"graphcast/intro"},{"type":"link","label":"Design Principles","href":"/graphcast/design-principles","docId":"graphcast/design-principles"},{"type":"category","label":"Graphcast SDK","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Introduction","href":"/graphcast/sdk/intro","docId":"graphcast/sdk/intro"},{"type":"link","label":"Radio Development","href":"/graphcast/sdk/radio-dev","docId":"graphcast/sdk/radio-dev"},{"type":"link","label":"Registry Contract","href":"/graphcast/sdk/registry","docId":"graphcast/sdk/registry"}]},{"type":"category","label":"Radios","collapsible":true,"collapsed":false,"items":[{"type":"category","label":"Subgraph Radio","collapsible":true,"collapsed":false,"items":[{"type":"link","label":"Introduction","href":"/graphcast/radios/subgraph-radio/intro","docId":"graphcast/radios/subgraph-radio/intro"},{"type":"link","label":"POI Cross-checking","href":"/graphcast/radios/subgraph-radio/poi-cross-checking","docId":"graphcast/radios/subgraph-radio/poi-cross-checking"},{"type":"link","label":"Subgraph Upgrade Pre-syncing","href":"/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing","docId":"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing"},{"type":"link","label":"Notifications and Monitoring","href":"/graphcast/radios/subgraph-radio/monitoring","docId":"graphcast/radios/subgraph-radio/monitoring"},{"type":"link","label":"Advanced Configuration","href":"/graphcast/radios/subgraph-radio/advanced-configuration","docId":"graphcast/radios/subgraph-radio/advanced-configuration"},{"type":"link","label":"HTTP Server","href":"/graphcast/radios/subgraph-radio/http-server","docId":"graphcast/radios/subgraph-radio/http-server"}]},{"type":"link","label":"Graphcast 
CLI","href":"/graphcast/radios/graphcast-cli","docId":"graphcast/radios/graphcast-cli"},{"type":"link","label":"Listener Radio","href":"/graphcast/radios/listener-radio","docId":"graphcast/radios/listener-radio"}]}],"mipsSidebar":[{"type":"link","label":"Introduction","href":"/mips-resources/intro","docId":"mips-resources/intro"},{"type":"link","label":"MIPs FAQs","href":"/mips-resources/mips-faq","docId":"mips-resources/mips-faq"}]},"docs":{"graphcast/design-principles":{"id":"graphcast/design-principles","title":"Design Principles","description":"There are two main components of Graphcast","sidebar":"gnSidebar"},"graphcast/intro":{"id":"graphcast/intro","title":"Introduction","description":"Why Graphcast 1","sidebar":"gnSidebar"},"graphcast/radios/graphcast-cli":{"id":"graphcast/radios/graphcast-cli","title":"Graphcast CLI","description":"The source code for the Graphcast CLI is available on GitHub.","sidebar":"gnSidebar"},"graphcast/radios/listener-radio":{"id":"graphcast/radios/listener-radio","title":"Listener Radio","description":"The source code for Listener Radio is available on GitHub and Docker builds are automatically published as GitHub Packages.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/advanced-configuration":{"id":"graphcast/radios/subgraph-radio/advanced-configuration","title":"Advanced Configuration","description":"In the configuration table below is the full list of environment variables you can set, along with example values.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/http-server":{"id":"graphcast/radios/subgraph-radio/http-server","title":"HTTP Server","description":"The Radio spins up an HTTP server with a GraphQL API when SERVERHOST and SERVERPORT environment variables are set. The supported routes are:","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/intro":{"id":"graphcast/radios/subgraph-radio/intro","title":"Introduction","description":"Subgraph Radio","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/monitoring":{"id":"graphcast/radios/subgraph-radio/monitoring","title":"Notifications and Monitoring","description":"Notifications","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/poi-cross-checking":{"id":"graphcast/radios/subgraph-radio/poi-cross-checking","title":"POI Cross-checking","description":"An essential aspect of earning indexing rewards as an Indexer is the generation of valid Proof of Indexing hashes (POIs). These POIs provide evidence of the Indexer\'s possession of correct data. Submitting invalid POIs could lead to a Dispute and possible slashing by the protocol. With Subgraph Radio\'s POI feature, Indexers gain confidence knowing that their POIs are continually cross-verified against those of other participating Indexers. Should there be a discrepancy in POIs, Subgraph Radio functions as an early warning system, alerting the Indexer within minutes.","sidebar":"gnSidebar"},"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing":{"id":"graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing","title":"Subgraph Upgrade Pre-syncing","description":"The Subgraph Upgrade Pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. 
Subgraph Developers can use the Graphcast CLI to send a message to all Indexers interested in the given subgraph.","sidebar":"gnSidebar"},"graphcast/sdk/intro":{"id":"graphcast/sdk/intro","title":"Introduction","description":"Graphcast SDK is a decentralized, distributed peer-to-peer (P2P) communication tool that enables users across the network to exchange information in real-time. It is designed to overcome the high cost of signaling or coordination between blockchain participants by enabling off-chain communication (gossip/cheap talk). This is particularly useful for applications where real-time communication is essential but the cost of on-chain transactions is prohibitive.","sidebar":"gnSidebar"},"graphcast/sdk/radio-dev":{"id":"graphcast/sdk/radio-dev","title":"Radio Development","description":"Do you want to build robust, peer-to-peer messaging apps that automatically exchange valuable data with other Indexers in real time? Do you have an idea for what data could be useful to share that could lead to greater communication efficiency in The Graph network as a whole? Then you want to build a Radio on top of the Graphcast network.","sidebar":"gnSidebar"},"graphcast/sdk/registry":{"id":"graphcast/sdk/registry","title":"Registry Contract","description":"The Graphcast Registry contracts allow an address to set a GraphcastID by calling setGraphcastID(indexeraddress, graphcastIDaddress) as either an Indexer or an Indexer operator, or calling setGraphcastID(graphcastID_address) as the Indexer address. The relationship between an Indexer address to its GraphcastID is limited to 1:1, and cannot be set to itself. This restriction provides consistency and security for the Indexer identity to operate on Graphcast as one GraphcastID operating across Radio applications. To learn more about the registry, you can check out the GitHub repository.","sidebar":"gnSidebar"},"launchpad/advanced-tutorials/advanced-kubernetes":{"id":"launchpad/advanced-tutorials/advanced-kubernetes","title":"Considerations for Kubernetes installation using FCOS","description":"This guide provides a general walkthrough for installing Kubernetes using Fedora CoreOS (FCOS) as the base operating system.","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/install-fcos":{"id":"launchpad/advanced-tutorials/install-fcos","title":"FCOS Installation","description":"Fedora CoreOS (FCOS) is an open-source container-focused operating system that is:","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config":{"id":"launchpad/advanced-tutorials/kubeadm-upgrade-cluster-config","title":"Upgrading Kubernetes ClusterConfig with kubeadm","description":"When managing a Kubernetes cluster with kubeadm, there could be scenarios where you need to update the ClusterConfiguration independently of performing version upgrades. 
This guide walks you through those steps.","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubeadm-upgrade-nodes":{"id":"launchpad/advanced-tutorials/kubeadm-upgrade-nodes","title":"Upgrading Kubernetes with kubeadm","description":"In this guide we will use upgrading from Kubernetes v1.28.1 to v1.28.3 as an example","sidebar":"launchpadSidebar"},"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm":{"id":"launchpad/advanced-tutorials/kubernetes-create-cluster-with-kubeadm","title":"Kubernetes Guide - Bootstrapping with Kubeadm","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/client-side-tooling":{"id":"launchpad/client-side-tooling","title":"Client Side Tooling","description":"Launchpad comes with an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your cluster software stack.","sidebar":"launchpadSidebar"},"launchpad/design-principles":{"id":"launchpad/design-principles","title":"Design Principles","description":"Design Principles","sidebar":"launchpadSidebar"},"launchpad/docs-map":{"id":"launchpad/docs-map","title":"Launchpad Documentation","description":"Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults.","sidebar":"launchpadSidebar"},"launchpad/faq":{"id":"launchpad/faq","title":"Frequently Asked Questions (FAQs)","description":"Here are answers to some commonly asked questions. If you have a question that is not covered here, feel free to ask.","sidebar":"launchpadSidebar"},"launchpad/intro":{"id":"launchpad/intro","title":"Introduction","description":"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack.","sidebar":"launchpadSidebar"},"launchpad/modularity":{"id":"launchpad/modularity","title":"Modularity","description":"The full Launchpad stack contains:","sidebar":"launchpadSidebar"},"launchpad/other-resources":{"id":"launchpad/other-resources","title":"Other Resources","description":"Kubernetes","sidebar":"launchpadSidebar"},"launchpad/prerequisites":{"id":"launchpad/prerequisites","title":"Prerequisites","description":"You will need some things to use Launchpad for your infrastructure:","sidebar":"launchpadSidebar"},"launchpad/quick-start":{"id":"launchpad/quick-start","title":"Quick Start","description":"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. 
Check out this page for more info about the modularity of Launchpad.","sidebar":"launchpadSidebar"},"launchpad/release-channels":{"id":"launchpad/release-channels","title":"Release Channels","description":"Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.","sidebar":"launchpadSidebar"},"launchpad/supported-namespaces":{"id":"launchpad/supported-namespaces","title":"Supported Namespaces","description":"Launchpad includes a number of prepackaged Kubernetes namespaces (see Launchpad Namespaces repo), which in turn reference Helm Charts in the Launchpad Charts repository, as well as third-party Charts. GraphOps maintains support for these namespaces, meaning that we:","sidebar":"launchpadSidebar"},"launchpad/tutorials/arbitrum-archive-kubernetes-guide":{"id":"launchpad/tutorials/arbitrum-archive-kubernetes-guide","title":"Arbitrum Archive Mainnet Node Guide","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/tutorials/celo-archive-kubernetes-guide":{"id":"launchpad/tutorials/celo-archive-kubernetes-guide","title":"Celo Archive Mainnet Node Guide","description":"Introduction","sidebar":"launchpadSidebar"},"launchpad/tutorials/monitoring-stack-with-HA":{"id":"launchpad/tutorials/monitoring-stack-with-HA","title":"Deploying a Monitoring stack with HA","description":"Prerequisites","sidebar":"launchpadSidebar"},"launchpad/tutorials/postgresql_ha":{"id":"launchpad/tutorials/postgresql_ha","title":"Overview of High Availability in PostgreSQL","description":"One of the prerequisites of running an indexer stack is currently using PostgreSQL as a database for storing indexer metadata and subgraph data. To ensure redundancy of data and operations and enable systems to continue functioning despite individual component failures we want to account for the following areas as they relate to running PostgreSQL:","sidebar":"launchpadSidebar"},"mips-resources/intro":{"id":"mips-resources/intro","title":"Introduction","description":"It\'s an exciting time to be participating in The Graph ecosystem! 
During Graph Day 2022 Yaniv Tal announced the sunsetting of the hosted service, a moment The Graph ecosystem has been working towards for many years.","sidebar":"mipsSidebar"},"mips-resources/mips-faq":{"id":"mips-resources/mips-faq","title":"MIPs FAQs","description":"desc","sidebar":"mipsSidebar"}}}')}}]); \ No newline at end of file diff --git a/assets/js/bd9dd2f9.3f20c11d.js b/assets/js/bd9dd2f9.3f20c11d.js new file mode 100644 index 00000000..0f978bb3 --- /dev/null +++ b/assets/js/bd9dd2f9.3f20c11d.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[4089],{3905:(e,t,a)=>{a.d(t,{Zo:()=>c,kt:()=>m});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var l=n.createContext({}),s=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},c=function(e){var t=s(e.components);return n.createElement(l.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,c=p(e,["components","mdxType","originalType","parentName"]),u=s(a),h=r,m=u["".concat(l,".").concat(h)]||u[h]||d[h]||o;return a?n.createElement(m,i(i({ref:t},c),{},{components:a})):n.createElement(m,i({ref:t},c))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=h;var p={};for(var l in t)hasOwnProperty.call(t,l)&&(p[l]=t[l]);p.originalType=e,p[u]="string"==typeof e?e:r,i[1]=p;for(var s=2;s{a.r(t),a.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>p,toc:()=>s});var n=a(7462),r=(a(7294),a(3905));const o={sidebar_position:2},i="Introduction",p={unversionedId:"launchpad/intro",id:"launchpad/intro",title:"Introduction",description:"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. 
It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack.",source:"@site/docs/launchpad/intro.md",sourceDirName:"launchpad",slug:"/launchpad/intro",permalink:"/launchpad/intro",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/intro.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{sidebar_position:2},sidebar:"launchpadSidebar",previous:{title:"Launchpad Documentation",permalink:"/launchpad/docs-map"},next:{title:"Modularity",permalink:"/launchpad/modularity"}},l={},s=[{value:"Features",id:"features",level:2},{value:"Next steps",id:"next-steps",level:2}],c={toc:s},u="wrapper";function d(e){let{components:t,...o}=e;return(0,r.kt)(u,(0,n.Z)({},c,o,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"introduction"},"Introduction"),(0,r.kt)("p",null,"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack."),(0,r.kt)("p",null,"There are four major components to be aware of:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Launchpad Starter (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-starter")),"): A starting point for every new Launchpad deployment"),(0,r.kt)("li",{parentName:"ol"},"Launchpad Charts (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts")),"): A collection of Helm Charts for blockchains and web3 apps"),(0,r.kt)("li",{parentName:"ol"},"Launchpad Namespaces (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")),"): A collection of preconfigured Kubernetes Namespaces using Helmfile"),(0,r.kt)("li",{parentName:"ol"},"Launchpad Taskfiles (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-taskfiles"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-taskfiles")),"): A collection of preconfigured Tasks using Taskfile")),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Launchpad components",src:a(9282).Z,width:"960",height:"540"})),(0,r.kt)("h2",{id:"features"},"Features"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Actively maintained by ",(0,r.kt)("a",{parentName:"li",href:"https://graphops.xyz"},"GraphOps")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/graphs/contributors"},"and contributors")),(0,r.kt)("li",{parentName:"ul"},"An opinionated starter (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"launchpad-starter")),") to define and manage your stack in a declarative, version controlled manner"),(0,r.kt)("li",{parentName:"ul"},"A collection of Helm Charts for deploying and monitoring blockchain nodes and Graph Protocol Indexers in Kubernetes, with P2P ",(0,r.kt)("inlineCode",{parentName:"li"},"NodePort")," support"),(0,r.kt)("li",{parentName:"ul"},"Preconfigured namespaces for core cluster functions (logging, monitoring, 
etc) and major blockchains"),(0,r.kt)("li",{parentName:"ul"},"An automated dependency update pipeline for ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts"))," and ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")))),(0,r.kt)("p",null,"Are you interested in exploring Launchpad but not ready to adopt the entire stack? Explore our ",(0,r.kt)("a",{parentName:"p",href:"modularity"},"Modularity")," page to discover how you can selectively integrate elements of Launchpad, like ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),", to fit your specific needs without committing to a full end-to-end implementation."),(0,r.kt)("h2",{id:"next-steps"},"Next steps"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Visit our ",(0,r.kt)("a",{parentName:"li",href:"docs-map"},"Documentation Map")," for an overview of where to find all the information you need"),(0,r.kt)("li",{parentName:"ul"},"Read the ",(0,r.kt)("a",{parentName:"li",href:"prerequisites"},"Prerequisites")," section to understand what you need to get started"),(0,r.kt)("li",{parentName:"ul"},"Read the ",(0,r.kt)("a",{parentName:"li",href:"quick-start"},"Quick Start guide")," to get up and running"),(0,r.kt)("li",{parentName:"ul"},"Look at the repositories above on GitHub to understand how they work")))}d.isMDXComponent=!0},9282:(e,t,a)=>{a.d(t,{Z:()=>n});const n=a.p+"assets/images/launchpad-repos-slide-d3e54366242c65b376ed51cd586ff794.svg"}}]); \ No newline at end of file diff --git a/assets/js/bd9dd2f9.e6089dcb.js b/assets/js/bd9dd2f9.e6089dcb.js deleted file mode 100644 index 41c30a10..00000000 --- a/assets/js/bd9dd2f9.e6089dcb.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[4089],{3905:(e,t,a)=>{a.d(t,{Zo:()=>s,kt:()=>m});var n=a(7294);function r(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function o(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(r[a]=e[a]);return r}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(r[a]=e[a])}return r}var l=n.createContext({}),c=function(e){var t=n.useContext(l),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},s=function(e){var t=c(e.components);return n.createElement(l.Provider,{value:t},e.children)},u="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},h=n.forwardRef((function(e,t){var a=e.components,r=e.mdxType,o=e.originalType,l=e.parentName,s=p(e,["components","mdxType","originalType","parentName"]),u=c(a),h=r,m=u["".concat(l,".").concat(h)]||u[h]||d[h]||o;return a?n.createElement(m,i(i({ref:t},s),{},{components:a})):n.createElement(m,i({ref:t},s))}));function m(e,t){var a=arguments,r=t&&t.mdxType;if("string"==typeof e||r){var o=a.length,i=new Array(o);i[0]=h;var p={};for(var l in t)hasOwnProperty.call(t,l)&&(p[l]=t[l]);p.originalType=e,p[u]="string"==typeof e?e:r,i[1]=p;for(var
c=2;c{a.r(t),a.d(t,{assets:()=>l,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>p,toc:()=>c});var n=a(7462),r=(a(7294),a(3905));const o={sidebar_position:2},i="Introduction",p={unversionedId:"launchpad/intro",id:"launchpad/intro",title:"Introduction",description:"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack.",source:"@site/docs/launchpad/intro.md",sourceDirName:"launchpad",slug:"/launchpad/intro",permalink:"/launchpad/intro",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/intro.md",tags:[],version:"current",sidebarPosition:2,frontMatter:{sidebar_position:2},sidebar:"launchpadSidebar",previous:{title:"Launchpad Documentation",permalink:"/launchpad/docs-map"},next:{title:"Modularity",permalink:"/launchpad/modularity"}},l={},c=[{value:"Features",id:"features",level:2},{value:"Next steps",id:"next-steps",level:2}],s={toc:c},u="wrapper";function d(e){let{components:t,...o}=e;return(0,r.kt)(u,(0,n.Z)({},s,o,{components:t,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"introduction"},"Introduction"),(0,r.kt)("p",null,"Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It is comprised of an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployments stack."),(0,r.kt)("p",null,"There are three major components to be aware of:"),(0,r.kt)("ol",null,(0,r.kt)("li",{parentName:"ol"},"Launchpad Starter (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-starter")),"): A starting point for every new Launchpad deployment"),(0,r.kt)("li",{parentName:"ol"},"Launchpad Charts (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts")),"): A collection of Helm Charts for blockchains and web3 apps"),(0,r.kt)("li",{parentName:"ol"},"Launchpad Namespaces (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")),"): A collection of preconfigured Kubernetes Namespaces using Helmfile")),(0,r.kt)("p",null,(0,r.kt)("img",{alt:"Launchpad components",src:a(9282).Z,width:"960",height:"540"})),(0,r.kt)("h2",{id:"features"},"Features"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Actively maintained by ",(0,r.kt)("a",{parentName:"li",href:"https://graphops.xyz"},"GraphOps")," ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts/graphs/contributors"},"and contributors")),(0,r.kt)("li",{parentName:"ul"},"An opinionated starter (",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-starter"},(0,r.kt)("inlineCode",{parentName:"a"},"launchpad-starter")),") to define and manage your stack in a declarative, version controlled manner"),(0,r.kt)("li",{parentName:"ul"},"A collection of Helm Charts 
for deploying and monitoring blockchain nodes and Graph Protocol Indexers in Kubernetes, with P2P ",(0,r.kt)("inlineCode",{parentName:"li"},"NodePort")," support"),(0,r.kt)("li",{parentName:"ul"},"Preconfigured namespaces for core cluster functions (logging, monitoring, etc) and major blockchains"),(0,r.kt)("li",{parentName:"ul"},"An automated dependency update pipeline for ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-charts"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-charts"))," and ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/graphops/launchpad-namespaces"},(0,r.kt)("inlineCode",{parentName:"a"},"graphops/launchpad-namespaces")))),(0,r.kt)("p",null,"Are you interested in exploring Launchpad but not ready to adopt the entire stack? Explore our ",(0,r.kt)("a",{parentName:"p",href:"modularity"},"Modularity")," page to discover how you can selectively integrate elements of Launchpad, like ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),", ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-charts"),", and ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),", to fit your specific needs without committing to a full end-to-end implementation."),(0,r.kt)("h2",{id:"next-steps"},"Next steps"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},"Visit our ",(0,r.kt)("a",{parentName:"li",href:"docs-map"},"Documentation Map")," for an overview of where to find all the information you need"),(0,r.kt)("li",{parentName:"ul"},"Read the ",(0,r.kt)("a",{parentName:"li",href:"prerequisites"},"Prerequisites")," section to understand what you need to started"),(0,r.kt)("li",{parentName:"ul"},"Read the ",(0,r.kt)("a",{parentName:"li",href:"quick-start"},"Quick Start guide")," to get up and running"),(0,r.kt)("li",{parentName:"ul"},"Look at the repositories above on GitHub to understand how they work")))}d.isMDXComponent=!0},9282:(e,t,a)=>{a.d(t,{Z:()=>n});const n=a.p+"assets/images/launchpad-repos-slide-d3e54366242c65b376ed51cd586ff794.svg"}}]); \ No newline at end of file diff --git a/assets/js/d2eb8d4c.73065bf8.js b/assets/js/d2eb8d4c.73065bf8.js new file mode 100644 index 00000000..cf09a38d --- /dev/null +++ b/assets/js/d2eb8d4c.73065bf8.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[1263],{3905:(e,a,n)=>{n.d(a,{Zo:()=>c,kt:()=>d});var t=n(7294);function r(e,a,n){return a in e?Object.defineProperty(e,a,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[a]=n,e}function s(e,a){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);a&&(t=t.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),n.push.apply(n,t)}return n}function o(e){for(var a=1;a=0||(r[n]=e[n]);return r}(e,a);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var l=t.createContext({}),p=function(e){var a=t.useContext(l),n=a;return e&&(n="function"==typeof e?e(a):o(o({},a),e)),n},c=function(e){var a=p(e.components);return t.createElement(l.Provider,{value:a},e.children)},u="mdxType",h={inlineCode:"code",wrapper:function(e){var a=e.children;return t.createElement(t.Fragment,{},a)}},m=t.forwardRef((function(e,a){var n=e.components,r=e.mdxType,s=e.originalType,l=e.parentName,c=i(e,["components","mdxType","originalType","parentName"]),u=p(n),m=r,d=u["".concat(l,".").concat(m)]||u[m]||h[m]||s;return 
n?t.createElement(d,o(o({ref:a},c),{},{components:n})):t.createElement(d,o({ref:a},c))}));function d(e,a){var n=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var s=n.length,o=new Array(s);o[0]=m;var i={};for(var l in a)hasOwnProperty.call(a,l)&&(i[l]=a[l]);i.originalType=e,i[u]="string"==typeof e?e:r,o[1]=i;for(var p=2;p{n.r(a),n.d(a,{assets:()=>l,contentTitle:()=>o,default:()=>h,frontMatter:()=>s,metadata:()=>i,toc:()=>p});var t=n(7462),r=(n(7294),n(3905));const s={sidebar_position:3},o="Quick Start",i={unversionedId:"launchpad/quick-start",id:"launchpad/quick-start",title:"Quick Start",description:"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. Checkout this page for more info about the modularity of Launchpad.",source:"@site/docs/launchpad/quick-start.md",sourceDirName:"launchpad",slug:"/launchpad/quick-start",permalink:"/launchpad/quick-start",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/quick-start.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3},sidebar:"launchpadSidebar",previous:{title:"Prerequisites",permalink:"/launchpad/prerequisites"},next:{title:"Release Channels",permalink:"/launchpad/release-channels"}},l={},p=[{value:"Using Launchpad end to end",id:"using-launchpad-end-to-end",level:2},{value:"Install Taskfile",id:"install-taskfile",level:3},{value:"Use launchpad-starter for your new infra repo",id:"use-launchpad-starter-for-your-new-infra-repo",level:3},{value:"Setup the launchpad dependencies",id:"setup-the-launchpad-dependencies",level:3},{value:"Connect your Local environment to your Kubernetes cluster",id:"connect-your-local-environment-to-your-kubernetes-cluster",level:3},{value:"\ud83c\udf89 Milestone: Local environment configured!",id:"-milestone-local-environment-configured",level:3},{value:"Customize your helmfiles",id:"customize-your-helmfiles",level:3},{value:"Syncing your helmfile.yaml with the cluster",id:"syncing-your-helmfileyaml-with-the-cluster",level:3},{value:"\ud83c\udf89 Milestone: Kubernetes and core systems running!",id:"-milestone-kubernetes-and-core-systems-running",level:3},{value:"Deploy blockchain namespaces as desired",id:"deploy-blockchain-namespaces-as-desired",level:3},{value:"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia",id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia",level:4},{value:"Install the Graph Arbitrum Sepolia Indexer Stack",id:"install-the-graph-arbitrum-sepolia-indexer-stack",level:3},{value:"\ud83c\udf89 Milestone: Graph Indexer running and accessible",id:"-milestone-graph-indexer-running-and-accessible",level:3},{value:"Updates",id:"updates",level:2},{value:"Updating launchpad-namespace changes into your stack",id:"updating-launchpad-namespace-changes-into-your-stack",level:3},{value:"Pulling in starter changes",id:"pulling-in-starter-changes",level:3},{value:"Using Helmfile and Launchpad Charts",id:"using-helmfile-and-launchpad-charts",level:2},{value:"Prerequisites",id:"prerequisites",level:3},{value:"Deploying using Launchpad-charts directly",id:"deploying-using-launchpad-charts-directly",level:3},{value:"Key Consideration",id:"key-consideration",level:4},{value:"Deploying using Helmfile",id:"deploying-using-helmfile",level:3},{value:"Deploy blockchain namespaces as desired",id:"deploy-blockchain-namespaces-as-desired-1",level:3},{value:"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for 
Arbitrum Sepolia",id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia-1",level:4},{value:"Install the Graph Arbitrum Sepolia Indexer Stack",id:"install-the-graph-arbitrum-sepolia-indexer-stack-1",level:3},{value:"\ud83c\udf89 Milestone: Graph Indexer running and accessible",id:"-milestone-graph-indexer-running-and-accessible-1",level:3}],c={toc:p},u="wrapper";function h(e){let{components:a,...n}=e;return(0,r.kt)(u,(0,t.Z)({},c,n,{components:a,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"quick-start"},"Quick Start"),(0,r.kt)("p",null,"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. Checkout ",(0,r.kt)("a",{parentName:"p",href:"/launchpad/modularity"},"this page")," for more info about the modularity of Launchpad. "),(0,r.kt)("p",null,"Make sure you have all the ",(0,r.kt)("a",{parentName:"p",href:"prerequisites"},"Prerequisites")," before starting."),(0,r.kt)("p",null,"To start jump to the relevant section based on how you're using the project:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"#using-launchpad-end-to-end"},"Using Launchpad End to End")," "),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"#using-helmfile-and-launchpad-charts"},"Using Launchpad Charts and Helmfile"))),(0,r.kt)("h2",{id:"using-launchpad-end-to-end"},"Using Launchpad end to end"),(0,r.kt)("p",null,"This section takes you through steps of getting started using all aspects of the Launchpad project."),(0,r.kt)("h3",{id:"install-taskfile"},"Install Taskfile"),(0,r.kt)("p",null,"Launchpad has a large number of tooling dependencies that will run on your local machine. The most important dependency is ",(0,r.kt)("a",{parentName:"p",href:"https://taskfile.dev"},"Taskfile"),"."),(0,r.kt)("p",null,"Follow the ",(0,r.kt)("a",{parentName:"p",href:"https://taskfile.dev/installation/"},"installation instructions")," for your environment and install Taskfile on your local machine before continuing."),(0,r.kt)("h3",{id:"use-launchpad-starter-for-your-new-infra-repo"},"Use launchpad-starter for your new infra repo"),(0,r.kt)("p",null,"Next, we are going to create the repository that will contain your new infrastructure's configuration."),(0,r.kt)("p",null,"First, prepare a new empty repository to hold your infrastructure repo. This could be a new repository on GitHub, GitLab, BitBucket, etc."),(0,r.kt)("p",null,"Next, we're going to clone ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),", and then replace the existing ",(0,r.kt)("inlineCode",{parentName:"p"},"origin")," remote with your new remote repository. This allows us to retain the commit history of ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),". A shared commit history will make future rebases against the upstream ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter")," much easier."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"# Clone the starter into my-new-infra and cd into it\ngit clone https://github.com/graphops/launchpad-starter my-new-infra\ncd my-new-infra\n\n# Set your own remote as origin\ngit remote remove origin\ngit remote add origin git@github.com:you/your-infra.git\n\n# Push to your new repo\ngit push origin main\n")),(0,r.kt)("p",null,"All work on your infrastructure will take place in this new repo. 
We recommend carefully version controlling all changes you make to your infrastructure configuration."),(0,r.kt)("h3",{id:"setup-the-launchpad-dependencies"},"Setup the launchpad dependencies"),(0,r.kt)("p",null,"Next, we should install all of the local tooling dependencies (like Helm or Kubectl) that we will need."),(0,r.kt)("p",null,"We can easily do that by running the ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad:update-deps")," command."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"# You may need to use sudo for this command\ntask launchpad:update-deps\n")),(0,r.kt)("h3",{id:"connect-your-local-environment-to-your-kubernetes-cluster"},"Connect your Local environment to your Kubernetes cluster"),(0,r.kt)("p",null,"To connect your local machine to a Kubernetes cluster, you can follow these general steps:"),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Get Cluster Configuration:")," Make sure your ",(0,r.kt)("a",{parentName:"p",href:"https://devopscube.com/kubernetes-kubeconfig-file/"},(0,r.kt)("inlineCode",{parentName:"a"},"kubeconfig"))," has been added to the ",(0,r.kt)("inlineCode",{parentName:"p"},"~/.kube/config")," file. If you don't have this file, you may need to ask the administrator who created the cluster for the configuration."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Verify Configuration:")," Open the ",(0,r.kt)("inlineCode",{parentName:"p"},"config")," file in a text editor to verify that it contains the correct cluster details, including server URL, certificates, and context information."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Switch Context if working with multiple Kubernetes clusters:")," A context in Kubernetes is a combination of a cluster, a user, and a namespace. Use the ",(0,r.kt)("inlineCode",{parentName:"p"},"kubectl config use-context")," command to set your desired context. For example:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sh"},"kubectl config use-context <context-name>\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Test Connection:")," Run a simple ",(0,r.kt)("inlineCode",{parentName:"p"},"kubectl")," command to test if your local machine can connect to the cluster:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sh"},"kubectl get pods\n")),(0,r.kt)("p",null," This command should list the pods in the default namespace of your cluster."),(0,r.kt)("p",null,"Remember that each cluster might have specific setup steps or requirements, especially if it's managed by a cloud provider. 
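"),(0,r.kt)("p",null,"As a quick sanity check (a sketch using standard ",(0,r.kt)("inlineCode",{parentName:"p"},"kubectl")," built-ins), you can confirm which context is active and that the control plane is reachable:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sh"},"# List contexts; the active one is marked with an asterisk\nkubectl config get-contexts\n\n# Confirm the control plane is reachable\nkubectl cluster-info\n")),(0,r.kt)("p",null,"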
Always refer to the documentation provided by the cluster administrator or the cloud provider for detailed instructions on connecting your local machine to the cluster."),(0,r.kt)("h3",{id:"-milestone-local-environment-configured"},"\ud83c\udf89 Milestone: Local environment configured!"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We now have our own private git repo containing the declarative configuration for our cluster deployments"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We have installed all the tooling dependencies on our local machine, which will be used to control the cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Copy ",(0,r.kt)("inlineCode",{parentName:"li"},"sample.helmfile.yaml")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"helmfile.yaml")," and edit it to select which Namespaces you would like to deploy on your Kubernetes cluster")),(0,r.kt)("h3",{id:"customize-your-helmfiles"},"Customize your helmfiles"),(0,r.kt)("p",null,"To get started with Helmfile, if you don\u2019t already have a ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),", you can begin by copying the provided sample configuration file named ",(0,r.kt)("inlineCode",{parentName:"p"},"sample.helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"cp sample.helmfile.yaml helmfile.yaml\n")),(0,r.kt)("p",null,"After copying, open ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," in your preferred text editor to make necessary modifications. Within this file, you will find a ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfiles:")," section which organizes deployment configurations by namespace through multiple helmfile paths:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: namespaces/storage.yaml\n - path: namespaces/sealed-secrets.yaml\n - path: namespaces/postgres-operator.yaml\n - path: namespaces/ingress.yaml\n - path: namespaces/monitoring.yaml\n - path: namespaces/eth-sepolia.yaml\n - path: namespaces/eth-mainnet.yaml\n - path: namespaces/arbitrum-sepolia.yaml\n - path: namespaces/graph-arbitrum-sepolia.yaml\n")),(0,r.kt)("p",null,"This structure allows you to manage deployments modularly. You can add or remove entries in this list to include new namespaces or exclude those you no longer need. Each path points to a specific helmfile that defines resources to be deployed within that namespace. For instance, looking at ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/storage.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-stable/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n")),(0,r.kt)("p",null,"In the example above, values can be set to override the default configurations in a given Namespace, allowing for customization according to specific requirements. 
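"),(0,r.kt)("p",null,"As an illustrative sketch (the ",(0,r.kt)("inlineCode",{parentName:"p"},"openebs")," override below is hypothetical; check the Namespace itself for the keys it actually supports), a per-release override inside the storage ",(0,r.kt)("em",{parentName:"p"},"Namespace")," could look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-stable/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n openebs: # hypothetical per-release override\n values:\n ndm:\n enabled: false\n")),(0,r.kt)("p",null,"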
Refer to the Namespaces documentation for more examples of how to configure them, and to see which ones are available: ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces"},"Namespaces"),"."),(0,r.kt)("h3",{id:"syncing-your-helmfileyaml-with-the-cluster"},"Syncing your ",(0,r.kt)("inlineCode",{parentName:"h3"},"helmfile.yaml")," with the cluster"),(0,r.kt)("p",null,"You can list all the releases present in the helmfile.yaml, and their labels, by running ",(0,r.kt)("inlineCode",{parentName:"p"},"task releases:list"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"NAME NAMESPACE ENABLED INSTALLED LABELS CHART VERSION \nopenebs storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage openebs/openebs 3.8.0 \nopenebs-zfs-localpv storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage openebs-zfs-localpv/zfs-localpv 2.3.0 \nopenebs-zfs-storageclass storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage graphops/resource-injector 0.2.0 \nopenebs-zfs-snapclass storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage graphops/resource-injector 0.2.0 \npostgres-operator postgres-operator true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:postgres-operator postgres-operator-charts/postgres-operator 1.10.0 \ningress-nginx ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress ingress-nginx/ingress-nginx 4.7.1 \ncert-manager ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress jetstack/cert-manager v1.12.3 \ncert-manager-resources ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress graphops/resource-injector 0.2.0 \nsealed-secrets sealed-secrets true true launchpad.graphops.xyz/namespace:sealed-secrets sealed-secrets/sealed-secrets 2.1\n")),(0,r.kt)("p",null,"First, update the Helmfile configuration for the base namespaces. You will likely need to configure storage and ingress settings in their respective files, ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/storage.yaml")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/ingress.yaml"),", by customizing them with your specific values."),(0,r.kt)("p",null,"In particular, the storage namespace may be a requirement even for other base namespaces, so let's install that one first by running ",(0,r.kt)("inlineCode",{parentName:"p"},"task releases:apply -- launchpad.graphops.xyz/namespace=storage"),"."),(0,r.kt)("p",null,"Next, let's go ahead and install all the remaining cluster services. 
You will be prompted to install each namespace, with a summary of changes to be made."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply -- monitoring\ntask releases:apply -- storage\ntask releases:apply -- sealed-secrets\ntask releases:apply -- postgres-operator\ntask releases:apply -- ingress\n")),(0,r.kt)("h3",{id:"-milestone-kubernetes-and-core-systems-running"},"\ud83c\udf89 Milestone: Kubernetes and core systems running!"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We connected to our hosts, configured them, and installed Kubernetes"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We installed core cluster services like Prometheus, Grafana, Loki and others"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Deploy blockchain nodes and the Graph Indexing stack")),(0,r.kt)("admonition",{type:"tip"},(0,r.kt)("p",{parentName:"admonition"},"You can now use ",(0,r.kt)("inlineCode",{parentName:"p"},"task indexer:forward-grafana")," to securely access your remote cluster's Grafana instance at http://localhost:3001")),(0,r.kt)("h3",{id:"deploy-blockchain-namespaces-as-desired"},"Deploy blockchain namespaces as desired"),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).")),(0,r.kt)("p",null,"Launchpad comes with Namespace definitions for a number of blockchain networks, including Ethereum Mainnet, Ethereum Sepolia Testnet, Gnosis Chain Mainnet, Polygon Mainnet, Arbitrum One, Arbitrum Sepolia, Celo Mainnet, and others. Using those Namespaces, you can easily deploy blockchain nodes for the networks you want to index into your cluster."),(0,r.kt)("h4",{id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia"},"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia"),(0,r.kt)("p",null,"Make sure that your ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," includes a path directing to ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/arbitrum-sepolia.yaml"),". 
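"),(0,r.kt)("p",null,"That entry mirrors the sample ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," shown earlier; as a minimal sketch:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: namespaces/arbitrum-sepolia.yaml\n")),(0,r.kt)("p",null,"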
Afterward, carefully examine the settings within ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/arbitrum-sepolia.yaml")," to confirm they are accurate and align with your specific needs:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@arbitrum/helmfile.yaml?ref=arbitrum-canary/latest\n selectorsInherited: true\n values:\n - flavor: sepolia\n helmDefaults:\n <<: *helmDefaults\n arbitrum-nitro:\n values:\n nitro:\n config:\n chain: 421614\n parentChainUrl: <> ## if set up with default ethereum ns values this would be http://proxyd-proxyd.eth-sepolia:8545 \n parentChainBeaconUrl: <> ## if set up with default ethereum ns values this would be http://nimbus.eth-sepolia:5052\n")),(0,r.kt)("p",null,"Deploy by syncing your cluster with the declarative ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply -- arbitrum-sepolia\n")),(0,r.kt)("h3",{id:"install-the-graph-arbitrum-sepolia-indexer-stack"},"Install the Graph Arbitrum Sepolia Indexer Stack"),(0,r.kt)("p",null,"Make sure that your ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," includes a path directing to ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/graph-arbitrum-sepolia.yaml"),". Afterward, carefully examine the settings within ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/graph-arbitrum-sepolia.yaml")," to confirm they are accurate and align with your specific needs."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},'helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@graph/helmfile.yaml?ref=graph-canary/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n flavor: "arbitrum-sepolia"\n - graph-network-indexer:\n values:\n indexerDefaults:\n config:\n indexer-address: "<>"\n indexerAgent:\n config:\n public-indexer-url: "<>"\n graph-operator-mnemonic:\n values:\n resources:\n ### RECOMMENDED, safe to commit\n sealed-secret:\n apiVersion: bitnami.com/v1alpha1\n kind: SealedSecret\n metadata:\n name: graph-operator-mnemonic\n namespace: graph-arbitrum-sepolia\n spec:\n template:\n metadata:\n name: graph-operator-mnemonic\n namespace: graph-arbitrum-sepolia\n type: Opaque\n encryptedData:\n mnemonic: <> # Generate a SealedSecret encryptedData key with the "utils:seal-secrets" task, e.g.: task utils:seal-secrets -- -n graph-arbitrum-sepolia -s graph-operator-mnemonic -k mnemonic -v "your mnemonic words"\n graph-database:\n values:\n resources:\n postgres-cr-primary-subgraph-data:\n spec:\n volume:\n storageClass: "<>"\n postgres-cr-indexer-metadata:\n spec:\n volume:\n storageClass: "<>"\n')),(0,r.kt)("p",null,"Proceed to deploy:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply -- graph-arbitrum-sepolia\n")),(0,r.kt)("h3",{id:"-milestone-graph-indexer-running-and-accessible"},"\ud83c\udf89 Milestone: Graph Indexer running and accessible"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We (optionally) configured and deployed blockchain nodes into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We 
configured and deployed the Graph Indexing stack into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests")),(0,r.kt)("h2",{id:"updates"},"Updates"),(0,r.kt)("h3",{id:"updating-launchpad-namespace-changes-into-your-stack"},"Updating ",(0,r.kt)("inlineCode",{parentName:"h3"},"launchpad-namespace")," changes into your stack"),(0,r.kt)("p",null,"As new versions of key components in the stack are released, we will update ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),"'s templated definitions and the various release streams available. You can selectively inherit these updates with ease by changing the git ref as a means to track what release stream you may want, or to pin to any particular major, minor or patch version."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following latest"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-latest"),", or alternatively: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-stable/latest"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-latest\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following a specific major version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following a specific minor version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1.2"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"pinning to an exact version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1.2.2"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2.2\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following the latest canary"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage 
namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-canary/latest"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-canary/latest\n")),(0,r.kt)("p",null,"We would recommend that you either follow the latest stable releases, or pin to a specific version."),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"For full implementation details and other comprehensive notes about ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces")," please visit the ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces/blob/main/README.md"},"github repo"),".")),(0,r.kt)("h3",{id:"pulling-in-starter-changes"},"Pulling in starter changes"),(0,r.kt)("p",null,"From time to time, you may want to update your infra repo with the latest changes from our starter."),(0,r.kt)("p",null,"Launchpad comes with a built in task to do this, but it does require you to handle any rebase conflicts:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task launchpad:pull-upstream-starter\n")),(0,r.kt)("h2",{id:"using-helmfile-and-launchpad-charts"},"Using Helmfile and Launchpad Charts"),(0,r.kt)("p",null,"This guide will cover two primary ways to deploy blockchain-related resources in Kubernetes using Launchpad charts: deploying all components at once using Helmfile and deploying individual components directly using Helm charts."),(0,r.kt)("h3",{id:"prerequisites"},"Prerequisites"),(0,r.kt)("p",null,"Ensure you have ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/helm/helm"},"helm"),", ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/helmfile/helmfile"},"helmfile")," and it's dependency ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/databus23/helm-diff"},"helm-diff")," installed on your local machine. This guide assumes familiarity with basic Helm and Helmfile operations."),(0,r.kt)("p",null,"Before proceeding with this guide, make sure the following tools are installed on your local machine:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/helm/helm"},"Helm"),": The package manager for Kubernetes, essential for managing and deploying applications."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/helmfile/helmfile"},"Helmfile"),": A tool to help streamline the use of Helm charts, enabling better management of Helm chart configurations."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/databus23/helm-diff"},"Helm-diff"),": A Helm plugin that helps visualize differences between your Helmfile configurations and what is actually deployed in your cluster. 
This plugin is a dependency for effectively using Helmfile."),(0,r.kt)("li",{parentName:"ul"},"(Optional) ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/kubernetes-sigs/kustomize"},"Kustomize"),": A tool for customizing Kubernetes configurations beyond what is available with Helm, useful for more complex deployment scenarios.")),(0,r.kt)("h3",{id:"deploying-using-launchpad-charts-directly"},"Deploying using Launchpad-charts directly"),(0,r.kt)("p",null,"If you prefer to use individual components of Launchpad, such as Launchpad Charts, you can add the Launchpad Helm repository and install charts directly:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helm repo add graphops https://graphops.github.io/launchpad-charts\nhelm install my-release graphops/<chart-name> --values <values-file>\n")),(0,r.kt)("h4",{id:"key-consideration"},"Key Consideration"),(0,r.kt)("p",null,"Before proceeding, it is important to note that most Kubernetes clusters do not come pre-configured with a ",(0,r.kt)("a",{parentName:"p",href:"https://kubernetes-csi.github.io/docs/"},"Container Storage Interface (CSI)")," for handling storage volumes. This guide relies on the ability to create storage volumes. It is also necessary to have an Ingress controller installed and configured, as it is essential for managing traffic to and from your applications."),(0,r.kt)("h3",{id:"deploying-using-helmfile"},"Deploying using Helmfile"),(0,r.kt)("p",null,"For a comprehensive deployment, managing all related Helm releases and their values via a single Helmfile offers simplicity and maintainability. This method is particularly effective when deploying complex stacks."),(0,r.kt)("h3",{id:"deploy-blockchain-namespaces-as-desired-1"},"Deploy blockchain namespaces as desired"),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).")),(0,r.kt)("h4",{id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia-1"},"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia"),(0,r.kt)("p",null,"The following ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," provides an example configuration for deploying Arbitrum Nitro on the Arbitrum Sepolia network. 
For an easier setup process, we recommend utilizing the ",(0,r.kt)("a",{parentName:"p",href:"#optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia"},"Launchpad Arbitrum namespace"),", which includes most of the necessary configurations pre-defined for your convenience."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"# helmfile.yaml\nrepositories:\n - name: graphops\n url: https://graphops.github.io/launchpad-charts\n\nreleases:\n - name: arbitrum-nitro\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/arbitrum-nitro\n version: 0.3.4\n values:\n - nitro:\n config:\n chain: 421614 # determines Arbitrum network - 421614 Sepolia\n parentChainUrl: http://your-eth-sepolia-url:8545 ## changeme\n parentChainBeaconUrl: http://your-eth-consensus-node-url:5052 ## changeme\n\n volumeClaimSpec:\n resources:\n requests:\n # -- The amount of disk space to provision for Arbitrum Nitro\n storage: 1Ti\n # -- The storage class to use when provisioning a persistent volume for Arbitrum-Nitro \n storageClassName: openebs-rawfile-localpv #\xa0change me as needed\n\n restoreSnapshot:\n enabled: false\n\n extraLabels:\n app.kubernetes.io/workload-type: blockchain-stateful\n app.kubernetes.io/blockchain: arbitrum-nitro\n\n # if using Prometheus for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n - name: proxyd-nitro\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/proxyd\n version: 0.5.3\n values:\n - backends:\n arbitrum-nitro:\n enabled: true\n # -- Define the RPC URL for the backend\n rpcUrl: http://arbitrum-nitro:8547\n # -- Define the WS URL for the backend\n wsUrl: ws://arbitrum-nitro:8548\n # -- Define additional configuration keys for the backend (see [proxyd config](https://github.com/ethereum-optimism/optimism/blob/5d309e6a6d5e1ef6a88c1ce827b7e6d47f033bbb/proxyd/example.config.toml#L47))\n extraConfig:\n consensus_skip_peer_count: true\n # -- Define which backend groups the backend is part of\n groups:\n - main\n\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n")),(0,r.kt)("p",null,"Deploy by syncing your cluster with the declarative ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helmfile -f path/to/helmfile.yaml sync\n")),(0,r.kt)("h3",{id:"install-the-graph-arbitrum-sepolia-indexer-stack-1"},"Install the Graph Arbitrum Sepolia Indexer Stack"),(0,r.kt)("p",null,"This section of the guide does not include the setup for ",(0,r.kt)("inlineCode",{parentName:"p"},"subgraph-data")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-metadata")," PostgreSQL databases necessary for ",(0,r.kt)("inlineCode",{parentName:"p"},"graph-node")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-agent"),". 
You are encouraged to explore ",(0,r.kt)("a",{parentName:"p",href:"https://www.postgresql.org/support/professional_hosting/"},"managed solutions"),", use ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/bitnami/charts/tree/main/bitnami/postgresql"},"Bitnami's chart"),", or deploy ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/zalando/postgres-operator/tree/master"},"Zalando's Operator"),", either independently or as part of the Launchpad Namespaces, which include a ready-to-use Postgres setup."),(0,r.kt)("p",null,"Include the necessary configurations for ",(0,r.kt)("inlineCode",{parentName:"p"},"graph-node")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-agent")," in your helmfile.yaml as shown in the previous sections, adjusting PostgreSQL references and other settings to fit your specific requirements."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},'releases:\n - name: graph-node\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-node\n version: 0.5.3\n values:\n # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-node\n - graphNodeDefaults:\n env:\n # Graph Node configuration\n IPFS: "https://ipfs.network.thegraph.com"\n GRAPH_ALLOW_NON_DETERMINISTIC_FULLTEXT_SEARCH: "true"\n # Database configuration\n PRIMARY_SUBGRAPH_DATA_PGHOST: ## change me\n PRIMARY_SUBGRAPH_DATA_PGPORT: 5432\n PRIMARY_SUBGRAPH_DATA_PGDATABASE: ## change me\n\n # Database sensitive/secret information\n secretEnv:\n PRIMARY_SUBGRAPH_DATA_PGUSER:\n secretName: \n key: username\n PRIMARY_SUBGRAPH_DATA_PGPASSWORD:\n secretName: \n key: password\n\n graphNodeGroups:\n index:\n replicaCount: 1 # scale me\n query:\n replicaCount: 1 # scale me\n \n chains:\n mainnet:\n enabled: true\n shard: primary\n provider:\n - label: eth-mainnet\n url: ## change me\n features: [archive, traces]\n\n arbitrum-sepolia:\n enabled: true\n shard: primary\n provider:\n - label: arbitrum-sepolia\n url: http://proxyd-proxyd.arbitrum-sepolia:8545\n features: [archive, traces]\n\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n datasources: true\n\n - name: graph-network-indexer\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-network-indexer\n version: 0.2.5\n values:\n # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer\n - indexerDefaults:\n config:\n ethereum: "http://proxyd-proxyd.arbitrum-sepolia:8545"\n ethereum-network: "arbitrum-sepolia"\n network-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia"\n graph-node-query-endpoint: "http://graph-node-query:8000"\n graph-node-status-endpoint: "http://graph-node-block-ingestor:8030/graphql"\n postgres-host: "" ## change me\n postgres-database: "" ## change me\n\n indexerAgent:\n config:\n collect-receipts-endpoint: "https://gateway-testnet-arbitrum.network.thegraph.com/collect-receipts"\n network-subgraph-deployment: "QmT8UDGK7zKd2u2NQZwhLYHdA4KM55QsivkE3ouCuX6fEj" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n epoch-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/arbitrum-sepolia-ebo" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n epoch-subgraph-deployment: "QmTpu2mVquoMpr4SWSM77nGkU3tcUS1Bhk1sVHpjDrAUAx"\n 
graph-node-admin-endpoint: "http://graph-node-block-ingestor:8020"\n public-indexer-url: "" ## change me\n index-node-ids: "graph-node-index-0" # if more than one graph-node index, specify as comma delimited list ie "graph-node-index-0, graph-node-index-1"\n\n secretEnv:\n INDEXER_AGENT_MNEMONIC:\n secretName: \n key: mnemonic\n INDEXER_AGENT_POSTGRES_USERNAME:\n secretName: \n key: username\n INDEXER_AGENT_POSTGRES_PASSWORD:\n secretName: \n key: password\n\n\n indexerService:\n replicas: 1 #\xa0scale me\n\n config:\n client-signer-address: "0xe1EC4339019eC9628438F8755f847e3023e4ff9c" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n \n secretEnv:\n INDEXER_SERVICE_MNEMONIC:\n secretName: \n key: mnemonic\n INDEXER_SERVICE_POSTGRES_USERNAME:\n secretName: \n key: username\n INDEXER_SERVICE_POSTGRES_PASSWORD:\n secretName: \n key: password\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n\n - name: subgraph-radio\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/subgraph-radio\n version: 0.2.8\n values:\n - env:\n GRAPH_NODE_STATUS_ENDPOINT: http://graph-node-block-ingestor:8030/graphql\n INDEXER_MANAGEMENT_SERVER_ENDPOINT: http://graph-network-indexer-agent:8000\n GRAPHCAST_NETWORK: "testnet"\n REGISTRY_SUBGRAPH: https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-se\n NETWORK_SUBGRAPH: https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia\n secretEnv:\n MNEMONIC:\n secretName: \n key: mnemonic\n\n - name: graph-toolbox\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-toolbox\n version: 0.1.0\n values:\n - config:\n graphNode:\n # -- URL to Graph Node Admin API\n adminApiUrl: http://graph-node-block-ingestor:8020\n existingConfigMap:\n # -- The name of the ConfigMap that contains your Graph Node config.toml\n configMapName: graph-node-config\n # -- The name of the data key in the ConfigMap that contains your config.toml\n configFileKey: config.toml\n indexer:\n # -- URL to Indexer Agent Management Server\n indexerAgentManagementUrl: http://graph-network-indexer-agent:8000\n\n aliases:\n graphman: graphman --config /graphman-config/config.toml\n indexer: graph-indexer indexer\n psql-primary-subgraph-data: >\n PGPASSWORD=$PRIMARY_SUBGRAPH_DATA_PGPASSWORD psql -w -U $PRIMARY_SUBGRAPH_DATA_PGUSER -d "host=$PRIMARY_SUBGRAPH_DATA_PGHOST,port=$PRIMARY_SUBGRAPH_DATA_PGPORT,dbname=$PRIMARY_SUBGRAPH_DATA_PGDATABASE"\n psql-indexer-metadata: >\n PGPASSWORD=$INDEXER_METDATA_PGPASSWORD psql -w -U $INDEXER_METADATA_PGUSER -d "host=$INDEXER_METADATA_PGHOST,port=$INDEXER_METADATA_PGPORT,dbname=$INDEXER_METADATA_PGDATABASE"\n\n env:\n PRIMARY_SUBGRAPH_DATA_PGHOST: ## change me\n PRIMARY_SUBGRAPH_DATA_PGPORT: 5432\n PRIMARY_SUBGRAPH_DATA_PGDATABASE: ## change me\n INDEXER_METADATA_PGHOST: ## change me\n INDEXER_METADATA_PGPORT: 5432\n INDEXER_METADATA_PGDATABASE: ## change me\n\n secretEnv:\n PRIMARY_SUBGRAPH_DATA_PGUSER:\n secretName: ## change me\n key: username\n PRIMARY_SUBGRAPH_DATA_PGPASSWORD:\n secretName: ## change me\n key: password\n INDEXER_METADATA_PGUSER:\n secretName: ## change me\n key: username\n INDEXER_METDATA_PGPASSWORD:\n secretName: ## change me\n key: password\n')),(0,r.kt)("p",null,"Proceed to deploy:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helmfile -f path/to/helmfile.yaml 
sync\n")),(0,r.kt)("h3",{id:"-milestone-graph-indexer-running-and-accessible-1"},"\ud83c\udf89 Milestone: Graph Indexer running and accessible"),(0,r.kt)("p",null,"Once your deployments are successfully applied, your Graph Indexer should be operational, with blockchain nodes (if deployed) and the Graph Indexing stack running in your Kubernetes cluster."),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We (optionally) configured and deployed blockchain nodes into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We configured and deployed the Graph Indexing stack into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests")))}h.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/d2eb8d4c.e6072cce.js b/assets/js/d2eb8d4c.e6072cce.js deleted file mode 100644 index b9cacfcc..00000000 --- a/assets/js/d2eb8d4c.e6072cce.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs=self.webpackChunkdocs||[]).push([[1263],{3905:(e,a,n)=>{n.d(a,{Zo:()=>c,kt:()=>d});var t=n(7294);function r(e,a,n){return a in e?Object.defineProperty(e,a,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[a]=n,e}function s(e,a){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);a&&(t=t.filter((function(a){return Object.getOwnPropertyDescriptor(e,a).enumerable}))),n.push.apply(n,t)}return n}function i(e){for(var a=1;a=0||(r[n]=e[n]);return r}(e,a);if(Object.getOwnPropertySymbols){var s=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(r[n]=e[n])}return r}var l=t.createContext({}),p=function(e){var a=t.useContext(l),n=a;return e&&(n="function"==typeof e?e(a):i(i({},a),e)),n},c=function(e){var a=p(e.components);return t.createElement(l.Provider,{value:a},e.children)},u="mdxType",h={inlineCode:"code",wrapper:function(e){var a=e.children;return t.createElement(t.Fragment,{},a)}},m=t.forwardRef((function(e,a){var n=e.components,r=e.mdxType,s=e.originalType,l=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),u=p(n),m=r,d=u["".concat(l,".").concat(m)]||u[m]||h[m]||s;return n?t.createElement(d,i(i({ref:a},c),{},{components:n})):t.createElement(d,i({ref:a},c))}));function d(e,a){var n=arguments,r=a&&a.mdxType;if("string"==typeof e||r){var s=n.length,i=new Array(s);i[0]=m;var o={};for(var l in a)hasOwnProperty.call(a,l)&&(o[l]=a[l]);o.originalType=e,o[u]="string"==typeof e?e:r,i[1]=o;for(var p=2;p{n.r(a),n.d(a,{assets:()=>l,contentTitle:()=>i,default:()=>h,frontMatter:()=>s,metadata:()=>o,toc:()=>p});var t=n(7462),r=(n(7294),n(3905));const s={sidebar_position:3},i="Quick Start",o={unversionedId:"launchpad/quick-start",id:"launchpad/quick-start",title:"Quick Start",description:"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. 
Checkout this page for more info about the modularity of Launchpad.",source:"@site/docs/launchpad/quick-start.md",sourceDirName:"launchpad",slug:"/launchpad/quick-start",permalink:"/launchpad/quick-start",draft:!1,editUrl:"https://github.com/graphops/docs/edit/main/docs/launchpad/quick-start.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3},sidebar:"launchpadSidebar",previous:{title:"Prerequisites",permalink:"/launchpad/prerequisites"},next:{title:"Release Channels",permalink:"/launchpad/release-channels"}},l={},p=[{value:"Using Launchpad end to end",id:"using-launchpad-end-to-end",level:2},{value:"Install Taskfile",id:"install-taskfile",level:3},{value:"Use launchpad-starter for your new infra repo",id:"use-launchpad-starter-for-your-new-infra-repo",level:3},{value:"Setup the launchpad dependencies",id:"setup-the-launchpad-dependencies",level:3},{value:"Connect your Local environment to your Kubernetes cluster",id:"connect-your-local-environment-to-your-kubernetes-cluster",level:3},{value:"\ud83c\udf89 Milestone: Local environment configured!",id:"-milestone-local-environment-configured",level:3},{value:"Customize your helmfiles",id:"customize-your-helmfiles",level:3},{value:"Syncing your helmfile.yaml with the cluster",id:"syncing-your-helmfileyaml-with-the-cluster",level:3},{value:"\ud83c\udf89 Milestone: Kubernetes and core systems running!",id:"-milestone-kubernetes-and-core-systems-running",level:3},{value:"Deploy blockchain namespaces as desired",id:"deploy-blockchain-namespaces-as-desired",level:3},{value:"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia",id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia",level:4},{value:"Install the Graph Arbitrum Sepolia Indexer Stack",id:"install-the-graph-arbitrum-sepolia-indexer-stack",level:3},{value:"\ud83c\udf89 Milestone: Graph Indexer running and accessible",id:"-milestone-graph-indexer-running-and-accessible",level:3},{value:"Updates",id:"updates",level:2},{value:"Updating launchpad-namespace changes into your stack",id:"updating-launchpad-namespace-changes-into-your-stack",level:3},{value:"Pulling in starter changes",id:"pulling-in-starter-changes",level:3},{value:"Using Helmfile and Launchpad Charts",id:"using-helmfile-and-launchpad-charts",level:2},{value:"Prerequisites",id:"prerequisites",level:3},{value:"Deploying using Launchpad-charts directly",id:"deploying-using-launchpad-charts-directly",level:3},{value:"Key Consideration",id:"key-consideration",level:4},{value:"Deploying using Helmfile",id:"deploying-using-helmfile",level:3},{value:"Deploy blockchain namespaces as desired",id:"deploy-blockchain-namespaces-as-desired-1",level:3},{value:"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia",id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia-1",level:4},{value:"Install the Graph Arbitrum Sepolia Indexer Stack",id:"install-the-graph-arbitrum-sepolia-indexer-stack-1",level:3},{value:"\ud83c\udf89 Milestone: Graph Indexer running and accessible",id:"-milestone-graph-indexer-running-and-accessible-1",level:3}],c={toc:p},u="wrapper";function h(e){let{components:a,...n}=e;return(0,r.kt)(u,(0,t.Z)({},c,n,{components:a,mdxType:"MDXLayout"}),(0,r.kt)("h1",{id:"quick-start"},"Quick Start"),(0,r.kt)("p",null,"We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. 
Checkout ",(0,r.kt)("a",{parentName:"p",href:"/launchpad/modularity"},"this page")," for more info about the modularity of Launchpad. "),(0,r.kt)("p",null,"Make sure you have all the ",(0,r.kt)("a",{parentName:"p",href:"prerequisites"},"Prerequisites")," before starting."),(0,r.kt)("p",null,"To start jump to the relevant section based on how you're using the project:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"#using-launchpad-end-to-end"},"Using Launchpad End to End")," "),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"#using-helmfile-and-launchpad-charts"},"Using Launchpad Charts and Helmfile"))),(0,r.kt)("h2",{id:"using-launchpad-end-to-end"},"Using Launchpad end to end"),(0,r.kt)("p",null,"This section takes you through steps of getting started using all aspects of the Launchpad project."),(0,r.kt)("h3",{id:"install-taskfile"},"Install Taskfile"),(0,r.kt)("p",null,"Launchpad has a large number of tooling dependencies that will run on your local machine. The most important dependency is ",(0,r.kt)("a",{parentName:"p",href:"https://taskfile.dev"},"Taskfile"),"."),(0,r.kt)("p",null,"Follow the ",(0,r.kt)("a",{parentName:"p",href:"https://taskfile.dev/installation/"},"installation instructions")," for your environment and install Taskfile on your local machine before continuing."),(0,r.kt)("h3",{id:"use-launchpad-starter-for-your-new-infra-repo"},"Use launchpad-starter for your new infra repo"),(0,r.kt)("p",null,"Next, we are going to create the repository that will contain your new infrastructure's configuration."),(0,r.kt)("p",null,"First, prepare a new empty repository to hold your infrastructure repo. This could be a new repository on GitHub, GitLab, BitBucket, etc."),(0,r.kt)("p",null,"Next, we're going to clone ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),", and then replace the existing ",(0,r.kt)("inlineCode",{parentName:"p"},"origin")," remote with your new remote repository. This allows us to retain the commit history of ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter"),". A shared commit history will make future rebases against the upstream ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-starter")," much easier."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"# Clone the starter into my-new-infra and cd into it\ngit clone https://github.com/graphops/launchpad-starter my-new-infra\ncd my-new-infra\n\n# Set your own remote as origin\ngit remote remove origin\ngit remote add origin git@github.com:you/your-infra.git\n\n# Push to your new repo\ngit push origin main\n")),(0,r.kt)("p",null,"All work on your infrastructure will take place in this new repo. 
We recommend carefully version controlling all changes you make to your infrastructure configuration."),(0,r.kt)("h3",{id:"setup-the-launchpad-dependencies"},"Set up the Launchpad dependencies"),(0,r.kt)("p",null,"Next, we should install all of the local tooling dependencies (like Helm or Kubectl) that we will need."),(0,r.kt)("p",null,"We can easily do that by running the launchpad:setup command."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"# You may need to use sudo for this command\ntask launchpad:setup\n\n# For now, this will just run launchpad:deps, which will install all the local tooling dependencies\n")),(0,r.kt)("h3",{id:"connect-your-local-environment-to-your-kubernetes-cluster"},"Connect your local environment to your Kubernetes cluster"),(0,r.kt)("p",null,"To connect your local machine to a Kubernetes cluster, you can follow these general steps:"),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Get Cluster Configuration:")," Make sure your ",(0,r.kt)("a",{parentName:"p",href:"https://devopscube.com/kubernetes-kubeconfig-file/"},(0,r.kt)("inlineCode",{parentName:"a"},"kubeconfig"))," has been added to your ",(0,r.kt)("inlineCode",{parentName:"p"},"~/.kube/config")," file. If you don't have this file, you may need to ask the administrator who created the cluster for the configuration."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Verify Configuration:")," Open the ",(0,r.kt)("inlineCode",{parentName:"p"},"config")," file in a text editor to verify that it contains the correct cluster details, including server URL, certificates, and context information."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Switch Context if working with multiple Kubernetes clusters:")," A context in Kubernetes is a combination of a cluster, a user, and a namespace. Use the ",(0,r.kt)("inlineCode",{parentName:"p"},"kubectl config use-context")," command to set your desired context. For example:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sh"},"kubectl config use-context <context-name>\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Test Connection:")," Run a simple ",(0,r.kt)("inlineCode",{parentName:"p"},"kubectl")," command to test if your local machine can connect to the cluster:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-sh"},"kubectl get pods\n")),(0,r.kt)("p",null,"This command should list the pods in the default namespace of your cluster."),(0,r.kt)("p",null,"Remember that each cluster might have specific setup steps or requirements, especially if it's managed by a cloud provider. 
Always refer to the documentation provided by the cluster administrator or the cloud provider for detailed instructions on connecting your local machine to the cluster."),(0,r.kt)("h3",{id:"-milestone-local-environment-configured"},"\ud83c\udf89 Milestone: Local environment configured!"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We now have our own private git repo containing the declarative configuration for our cluster deployments"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We have installed all the tooling dependencies on our local machine, which will be used to control the cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Copy ",(0,r.kt)("inlineCode",{parentName:"li"},"sample.helmfile.yaml")," to ",(0,r.kt)("inlineCode",{parentName:"li"},"helmfile.yaml")," and edit it to select which Namespaces you would like to deploy on your Kubernetes cluster")),(0,r.kt)("h3",{id:"customize-your-helmfiles"},"Customize your helmfiles"),(0,r.kt)("p",null,"To get started with Helmfile, if you don\u2019t already have a ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),", you can begin by copying the provided sample configuration file named ",(0,r.kt)("inlineCode",{parentName:"p"},"sample.helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"cp sample.helmfile.yaml helmfile.yaml\n")),(0,r.kt)("p",null,"After copying, open ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," in your preferred text editor to make necessary modifications. Within this file, you will find a ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfiles:")," section which organizes deployment configurations by namespace through multiple helmfile paths:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: namespaces/storage.yaml\n - path: namespaces/sealed-secrets.yaml\n - path: namespaces/postgres-operator.yaml\n - path: namespaces/ingress.yaml\n - path: namespaces/monitoring.yaml\n - path: namespaces/eth-sepolia.yaml\n - path: namespaces/eth-mainnet.yaml\n - path: namespaces/arbitrum-sepolia.yaml\n - path: namespaces/graph-arbitrum-sepolia.yaml\n")),(0,r.kt)("p",null,"This structure allows you to manage deployments modularly. You can add or remove entries in this list to include new namespaces or exclude those you no longer need. Each path points to a specific helmfile that defines resources to be deployed within that namespace. For instance, looking at ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/storage.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-stable/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n")),(0,r.kt)("p",null,"In the example above, values can be set to override the default configurations in a given Namespace, allowing for customization according to specific requirements. 
Refer to the Namespaces documentation for more examples of how to configure them, or to see which ones are available: ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces"},"Namespaces"),"."),(0,r.kt)("h3",{id:"syncing-your-helmfileyaml-with-the-cluster"},"Syncing your ",(0,r.kt)("inlineCode",{parentName:"h3"},"helmfile.yaml")," with the cluster"),(0,r.kt)("p",null,"Next, we need to install key non-Graph components of our stack, including monitoring and logging systems."),(0,r.kt)("p",null,"Let's see what the ",(0,r.kt)("inlineCode",{parentName:"p"},"releases:apply-base")," task is actually doing by running ",(0,r.kt)("inlineCode",{parentName:"p"},"task help -- releases:apply-base"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task: releases:apply-base\n \nApply current helmfile state filtered by all base layer services\n \ncommands:\n \n\u2022 task releases:apply -- launchpad.graphops.xyz/layer=base\n")),(0,r.kt)("p",null,"As you can see, ",(0,r.kt)("inlineCode",{parentName:"p"},"releases:apply-base")," just calls ",(0,r.kt)("inlineCode",{parentName:"p"},"releases:apply"),", filtered to all namespaces with the label ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad.graphops.xyz/layer=base"),"."),(0,r.kt)("p",null,"You can list all the releases present in the helmfile.yaml, and their labels, by running ",(0,r.kt)("inlineCode",{parentName:"p"},"task releases:list"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"NAME NAMESPACE ENABLED INSTALLED LABELS CHART VERSION \nopenebs storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage openebs/openebs 3.8.0 \nopenebs-zfs-localpv storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage openebs-zfs-localpv/zfs-localpv 2.3.0 \nopenebs-zfs-storageclass storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage graphops/resource-injector 0.2.0 \nopenebs-zfs-snapclass storage true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage graphops/resource-injector 0.2.0 \npostgres-operator postgres-operator true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:postgres-operator postgres-operator-charts/postgres-operator 1.10.0 \ningress-nginx ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress ingress-nginx/ingress-nginx 4.7.1 \ncert-manager ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress jetstack/cert-manager v1.12.3 \ncert-manager-resources ingress true true launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress graphops/resource-injector 0.2.0 \nsealed-secrets sealed-secrets true true launchpad.graphops.xyz/namespace:sealed-secrets sealed-secrets/sealed-secrets 2.1\n")),(0,r.kt)("p",null,"First, update the Helmfile configuration for the base namespaces. 
You will likely need to configure storage and ingress settings in their respective files, ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/storage.yaml")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/ingress.yaml"),", by customizing them with your specific values."),(0,r.kt)("p",null,"In particular, the storage namespace may be a requirement even for other base namespaces, so let's install that one first by running ",(0,r.kt)("inlineCode",{parentName:"p"},"task releases:apply -- launchpad.graphops.xyz/namespace=storage"),".")),(0,r.kt)("p",null,"Next, let's go ahead and install all the remaining cluster services. You will be prompted to install each namespace, with a summary of changes to be made."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply-base\n")),(0,r.kt)("h3",{id:"-milestone-kubernetes-and-core-systems-running"},"\ud83c\udf89 Milestone: Kubernetes and core systems running!"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We connected to our hosts, configured them, and installed Kubernetes"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We installed core cluster services like Prometheus, Grafana, Loki and others"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Deploy blockchain nodes and the Graph Indexing stack")),(0,r.kt)("admonition",{type:"tip"},(0,r.kt)("p",{parentName:"admonition"},"You can now use ",(0,r.kt)("inlineCode",{parentName:"p"},"task indexer:forward-grafana")," to securely access your remote cluster's Grafana instance at http://localhost:3001.")),(0,r.kt)("h3",{id:"deploy-blockchain-namespaces-as-desired"},"Deploy blockchain namespaces as desired"),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).")),(0,r.kt)("p",null,"Launchpad comes with Namespace definitions for a number of blockchain networks, including Ethereum Mainnet, Ethereum Sepolia Testnet, Gnosis Chain Mainnet, Polygon Mainnet, Arbitrum One, Arbitrum Sepolia, Celo Mainnet, and others. Using those Namespaces, you can easily deploy blockchain nodes for the networks you want to index into your cluster."),(0,r.kt)("h4",{id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia"},"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia"),(0,r.kt)("p",null,"Make sure that your ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," includes a path directing to ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/arbitrum-sepolia.yaml"),". 
Afterward, carefully examine the settings within ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/arbitrum-sepolia.yaml")," to confirm they are accurate and align with your specific needs:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@arbitrum/helmfile.yaml?ref=arbitrum-canary/latest\n selectorsInherited: true\n values:\n - flavor: sepolia\n helmDefaults:\n <<: *helmDefaults\n arbitrum-nitro:\n values:\n nitro:\n config:\n chain: 421614\n parentChainUrl: <> ## if set up with default ethereum ns values this would be http://proxyd-proxyd.eth-sepolia:8545 \n parentChainBeaconUrl: <> ## if set up with default ethereum ns values this would be http://nimbus.eth-sepolia:5052\n")),(0,r.kt)("p",null,"Deploy by syncing your cluster with the declarative ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply -- arbitrum-sepolia\n")),(0,r.kt)("h3",{id:"install-the-graph-arbitrum-sepolia-indexer-stack"},"Install the Graph Arbitrum Sepolia Indexer Stack"),(0,r.kt)("p",null,"Make sure that your ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," includes a path directing to ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/graph-arbitrum-sepolia.yaml"),". Afterward, carefully examine the settings within ",(0,r.kt)("inlineCode",{parentName:"p"},"namespaces/graph-arbitrum-sepolia.yaml")," to confirm they are accurate and align with your specific needs."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},'helmfiles:\n - path: git::https://github.com/graphops/launchpad-namespaces.git@graph/helmfile.yaml?ref=graph-canary/latest\n selectorsInherited: true\n values:\n - helmDefaults:\n <<: *helmDefaults\n flavor: "arbitrum-sepolia"\n - graph-network-indexer:\n values:\n indexerDefaults:\n config:\n indexer-address: "<>"\n indexerAgent:\n config:\n public-indexer-url: "<>"\n graph-operator-mnemonic:\n values:\n resources:\n ### RECOMMENDED, safe to commit\n sealed-secret:\n apiVersion: bitnami.com/v1alpha1\n kind: SealedSecret\n metadata:\n name: graph-operator-mnemonic\n namespace: graph-arbitrum-sepolia\n spec:\n template:\n metadata:\n name: graph-operator-mnemonic\n namespace: graph-arbitrum-sepolia\n type: Opaque\n encryptedData:\n mnemonic: <> # Generate a SealedSecret encryptedData key with the "utils:seal-secrets" task, e.g.: task utils:seal-secrets -- -n graph-arbitrum-sepolia -s graph-operator-mnemonic -k mnemonic -v "your mnemonic words"\n graph-database:\n values:\n resources:\n postgres-cr-primary-subgraph-data:\n spec:\n volume:\n storageClass: "<>"\n postgres-cr-indexer-metadata:\n spec:\n volume:\n storageClass: "<>"\n')),(0,r.kt)("p",null,"Proceed to deploy:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task releases:apply -- graph-arbitrum-sepolia\n")),(0,r.kt)("h3",{id:"-milestone-graph-indexer-running-and-accessible"},"\ud83c\udf89 Milestone: Graph Indexer running and accessible"),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We (optionally) configured and deployed blockchain nodes into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We 
configured and deployed the Graph Indexing stack into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests")),(0,r.kt)("h2",{id:"updates"},"Updates"),(0,r.kt)("h3",{id:"updating-launchpad-namespace-changes-into-your-stack"},"Updating ",(0,r.kt)("inlineCode",{parentName:"h3"},"launchpad-namespaces")," changes into your stack"),(0,r.kt)("p",null,"As new versions of key components in the stack are released, we will update ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces"),"'s templated definitions and the various release streams available. You can selectively inherit these updates by changing the git ref to track the release stream you want, or to pin to a particular major, minor, or patch version."),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following latest"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-latest"),", or alternatively: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-stable/latest"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-latest\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following a specific major version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following a specific minor version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1.2"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"pinning to an exact version"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-v1.2.2"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2.2\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"following the latest canary"),":"),(0,r.kt)("p",null,"Your ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=")," would look like this, for the storage 
namespace: ",(0,r.kt)("inlineCode",{parentName:"p"},"?ref=storage-canary/latest"),".\nThe path for this ",(0,r.kt)("em",{parentName:"p"},"Namespace"),", under helmfiles, would then look like:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-canary/latest\n")),(0,r.kt)("p",null,"We would recommend that you either follow the latest stable releases, or pin to a specific version."),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"For full implementation details and other comprehensive notes about ",(0,r.kt)("inlineCode",{parentName:"p"},"launchpad-namespaces")," please visit the ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/graphops/launchpad-namespaces/blob/main/README.md"},"github repo"),".")),(0,r.kt)("h3",{id:"pulling-in-starter-changes"},"Pulling in starter changes"),(0,r.kt)("p",null,"From time to time, you may want to update your infra repo with the latest changes from our starter. "),(0,r.kt)("p",null,"Launchpad comes with a built in task to do this:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"task launchpad:pull-upstream-starter\n")),(0,r.kt)("h2",{id:"using-helmfile-and-launchpad-charts"},"Using Helmfile and Launchpad Charts"),(0,r.kt)("p",null,"This guide will cover two primary ways to deploy blockchain-related resources in Kubernetes using Launchpad charts: deploying all components at once using Helmfile and deploying individual components directly using Helm charts."),(0,r.kt)("h3",{id:"prerequisites"},"Prerequisites"),(0,r.kt)("p",null,"Ensure you have ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/helm/helm"},"helm"),", ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/helmfile/helmfile"},"helmfile")," and it's dependency ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/databus23/helm-diff"},"helm-diff")," installed on your local machine. This guide assumes familiarity with basic Helm and Helmfile operations."),(0,r.kt)("p",null,"Before proceeding with this guide, make sure the following tools are installed on your local machine:"),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/helm/helm"},"Helm"),": The package manager for Kubernetes, essential for managing and deploying applications."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/helmfile/helmfile"},"Helmfile"),": A tool to help streamline the use of Helm charts, enabling better management of Helm chart configurations."),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"https://github.com/databus23/helm-diff"},"Helm-diff"),": A Helm plugin that helps visualize differences between your Helmfile configurations and what is actually deployed in your cluster. 
This plugin is a dependency for effectively using Helmfile."),(0,r.kt)("li",{parentName:"ul"},"(Optional) ",(0,r.kt)("a",{parentName:"li",href:"https://github.com/kubernetes-sigs/kustomize"},"Kustomize"),": A tool for customizing Kubernetes configurations beyond what is available with Helm, useful for more complex deployment scenarios.\nThis guide assumes you are familiar with basic operations of Helm and Helmfile.")),(0,r.kt)("h3",{id:"deploying-using-launchpad-charts-directly"},"Deploying using Launchpad-charts directly"),(0,r.kt)("p",null,"If you prefer to use individual components of Launchpad, such as Launchpad Charts, you can add the Launchpad Helm repository and install charts directly:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helm repo add graphops https://graphops.github.io/launchpad-charts\nhelm install my-release graphops/<chart-name> --values <your-values.yaml>\n")),(0,r.kt)("h4",{id:"key-consideration"},"Key Consideration"),(0,r.kt)("p",null,"Before proceeding, it is important to note that most Kubernetes clusters do not come pre-configured with a ",(0,r.kt)("a",{parentName:"p",href:"https://kubernetes-csi.github.io/docs/"},"Container Storage Interface (CSI)")," for handling storage volumes. This guide relies on the ability to create storage volumes. It is also necessary to have an Ingress controller installed and configured, as it is essential for managing traffic to and from your applications."),(0,r.kt)("h3",{id:"deploying-using-helmfile"},"Deploying using Helmfile"),(0,r.kt)("p",null,"For a comprehensive deployment, managing all related Helm releases and their values via a single Helmfile offers simplicity and maintainability. This method is particularly effective when deploying complex stacks."),(0,r.kt)("h3",{id:"deploy-blockchain-namespaces-as-desired-1"},"Deploy blockchain namespaces as desired"),(0,r.kt)("admonition",{type:"note"},(0,r.kt)("p",{parentName:"admonition"},"If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).")),(0,r.kt)("h4",{id:"optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia-1"},"(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia"),(0,r.kt)("p",null,"The following ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml")," provides an example configuration for deploying Arbitrum Nitro on the Arbitrum Sepolia network. 
For an easier setup process, we recommend utilizing the ",(0,r.kt)("a",{parentName:"p",href:"#optional-arbitrum-sepolia-install-arbitrum-nitro-and-proxyd-for-arbitrum-sepolia"},"Launchpad Arbitrum namespace"),", which includes most of the necessary configurations pre-defined for your convenience."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},"# helmfile.yaml\nrepositories:\n - name: graphops\n url: https://graphops.github.io/launchpad-charts\n\nreleases:\n - name: arbitrum-nitro\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/arbitrum-nitro\n version: 0.3.4\n values:\n - nitro:\n config:\n chain: 421614 # determines Arbitrum network - 421614 Sepolia\n parentChainUrl: http://your-eth-sepolia-url:8545 ## changeme\n parentChainBeaconUrl: http://your-eth-consensus-node-url:5052 ## changeme\n\n volumeClaimSpec:\n resources:\n requests:\n # -- The amount of disk space to provision for Arbitrum Nitro\n storage: 1Ti\n # -- The storage class to use when provisioning a persistent volume for Arbitrum-Nitro \n storageClassName: openebs-rawfile-localpv #\xa0change me as needed\n\n restoreSnapshot:\n enabled: false\n\n extraLabels:\n app.kubernetes.io/workload-type: blockchain-stateful\n app.kubernetes.io/blockchain: arbitrum-nitro\n\n # if using Prometheus for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n - name: proxyd-nitro\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/proxyd\n version: 0.5.3\n values:\n - backends:\n arbitrum-nitro:\n enabled: true\n # -- Define the RPC URL for the backend\n rpcUrl: http://arbitrum-nitro:8547\n # -- Define the WS URL for the backend\n wsUrl: ws://arbitrum-nitro:8548\n # -- Define additional configuration keys for the backend (see [proxyd config](https://github.com/ethereum-optimism/optimism/blob/5d309e6a6d5e1ef6a88c1ce827b7e6d47f033bbb/proxyd/example.config.toml#L47))\n extraConfig:\n consensus_skip_peer_count: true\n # -- Define which backend groups the backend is part of\n groups:\n - main\n\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n")),(0,r.kt)("p",null,"Deploy by syncing your cluster with the declarative ",(0,r.kt)("inlineCode",{parentName:"p"},"helmfile.yaml"),":"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helmfile -f path/to/helmfile.yaml sync\n")),(0,r.kt)("h3",{id:"install-the-graph-arbitrum-sepolia-indexer-stack-1"},"Install the Graph Arbitrum Sepolia Indexer Stack"),(0,r.kt)("p",null,"This section of the guide does not include the setup for ",(0,r.kt)("inlineCode",{parentName:"p"},"subgraph-data")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-metadata")," PostgreSQL databases necessary for ",(0,r.kt)("inlineCode",{parentName:"p"},"graph-node")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-agent"),". 
You are encouraged to explore ",(0,r.kt)("a",{parentName:"p",href:"https://www.postgresql.org/support/professional_hosting/"},"managed solutions"),", use ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/bitnami/charts/tree/main/bitnami/postgresql"},"Bitnami's chart"),", or deploy ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/zalando/postgres-operator/tree/master"},"Zalando's Operator")," as part of the Launchpad Namespaces which includes a ready-to-use Postgres setup or independently."),(0,r.kt)("p",null,"Include the necessary configurations for ",(0,r.kt)("inlineCode",{parentName:"p"},"graph-node")," and ",(0,r.kt)("inlineCode",{parentName:"p"},"indexer-agent")," in your helmfile.yaml as shown in the previous sections, adjusting PostgreSQL references and other settings to fit your specific requirements."),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-yaml"},'releases:\n - name: graph-node\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-node\n version: 0.5.3\n values:\n # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-node\n - graphNodeDefaults:\n env:\n # Graph Node configuration\n IPFS: "https://ipfs.network.thegraph.com"\n GRAPH_ALLOW_NON_DETERMINISTIC_FULLTEXT_SEARCH: "true"\n # Database configuration\n PRIMARY_SUBGRAPH_DATA_PGHOST: ## change me\n PRIMARY_SUBGRAPH_DATA_PGPORT: 5432\n PRIMARY_SUBGRAPH_DATA_PGDATABASE: ## change me\n\n # Database sensitive/secret information\n secretEnv:\n PRIMARY_SUBGRAPH_DATA_PGUSER:\n secretName: \n key: username\n PRIMARY_SUBGRAPH_DATA_PGPASSWORD:\n secretName: \n key: password\n\n graphNodeGroups:\n index:\n replicaCount: 1 #\xa0scale me\n query:\n replicaCount: 1 #\xa0scale me\n \n chains:\n mainnet:\n enabled: true\n shard: primary\n provider:\n - label: eth-mainnet\n url: ## change me\n features: [archive, traces]\n\n arbitrum-sepolia:\n enabled: true\n shard: primary\n provider:\n - label: arbitrum-sepolia\n url: http://proxyd-proxyd.arbitrum-sepolia:8545\n features: [archive, traces]\n\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n datasources: true\n\n - name: graph-network-indexer\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-network-indexer\n version: 0.2.5\n values:\n # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer\n - indexerDefaults:\n config:\n ethereum: "http://proxyd-proxyd.arbitrum-sepolia:8545"\n ethereum-network: "arbitrum-sepolia"\n network-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia"\n graph-node-query-endpoint: "http://graph-node-query:8000"\n graph-node-status-endpoint: "http://graph-node-block-ingestor:8030/graphql"\n postgres-host: "" ## change me\n postgres-database: "" ## change me\n\n indexerAgent:\n config:\n collect-receipts-endpoint: "https://gateway-testnet-arbitrum.network.thegraph.com/collect-receipts"\n network-subgraph-deployment: "QmT8UDGK7zKd2u2NQZwhLYHdA4KM55QsivkE3ouCuX6fEj" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n epoch-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/arbitrum-sepolia-ebo" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n epoch-subgraph-deployment: "QmTpu2mVquoMpr4SWSM77nGkU3tcUS1Bhk1sVHpjDrAUAx"\n 
graph-node-admin-endpoint: "http://graph-node-block-ingestor:8020"\n public-indexer-url: "" ## change me\n index-node-ids: "graph-node-index-0" # if more than one graph-node index, specify as comma-delimited list, i.e. "graph-node-index-0, graph-node-index-1"\n\n secretEnv:\n INDEXER_AGENT_MNEMONIC:\n secretName: ## change me\n key: mnemonic\n INDEXER_AGENT_POSTGRES_USERNAME:\n secretName: ## change me\n key: username\n INDEXER_AGENT_POSTGRES_PASSWORD:\n secretName: ## change me\n key: password\n\n\n indexerService:\n replicas: 1 # scale me\n\n config:\n client-signer-address: "0xe1EC4339019eC9628438F8755f847e3023e4ff9c" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md\n \n secretEnv:\n INDEXER_SERVICE_MNEMONIC:\n secretName: ## change me\n key: mnemonic\n INDEXER_SERVICE_POSTGRES_USERNAME:\n secretName: ## change me\n key: username\n INDEXER_SERVICE_POSTGRES_PASSWORD:\n secretName: ## change me\n key: password\n # if using Prometheus and Grafana for monitoring:\n prometheus:\n serviceMonitors:\n enabled: true\n\n grafana:\n dashboards: true\n\n - name: subgraph-radio\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/subgraph-radio\n version: 0.2.8\n values:\n - env:\n GRAPH_NODE_STATUS_ENDPOINT: http://graph-node-block-ingestor:8030/graphql\n INDEXER_MANAGEMENT_SERVER_ENDPOINT: http://graph-network-indexer-agent:8000\n GRAPHCAST_NETWORK: "testnet"\n REGISTRY_SUBGRAPH: https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-se\n NETWORK_SUBGRAPH: https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia\n secretEnv:\n MNEMONIC:\n secretName: ## change me\n key: mnemonic\n\n - name: graph-toolbox\n namespace: arbitrum-sepolia\n createNamespace: true\n chart: graphops/graph-toolbox\n version: 0.1.0\n values:\n - config:\n graphNode:\n # -- URL to Graph Node Admin API\n adminApiUrl: http://graph-node-block-ingestor:8020\n existingConfigMap:\n # -- The name of the ConfigMap that contains your Graph Node config.toml\n configMapName: graph-node-config\n # -- The name of the data key in the ConfigMap that contains your config.toml\n configFileKey: config.toml\n indexer:\n # -- URL to Indexer Agent Management Server\n indexerAgentManagementUrl: http://graph-network-indexer-agent:8000\n\n aliases:\n graphman: graphman --config /graphman-config/config.toml\n indexer: graph-indexer indexer\n psql-primary-subgraph-data: >\n PGPASSWORD=$PRIMARY_SUBGRAPH_DATA_PGPASSWORD psql -w -U $PRIMARY_SUBGRAPH_DATA_PGUSER -d "host=$PRIMARY_SUBGRAPH_DATA_PGHOST,port=$PRIMARY_SUBGRAPH_DATA_PGPORT,dbname=$PRIMARY_SUBGRAPH_DATA_PGDATABASE"\n psql-indexer-metadata: >\n PGPASSWORD=$INDEXER_METADATA_PGPASSWORD psql -w -U $INDEXER_METADATA_PGUSER -d "host=$INDEXER_METADATA_PGHOST,port=$INDEXER_METADATA_PGPORT,dbname=$INDEXER_METADATA_PGDATABASE"\n\n env:\n PRIMARY_SUBGRAPH_DATA_PGHOST: ## change me\n PRIMARY_SUBGRAPH_DATA_PGPORT: 5432\n PRIMARY_SUBGRAPH_DATA_PGDATABASE: ## change me\n INDEXER_METADATA_PGHOST: ## change me\n INDEXER_METADATA_PGPORT: 5432\n INDEXER_METADATA_PGDATABASE: ## change me\n\n secretEnv:\n PRIMARY_SUBGRAPH_DATA_PGUSER:\n secretName: ## change me\n key: username\n PRIMARY_SUBGRAPH_DATA_PGPASSWORD:\n secretName: ## change me\n key: password\n INDEXER_METADATA_PGUSER:\n secretName: ## change me\n key: username\n INDEXER_METADATA_PGPASSWORD:\n secretName: ## change me\n key: password\n')),(0,r.kt)("p",null,"Proceed to deploy:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},"helmfile -f path/to/helmfile.yaml 
sync\n")),(0,r.kt)("h3",{id:"-milestone-graph-indexer-running-and-accessible-1"},"\ud83c\udf89 Milestone: Graph Indexer running and accessible"),(0,r.kt)("p",null,"Once your deployments are successfully applied, your Graph Indexer should be operational, with blockchain nodes (if deployed) and the Graph Indexing stack running in your Kubernetes cluster."),(0,r.kt)("ul",{className:"contains-task-list"},(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We (optionally) configured and deployed blockchain nodes into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!0,disabled:!0})," ","We configured and deployed the Graph Indexing stack into our cluster"),(0,r.kt)("li",{parentName:"ul",className:"task-list-item"},(0,r.kt)("input",{parentName:"li",type:"checkbox",checked:!1,disabled:!0})," ","Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests")))}h.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.c6e5a965.js b/assets/js/runtime~main.83b76d50.js similarity index 96% rename from assets/js/runtime~main.c6e5a965.js rename to assets/js/runtime~main.83b76d50.js index 00b7b185..0d82c2bc 100644 --- a/assets/js/runtime~main.c6e5a965.js +++ b/assets/js/runtime~main.83b76d50.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,d,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={exports:{}};return t[e].call(f.exports,f,f.exports,b),f.exports}b.m=t,e=[],b.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,b.d(d,t),d},b.d=(e,a)=>{for(var f in 
a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",133:"22198432",222:"fb70dcf5",453:"30a24c52",533:"b2b675dd",653:"4ac6162a",948:"8717b14a",1263:"d2eb8d4c",1310:"9d3dd2a1",1477:"b2f554cd",1515:"24ebdd5e",1613:"a19c92ce",1633:"031793e1",1713:"a7023ddc",1723:"1026ed7f",1853:"655846b9",1914:"d9f32620",2267:"59362658",2362:"e273c56f",2398:"55efb065",2535:"814f3328",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3608:"9e4087bc",3936:"d5a57370",3997:"6ceb8cd0",4013:"01a85c17",4089:"bd9dd2f9",4843:"479c7032",4939:"8ef6d289",5119:"931ae2d5",5207:"ea715d66",5252:"6fb70ffc",6013:"4738c5d7",6103:"ccc49370",6241:"163dca7f",6254:"2472ae08",6686:"ed2299db",6885:"4ae37811",6938:"608ae6a4",6998:"43c72bb1",7058:"db074018",7178:"096bfee4",7645:"a7434565",7751:"a3ac3f10",7917:"4f3432ac",7918:"17896441",8056:"ca832579",8176:"d7261236",8193:"f8f6ede7",8271:"1c091541",8386:"01508a75",8610:"6875c492",8636:"f4f34a3a",9003:"925b3f96",9035:"4c9e35b1",9075:"fb51a15c",9334:"247783bb",9514:"1be78505",9638:"4ecff493",9642:"7661071f",9700:"e16015ca",9813:"2c9fa8e7",9960:"04eb4478"}[e]||e)+"."+{53:"21b3d6bd",110:"479310be",133:"dad58786",222:"d1e3bf7d",453:"b07fef1c",533:"0531b767",653:"4695403f",948:"78963ab1",1263:"e6072cce",1310:"7ed98d80",1477:"6c07586f",1515:"42a4eb53",1613:"e67e0e96",1633:"68e1d3d8",1713:"23b2ba87",1723:"3fb7a1bf",1853:"b6e1de89",1914:"5a4a9fed",2267:"6401aa21",2362:"cf7cab60",2398:"ebd12c11",2535:"9dbd0ebe",3089:"845cad8c",3205:"e7ede2a4",3237:"b255f7e0",3514:"2a3b45ce",3608:"a696b2b3",3936:"384c1c6b",3997:"ae257209",4013:"467cbdf3",4089:"e6089dcb",4843:"7c8cdba4",4939:"4519e42a",4972:"73c7d017",5119:"d86c191e",5207:"925edfec",5252:"42aab89e",6013:"fe0c9476",6048:"fb9b0605",6103:"d9c41d1e",6241:"1406ae09",6254:"557def3a",6316:"57f0e7b8",6686:"7b8338c3",6885:"24214e37",6938:"a6023127",6998:"ba8ce235",7058:"f80dbec2",7178:"f73c46b5",7645:"8a3a8041",7724:"a1fda817",7751:"fc3b5bef",7917:"deb6bb3a",7918:"84e372fb",8056:"2e96bb68",8176:"afd1eded",8193:"a3abc84f",8271:"60b718c6",8386:"68dbb6f9",8610:"f37b7b5c",8636:"94f36cf8",8954:"a8cafb86",9003:"35534246",9035:"2be4405b",9075:"b93cd945",9334:"dadfb5a6",9487:"20f5186b",9514:"3a6a17fd",9638:"9a636aa0",9642:"aa215b28",9700:"d1830de3",9813:"224a411d",9960:"ec6f7725"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs:",b.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return 
e={17896441:"7918",22198432:"133",59362658:"2267",66406991:"110","935f2afb":"53",fb70dcf5:"222","30a24c52":"453",b2b675dd:"533","4ac6162a":"653","8717b14a":"948",d2eb8d4c:"1263","9d3dd2a1":"1310",b2f554cd:"1477","24ebdd5e":"1515",a19c92ce:"1613","031793e1":"1633",a7023ddc:"1713","1026ed7f":"1723","655846b9":"1853",d9f32620:"1914",e273c56f:"2362","55efb065":"2398","814f3328":"2535",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","9e4087bc":"3608",d5a57370:"3936","6ceb8cd0":"3997","01a85c17":"4013",bd9dd2f9:"4089","479c7032":"4843","8ef6d289":"4939","931ae2d5":"5119",ea715d66:"5207","6fb70ffc":"5252","4738c5d7":"6013",ccc49370:"6103","163dca7f":"6241","2472ae08":"6254",ed2299db:"6686","4ae37811":"6885","608ae6a4":"6938","43c72bb1":"6998",db074018:"7058","096bfee4":"7178",a7434565:"7645",a3ac3f10:"7751","4f3432ac":"7917",ca832579:"8056",d7261236:"8176",f8f6ede7:"8193","1c091541":"8271","01508a75":"8386","6875c492":"8610",f4f34a3a:"8636","925b3f96":"9003","4c9e35b1":"9035",fb51a15c:"9075","247783bb":"9334","1be78505":"9514","4ecff493":"9638","7661071f":"9642",e16015ca:"9700","2c9fa8e7":"9813","04eb4478":"9960"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n{"use strict";var e,a,f,c,d,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={exports:{}};return t[e].call(f.exports,f,f.exports,b),f.exports}b.m=t,e=[],b.O=(a,f,c,d)=>{if(!f){var t=1/0;for(i=0;i=d)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);b.r(d);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&c&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,b.d(d,t),d},b.d=(e,a)=>{for(var f in 
a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",133:"22198432",222:"fb70dcf5",453:"30a24c52",533:"b2b675dd",653:"4ac6162a",948:"8717b14a",1263:"d2eb8d4c",1310:"9d3dd2a1",1477:"b2f554cd",1515:"24ebdd5e",1613:"a19c92ce",1633:"031793e1",1713:"a7023ddc",1723:"1026ed7f",1853:"655846b9",1914:"d9f32620",2267:"59362658",2362:"e273c56f",2398:"55efb065",2535:"814f3328",3089:"a6aa9e1f",3205:"a80da1cf",3237:"1df93b7f",3514:"73664a40",3608:"9e4087bc",3936:"d5a57370",3997:"6ceb8cd0",4013:"01a85c17",4089:"bd9dd2f9",4843:"479c7032",4939:"8ef6d289",5119:"931ae2d5",5207:"ea715d66",5252:"6fb70ffc",6013:"4738c5d7",6103:"ccc49370",6241:"163dca7f",6254:"2472ae08",6686:"ed2299db",6885:"4ae37811",6938:"608ae6a4",6998:"43c72bb1",7058:"db074018",7178:"096bfee4",7645:"a7434565",7751:"a3ac3f10",7917:"4f3432ac",7918:"17896441",8056:"ca832579",8176:"d7261236",8193:"f8f6ede7",8271:"1c091541",8386:"01508a75",8610:"6875c492",8636:"f4f34a3a",9003:"925b3f96",9035:"4c9e35b1",9075:"fb51a15c",9334:"247783bb",9514:"1be78505",9638:"4ecff493",9642:"7661071f",9700:"e16015ca",9813:"2c9fa8e7",9960:"04eb4478"}[e]||e)+"."+{53:"e3d5fd60",110:"479310be",133:"dad58786",222:"d1e3bf7d",453:"b07fef1c",533:"0531b767",653:"4695403f",948:"78963ab1",1263:"73065bf8",1310:"7ed98d80",1477:"6c07586f",1515:"42a4eb53",1613:"e67e0e96",1633:"68e1d3d8",1713:"23b2ba87",1723:"3fb7a1bf",1853:"b6e1de89",1914:"5a4a9fed",2267:"6401aa21",2362:"cf7cab60",2398:"ebd12c11",2535:"9dbd0ebe",3089:"845cad8c",3205:"e7ede2a4",3237:"b255f7e0",3514:"2a3b45ce",3608:"a696b2b3",3936:"384c1c6b",3997:"ae257209",4013:"467cbdf3",4089:"3f20c11d",4843:"7c8cdba4",4939:"8028956f",4972:"73c7d017",5119:"d86c191e",5207:"925edfec",5252:"42aab89e",6013:"fe0c9476",6048:"fb9b0605",6103:"d9c41d1e",6241:"1406ae09",6254:"557def3a",6316:"57f0e7b8",6686:"7b8338c3",6885:"24214e37",6938:"a6023127",6998:"ba8ce235",7058:"f80dbec2",7178:"f73c46b5",7645:"8a3a8041",7724:"a1fda817",7751:"fc3b5bef",7917:"deb6bb3a",7918:"84e372fb",8056:"2e96bb68",8176:"afd1eded",8193:"a3abc84f",8271:"60b718c6",8386:"68dbb6f9",8610:"f37b7b5c",8636:"94f36cf8",8954:"a8cafb86",9003:"35534246",9035:"2be4405b",9075:"b93cd945",9334:"dadfb5a6",9487:"20f5186b",9514:"3a6a17fd",9638:"9a636aa0",9642:"aa215b28",9700:"d1830de3",9813:"224a411d",9960:"ec6f7725"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs:",b.l=(e,a,f,t)=>{if(c[e])c[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],r.parentNode&&r.parentNode.removeChild(r),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/",b.gca=function(e){return 
e={17896441:"7918",22198432:"133",59362658:"2267",66406991:"110","935f2afb":"53",fb70dcf5:"222","30a24c52":"453",b2b675dd:"533","4ac6162a":"653","8717b14a":"948",d2eb8d4c:"1263","9d3dd2a1":"1310",b2f554cd:"1477","24ebdd5e":"1515",a19c92ce:"1613","031793e1":"1633",a7023ddc:"1713","1026ed7f":"1723","655846b9":"1853",d9f32620:"1914",e273c56f:"2362","55efb065":"2398","814f3328":"2535",a6aa9e1f:"3089",a80da1cf:"3205","1df93b7f":"3237","73664a40":"3514","9e4087bc":"3608",d5a57370:"3936","6ceb8cd0":"3997","01a85c17":"4013",bd9dd2f9:"4089","479c7032":"4843","8ef6d289":"4939","931ae2d5":"5119",ea715d66:"5207","6fb70ffc":"5252","4738c5d7":"6013",ccc49370:"6103","163dca7f":"6241","2472ae08":"6254",ed2299db:"6686","4ae37811":"6885","608ae6a4":"6938","43c72bb1":"6998",db074018:"7058","096bfee4":"7178",a7434565:"7645",a3ac3f10:"7751","4f3432ac":"7917",ca832579:"8056",d7261236:"8176",f8f6ede7:"8193","1c091541":"8271","01508a75":"8386","6875c492":"8610",f4f34a3a:"8636","925b3f96":"9003","4c9e35b1":"9035",fb51a15c:"9075","247783bb":"9334","1be78505":"9514","4ecff493":"9638","7661071f":"9642",e16015ca:"9700","2c9fa8e7":"9813","04eb4478":"9960"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var c=b.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+d+": "+t+")",r.name="ChunkLoadError",r.type=d,r.request=t,c[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(c in r)b.o(r,c)&&(b.m[c]=r[c]);if(o)var i=o(b)}for(a&&a(f);n Blog | GraphOps Docs - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.
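For reference, here is a minimal sketch of what an entry in authors.yml can look like; the key, title, and URLs below are illustrative assumptions, not values taken from this site:

```yaml
# blog/authors.yml — illustrative entry (title and URLs are placeholders)
slorber:
  name: Sébastien Lorber
  title: Docusaurus maintainer # assumed title, for illustration only
  url: https://sebastienlorber.com # assumed URL
  image_url: https://github.com/slorber.png # assumed avatar URL
```

A post can then reference its author by key in the front matter, e.g. `authors: slorber`.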

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!
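Tags (and an explicit date, if you prefer not to rely on the filename) are declared in a post's front matter. A minimal sketch, with illustrative values:

```yaml
# Front matter at the top of a post such as blog/2019-05-30-welcome/index.md
---
title: Welcome
authors: [slorber, yangshun]
tags: [hello, docusaurus]
date: 2019-05-30 # optional when the date is already encoded in the filename
---
```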

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/archive.html b/blog/archive.html index 401a1e95..fac7e582 100644 --- a/blog/archive.html +++ b/blog/archive.html @@ -5,13 +5,13 @@ Archive | GraphOps Docs - + - + \ No newline at end of file diff --git a/blog/first-blog-post.html b/blog/first-blog-post.html index 350f512d..6d86e065 100644 --- a/blog/first-blog-post.html +++ b/blog/first-blog-post.html @@ -5,13 +5,13 @@ First Blog Post | GraphOps Docs - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/long-blog-post.html b/blog/long-blog-post.html index 05236b0d..a6b695b2 100644 --- a/blog/long-blog-post.html +++ b/blog/long-blog-post.html @@ -5,13 +5,13 @@ Long Blog Post | GraphOps Docs - +

Long Blog Post

· 3 min read
Endilie Yacop Sucipto

This is the summary of a very long blog post.

Use a <!-- truncate --> comment to limit blog post size in the list view.

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/mdx-blog-post.html b/blog/mdx-blog-post.html index e1fca6a2..001090c8 100644 --- a/blog/mdx-blog-post.html +++ b/blog/mdx-blog-post.html @@ -5,13 +5,13 @@ MDX Blog Post | GraphOps Docs - +
- + \ No newline at end of file diff --git a/blog/tags.html b/blog/tags.html index 1e083393..899f2b7b 100644 --- a/blog/tags.html +++ b/blog/tags.html @@ -5,13 +5,13 @@ Tags | GraphOps Docs - + - + \ No newline at end of file diff --git a/blog/tags/docusaurus.html b/blog/tags/docusaurus.html index 20100946..e7d166ef 100644 --- a/blog/tags/docusaurus.html +++ b/blog/tags/docusaurus.html @@ -5,13 +5,13 @@ 4 posts tagged with "docusaurus" | GraphOps Docs - +

4 posts tagged with "docusaurus"

- + \ No newline at end of file diff --git a/blog/tags/facebook.html b/blog/tags/facebook.html index 15d15f91..d93373c5 100644 --- a/blog/tags/facebook.html +++ b/blog/tags/facebook.html @@ -5,13 +5,13 @@ One post tagged with "facebook" | GraphOps Docs - +

One post tagged with "facebook"

- + \ No newline at end of file diff --git a/blog/tags/hello.html b/blog/tags/hello.html index 6897573c..a4d4b9d3 100644 --- a/blog/tags/hello.html +++ b/blog/tags/hello.html @@ -5,13 +5,13 @@ 2 posts tagged with "hello" | GraphOps Docs - +

2 posts tagged with "hello"

- + \ No newline at end of file diff --git a/blog/tags/hola.html b/blog/tags/hola.html index 6e442fd4..25d8ef3e 100644 --- a/blog/tags/hola.html +++ b/blog/tags/hola.html @@ -5,13 +5,13 @@ One post tagged with "hola" | GraphOps Docs - +

One post tagged with "hola"

- + \ No newline at end of file diff --git a/blog/welcome.html b/blog/welcome.html index c4c2e930..1cbfc5e0 100644 --- a/blog/welcome.html +++ b/blog/welcome.html @@ -5,13 +5,13 @@ Welcome | GraphOps Docs - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/graphcast/design-principles.html b/graphcast/design-principles.html index a7269090..338873c6 100644 --- a/graphcast/design-principles.html +++ b/graphcast/design-principles.html @@ -5,13 +5,13 @@ Design Principles | GraphOps Docs - +

Design Principles

There are two main components of Graphcast:

  • The Graphcast SDK: The base layer SDK which interfaces with The Graph stack and the Waku network. This includes interactions with an Ethereum client, a Graph node client, a client for the Indexer management server, the Network subgraph, and the Registry subgraph.
  • Radios: Highly customizable gossip applications, built with the help of the Graphcast SDK, which define the specific message formats and logic around constructing and handling the messages. They are the nodes communicating in the Graphcast Network.

The Graphcast SDK

The SDK is the base layer which abstracts all the necessary components of each Radio away from the user. That includes:

  • Establishing a connection to Graphcast via a Waku gossip node, providing an interface for subscribing to specific topics and broadcasting messages across the network.
  • Interacting with a Graph node and a client for the Indexer management server.
  • Querying the Network and Registry subgraphs.
  • Checking message validity against past message injections, nonexistent blocks, and expired timestamps. The SDK also guarantees that messages are signed by an authorized operator address of an active on-chain Indexer (this can be used as a basis for a reputation system).
  • Supporting flexible and customizable configuration of the Graphcast gossip agent, enabling specification of network settings, peer discovery mechanisms, message encoding formats, and more. For detailed instructions on configuring Graphcast to suit your needs, refer to the configuration guide.
  • Managing topics, which represent different categories or subjects of information. Nodes can dynamically subscribe to specific topics to receive messages related to them; topics enable efficient message routing and dissemination within the network.
  • Providing a comprehensive message handling structure to ensure that messages are reliably transmitted, received, and processed within the network.

Radios

General Radio components

  • Implements Radio logic for its specific use case.
  • Controls topic subscriptions dynamically for topics of interest.
  • Provides the Radio type definition used to verify the integrity and authenticity of messages exchanged within the network.
  • Collects Radio-specific information and incorporates it into Graphcast messages along with other relevant metadata.
  • Observes and handles relevant messages received from peers.
  • Provides performance metrics, logs, and API services.

The first Radio built on top of Graphcast is the Subgraph Radio. It's designed to facilitate real-time information exchange among participants in The Graph network and serves as a tool for Indexers and other network participants to share valuable Subgraph data.

With Subgraph Radio, Indexers can run a single Radio instance and track a wide variety of message types and data related to Subgraphs. Different use cases and message types form the different features of the Radio.

Features

Proof of Indexing (POI) cross-checking

Indexers must generate valid POIs to earn indexing rewards, and they have found it beneficial to alert each other about subgraph health in community discussions. To alleviate this manual workload, the POI cross-checking feature within Subgraph Radio:

  • Defines message types and topics
  • Collects public POIs from the Graph node and sends them inside of Graphcast messages along with other useful metadata
  • Observes relevant messages and aggregates public POIs sent from other Indexers, in order to compare local POIs to remote POIs
  • Monitors the network for conflicts and takes action if needed; for instance, Indexers can configure an alert system to send messages to a custom channel in their Slack workspace, a Discord channel, or a Telegram chat.

Subgraph Upgrade Pre-sync

The subgraph upgrade pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. You can learn more about the feature here.

- + \ No newline at end of file diff --git a/graphcast/intro.html b/graphcast/intro.html index 4386283b..ab33ff7f 100644 --- a/graphcast/intro.html +++ b/graphcast/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Why Graphcast

Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas?

When using the protocol, the cost to broadcast information to other network participants is determined by gas fees on the blockchain.

Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models.

What is Graphcast

The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases:

  • Real-time cross-checking of subgraph data integrity, with active bail-out in the case of diverging from stake-weighted POI consensus.
  • Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers.
  • Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc.
  • Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc.
  • Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc.

Learn more

If you want to find out more about the initial idea behind Graphcast, as well as stay up to date with the newest developments, keep an eye on the GRC post on The Graph Forum, or join the Graphcast Discord channel.

Contributing

We welcome and appreciate your contributions! 🤝 ➡️ Graphcast SDK

- + \ No newline at end of file diff --git a/graphcast/radios/graphcast-cli.html b/graphcast/radios/graphcast-cli.html index 023ba528..aab23538 100644 --- a/graphcast/radios/graphcast-cli.html +++ b/graphcast/radios/graphcast-cli.html @@ -5,13 +5,13 @@ Graphcast CLI | GraphOps Docs - +

Graphcast CLI

The source code for the Graphcast CLI is available on GitHub.

Introduction

The Graphcast CLI enables sending one-off messages. Currently, it can be used for the Subgraph Upgrade Pre-sync feature of Subgraph Radio.

The Graphcast CLI is configured using config variables. You will need to prepare the following variables (either as environment variables or as CLI arguments when running the CLI):

  • PRIVATE_KEY: Private key to the Graphcast ID wallet (takes precedence over mnemonic). Example: PRIVATE_KEY=YOUR_PRIVATE_KEY
  • MNEMONIC: Mnemonic to the Graphcast ID wallet (the first address of the wallet is used; only one of private key or mnemonic is needed). Example: MNEMONIC=YOUR_MNEMONIC
  • GRAPH_ACCOUNT: Graph account corresponding to the Graphcast operator. Example: GRAPH_ACCOUNT=YOUR_GRAPH_ACCOUNT
  • REGISTRY_SUBGRAPH: Subgraph endpoint to the Graphcast Registry. Default: https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli
  • NETWORK_SUBGRAPH: Subgraph endpoint to The Graph network subgraph. Default: https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-goerli
  • GRAPHCAST_NETWORK: Supported Graphcast networks: mainnet, testnet. Default: testnet
  • LOG_LEVEL: Logging configuration to set as RUST_LOG. Default: info
  • LOG_FORMAT: Supported logging formats: pretty, json, full, compact. Default: pretty

The Graphcast CLI code is very extensible and could be altered to send any kind of Graphcast-compatible message to the network.

Usage

The Graphcast CLI supports the following subcommands: upgrade-presync and indexing-status. Both of them accept additional configuration options:

  • SUBGRAPH_ID: Subgraph ID shared by the old and new deployments.
  • NEW_HASH: Subgraph deployment hash for the upgraded version of the subgraph.

The upgrade-presync subcommand has an additional MAX_RETRY variable, which specifies the number of retries for the subcommand. The default value is 5.
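For example, to allow more retries you could raise the variable when invoking the container. This is a sketch that assumes MAX_RETRY, like the other config variables, can be supplied as an environment variable:

docker run -e MAX_RETRY=10 ghcr.io/graphops/graphcast-cli \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"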

Below you can see examples of working CLI commands.

Run with Docker

  1. Pull the Graphcast CLI image
docker pull ghcr.io/graphops/graphcast-cli:latest
  2. Run the image, providing the required configuration variables. Here's a sample configuration:
docker run ghcr.io/graphops/graphcast-cli \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

(or) Run using a pre-built binary

We also provide pre-built binaries for Ubuntu and MacOS, which you can find in the Assets section on each release in the releases page on Github. Simply download the binary, make it executable (chmod a+x ./graphcast-cli-{TAG}-{SYSTEM}) and then run it (using ./graphcast-cli-{TAG}-{SYSTEM}), like this:

./graphcast-cli-0.0.1-macos \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

(or) Build and run from source

  1. Clone the repo
git clone https://github.com/graphops/graphcast-cli.git
  2. Navigate to the project directory
cd graphcast-cli
  3. Run the CLI
cargo run --release -- --private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
upgrade-presync --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"
- + \ No newline at end of file diff --git a/graphcast/radios/listener-radio.html b/graphcast/radios/listener-radio.html index 832cb1a1..5cdda68a 100644 --- a/graphcast/radios/listener-radio.html +++ b/graphcast/radios/listener-radio.html @@ -5,13 +5,13 @@ Listener Radio | GraphOps Docs - +

Listener Radio

The source code for Listener Radio is available on GitHub and Docker builds are automatically published as GitHub Packages.

Introduction

This Radio monitors the Graphcast network via the pubsub topic graphcast-v[version]-[network]. It does not send messages to the network; instead, it records the messages it observes and generates basic metrics for network monitoring.

The Graphcast network is a complex system with numerous nodes and connections. Monitoring it is crucial for maintaining its performance, identifying potential issues, and ensuring its robustness and reliability. Monitoring helps with:

  • Performance optimization: to identify bottlenecks and areas of inefficiency.
  • Troubleshooting: to quickly diagnose issues within the network, reducing downtime and improving reliability.
  • Security: to immediately detect any unusual activity that might indicate a security breach.
  • Planning and forecasting: to record valuable data that can be used for planning and forecasting purposes, helping make informed decisions about the network's future.

Quick Start

  • Ensure a running Postgres instance
  • Set the Postgres URL as DATABASE_URL in .env
  • Set the general GraphcastAgent environment variables shown in the table below
  • Run cargo run from the source code (later releases should use GitHub Actions to build the source and publish Docker images); a minimal sketch follows below
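A minimal sketch of those steps, assuming a local Postgres instance with placeholder credentials (the database name graphcast_listener is hypothetical):

# create the database and point the Radio at it (placeholder credentials)
createdb graphcast_listener
echo 'DATABASE_URL=postgresql://postgres:password@localhost:5432/graphcast_listener' >> .env
echo 'PRIVATE_KEY=0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef' >> .env
cargo run

The remaining GraphcastAgent variables from the table below belong in the same .env file.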

Basic Configuration

You will need to prepare the following environment variables:

  • DATABASE_URL: Postgres database URL. The tool performs automatic database migration; the database URL passed in must exist and be connectable. Example: postgresql://[username]:[password]@[pg_host]:[pg_port]/[db_name]
  • PRIVATE_KEY: Private key to the Graphcast ID wallet (takes precedence over mnemonic). Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
  • GRAPH_NODE_STATUS_ENDPOINT: URL to a Graph Node indexing status endpoint. Example: http://index-node:8030/graphql
  • REGISTRY_SUBGRAPH: URL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network.
  • NETWORK_SUBGRAPH: URL to the Graph Network subgraph. Check APIs for your preferred network.
  • GRAPHCAST_NETWORK: The Graphcast messaging fleet and pubsub namespace to use. Mainnet: mainnet; Goerli: testnet

Example message table

  • id 1: {"nonce": 1686182179, "network": "mainnet", "payload": {"content": "0x3f...", "identifier": "QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj"}, "signature": "dff1...", "block_hash": "276e...", "identifier": "QmVhiE4nax9i86UBnBmQCYDzvjWuwHShYh7aspGPQhU5Sj", "block_number": 17431860}
  • id 2: {"nonce": 1686182183, "network": "goerli", "payload": {"content": "0xc0...", "identifier": "QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB"}, "signature": "dbd2...", "block_hash": "0198...", "identifier": "QmacQnSgia4iDPWHpeY6aWxesRFdb8o5DKZUx96zZqEWrB", "block_number": 9140860}
  • ...

Advanced Configuration

The list below contains the optional environment variables you can set, along with example values. See Basic Configuration above for the required variables.

  • MNEMONIC: Mnemonic to the Graphcast ID wallet (the first address of the wallet is used; only one of PRIVATE_KEY or MNEMONIC is needed). Example: claptrap armchair violin...
  • COLLECT_MESSAGE_DURATION: Seconds that the Radio will wait to collect remote POI attestations before making a comparison with the local POI. Example: 120 for 2 minutes.
  • TOPICS: Comma-separated static list of content topics (subgraphs) to subscribe to. Example: QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Vz,QmUwCFhXM3f6qH9Ls9Y6gDNURBH7mxsn6JcectgxAz6CwU,QmQ1Lyh3U6YgVP6YX1RgRz6c8GmKkEpokLwPvEtJx6cF1y
  • WAKU_HOST: Interface onto which to bind the bundled Waku node. Example: 0.0.0.0
  • WAKU_PORT: P2P port on which the bundled Waku node will operate. Example: 60000
  • WAKU_NODE_KEY: Static Waku node key.
  • BOOT_NODE_ADDRESSES: Peer addresses to use as Waku boot nodes. Example: "addr1, addr2, addr3"
  • TELEGRAM_TOKEN: Telegram bot token to use for notifications. Example: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
  • TELEGRAM_CHAT_ID: The ID of the Telegram chat to send messages to. Example: -1001234567890
  • WAKU_LOG_LEVEL: Waku node logging configuration. Example: INFO (also the default)
  • RUST_LOG: Rust tracing configuration. Example: graphcast_sdk=debug,subgraph_radio=debug; defaults to info for everything
  • DISCORD_WEBHOOK: Discord webhook URL for notifications. Example: https://discord.com/api/webhooks/123456789012345678/AbCDeFgHiJkLmNoPqRsTuVwXyZaBcDeFgHiJkLmN
  • SLACK_WEBHOOK: Slack webhook URL for notifications. Example: https://hooks.slack.com/services/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
  • METRICS_PORT: If set, the Radio will expose Prometheus metrics on this port (off by default). Example: 3001
  • METRICS_HOST: If set, the Radio will expose Prometheus metrics on this host (off by default). Example: 0.0.0.0
  • SERVER_HOST: If SERVER_PORT is set, the Radio will expose an API service on the given host and port. Default: 0.0.0.0
  • SERVER_PORT: If set, the Radio will expose an API service on the given port (off by default). Example: 8080
  • LOG_FORMAT: Options: pretty (verbose and human readable), json (not verbose and parsable), compact (not verbose and not parsable), full (verbose and not parsable). Default: pretty
  • DISCV5_ENRS: Comma-separated ENRs for Waku Discv5 bootstrapping. Defaults to an empty list.
  • DISCV5_PORT: Discoverable UDP port. Default: 9000
  • ID_VALIDATION: Defines the level of validation for message signers used during Radio operation. Options: no-check, valid-address, graphcast-registered, graph-network-account, registered-indexer, indexer

Configurations explained

Identity validation

ID_VALIDATION defines the level of validation for message signers used during Radio operation.

Available Options:

  • no-check: performs no check on the message signature and does not verify the signer.
  • valid-address: checks the signer to be a valid Ethereum address.
  • graphcast-registered: checks the signer to be registered on Graphcast Registry.
  • graph-network-account: checks the signer to be a Graph account.
  • registered-indexer: checks the signer to be registered on Graphcast Registry and corresponds to an Indexer that satisfies the minimum stake requirement.
  • indexer: checks the signer to be either registered on Graphcast Registry or to be a Graph Account, and corresponds to an Indexer satisfying the minimum stake requirement.

Gossip protocol

WAKU_HOST and WAKU_PORT specify where the bundled Waku node runs. If you want to run multiple Radios, or multiple instances of the same Radio, you should run them on different ports.

If you want to customize the log level, you can set the RUST_LOG environment variable. Here's an example configuration for more verbose logging:

RUST_LOG="warn,hyper=warn,graphcast_sdk=debug,subgraph_radio=debug"

Discv5 is an ambient node discovery network for establishing a decentralized network of interconnected Graphcast Radios. Discv5, when used in Graphcast Radios, serves as a dedicated peer-to-peer discovery protocol that empowers Radios to form an efficient, decentralized network. Without Discv5, the traffic within the Graphcast network would largely rely on centrally hosted boot nodes, leading to a less distributed architecture. However, with Discv5, Radios are capable of directly routing messages among themselves, significantly enhancing network decentralization and reducing reliance on the central nodes. If you want to learn more about Discv5, check out the official spec.

Monitoring the Radio

Prometheus & Grafana

The exposed metrics can be scraped by a Prometheus server and displayed in Grafana. To use them, you must have a Prometheus server running and scraping metrics on the provided port. You can specify the metrics host and port using the METRICS_HOST and METRICS_PORT environment variables.
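As a quick smoke test (assuming METRICS_HOST=0.0.0.0 and METRICS_PORT=3001; the /metrics path is the usual Prometheus convention and is an assumption here, so verify it against your build):

# check that Prometheus-format metrics are exposed
curl http://0.0.0.0:3001/metrics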

HTTP Server

The Radio spins up an HTTP server with a GraphQL API when SERVER_HOST and SERVER_PORT environment variables are set. The supported routes are:

  • /health for health status
  • /api/v1/graphql for GET and POST requests with GraphQL playground interface

The GraphQL API includes queries over the stored rows and messages, and a mutation for deleting messages. Below is an example query:

query {
  rows {
    id
    message {
      nonce
      network
      payload {
        content
      }
    }
  }

  messages {
    identifier
    nonce
    network
    blockNumber
    blockHash
    signature
    payload {
      identifier
      content
    }
  }
}

And an example mutation:

mutation {
  deleteMessage(id: 1)
}
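The same queries can also be issued programmatically against the API route; for instance, with curl (assuming SERVER_HOST=0.0.0.0 and SERVER_PORT=8080 from the table above):

# POST a GraphQL query for stored message rows
curl -X POST http://0.0.0.0:8080/api/v1/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ rows { id message { nonce network } } }"}'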
- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/advanced-configuration.html b/graphcast/radios/subgraph-radio/advanced-configuration.html index 40985cf1..1681a28a 100644 --- a/graphcast/radios/subgraph-radio/advanced-configuration.html +++ b/graphcast/radios/subgraph-radio/advanced-configuration.html @@ -5,13 +5,13 @@ Advanced Configuration | GraphOps Docs - +

Advanced Configuration

The list below contains the full set of optional environment variables you can set, along with example values. See Basic Configuration in the Introduction for the required variables.

  • MNEMONIC: Mnemonic to the Graphcast ID wallet or the Indexer Operator wallet (the first address of the wallet is used; only one of PRIVATE_KEY or MNEMONIC is needed). Example: claptrap armchair violin...
  • COLLECT_MESSAGE_DURATION: Seconds that the Subgraph Radio will wait to collect remote POI attestations before making a comparison with the local POI. Example: 120 for 2 minutes.
  • GOSSIP_TOPIC_COVERAGE: Toggle for the topic coverage level. Possible values: "comprehensive", "on-chain", "minimal". Default: "comprehensive" coverage.
  • AUTO_UPGRADE_COVERAGE: Toggle for the types of subgraphs for which the Radio sends offchain syncing commands to the indexer management server. Default: "comprehensive" coverage (upgrade all syncing deployments).
  • TOPICS: Comma-separated static list of content topics (subgraphs) to subscribe to. Example: QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Vz,QmUwCFhXM3f6qH9Ls9Y6gDNURBH7mxsn6JcectgxAz6CwU,QmQ1Lyh3U6YgVP6YX1RgRz6c8GmKkEpokLwPvEtJx6cF1y
  • WAKU_HOST: Interface onto which to bind the bundled Waku node. Example: 0.0.0.0
  • WAKU_PORT: P2P port on which the bundled Waku node will operate. Example: 60000
  • WAKU_NODE_KEY: Static Waku node key.
  • BOOT_NODE_ADDRESSES: Peer addresses to use as Waku boot nodes. Example: "addr1, addr2, addr3"
  • TELEGRAM_TOKEN: Telegram bot token to use for notifications. Example: 123456:ABC-DEF1234ghIkl-zyx57W2v1u123ew11
  • TELEGRAM_CHAT_ID: The ID of the Telegram chat to send messages to. Example: -1001234567890
  • SLACK_WEBHOOK: Slack webhook URL for notifications. Example: https://hooks.slack.com/services/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
  • WAKU_LOG_LEVEL: Waku node logging configuration. Example: INFO (also the default)
  • RUST_LOG: Rust tracing configuration. Example: graphcast_sdk=debug,subgraph_radio=debug; defaults to info for everything
  • DISCORD_WEBHOOK: Discord webhook URL for notifications. Example: https://discord.com/api/webhooks/123456789012345678/AbCDeFgHiJkLmNoPqRsTuVwXyZaBcDeFgHiJkLmN
  • METRICS_PORT: If set, the Radio will expose Prometheus metrics on this port (off by default). Example: 3001
  • METRICS_HOST: If set, the Radio will expose Prometheus metrics on this host (off by default). Example: 0.0.0.0
  • SERVER_HOST: If SERVER_PORT is set, the Radio will expose an API service on the given host and port. Default: 0.0.0.0
  • SERVER_PORT: If set, the Radio will expose an API service on the given port (off by default). Example: 8080
  • LOG_FORMAT: Options: pretty (verbose and human readable), json (not verbose and parsable), compact (not verbose and not parsable), full (verbose and not parsable). Default: pretty
  • SQLITE_FILE_PATH: If set, the Radio will persist its database between reruns (off by default). Example: ./store.sql
  • DISCV5_ENRS: Comma-separated ENRs for Waku Discv5 bootstrapping. Defaults to an empty list.
  • DISCV5_PORT: Discoverable UDP port. Default: 9000
  • ID_VALIDATION: Defines the level of validation for message signers used during Radio operation. Options: no-check, valid-address, graphcast-registered, graph-network-account, registered-indexer, indexer. Default: indexer
  • INDEXER_MANAGEMENT_SERVER_ENDPOINT: URL to the indexer management server of the Indexer Agent. Example: http://localhost:18000
  • AUTO_UPGRADE: Toggle for the types of subgraphs for which the Radio will send offchain syncing commands to the indexer management server. Possible values: "comprehensive", "on-chain", "minimal", "none". Default: "comprehensive" coverage (upgrade all syncing deployments).
  • RATELIMIT_THRESHOLD: Upgrade intent rate limit in seconds; only one upgrade per subgraph is allowed within the threshold. Default: 86400 seconds (1 day)
  • PROTOCOL_NETWORK: The protocol network (currently matches the suffix of the provided NETWORK_SUBGRAPH configuration variable)
  • NOTIFICATION_MODE: Options: live, periodic-report, periodic-update. Learn more about notification modes here. Default: live
  • NOTIFICATION_INTERVAL: Interval (in hours) between divergence notifications (used in the periodic-update and periodic-report notification modes). Learn more about notification modes here. Default: 24
info

For enhanced security, we recommend running Subgraph Radio with an independent Graphcast ID linked to your Indexer account. This Graphcast ID is an Ethereum account authorized to sign Graphcast messages on behalf of your Indexer. By default, Subgraph Radio validates messages received from any signer that can be resolved to an Indexer address, regardless of whether or not the signer is registered on the Graphcast Registry (this behavior can be altered by setting the ID_VALIDATION config variable). Learn how to register a Graphcast ID here.

Configurations explained

GOSSIP_TOPIC_COVERAGE

GOSSIP_TOPIC_COVERAGE specifies the topic coverage level. It controls the range of topics (subgraph IPFS hashes) the Indexer subscribes to in order to process data and participate in the network.

There are three coverage levels available:

  • comprehensive: Subscribe to on-chain topics, user-defined static topics, and subgraph deployments syncing on the Graph node. This level is useful for Indexers who want to compare public POIs for all deployments syncing on their Graph node even if they don't have active allocations open (their stake will not be taken into account in attestation).
  • on-chain: Subscribe to on-chain topics and user-defined static topics. This is the default coverage level and is suitable for indexers who only want to compare data for deployments with active allocations.
  • minimal: Only subscribe to user-defined static topics. This level is for Indexers who want to limit their participation to specific topics of interest.
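For example, a minimal-coverage setup that only gossips about explicitly listed deployments could be configured like this (the deployment hashes are the placeholder examples from the table above):

GOSSIP_TOPIC_COVERAGE=minimal
TOPICS=QmWmyoMoctfbAaiEs2G46gpeUmhqFRDW6KWo64y5r581Vz,QmUwCFhXM3f6qH9Ls9Y6gDNURBH7mxsn6JcectgxAz6CwU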

Identity validation

ID_VALIDATION defines the level of validation for message signers used during Radio operation. We recommend registered-indexer for the strictest identity validation, while indexer is a viable option for those who want to use the network before considering Graphcast ID registration. You can choose a sender identity validation mechanism for your Radio based on your use case and security preferences.

Available Options:

  • no-check: Performs no check on the message signature and does not verify the signer. All messages pass the sender check.
  • valid-address: Requires the signer to be a valid Ethereum address. Messages should be traceable to an Ethers wallet.
  • graphcast-registered: Requires the signer to be registered on the Graphcast Registry.
  • graph-network-account: Requires the signer to be a Graph account.
  • registered-indexer: Requires the signer to be registered on the Graphcast Registry and correspond to an Indexer satisfying the minimum stake requirement.
  • indexer: Requires the signer to be either registered on the Graphcast Registry or a Graph account, and correspond to an Indexer satisfying the minimum stake requirement.

Gossip protocol

WAKU_HOST and WAKU_PORT specify where the bundled Waku node runs. If you want to run multiple Radios, or multiple instances of the same Radio, you should run them on different ports.

If you want to customize the log level, you can set the RUST_LOG environment variable. Here's an example configuration for more verbose logging:

RUST_LOG="warn,hyper=warn,graphcast_sdk=debug,subgraph_radio=debug"

Discv5 is an ambient node discovery network for establishing a decentralized network of interconnected Graphcast Radios. Discv5, when used in Graphcast Radios, serves as a dedicated peer-to-peer discovery protocol that empowers Radios to form an efficient, decentralized network. Without Discv5, the traffic within the Graphcast network would largely rely on centrally hosted boot nodes, leading to a less distributed architecture. However, with Discv5, Radios are capable of directly routing messages among themselves, significantly enhancing network decentralization and reducing reliance on the central nodes. If you want to learn more about Discv5, check out the official spec.

Protocol network

Available Options:

  • goerli
  • mainnet
  • arbitrum-one
  • arbitrum-goerli

State management

The PERSISTENCE_FILE_PATH configuration variable allows the Radio to maintain operational continuity across sessions. When a file path is set, the Radio periodically stores its state, including local attestations, remote messages, and POI comparison results, in a JSON-formatted file at the specified path. This facilitates seamless session transitions and minimizes data loss. In the event of a system disruption, the state can be reloaded from this file, ensuring the Radio can resume operation effectively.

Subgraph Upgrade Pre-sync feature configuration variables

The subgraph upgrade pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. If the Radio operator has set up the notification system, they will get notified whenever a new subgraph upgrade intent message is received.

If the INDEXER_MANAGEMENT_SERVER_ENDPOINT configuration variable has been set, the Radio will send a request to the Indexer Agent to start offchain syncing the new Subgraph deployment.

The AUTO_UPGRADE_COVERAGE variable can be toggled to change the coverage level of subgraphs for which the Radio will send offchain syncing commands to the indexer management server.

Configuration options

To configure Subgraph Radio, you can use the following methods:

Using Environment Variables

Example .env file:

PRIVATE_KEY="a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m"
GRAPH_NODE_STATUS_ENDPOINT="http://127.0.0.42:8030/graphql"
REGISTRY_SUBGRAPH="https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet"
NETWORK_SUBGRAPH="https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet"
GRAPHCAST_NETWORK=mainnet
INDEXER_ADDRESS="0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"

Using CLI arguments

Pass the configuration options directly as command-line arguments.

docker run ghcr.io/graphops/subgraph-radio \
--private-key "a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m" \
--graph-node-status-endpoint "http://127.0.0.42:8030/graphql" \
--registry-subgraph "https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet" \
--network-subgraph "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet" \
--graphcast-network mainnet \
--indexer-address "0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"

Using a TOML/YAML file

Example TOML configuration file (config.toml):

[graph_stack]
graph_node_status_endpoint = 'http://127.0.0.42:8030/graphql'
indexer_address = '0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6'
registry_subgraph = 'https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet'
network_subgraph = 'https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet'
private_key = 'a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m'

Then you just need to have the CONFIG_FILE set, either as an env variable - CONFIG_FILE=path/to/config.toml or passed as a CLI arg - --config-file path/to/config.toml.

Example YAML configuration file (config.yaml):

graph_stack:
  graph_node_status_endpoint: "http://127.0.0.42:8030/graphql"
  indexer_address: "0xa1b2c3d4e5f6a7b8c9d0e1f2a3b4c5d6"
  registry_subgraph: "https://api.thegraph.com/subgraphs/name/randomuser/graphcast-registry-mainnet"
  network_subgraph: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-mainnet"
  private_key: "a2b3c1d4e5f6890e7f6g5h4i3j2k1l0m"

Then you just need to have the CONFIG_FILE set, either as an env variable - CONFIG_FILE=path/to/config.yaml or passed as a CLI arg - --config-file path/to/config.yaml.

We also have an extensive configuration file template in the repo.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/http-server.html b/graphcast/radios/subgraph-radio/http-server.html index 0300795b..c75d30be 100644 --- a/graphcast/radios/subgraph-radio/http-server.html +++ b/graphcast/radios/subgraph-radio/http-server.html @@ -5,13 +5,13 @@ HTTP Server | GraphOps Docs - +

HTTP Server

The Radio spins up an HTTP server with a GraphQL API when SERVER_HOST and SERVER_PORT environment variables are set. The supported routes are:

  • /health for health status
  • /api/v1/graphql for GET and POST requests with GraphQL playground interface

The GraphQL API now includes several advanced queries:

  • radioPayloadMessages
  • localAttestations
  • upgradeIntentMessages
  • comparisonResults
  • comparisonRatio

Below are some example queries:

query {
  radioPayloadMessages {
    identifier
    nonce
    signature
    graphAccount
    payload {
      identifier
      content
    }
  }
  localAttestations {
    deployment
    blockNumber
    attestation {
      ppoi
    }
  }
  comparisonResults(identifier: "Qm...") {
    deployment
    blockNumber
    resultType
    localAttestation {
      ppoi
    }
    attestations {
      senders
      stakeWeight
      ppoi
    }
  }
  comparisonRatio {
    deployment
    blockNumber
    stakeRatio
  }
  upgradeIntentMessages {
    subgraphId
    newHash
    nonce
    graphAccount
  }
}

You can customize the data returned by the comparisonRatio query by providing optional arguments: deployment, block, and resultType.

query {
  comparisonRatio(deployment: "Qm...", block: 17887350, resultType: MATCH) {
    deployment
    blockNumber
    stakeRatio
  }
}

In this example, the comparisonRatio query will return stake ratios only for attestations from deployment "Qm..." at block number 17887350, and only for the specified result type.

Note: The result_type field of the filter corresponds to the resultType field in the comparisonResults query. This field represents the type of comparison result.

stakeRatio orders the attestations by stake weight, then computes the ratio of unique senders.

To understand more about the format of the ratio results, check out this section.

These queries provide a clear aggregation of the attestations from remote messages, giving a concise understanding of the Radio's state. The optional filters (deployment, block, and resultType) can be used to refine the results.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/intro.html b/graphcast/radios/subgraph-radio/intro.html index f50e5cc1..9819e8a1 100644 --- a/graphcast/radios/subgraph-radio/intro.html +++ b/graphcast/radios/subgraph-radio/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Subgraph Radio

Subgraph Radio is an optional component of the Graph Protocol Indexer Stack. It uses the Graphcast Network to facilitate the exchange of data among Indexers and other participants about Subgraphs.

The source code for the Subgraph Radio is available on GitHub and Docker builds are automatically published as GitHub Packages. Subgraph Radio is also published as a crate on crates.io.

Basic Configuration

The Subgraph Radio can be configured using environment variables, CLI arguments, or a .toml or .yaml configuration file. Take a look at the configuration options to learn more. In all cases, users will need to prepare the following configuration variables:

  • PRIVATE_KEY: Private key of the Graphcast ID wallet or the Indexer Operator wallet (takes precedence over MNEMONIC). Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
  • INDEXER_ADDRESS: Indexer address for Graphcast message verification, in all lowercase. Example: 0xabcdcabdabcdabcdcabdabcdabcdcabdabcdabcd
  • GRAPH_NODE_STATUS_ENDPOINT: URL to a Graph Node indexing status endpoint. Example: http://index-node:8030/graphql
  • INDEXER_MANAGEMENT_SERVER_ENDPOINT: URL to the indexer management server of the Indexer Agent. Example: http://localhost:18000
  • REGISTRY_SUBGRAPH: URL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network.
  • NETWORK_SUBGRAPH: URL to the Graph Network subgraph. Check APIs for your preferred network.
  • GRAPHCAST_NETWORK: The Graphcast messaging fleet and pubsub namespace to use. Mainnet: mainnet; Goerli: testnet

Run with Docker

  1. Pull the Subgraph Radio image
docker pull ghcr.io/graphops/subgraph-radio:latest
  2. Run the image, providing the required environment variables. Here's a sample mainnet configuration:
docker run \
-e GRAPHCAST_NETWORK="mainnet" \
-e REGISTRY_SUBGRAPH="https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet" \
-e NETWORK_SUBGRAPH="https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet" \
-e PRIVATE_KEY="PRIVATE_KEY" \
-e GRAPH_NODE_STATUS_ENDPOINT="http://graph-node:8030/graphql" \
-e RUST_LOG="warn,hyper=warn,graphcast_sdk=info,subgraph_radio=info" \
-e INDEXER_ADDRESS="INDEXER_ADDRESS" \
ghcr.io/graphops/subgraph-radio:latest

(or) Run with docker-compose

You can append this service definition to your docker-compose manifest and customise the definitions:

services:
  # ... your other service definitions
  subgraph-radio:
    image: ghcr.io/graphops/subgraph-radio:latest
    container_name: subgraph-radio
    restart: unless-stopped
    environment:
      GRAPHCAST_NETWORK: "mainnet"
      REGISTRY_SUBGRAPH: "https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet"
      NETWORK_SUBGRAPH: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet"
      PRIVATE_KEY: "PRIVATE_KEY"
      GRAPH_NODE_STATUS_ENDPOINT: "http://graph-node:8030/graphql"
      RUST_LOG: "warn,hyper=warn,graphcast_sdk=info,subgraph_radio=info"
      INDEXER_ADDRESS: "INDEXER_ADDRESS"
    logging:
      driver: local

(or) Run as part of StakeSquid's docker-compose setup

Subgraph Radio is included as an optional component in both the mainnet and testnet versions of StakeSquid's guide.

(or) Run using a pre-built binary

We also provide pre-built binaries for Ubuntu and MacOS, which you can find in the Assets section on each release in the releases page on Github. Simply download the binary, make it executable (chmod a+x ./subgraph-radio-{TAG}-{SYSTEM}) and then run it (using ./subgraph-radio-{TAG}-{SYSTEM}).

Developing the Subgraph Radio

Building the image using the Dockerfile locally

If you want to make any changes to the Subgraph Radio codebase, you can use this option.

Prerequisites

  1. Clone this repo and cd into it
  2. Create a .env file that includes at least the required environment variables. To see the full list of environment variables you can provide, check out the Configuration section.

Running the Subgraph Radio inside a Docker container

docker-compose up -d

Building Subgraph Radio locally

To have full control over the Subgraph Radio code and run it directly on your machine (without Docker) you can use this option.

Prerequisites

  1. Clone this repo and cd into it
  2. Make sure you have the following installed:
  • Rust
  • Go
  • Build tools (e.g. the build-essential package for Debian-based Linux distributions or Xcode Command Line Tools for MacOS)
  • C compiler (e.g. the clang package for Debian-based Linux distributions or Xcode Command Line Tools for MacOS)
  • OpenSSL (e.g. the libssl-dev package for Debian-based Linux distributions or openssl for MacOS)
  • PostgreSQL libraries and headers (e.g. the libpq-dev package for Debian-based Linux distributions or postgresql for MacOS)
  3. Ensure your Graph Node is syncing your Indexer's on-chain allocations.
  4. Create a .env file that includes at least the required environment variables. To see the full list of environment variables you can provide, check out the Configuration section.
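On a Debian-based system, the non-Rust/Go prerequisites above could be installed roughly as follows (a sketch; adjust package names for your distribution):

# install build tools, C compiler, OpenSSL, and PostgreSQL headers
sudo apt-get update
sudo apt-get install -y build-essential clang libssl-dev libpq-dev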

Running the Subgraph Radio natively

cargo run --release
- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/monitoring.html b/graphcast/radios/subgraph-radio/monitoring.html index 0ae39dea..cc103e45 100644 --- a/graphcast/radios/subgraph-radio/monitoring.html +++ b/graphcast/radios/subgraph-radio/monitoring.html @@ -5,13 +5,13 @@ Notifications and Monitoring | GraphOps Docs - +

Notifications and Monitoring

Notifications

If the Radio operator has set up a Slack, Discord and/or Telegram bot integration and the Radio finds a POI mismatch, it sends alerts to the designated channels. The operator can also inspect the logs to see whether the Radio is functioning properly: whether it is sending and receiving messages, comparing normalized POIs, has found a POI mismatch, and so on.

Notification modes

Subgraph Radio supports three modes of notification, based on the user's preference for how often they'd like to get notified, and what data the notifications contain:

  • live - the Radio sends a notification as soon as it finds a divergence, providing the Subgraph deployment and the block.
  • periodic-update - the Radio sends a notification on a specified interval (default is 24 hours) containing any updates to comparison results since the previous notification (the notification message format is the same as in live mode). If there are no updates, it will not send a notification.
  • periodic-report - the Radio sends a notification on a specified interval (default is 24 hours) with a summary of total subgraphs being cross-checked, number of matched subgraphs, number of diverged subgraphs, and a list of the divergent subgraphs and the blocks where the divergence was caught.

If a Slack/Discord/Telegram integration is in place, the default notification mode is live.

The notification mode can be toggled using the NOTIFICATION_MODE and NOTIFICATION_INTERVAL configuration variables.
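For example, to receive a single summary report every 12 hours instead of live alerts (a sketch using the two variables above):

NOTIFICATION_MODE=periodic-report
NOTIFICATION_INTERVAL=12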

See more information on how to configure notifications, as well as how to set up Slack, Discord and Telegram in the advanced configuration section.

Prometheus & Grafana

The Subgraph Radio exposes metrics that can be scraped by a Prometheus server and displayed in Grafana. To use them, you must have a Prometheus server running and scraping metrics on the provided port. You can specify the metrics host and port using the METRICS_HOST and METRICS_PORT environment variables.

Setting up the Grafana dashboard

The Subgraph Radio Grafana dashboard is included by default in StakeSquid's docker-compose stack. If you're not using the stack, below is a walk-through of how to set it up.

There is a Grafana dashboard config JSON file provided in the repo, which you can import and use to visualise the metrics in Grafana. When importing the dashboard, it will require you to specify two data sources: a Prometheus one and a GraphQL one. For Prometheus, select the Prometheus instance that you've set up to scrape metrics from Subgraph Radio's metrics host and port. For GraphQL, you'd need to install the GraphQL data source plugin, if you don't have it already installed. Then you need to create a new GraphQL data source that points to the GraphQL API of the Radio's integrated HTTP server. For instance, if you've set SERVER_HOST to 0.0.0.0 and SERVER_PORT to 3012, your GraphQL data source would need to point at http://0.0.0.0:3012/api/v1/graphql. You can learn more about the HTTP server in the next section.
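Before wiring up the Grafana data source, it's worth confirming the Radio's HTTP server is reachable; with the SERVER_HOST/SERVER_PORT values from the example above:

# should return the Radio's health status
curl http://0.0.0.0:3012/health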

Reading the Grafana dashboard

Grafana Dashboard

When the Subgraph Radio Grafana dashboard has been set up, it offers the following panels:

POI Comparison Overview

At a glance, you can see the number of matching and diverging subgraphs. These two gauges update to reflect the results continuously after each comparison. The reason these are gauges and not counters is that a subgraph's comparison result can change between POI comparison events: for instance, you might have a diverging public POI for a given subgraph on block X, but at block Y it could match the consensus public POI; in that case it would change groups, the number of divergent subgraphs would decrement, and the number of matching subgraphs would increment.

Message stats

This includes the validated messages per minute, as well as the total cached messages in the store.

Number of Gossiping Indexers per Subgraph

This panel shows how many Indexers are actively sending public POIs for the subgraphs that you're interested in. This view can be filtered by a specific subgraph.

POI Comparison Results

This is the most insightful and important panel. The data in it comes directly from the HTTP server's GraphQL endpoint. It shows the most recent comparison result for each subgraph that is being actively cross-checked, as well as the block for which that comparison happened.

The Count Ratio shows the ratio of unique senders that have attested to a public POI for that subgraph on that block. For instance, 3:1:1* means that three distinct public POIs were compared. It also means there are four Indexers attesting to public POIs that differ from the locally generated one: three of them attest to the same POI and the fourth attests to a different one, but neither of those two POIs matches the locally generated one. If the ratio is 3*:1, the local POI matches the most often attested POI (highest sender count), meaning the local Indexer is in that group of three Indexers, and there is one other Indexer who has sent a different POI. If it's 4*, there are four Indexers attesting to a given POI and all four POIs are the same (the local one included). The count marked with a * is the group containing the local attestation.

Another possible ratio value is 3:0*; the 0* indicates that no local public POI was generated for this subgraph on this block (this can happen for many reasons, one being that the subgraph isn't fully synced).

The Stake Ratio is similar to the Count Ratio, but POIs are grouped by stake. For example, 11686531* means that this is the combined stake backing the public POI for that subgraph on that block (the local Indexer's stake included), whereas 44141361*:651361 means there are two distinct POIs, and hence two different sender groups; the two stake values are the aggregated stake behind each of those POIs. The * on the first one means that the local Indexer attests to the same public POI and the local stake is included in that value. As with the Count Ratio, a 0* (for instance 44141361:0*) means that no local public POI was generated for this subgraph on this block (therefore there is no attesting stake from the local Indexer).

Function Call Stats

Shows insights into the frequency of different functions running in the Radio. It helps convey how often certain events have happened: POI comparison, processing a validated message, sending a message, and more.

Number of diverged subgraphs

Count of diverged subgraphs and how it's changed over time.

Locally tracked Public POIs

Number of locally generated public POIs for all of the subgraphs that are actively being cross-checked.

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/poi-cross-checking.html b/graphcast/radios/subgraph-radio/poi-cross-checking.html index 8f4bd667..4957a4fe 100644 --- a/graphcast/radios/subgraph-radio/poi-cross-checking.html +++ b/graphcast/radios/subgraph-radio/poi-cross-checking.html @@ -5,13 +5,13 @@ POI Cross-checking | GraphOps Docs - +

POI Cross-checking

An essential aspect of earning indexing rewards as an Indexer is the generation of valid Proof of Indexing hashes (POIs). These POIs provide evidence of the Indexer's possession of correct data. Submitting invalid POIs could lead to a Dispute and possible slashing by the protocol. With Subgraph Radio's POI feature, Indexers gain confidence knowing that their POIs are continually cross-verified against those of other participating Indexers. Should there be a discrepancy in POIs, Subgraph Radio functions as an early warning system, alerting the Indexer within minutes.

All POIs generated through Subgraph Radio are public (normalized), meaning they are hashed with a 0x0 Indexer Address and can be compared between Indexers. However, these public POIs are not valid for on-chain reward submission. Subgraph Radio groups and weighs public POIs according to the aggregate stake in GRT attesting to each. The normalized POI with the most substantial aggregate attesting stake is deemed canonical and used for comparisons with your local Indexer POIs.

POI Cross-checking

Determining which Subgraphs to gossip about

Subgraph Radio will gossip about different subgraphs depending on the GOSSIP_TOPIC_COVERAGE configuration (see more). By default, the Radio will gossip about all healthy subgraphs, whether they are allocated to or not.

Subgraph Radio periodically polls the Graph Node for new blocks on all relevant networks and constructs Graphcast topics on each allocation identified by subgraph deployment IPFS hash. Chainheads for these networks are updated with data from the Graph Node, and the Radio ensures that it is always using the latest chainhead when processing messages.

Gathering and comparing normalised POIs

At a given interval, the Radio fetches the normalized POI for each deployment. This interval is defined in blocks and differs for each network. The Radio then saves those public POIs, and as other Indexers running the Radio do the same, messages start propagating through the network. The Radio saves each message and processes them at a given interval.

The messages include a nonce (UNIX timestamp), block number, signature (used to derive the sender's on-chain Indexer address), and network. Before saving an entry to the map, the Radio verifies the sender's on-chain identity and amount of tokens staked through the Graph network subgraph; the stake is used during comparisons later on.

At another interval, the Radio compares the local public POIs with the collected remote ones. The remote POIs are sorted so that for each subgraph (on each block), the POI that is backed by the most on-chain stake is selected. This means that the combined stake of all Indexers that attested to it is considered, not just the highest staking Indexer. The top POI is then compared with the local POIs for that subgraph at that block to determine consensus.

If there is a mismatch and if the Radio operator has set up a Slack, Discord and/or Telegram bot integration, the Radio will send alerts to the designated channels.

After a successful comparison, the attestations that have been checked are removed from the store.

Sequence Diagram

- + \ No newline at end of file diff --git a/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html b/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html index a9d24e83..21a5e1d9 100644 --- a/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html +++ b/graphcast/radios/subgraph-radio/subgraph-upgrade-presyncing.html @@ -5,13 +5,13 @@ Subgraph Upgrade Pre-syncing | GraphOps Docs - +

Subgraph Upgrade Pre-syncing

The Subgraph Upgrade Pre-sync feature provides a way for Subgraph Developers to signal when they plan on releasing a new subgraph version, thereby allowing Indexers to start syncing the subgraph in advance. Subgraph Developers can use the Graphcast CLI to send a message to all Indexers interested in the given subgraph.

Upgrade Presyncing

As an Indexer running Subgraph Radio

As long as there is a valid configuration for AUTO_UPGRADE and INDEXER_MANAGEMENT_SERVER_ENDPOINT (see Advanced Configuration), Subgraph Radio will process Upgrade Intent Messages and automatically begin offchain syncing new Subgraph Deployments.

Rate Limits

In order to prevent spam, Subgraph Radio implements a rate limit on Upgrade Intent Messages. By default, Subgraph Radio will permit one upgrade for an existing Subgraph Deployment per day.

As a Subgraph Developer

Send an Upgrade Intent Message

Refer to the usage section of the Graphcast CLI to learn more about the different ways to send an UpgradeIntentMessage, as well as the different configuration options available.

Example:

# --graph-account: the address of the subgraph deployer
# --private-key: the private key of the subgraph deployer
# upgrade-presync: send an upgrade pre-sync message
# --subgraph-id: the subgraph ID shared by the old and new deployments
# --new-hash: the new subgraph deployment hash
docker run ghcr.io/graphops/graphcast-cli \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
upgrade-presync \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3" \
--new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x"

This is what the final log should look like after successfully sending the message:

INFO graphcast_cli::operator::operation: Sent message, msg_id: "0x126c76b7a5e9a30b3834807e0e02f9858191d153746ae7aebdef90bd4bae9b7a"
at src/operator/operation.rs:37

Check Indexing Status

After sending an UpgradeIntentMessage, a Subgraph Developer can periodically check the indexing status of the new subgraph deployment using the public API of the Indexers who actively allocate on the current version of the subgraph.

The same arguments can be used here as for UpgradeIntentMessage. However, gossiping is not involved in this operation; the indexing statuses are fetched through deterministic queries.

Command for querying the indexing status:

docker run ghcr.io/graphops/graphcast-cli \
--private-key "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" \
--graph-account "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
indexing-status --new-hash "QmVVfLWowm1xkqc41vcygKNwFUvpsDSMbHdHghxmDVmH9x" \
--subgraph-id "CnJMdCkW3pr619gsJVtUPAWxspALPdCMw6o7obzYBNp3"

Sequence Diagram

- + \ No newline at end of file diff --git a/graphcast/sdk/intro.html b/graphcast/sdk/intro.html index 8cdff865..a64aa9f3 100644 --- a/graphcast/sdk/intro.html +++ b/graphcast/sdk/intro.html @@ -5,13 +5,13 @@ Introduction | GraphOps Docs - +

Introduction

Graphcast SDK is a decentralized, distributed peer-to-peer (P2P) communication tool that enables users across the network to exchange information in real-time. It is designed to overcome the high cost of signaling or coordination between blockchain participants by enabling off-chain communication (gossip/cheap talk). This is particularly useful for applications where real-time communication is essential but the cost of on-chain transactions is prohibitive.

How it Works

The SDK serves as a base layer for Radio developers, providing essential components to build their applications without starting from scratch. These components include:

  1. Connection to the Graphcast network: forms a communication network and provides an interface for subscribing to messages on specific topics and for broadcasting messages onto the network, allowing real-time communication between different nodes in the network.

  2. Interactions with Graph entities: allows for the necessary interactions with the Graph node, the Graph network subgraph, and the Graphcast registry.

An example of a ping-pong Radio is provided in the examples folder, which leverages the base layer and defines the specific logic around constructing and sending messages, as well as receiving and handling them. This example can serve as a starting point for developers looking to build their own Radios.

Network Configurations

A Graphcast radio can interact with many parts of The Graph network modularly. The network configurations actively supported by the team include mainnet (Ethereum mainnet and Arbitrum One) and testnet (Goerli and Arbitrum Goerli). You are free to define and use your own Graphcast Network and Graphcast Registry. This flexibility allows for a wide range of applications and use cases.

Contributing

Contributions are welcome and appreciated! Please refer to the Contributor Guide, Code Of Conduct, and Security Notes for this repository. These documents provide guidelines for how to contribute to the project in a way that is beneficial to all parties involved.

Upgrading and Testing

Updates to the SDK will be merged into the main branch once their release PR has been approved. For testing, it is recommended to use nextest as your test runner. You can run the suite using the command cargo nextest run. Regular testing is crucial to ensure the stability and reliability of the software.
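
If you haven't used nextest before, it installs as a cargo subcommand:

cargo install cargo-nextest
cargo nextest run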

Resources


Radio Development

Do you want to build robust, peer-to-peer messaging apps that automatically exchange valuable data with other Indexers in real time? Do you have an idea for what data could be useful to share that could lead to greater communication efficiency in The Graph network as a whole? Then you want to build a Radio on top of the Graphcast network.

For a more complex and full example of the Graphcast SDK being used to create a Subgraph Radio, take a look at this repo.

A simple ping pong example

Let's take a look at the simplest possible example of a Radio, built on top of Graphcast - a ping pong app. When one participant sends Ping, all the others in the network that are listening on the ping-pong topic will send Pong back. Pretty straightforward.

Register a Graphcast ID

We recommend that you register a Graphcast ID for your on-chain Indexer address. You can learn what a Graphcast ID is and how to register one here.

Once you complete those steps you will have a Graphcast ID that is authorized to sign messages on behalf of your Indexer.

Populate your .env file

You now need to export a few environment variables:

Name                 Description and examples
PRIVATE_KEY          Private key to the Graphcast ID wallet or Indexer Operator wallet (takes precedence over MNEMONIC).
                     Example: 0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
REGISTRY_SUBGRAPH    URL to the Graphcast Registry subgraph for your network. Check APIs for your preferred network.
NETWORK_SUBGRAPH     URL to the Graph Network subgraph. Check APIs for your preferred network.
GRAPHCAST_NETWORK    The Graphcast messaging fleet and pubsub namespace to use. For this example you should use testnet.
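
For reference, a minimal .env file for this example might look like the following. The endpoint values are the testnet defaults used later in this tutorial, and the private key is a placeholder; substitute your own values.

PRIVATE_KEY=0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
REGISTRY_SUBGRAPH=https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli
NETWORK_SUBGRAPH=https://gateway.testnet.thegraph.com/network
GRAPHCAST_NETWORK=testnet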

A few dependencies

Make sure you have the following installed:

  • Rust
  • Go
  • Build tools (e.g. the build-essentials package for Debian-based Linux distributions or Xcode Command Line Tools for macOS)
  • C compiler (e.g. the clang package for Debian-based Linux distributions or Xcode Command Line Tools for macOS)
  • OpenSSL (e.g. the libssl-dev package for Debian-based Linux distributions or openssl for macOS)
  • PostgreSQL libraries and headers (e.g. the libpq-dev package for Debian-based Linux distributions or postgresql for macOS)

Start off with a new Rust project (cargo new ping-pong). Then add the following dependencies to your Cargo.toml file:

[dependencies]
graphcast-sdk = "0.4.0"
once_cell = "1.15"
tokio = { version = "1.1.1", features = ["full"] }
anyhow = "1.0.39"
ethers = "1.0.0"
dotenv = "0.15.0"
tracing = "0.1"
ethers-contract = "1.0.0"
ethers-core = "1.0.0"
ethers-derive-eip712 = "1.0.0"
prost = "0.11"
serde = "1.0.147"
serde_derive = "1.0.114"
# The imports used below also need these crates; versions are indicative
chrono = "0.4"
async-graphql = "4.0"
clap = { version = "3.2", features = ["derive", "env"] }

The imports

Open your main.rs file and add the following imports:

// For date and time utils
use chrono::Utc;

// Load environment variables from .env file
use dotenv::dotenv;

// Import Arc and Mutex for thread-safe sharing of data across threads
use std::sync::{Arc, Mutex};

// Import Graphcast SDK types and functions for agent configuration, message handling, and more
use graphcast_sdk::graphcast_agent::{GraphcastAgent, GraphcastAgentConfig};

// Import sleep and Duration for handling time intervals and thread delays
use std::{thread::sleep, time::Duration};

// Import AsyncMutex for asynchronous mutual exclusion of shared resources
use tokio::sync::Mutex as AsyncMutex;

// Import tracing macros for logging and diagnostic purposes
use tracing::{debug, error, info, trace};

// Import SimpleMessage from the crate's types module
use types::SimpleMessage;

// Import Config from the crate's config module
use config::Config;

use crate::types::{GRAPHCAST_AGENT, MESSAGES};

// Include the local config and types modules
mod config;
mod types;

Structure

Everything we need will be inside the main() function. Since we'll be using async code, we have to annotate it with #[tokio::main]. We can start off with something as simple as:

#[tokio::main]
async fn main() {
// TODO: Radio logic
}

Before diving into the contents of the main function, let's quickly populate the other two files we need - config.rs and types.rs.

Let's take a look at types.rs first:

use async_graphql::SimpleObject;
use ethers_contract::EthAbiType;
use ethers_core::types::transaction::eip712::Eip712;
use ethers_derive_eip712::*;
use graphcast_sdk::graphcast_agent::GraphcastAgent;
use prost::Message;
use serde::{Deserialize, Serialize};

// Import the OnceCell container for lazy initialization of global/static data
use once_cell::sync::OnceCell;
use std::sync::{Arc, Mutex};

/// A global static (singleton) instance of a GraphcastMessage vector.
/// It is used to save incoming messages after they've been validated, in order
/// to defer their processing for later, because async code is required for the processing but
/// it is not allowed in the handler itself.
pub static MESSAGES: OnceCell<Arc<Mutex<Vec<SimpleMessage>>>> = OnceCell::new();

/// The Graphcast Agent instance must be a global static variable (for the time being).
/// This is because the Radio handler requires a static immutable context and
/// the handler itself is being passed into the Graphcast Agent, so it needs to be static as well.
pub static GRAPHCAST_AGENT: OnceCell<GraphcastAgent> = OnceCell::new();

/// Make a test radio type
#[derive(Eip712, EthAbiType, Clone, Message, Serialize, Deserialize, SimpleObject)]
#[eip712(
    name = "Graphcast Ping-Pong Radio",
    version = "0",
    chain_id = 1,
    verifying_contract = "0xc944e90c64b2c07662a292be6244bdf05cda44a7"
)]
pub struct SimpleMessage {
    #[prost(string, tag = "1")]
    pub identifier: String,
    #[prost(string, tag = "2")]
    pub content: String,
}

impl SimpleMessage {
    pub fn new(identifier: String, content: String) -> Self {
        SimpleMessage {
            identifier,
            content,
        }
    }

    pub fn radio_handler(&self) {
        MESSAGES
            .get()
            .expect("Could not retrieve messages")
            .lock()
            .expect("Could not get lock on messages")
            .push(self.clone());
    }
}

SimpleMessage defines the structure that all messages for this Radio must follow.

SimpleMessage is decorated with several macros - #[derive(Eip712, EthAbiType, Clone, Message, Serialize, Deserialize)] - which automatically implement certain traits that are required by the SDK.

The #[eip712] macro is used to define information that is used in EIP-712, a standard for structuring typed data in Ethereum transactions.

Now let's see the config.rs file:

use clap::Parser;
use ethers::signers::WalletError;
use graphcast_sdk::build_wallet;
use graphcast_sdk::graphcast_agent::message_typing::IdentityValidation;
use graphcast_sdk::init_tracing;
use graphcast_sdk::wallet_address;
use serde::{Deserialize, Serialize};
use tracing::info;

#[derive(Clone, Debug, Parser, Serialize, Deserialize)]
#[clap(
    name = "ping-pong-radio",
    about = "A simple example for using the Graphcast SDK to build Radios",
    author = "GraphOps"
)]
pub struct Config {
    #[clap(
        long,
        value_name = "ENDPOINT",
        env = "GRAPH_NODE_STATUS_ENDPOINT",
        help = "API endpoint to the Graph Node Status Endpoint"
    )]
    pub graph_node_endpoint: Option<String>,
    #[clap(
        long,
        value_name = "KEY",
        value_parser = Config::parse_key,
        env = "PRIVATE_KEY",
        hide_env_values = true,
        help = "Private key to the Graphcast ID wallet (precedence over mnemonics)",
    )]
    pub private_key: Option<String>,
    #[clap(
        long,
        value_name = "KEY",
        value_parser = Config::parse_key,
        env = "MNEMONIC",
        hide_env_values = true,
        help = "Mnemonic to the Graphcast ID wallet (first address of the wallet is used; only one of private key or mnemonic is needed)",
    )]
    pub mnemonic: Option<String>,
    #[clap(
        long,
        value_name = "SUBGRAPH",
        env = "REGISTRY_SUBGRAPH",
        help = "Subgraph endpoint to the Graphcast Registry",
        default_value = "https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli"
    )]
    pub registry_subgraph: String,
    #[clap(
        long,
        value_name = "INDEXER_ADDRESS",
        env = "INDEXER_ADDRESS",
        help = "Graph account corresponding to Graphcast operator"
    )]
    pub indexer_address: String,
    #[clap(
        long,
        value_name = "SUBGRAPH",
        env = "NETWORK_SUBGRAPH",
        help = "Subgraph endpoint to The Graph network subgraph",
        default_value = "https://gateway.testnet.thegraph.com/network"
    )]
    pub network_subgraph: String,
    #[clap(
        long,
        value_name = "LOG_FORMAT",
        env = "LOG_FORMAT",
        help = "Supported logging formats: pretty, json, full, compact",
        long_help = "pretty: verbose and human readable; json: not verbose and parsable; compact: not verbose and not parsable; full: verbose and not parsable",
        possible_values = ["pretty", "json", "full", "compact"],
        default_value = "full"
    )]
    pub log_format: String,
    #[clap(
        long,
        value_name = "ID_VALIDATION",
        value_enum,
        env = "ID_VALIDATION",
        default_value = "valid-address",
        help = "Identity validation mechanism for senders (message signers)",
        long_help = "Identity validation mechanism for senders (message signers)\n
        no-check: all message signers are valid, \n
        valid-address: signer needs to be a valid Eth address, \n
        graphcast-registered: must be registered at the Graphcast Registry, \n
        graph-network-account: must be a Graph account, \n
        registered-indexer: must be registered at the Graphcast Registry, and correspond to an Indexer satisfying the indexer minimum stake requirement, \n
        indexer: must be registered at the Graphcast Registry or be a Graph Account, and correspond to an Indexer satisfying the indexer minimum stake requirement"
    )]
    pub id_validation: IdentityValidation,
}

impl Config {
    /// Parse config arguments
    pub fn args() -> Self {
        // TODO: load config file before parse (maybe add new level of subcommands)
        let config = Config::parse();
        init_tracing(config.log_format.clone()).expect("Could not set up global default subscriber for logger, check environmental variable `RUST_LOG` or the CLI input `log-level`");
        config
    }

    /// Validate the private key as an Eth wallet
    fn parse_key(value: &str) -> Result<String, WalletError> {
        // The wallet can be stored instead of the original private key
        let wallet = build_wallet(value)?;
        let addr = wallet_address(&wallet);
        info!(address = addr, "Resolved Graphcast id");
        Ok(String::from(value))
    }
}

This file defines the Config struct and its associated methods for handling the configuration options of our Radio. This outlines the basic configuration that all Radios have to define.

The configuration options can be provided through command-line arguments, environment variables, or a combination of both. The Config struct parses and validates these options; it also initializes the tracing system for logging purposes. An example invocation follows the method list below.

Methods

  • args(): Parses and returns the configuration options from command-line arguments and environment variables.
  • parse_key(value: &str): Validates a given private key by attempting to create an Ethereum wallet with it. Returns the private key as a string if successful.
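
For instance, the same configuration could be supplied entirely through CLI flags. The flag names follow from the clap field definitions above; the key and addresses are placeholders:

cargo run -- \
  --private-key "0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef" \
  --indexer-address "0xe9a1cabd57700b17945fd81feefba82340d9568f" \
  --registry-subgraph "https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli" \
  --network-subgraph "https://gateway.testnet.thegraph.com/network" \
  --id-validation "valid-address"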

Instantiate the essentials

From here on, all following code will be in the main function. To start off, we define a name for our Radio, read the provided environment variables and instantiate our configuration struct.

// This can be any string
let radio_name = "ping-pong".to_string();
// Loads the environment variables from .env
dotenv().ok();

// Instantiates the configuration struct based on provided environment variables or CLI args
let config = Config::args();
let _parent_span = tracing::info_span!("main").entered();

Now let's instantiate a few variables that will do all the heavy lifting for us.

// Subtopics are optionally provided and used as the content topic identifier of the message subject,
// if not provided then they are usually generated based on indexer allocations
let subtopics: Vec<String> = vec!["ping-pong-content-topic".to_string()];

// GraphcastAgentConfig defines the configuration that the SDK expects from all Radios, regardless of their specific functionality
let graphcast_agent_config = GraphcastAgentConfig::new(
    config.private_key.expect("No private key provided"),
    config.indexer_address,
    radio_name,
    config.registry_subgraph,
    config.network_subgraph,
    config.id_validation.clone(),
    config.graph_node_endpoint,
    None,
    Some("testnet".to_string()),
    Some(subtopics),
    None,
    None,
    None,
    None,
    Some(true),
    // Example ENR address
    Some(vec![String::from("enr:-JK4QBcfVXu2YDeSKdjF2xE5EDM5f5E_1Akpkv_yw_byn1adESxDXVLVjapjDvS_ujx6MgWDu9hqO_Az_CbKLJ8azbMBgmlkgnY0gmlwhAVOUWOJc2VjcDI1NmsxoQOUZIqKLk5xkiH0RAFaMGrziGeGxypJ03kOod1-7Pum3oN0Y3CCfJyDdWRwgiMohXdha3UyDQ")]),
    None,
)
.await
.unwrap_or_else(|e| panic!("Could not create GraphcastAgentConfig: {e}"));

GraphcastAgentConfig takes in an optional vector for content topics. Here we explicitly provide a singleton vector of "ping-pong-content-topic", but you can define topics based on the radio's use case needs. If you leave the field as None, then the agent will automatically fetch your indexer's active allocations and create a list of topics in the format of radio application name + the allocated subgraph deployments' IPFS hash.

Next, we will instantiate a GraphcastAgent:

debug!("Initializing the Graphcast Agent");
let (graphcast_agent, waku_msg_receiver) = GraphcastAgent::new(graphcast_agent_config)
.await
.expect("Could not create Graphcast agent");

GraphcastAgent is the main struct through which the Radios communicate with the SDK.

And lastly for the setup part, we need to run two one-off setters for GraphcastAgent and for the incoming messages store:

// A one-off setter to load the Graphcast Agent into the global static variable
_ = GRAPHCAST_AGENT.set(graphcast_agent);

// A one-off setter to instantiate an empty vec before populating it with incoming messages
_ = MESSAGES.set(Arc::new(Mutex::new(vec![])));

Awesome, we're all set to start with the actual Radio logic now!

Sending messages

We'll define a helper function that holds the logic of sending messages to the Graphcast network:

// Helper function to reuse message sending code
async fn send_message(payload: SimpleMessage) {
    if let Err(e) = GRAPHCAST_AGENT
        .get()
        .expect("Could not retrieve Graphcast agent")
        .send_message(
            // The identifier can be any string that suits your Radio logic
            // If it doesn't matter for your Radio logic (like in this case), you can just use a UUID or a hardcoded string
            "ping-pong-content-topic",
            payload,
            Utc::now().timestamp(),
        )
        .await
    {
        error!(error = tracing::field::debug(&e), "Failed to send message");
    };
}

Again, the identifier that we define as ping-pong-content-topic can be any string that suits your Radio logic. If it doesn't really matter for your use case (like in the ping-pong Radio case), you can just use a UUID or a hardcoded string.

Receiving and handling messages

We now know how to send messages, but how do we receive and handle messages from other network participants?

After the GraphcastAgent validates the incoming messages, we provide a custom callback handler that specifies what to do with each message. In this handler we cache the message for later aggregation and processing, but depending on your Radio use case you are free to use any data storage option - a database, a custom data structure or a simple vector.

Here is a simple handler that does just that:

// The handler specifies what to do with incoming messages.
// This is where you can define multiple message types and how they get handled by the radio
// by chaining radio payload typed decode and handler functions
tokio::spawn(async move {
    for msg in waku_msg_receiver {
        trace!(
            "Radio operator received a Waku message from Graphcast agent, now try to fit it to Graphcast Message with Radio specified payload"
        );
        let _ = GRAPHCAST_AGENT
            .get()
            .expect("Could not retrieve Graphcast agent")
            .decoder::<SimpleMessage>(msg.payload())
            .await
            .map(|msg| {
                msg.payload.radio_handler();
            })
            .map_err(|err| {
                error!(
                    error = tracing::field::debug(&err),
                    "Failed to handle Waku signal"
                );
                err
            });
    }
});

GRAPHCAST_AGENT
    .get()
    .expect("Could not retrieve Graphcast agent")
    .register_handler()
    .expect("Could not register handler");

The main loop

Great, we're almost there! We have a way to pass messages back and forth 🏓. But sending a one-off message is no fun; we want some sort of scheduled, continuous message exchange, and perhaps the easiest way to do that is to use a block number as a cue.

We'll simulate listening to Ethereum blocks with a simple counter (a real Radio could source block numbers from the Graph Node), and on each block we'll do a simple check - if the block number is even we'll send a "Ping" message, and if it's odd we'll process the messages we've received. After processing the messages we'll clear our store.

let mut block_number = 0;

loop {
    block_number += 1;
    info!(block = block_number, "🔗 Block number");
    if block_number % 2 == 0 {
        // If block number is even, send ping message
        let msg = SimpleMessage::new(
            "table".to_string(),
            std::env::args().nth(1).unwrap_or("Ping".to_string()),
        );
        send_message(msg).await;
    } else {
        // If block number is odd, process received messages
        let messages = AsyncMutex::new(
            MESSAGES
                .get()
                .expect("Could not retrieve messages")
                .lock()
                .expect("Could not get lock on messages"),
        );
        for msg in messages.lock().await.iter() {
            if msg.content == *"Ping" {
                let replay_msg = SimpleMessage::new("table".to_string(), "Pong".to_string());
                send_message(replay_msg).await;
            };
        }

        // Clear message store after processing
        messages.lock().await.clear();
    }

    // Wait before next block check
    sleep(Duration::from_secs(5));
}

The finished Radio

Congratulations, you've now written your first full Graphcast Radio! The finished code is also available in this repo; the only important difference is in the dependencies.

That's awesome. But how do we run it?

You can start up the ping-pong Radio using cargo run.

You can spawn more instances of the ping-pong Radio and examine how they interact with each other in the terminal logs.
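
Since the message content defaults to "Ping" but is read from the first CLI argument (std::env::args().nth(1) in the loop above), you can also pass the payload explicitly:

cargo run
# or, passing the message content as an argument
cargo run -- "Ping"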

Now there's just one more thing to do - have fun examining the logs & be proud of yourself - you made it! 🥂 From here on out, the only limit to the Radios you can build is your own imagination.


Registry Contract

The Graphcast Registry contracts allow an address to set a GraphcastID by calling setGraphcastID(indexer_address, graphcastID_address) as either an Indexer or an Indexer operator, or calling setGraphcastID(graphcastID_address) as the Indexer address. The relationship between an Indexer address and its GraphcastID is limited to 1:1, and cannot be set to itself. This restriction provides consistency and security for the Indexer identity to operate on Graphcast as one GraphcastID operating across Radio applications. To learn more about the registry, you can check out the Github repository.

There are also subgraphs for these registry contracts. They provide information on both the Indexer registration status and the GraphcastID registration status, specifically mapping the indexer registered on The Graph service registry contract to GraphcastID registered on the Graphcast registry contract.

Register a Graphcast ID

The Graphcast Registry contract maps Graphcast IDs to Indexers in the Graph Protocol. With a unique Graphcast ID, an Indexer can sign messages for the Radio, eliminating the need to expose their private Indexer (or Indexer Operator) key or mnemonic. This provides an added layer of security, protecting Indexers' sensitive information while enabling participation in the Graphcast Network.

Here is a brief overview of the accounts you'll be interacting with:

Account Name               Description
Indexer Account            The existing account associated with your Graph Protocol Indexer. This may be a Token Lock Contract address, or a multisig or EOA address.
Indexer Operator Account   An account you have registered as an Operator for your Indexer. You can use the Operator account that you pass to indexer-agent.
Graphcast ID Account       A new account that you will create that is used by Graphcast Radio instances to sign messages on behalf of your Indexer.

You'll need to use a registered Indexer Operator account for your Indexer to register a Graphcast ID.

tip

You can register multiple Operators for your Indexer in parallel. If you would prefer not to import the Operator account that you use with indexer-agent into your wallet in order to register your Graphcast ID, you can generate and register a dedicated operator account for this purpose. After you have registered your Graphcast ID, you can deregister the dedicated operator if you desire.

  1. Generate a new Ethereum account to act as your Graphcast ID, keeping the details safe. Be sure to select the Ethereum network, and save the mnemonic, as well as the address and private key for the first account. This is your Graphcast ID.
  2. Import your Indexer Operator private key into your wallet (e.g. MetaMask or Frame) in order to send a transaction to register your Graphcast ID.
  3. Navigate to the Graphcast registry contract for your preferred network and register your Graphcast ID.
  4. Call setGraphcastIDFor(indexer_address, graphcast_id), passing in your Indexer Address and Graphcast ID (a sketch of this call follows this list). Neither address should be the Indexer Operator address that is signing the transaction.
  5. Submit your transaction and wait for it to be included in a block.
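
For illustration, step 4 could also be performed from the command line with Foundry's cast instead of a browser wallet. This is a sketch, not the only way to do it: the contract address shown is the Ethereum mainnet registry from the table below, and the environment variables are placeholders you must set yourself.

# Send setGraphcastIDFor from the registered Indexer Operator account
cast send 0x89f97698d6006f25570cd2e31737d3d22aedcbcf \
  "setGraphcastIDFor(address,address)" \
  "$INDEXER_ADDRESS" "$GRAPHCAST_ID" \
  --private-key "$OPERATOR_PRIVATE_KEY" \
  --rpc-url "$ETH_RPC_URL"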

Registry endpoints

Network            Registry Contract                            Subgraph API
Ethereum-mainnet   0x89f97698d6006f25570cd2e31737d3d22aedcbcf   https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet
Ethereum-goerli    0x26ebbA649FAa7b56FDB8DE9Ea17aF3504B76BFA0   https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli
Arbitrum-one       0xfae79e8cb8fbac2408e5baf89262bd92b6ca464a   https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-one
Arbitrum-goerli    0x50c2d70a41ecefe4cc54a331457ea204ecf97292   https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arbitrum-go
info

Each Graphcast ID can be associated with a single Indexer. To revoke a Graphcast ID for your Indexer, call setGraphcastIDFor(indexer_address, graphcast_id) with a Graphcast ID of 0x0 using a registered Indexer Operator Account.

Subgraph APIs

Here we list the APIs the team actively supports. For the network subgraph endpoint, we recommend exposing your indexer-service's /network query endpoint with authentication. You can also index and serve the registry subgraphs, but they are not currently deployed on the decentralized network.
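
As a quick sanity check, a network subgraph endpoint can be queried directly. The sketch below is illustrative: the host and auth header depend on your deployment, and minimumIndexerStake is one field the network subgraph exposes.

curl -s -X POST https://indexer.example.com/network \
  -H 'Content-Type: application/json' \
  -H 'Authorization: Bearer <token>' \
  -d '{"query": "{ graphNetwork(id: \"1\") { minimumIndexerStake } }"}'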

Here are the endpoints available on the hosted service.

Protocol Network   Graphcast Network   Registry Subgraph Endpoint                                                       Network Subgraph Endpoint
Ethereum Mainnet   mainnet             https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-mainnet      https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-mainnet
Goerli             testnet             https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-goerli       https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-goerli
Arbitrum-One       mainnet             https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-one      https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum
Arbitrum-Goerli    testnet             https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arbitrum-go  https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-goerli
Image copyright Eko Purnomo, courtesy of the Noun Project

Deploy, monitor and scale your Indexer on Kubernetes using Launchpad

Launchpad provides a toolbox for smoothly operating your Graph Protocol Indexer on Kubernetes

Image copyright Eko Purnomo, courtesy of the Noun Project

Join Graphcast to coordinate with other Indexers using Radios

Run Radios (P2P apps) in your stack to coordinate with other Indexers via the Graphcast Network


Considerations for Kubernetes installation using FCOS

This guide provides a general walkthrough for installing Kubernetes using Fedora CoreOS (FCOS) as the base operating system.

Prerequisites

Before proceeding with this guide, ensure you have a solid understanding of how FCOS works and the steps required to install and enable FCOS as detailed in Install FCOS Guide.

Additionally, a clear grasp of the fundamental Kubernetes architecture will greatly aid in navigating the guidance outlined ahead.

Key components for Kubernetes Installation

To set up Kubernetes on any node, you will require the kubeadm tool and a compatible container runtime.

Key features of kubeadm include (a short command sketch follows this list):

  • Cluster Initialization: kubeadm helps you initialize the control plane node of a Kubernetes cluster. It handles tasks like generating TLS certificates, creating the Kubernetes configuration files, and setting up the initial control plane components.

  • Node Joining: You can use kubeadm to add worker nodes to the cluster. This involves generating the necessary credentials and configurations for nodes to communicate with the control plane.

  • Upgrades: kubeadm assists in upgrading a Kubernetes cluster to a newer version by providing commands to perform version-specific upgrade tasks.

  • Configurations: The tool helps generate the necessary Kubernetes configuration files (e.g., kubeconfig) that enable communication between different components of the cluster.

  • Networking: While kubeadm itself does not handle networking directly, it can help you integrate with various networking solutions, such as Calico, Flannel, or others.

  • Token Management: kubeadm uses tokens for securely joining nodes to the cluster. It manages the generation and distribution of these tokens.

  • Certificate Management: kubeadm manages TLS certificates required for secure communication between cluster components.

  • Configuration Validation: kubeadm performs preflight checks to validate whether the host system is ready for cluster creation or joining.
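
To make these responsibilities concrete, here is a sketch of the kubeadm subcommands that correspond to them. All are real subcommands; the flags and file name are illustrative:

# Bootstrap the first control-plane node from a config file
kubeadm init --upload-certs --config cluster-config.yaml
# Mint a fresh join command (token management) for adding nodes
kubeadm token create --print-join-command
# Preview an upgrade before applying it
kubeadm upgrade plan
# Inspect the TLS certificates kubeadm manages
kubeadm certs check-expiration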

note

If you opt for a multi-node Kubernetes cluster, your Butane configurations will differ based on the specific role each node plays, whether it's a control plane or a worker node.

Butane config for Kubernetes control-planes

Running kubeadm init is the first step in setting up the Kubernetes control plane, but there are several additional tasks you need to perform to ensure that the control plane is fully functional and secure:

  1. Install kubectl: After running kubeadm init, you'll receive instructions on how to set up the kubectl command-line tool, which will be used to interact with the Kubernetes cluster.

  2. Set Up Network Plugin: Kubernetes requires a network plugin to enable communication between pods and nodes. Choose a network plugin that suits your needs (e.g., Calico, Flannel, Cilium) and install it on the cluster.

  3. Secure the Control Plane: Apply security best practices to the control plane components. For example, you can restrict access to the API server, enable RBAC (Role-Based Access Control), and set up authentication and authorization mechanisms.

  4. Back Up Certificates: Back up the Kubernetes certificates generated during the kubeadm init process. These certificates are critical for secure communication within the cluster.

  5. Configure Load Balancing: If you're setting up a high-availability control plane, you might need to configure load balancing for the API server to distribute traffic among multiple control plane nodes.

Remember that this list provides a general overview of the tasks you need to complete after running kubeadm init. The specific steps may vary depending on your cluster's requirements and the components you choose to install.

Butane config for Kubernetes worker nodes

On a worker node you need to perform the following steps for installing Kubernetes:

  1. Install the Container Runtime of your choice. This runtime is responsible for managing and running containers.

  2. Install the kubelet: The kubelet is the primary node agent responsible for managing containers on the node and ensuring they match the desired state described in the Kubernetes manifest files.

  3. Run kubeadm join: Once the container runtime and kubelet are installed and properly configured on the worker node, you can run the kubeadm join command to connect the node to the cluster's control plane.

  4. Network Configuration: After the node is joined to the cluster, you might need to configure network plugins (e.g., Calico, Flannel) to enable communication between nodes and pods.


FCOS Installation

Fedora CoreOS (FCOS) is an open-source container-focused operating system that is:

  • minimal
  • automatically updated
  • designed for clusters but can also be used standalone.

It is optimized for Kubernetes and includes technology from CoreOS Container Linux and Fedora Atomic Host, providing a secure and scalable container host for workloads.

Here are key differences between FCOS and traditional operating systems:

  • Package management: FCOS uses rpm-ostree for atomic updates (see the short sketch after this list), while traditional OSes use package managers like apt or yum.
  • Security: FCOS includes SELinux for enhanced security, while traditional OSes may require additional security configurations.
  • Containerization: FCOS is designed for container workloads, while traditional OSes may need extra setup for containers.
  • Automatic updates: FCOS provides automatic updates, while traditional OSes may require manual updates.
  • Minimal footprint: FCOS is optimized for running containers at scale, while traditional OSes have a broader range of software and features.
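
As a taste of what day-to-day management looks like on such a system, these are real rpm-ostree and systemd commands you would run on the FCOS host:

# Show the booted and any pending deployments
rpm-ostree status
# Check whether a newer deployment is available
rpm-ostree upgrade --check
# Reboot into a newly staged deployment
systemctl reboot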

This guide takes you through the different considerations required to install and configure FCOS. Note: the following instructions are for guidance only and do not represent step-by-step instructions.

Picking the right installation method

To install and configure FCOS, you need to use the coreos-installer tool. The following options for booting the OS are available:

  • Installing on bare metal:

    • Booting from live ISO using a KVM
    • Booting from PXE or iPXE
    • Booting from a container
    • Installing coreos-installer using cargo: not officially documented, but a good option for anyone running Hetzner servers or any other provider that doesn't offer PXE/iPXE boot and doesn't officially support FCOS images. The officially supported alternative for this option would be booting from live ISO. Example of a coreos-installer install using cargo:
      # install packages necessary for coreos-installer
      apt update && apt upgrade
      apt install pkg-config libssl-dev libzstd-dev
      # install cargo
      curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
      source "$HOME/.cargo/env"
      cargo install coreos-installer
      # install butane
      wget https://github.com/coreos/butane/releases/download/$YOUR_BUTANE_VERSION/butane-x86_64-unknown-linux-gnu
      chmod +x butane-x86_64-unknown-linux-gnu
  • Installing on cloud servers/VMs:

    • Official FCOS images can be used to provision new servers for AWS and GCP - these can be found under the Cloud Launchable section on the downloads page

Once an installation image is picked, it's time to customize the system.

Create a configuration

Fedora CoreOS follows the principles of immutable infrastructure, where the operating system and application components are treated as immutable, meaning they are not modified after deployment. Updates are delivered through "automatic updates" managed by the OS, following a rolling update strategy: new instances with updated images are provisioned, and old instances are replaced.

Treating the operating system as immutable:

  • reduces configuration drift
  • enhances system reliability
  • stateful components or data can still exist outside the operating system and have their own mechanisms for persistence and updates

To customize a Fedora CoreOS (FCOS) system, a configuration file needs to be provided which will be used by Ignition to provision the system.

This file will be used to customize various aspects of the system, such as creating a user, adding a trusted SSH key, enabling systemd services, and more.

To create an ignition file:

  • define a butane config in YAML format using the specification. Your butane file should contain the following minimum sections:
    • Ignition Version: Specify the version of the Ignition configuration format to use
    • Storage Configuration: Define the disk layout and filesystems for your FCOS installation. This includes partitioning, formatting, and mounting options.
    • Passwd Users: Set up user accounts for logging into the FCOS instance.
    • Systemd Units: Configure systemd units to manage services and perform system-level tasks.
    • Networkd Units: Configure network settings, including network interfaces, IP addressing, and DNS as required.
  • These are just the basic building blocks for a Butane configuration file. Depending on your specific requirements, you may need to include additional configuration options such as users, SSH keys, systemd units, networking, etc. You can refer to the Butane documentation and the FCOS documentation for more details and advanced configuration options.
  • An example of a butane file you can get started with containing the minimum requirement:
    variant: fcos
    version: 1.4.0
    storage:
      disks:
        - device: /dev/sda
          partitions:
            - number: 1
              size: 512MiB
              label: root
              filesystem: ext4
              should_exist: true
      filesystems:
        - name: root
          mount:
            path: /
            device: /dev/disk/by-partlabel/root
            format: true
    passwd:
      users:
        - name: myuser
          ssh_authorized_keys:
            - ssh-rsa AAAA...
    systemd:
      units:
        - name: my-service.service
          enable: true
          contents: |
            [Unit]
            Description=My Service

            [Service]
            ExecStart=/usr/bin/my-service
    networkd:
      units:
        - name: 00-eth0.network
          contents: |
            [Match]
            Name=eth0

            [Network]
            DHCP=ipv4
  • use the butane CLI (formerly the Fedora CoreOS Config Transpiler (fcct)) to convert the YAML config into a valid Ignition configuration (JSON format).
    butane --pretty --strict < /tmp/config.bu > /tmp/config.ign
    # or if using podman
    sudo podman run --interactive --rm quay.io/coreos/butane:release --pretty --strict < /tmp/config.bu > /tmp/config.ign

Install new OS with coreos-installer

Next, pass the config.ign file to coreos-installer.

coreos-installer install /dev/sda -i /tmp/config.ign

If you've run the above command, the following will happen on the host:

  1. The CoreOS Installer will install the Fedora CoreOS operating system onto the specified device (in this case, /dev/sda) using the provided Ignition configuration file (/tmp/config.ign).
  2. The installation process will partition and format the device, copy necessary files, and configure the bootloader.
  3. At this point, the user should reboot the system.
  4. Upon reboot, the system will start up with the newly installed Fedora CoreOS.
  5. After the initial boot, Fedora CoreOS will automatically manage updates using the rpm-ostree tool. It will fetch and apply updates in an atomic manner, ensuring a consistent and reliable system.
  6. You can log in to the system and start using Fedora CoreOS. As an immutable operating system, any modifications to the system outside of automatic updates are typically done by updating the Ignition configuration file and performing a reboot to apply the changes.

Next steps

The outlined steps mark the initial phase of grasping the workings of FCOS. For the different components that you'd need to include in your butane config to install Kubernetes, follow Advanced Kubernetes.


Upgrading Kubernetes ClusterConfig with kubeadm

When managing a Kubernetes cluster with kubeadm, there could be scenarios where you need to update the ClusterConfiguration independently of performing version upgrades. This guide walks you through those steps.

Kubeadm maintains the cluster configuration within a ConfigMap (kubeadm-config) in the kube-system namespace. If you modify this ConfigMap, the changes won’t be applied automatically to the running control plane components.
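
You can inspect the currently stored configuration before editing it:

kubectl get configmap kubeadm-config -n kube-system -o yaml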

To apply the changes to a control-plane node, you will have to perform a kubeadm upgrade after editing the kubeadm-config ConfigMap. The general steps would look like this:

Pick a control-plane node to be the first to upgrade, followed by:

1: Edit kubeadm-config ConfigMap with desired changes:

kubectl edit cm -o yaml kubeadm-config -n kube-system

2: Verify the upgrade plan:

kubeadm upgrade plan

3: Perform the upgrade:

Note: If you have local patches applied to your Kubernetes setup (i.e. altering the kube-scheduler or kube-controller-manager configurations for better performance under specific workloads or hardware configurations), ensure they are included or updated appropriately during the upgrade process. To do this, pass the --patches /path/to/your/patches flag to your kubeadm upgrade apply command.

kubeadm upgrade apply v1.28.3

Note: When using kubeadm upgrade apply, a version must be specified. If you do not intend to upgrade the Kubernetes version, simply specify the currently installed version. This allows you to apply changes without altering the Kubernetes version.

Steps 2 and 3 will need to be performed on every node, both control-plane and worker nodes, as applicable depending on the changes. Once those steps are performed, you should see the etcd and kube-apiserver pods restarted. After you perform these steps, the changes you made in the kubeadm-config ConfigMap will be active.
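
One way to confirm that the control-plane static pods picked up the new configuration (the label selector shown is the one kubeadm applies to its static pods):

kubectl get pods -n kube-system -l tier=control-plane -o wide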

Note: When making modifications that affect etcd, it’s crucial to confirm that the changes have been successfully applied. Ensure that the new etcd pod is integrated into the cluster and maintains a minimum quorum of two before proceeding to apply changes to the subsequent control plane. This step is vital for sustaining the stability and resilience of the cluster during the update process.
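
A sketch of one way to check etcd member health from a control-plane node, using the etcdctl binary inside the etcd static pod (substitute your node name; the certificate paths are the kubeadm defaults):

kubectl -n kube-system exec etcd-<node-name> -- etcdctl \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/server.crt \
  --key=/etc/kubernetes/pki/etcd/server.key \
  endpoint health --cluster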


Upgrading Kubernetes with kubeadm

In this guide, we will use upgrading from Kubernetes v1.28.1 to v1.28.3 as an example.

The control-plane nodes must be upgraded first, followed by the worker nodes.

Upgrade Control-Plane Nodes

Pick a control-plane node to be the first to upgrade, followed by:

1: Upgrade kubeadm to the latest patch release of the desired minor version:

apt-get update
apt-mark unhold kubeadm
apt-get install -y kubeadm='1.28.3-*'
apt-mark hold kubeadm

2: Verify the upgrade plan:

kubeadm upgrade plan v1.28.3

3: Drain the node:

kubectl drain <node-name> --ignore-daemonsets

4: Perform the upgrade:

kubeadm upgrade apply v1.28.3

5: Upgrade the node's CRI-O or other container runtime to an appropriate version if need be. For CRI-O that would be changing the minor version in the repositories added to /etc/apt/sources.list.d and then running:

apt-get update
apt-get install cri-o cri-o-runc
systemctl daemon-reload
systemctl restart crio

6: Upgrade kubelet and kubectl

apt-get update
apt-mark unhold kubelet
apt-mark unhold kubectl
apt-get install -y kubelet='1.28.3-*'
apt-get install -y kubectl='1.28.3-*'
apt-mark hold kubelet
apt-mark hold kubectl

7: Restart kubelet

systemctl daemon-reload
systemctl restart kubelet

8: Uncordon the node

kubectl uncordon <node-name>

9: Possibly, upgrade the CNI. Particularly for a minor version upgrade, there may be a need to update the CNI to a new version as well, according to the vendor's release notes for the upgrade process
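
For example, with Cilium (used elsewhere in these docs) an upgrade via the cilium CLI might look like the following; the target version is illustrative and should come from the release notes:

cilium upgrade --version 1.14.2
cilium status --wait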

Upgrade remaining Control-Plane Nodes

For the remaining control-plane nodes, execute steps 1 to 8, one at a time, with these differences:

  • step 2 is skipped, no need to plan the upgrade anymore
  • step 4 is replaced by:
kubeadm upgrade node

Upgrade Worker Nodes

After all the control-plane nodes are upgraded, it's time to upgrade your worker nodes by following the previous steps 1 to 8, with these differences:

  • step 2 is skipped
  • step 4 is replaced by:
kubeadm upgrade node

  Note: You can upgrade as many worker nodes in parallel as you see fit and/or find adequate to your availability requirements, as the nodes being upgraded will be drained of workloads.

Kubernetes Guide - Bootstrapping with Kubeadm

Kubeadm always bootstraps a cluster as a single control-plane node; other nodes are added after the bootstrapping.

We're going to create a YAML file instead of passing all the options as flags to kubeadm. Create a cluster-config.yaml file as the following:

apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    cgroup-driver: systemd
  taints: []
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "10.10.0.0/16"
kubernetesVersion: "v1.25.9"
controlPlaneEndpoint: <endpoint_ip_or_dns>

where you must replace <endpoint_ip_or_dns> with the control-plane's endpoint and, optionally, choose a different podSubnet and/or serviceSubnet. Documentation on the many configuration options available can be found here.

Next, you can use kubeadm to bootstrap the cluster with:

kubeadm init --upload-certs --config cluster-config.yaml

after which, if all goes well, one should see output similar to this:

[root@demo /]# kubeadm init
I0515 19:48:51.424146 1642628 version.go:256] remote version is much newer: v1.27.1; falling back to: stable-1.25
[init] Using Kubernetes version: v1.25.9
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [demo kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 134.177.177.107]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [demo localhost] and IPs [134.177.177.107 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [demo localhost] and IPs [134.177.177.107 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 4.502328 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node demo as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node demo as control-plane by adding the taints [node-role.kubernetes.io/control-plane:NoSchedule]
[bootstrap-token] Using token: 4y3umx.fnuv7v9pgp4jn74b
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of control-plane node by running the following command on each as a root:

kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866 --control-plane --certificate-key f8902e114ef118304e561c3ecd4d0b543adc226b7a07f675f56564185ffe0c07

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use kubeadm init phase upload-certs to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.0.200:6443 --token 9vr73a.a8uxyaju799qwdjv --discovery-token-ca-cert-hash sha256:7c2e69131a36ae2a042a339b33381c6d0d43887e2de83720eff5359e26aec866

(Note: Save these kubeadm join commands presented in this output, as they contain secrets that will be required to add more nodes in future steps.)

This being a control-plane node, kubeadm will have created a kubeconfig file in /etc/kubernetes/admin.conf. A kubeconfig file is a YAML file that contains the required metadata and credentials to talk to the cluster, such as certificates/tokens and an endpoint specification. Kubectl uses whatever kubeconfig file the KUBECONFIG environment variable points at or, by default, the file in ~/.kube/config. So, as suggested in the output, we should do:

export KUBECONFIG=/etc/kubernetes/admin.conf

Now kubectl should be set up to interact with the cluster. Try it by running the following command:

[root@demo /]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
demo Ready control-plane 10s v1.25.9

Installing a CNI

Kubernetes follows a very modular, API-interface-based design. Some of those components, like the CSI (https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/), the CNI (https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/), or the Ingress controller, come together to form the core of most Kubernetes platform setups.

The CNI is the module that will take care of enabling networking between containers and services in different nodes, or setup each container's networking properties. As such, it is a critical next step in order to add more nodes to the cluster, or even run workload containers.

We have chosen to use cilium as a CNI solution, but there are many options to choose from.

We'll go ahead and fetch the cilium binary from upstream by running the following script:

CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
cd /usr/local/bin
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}

and then proceed to install cilium with default options by running:

/usr/local/bin/cilium install

Adding more control-plane nodes

If you have gone with the default topology setup, kubeadm will have instantiated etcd instances co-located with your control-plane nodes. Given that, and the fact that etcd is a majority-quorum-based system, it's especially important for a high-availability setup to keep an odd (i.e. one, three, five, ...) number of control-plane nodes. As such, the minimum number of control-plane nodes that can offer high availability is three.

To add more control-plane nodes you need to first get the hosts ready for such by:

  • preparing the node OS as required
  • provisioning the required tools and software as in the first bootstrapping node (container runtime engine, kubelet, kubeadm, kubectl, ...)

and then execute, on that node, the appropriate kubeadm join command as shown in the previous kubeadm init output. For a control-plane node, that takes the form:

kubeadm join <endpoint> --token <secret> --discovery-token-ca-cert-hash sha256:<hash> --control-plane --certificate-key <secret>

Note: the kubeadm join commands shown after bootstrapping the cluster or, rather, the secrets uploaded and displayed are temporary and expire after a certain time. In case you lost them or they've expired, you can re-upload new certificates and display the new ones, on the bootstrapping control-plane node, by running:

kubeadm init phase upload-certs --upload-certs
kubeadm token create --print-join-command

Adding worker nodes

To add worker nodes to your cluster, first get them ready by:

  • preparing the node OS as required
  • provisioning the required tools and software as in the first bootstrapping node (container runtime engine, kubelet, kubeadm, kubectl, ...)

Next, you can run the appropriate kubeadm join command that was displayed at cluster bootstrap. It has the form:

kubeadm join <endpoint> --token <secret> --discovery-token-ca-cert-hash sha256:<hash>

In case you haven't saved that output, you can run (on one of the existing control-plane cluster members) the following command:

kubeadm token create --print-join-command

which will display the appropriate kubeadm join command and the relevant secrets, again.

QuickStart on Ubuntu 22.04 with CRI-O

Note: This guide assumes you'll be running these commands as root.

Prerequisites

1: Enable the required kernel modules on boot:

cat <<EOF > /etc/modules-load.d/crio-network.conf
overlay
br_netfilter
EOF

and load them now:

modprobe overlay
modprobe br_netfilter

2: Set appropriate networking sysctl toggles:

cat <<EOF > /etc/sysctl.d/99-kubernetes.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF

and apply them immediately:

sysctl --system

3: Disable swap:

swapoff -a

and take care to disable swap setup on boot, in case it is enabled (for example in /etc/fstab)
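
One way to do both at once is to turn swap off now and comment out any swap entries in /etc/fstab so it stays off across reboots (the sed pattern is a common recipe; review the file afterwards):

swapoff -a
sed -i.bak '/\sswap\s/s/^/#/' /etc/fstab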

Install packages

4: Install dependencies:

apt-get update
apt-get install -y apt-transport-https ca-certificates curl gpg

5: Set variables for CRI-O commands:

export OS="xUbuntu_22.04"
export VERSION="1.28"

6: Install CRI-O:

echo "deb [signed-by=/usr/share/keyrings/libcontainers-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list
echo "deb [signed-by=/usr/share/keyrings/libcontainers-crio-archive-keyring.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/ /" > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable:cri-o:$VERSION.list

mkdir -p /usr/share/keyrings
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-archive-keyring.gpg
curl -L https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable:/cri-o:/$VERSION/$OS/Release.key | gpg --dearmor -o /usr/share/keyrings/libcontainers-crio-archive-keyring.gpg

apt-get update
apt-get install -y cri-o cri-o-runc

systemctl daemon-reload
systemctl enable --now crio

7: Install kubernetes packages:

mkdir -p /etc/apt/keyrings
curl -fsSL https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/Release.key | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v${VERSION}/deb/ /" > /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl

8: Hold package versions so they don't auto-update:

apt-mark hold kubelet kubeadm kubectl

Initialize the Cluster

9: Create a kubeadm config for initializing the Cluster:

cat << EOF > /tmp/cluster-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
nodeRegistration:
  kubeletExtraArgs:
    cgroup-driver: systemd
    node-ip: 10.110.0.2
  taints: []
skipPhases:
- addon/kube-proxy
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
networking:
  serviceSubnet: "10.96.0.0/16"
  podSubnet: "10.10.0.0/16"
controllerManager:
  extraArgs:
    allocate-node-cidrs: "true"
    node-cidr-mask-size: "20"
kubernetesVersion: "v1.28.3"
controlPlaneEndpoint: 10.110.0.2
EOF

Note: If you intend to set up an HA Cluster, you should take care of setting up the VIP beforehand (be it by creating a Load Balancer in a Cloud Provider, or using a bare-metal solution based on something like Keepalived). That VIP (or DNS name) should go into controlPlaneEndpoint, as changing it after creating the Cluster is an elaborate endeavour.

We are specifying a particular node-IP to ensure usage of the internal interface, as our node has multiple interfaces/IPs. We are also skipping the kube-proxy installation because we plan to use Cilium CNI, which will replace kube-proxy.

10: Initialize the Cluster:

kubeadm init --upload-certs --config /tmp/cluster-config.yaml

11: Copy kubeconfig to ~/.kube/config:

mkdir -p ~/.kube
cp /etc/kubernetes/admin.conf ~/.kube/config

12: Verify the cluster is online and ready with kubectl get nodes:

NAME                             STATUS   ROLES           AGE   VERSION
ubuntu-s-2vcpu-4gb-amd-ams3-01   Ready    control-plane   85m   v1.28.3

Install Cilium CNI

13: Install the cilium CLI binary:

CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/master/stable.txt)
CLI_ARCH=amd64
cd /usr/local/bin
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}

14: Install cilium CNI with cilium install:

ℹ  Using Cilium version 1.14.2
🔮 Auto-detected cluster name: kubernetes
🔮 Auto-detected kube-proxy has not been installed
ℹ Cilium will fully replace all functionalities of kube-proxy

15: Wait a minute and verify it has been deployed successfully with cilium status:

    /¯¯\
 /¯¯\__/¯¯\    Cilium:             OK
 \__/¯¯\__/    Operator:           OK
 /¯¯\__/¯¯\    Envoy DaemonSet:    disabled (using embedded mode)
 \__/¯¯\__/    Hubble Relay:       disabled
    \__/       ClusterMesh:        disabled

Deployment             cilium-operator    Desired: 1, Ready: 1/1, Available: 1/1
DaemonSet              cilium             Desired: 1, Ready: 1/1, Available: 1/1
Containers:            cilium             Running: 1
                       cilium-operator    Running: 1
Cluster Pods:          2/2 managed by Cilium
Helm chart version:    1.14.2
Image versions         cilium             quay.io/cilium/cilium:v1.14.2@sha256:6263f3a3d5d63b267b538298dbeb5ae87da3efacf09a2c620446c873ba807d35: 1
                       cilium-operator    quay.io/cilium/operator-generic:v1.14.2@sha256:52f70250dea22e506959439a7c4ea31b10fe8375db62f5c27ab746e3a2af866d: 1

Congratulations! 🎉

Add more nodes

Control-Plane nodes

1: On each node, repeat the previous steps for prerequisites and package installs (steps 1 to 8)

2: Create a kubeadm join config:

cat << EOF > /tmp/join-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    token: <token>
    apiServerEndpoint: <control plane endpoint>
    caCertHashes:
    - <ca cert hash>
nodeRegistration:
  kubeletExtraArgs:
    cgroup-driver: systemd
    node-ip: 10.110.0.5
controlPlane:
  certificateKey: <ca certificate key>
EOF

The <token>, <ca cert hash> and <ca certificate key> will have been output by kubeadm at the initialization step (step 10 above). If you no longer have them, or the token has expired, you can get a new certificateKey with:

kubeadm init phase upload-certs --upload-certs

and obtain the token and certificate hash with:

kubeadm token create --print-join-command

We're setting node-ip here because our nodes have multiple IPs and we want to specify which interface the services should listen on.

3: On each node, once its join-config.yaml has been adjusted as required, join it to the cluster with:

kubeadm join --config /tmp/join-config.yaml

Worker nodes

1: On each node, repeat the previous steps for prerequisites and package installs (steps 1 to 8)

2: Create a kubeadm join config:

cat << EOF > /tmp/join-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
kind: JoinConfiguration
discovery:
  bootstrapToken:
    token: <token>
    apiServerEndpoint: 10.110.0.2:6443
    caCertHashes:
    - <ca cert hash>
nodeRegistration:
  kubeletExtraArgs:
    cgroup-driver: systemd
    node-ip: 10.110.0.7
  taints: []
EOF

The <token> and <ca cert hash> will have been output by kubeadm at the initialization step (step 10 above). If you no longer have them, or the token has expired, you can obtain them again by running, on a control-plane node:

kubeadm token create --print-join-command

We're setting node-ip here because our nodes have multiple IPs and we want to specify which interface the services should listen on.

3: On each node, once its join-config.yaml has been adjusted as required, join it to the cluster with:

kubeadm join --config /tmp/join-config.yaml

4: Label the new worker nodes, by running on a control-plane node:

kubectl label node <node_name> node-role.kubernetes.io/worker=""
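
You can verify that the label took effect by listing the nodes again; the new workers should now report the worker role:

kubectl get nodes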

Client Side Tooling

Launchpad comes with an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your cluster software stack.

Client Side Stack

These tools do not run on your servers, but on your local machine. They form the command & control center that you use to send instructions to your cluster.

Installing on your local machine

Launchpad comes with a task to install local dependencies on your machine. See the Quick Start Guide for more information.

Understanding the tools in the client-side stack

Taskfile

Taskfile is a simple task runner for automation and devops tasks. It allows you to define tasks in a single file, Taskfile.yml, and run them in a consistent, cross-platform way. It can be used to automate anything from building and deploying applications to running tests and linting code. Taskfile is written in Go and is easy to install and use.

Launchpad uses task as the primary command line interface. You can also define your own tasks!
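
For illustration, a custom task (hypothetical, not one that ships with Launchpad) can be declared in a few lines of Taskfile.yml:

version: '3'

tasks:
  greet:
    desc: Print a greeting
    cmds:
      - echo "Hello from Launchpad"

Running task greet would then execute the listed command.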

Helm

Helm is a package manager for Kubernetes that helps you manage and automate the deployment of your applications. It allows you to define, install, and upgrade Kubernetes resources in a consistent, versioned way. Helm uses a simple templating syntax to allow you to parameterize your deployments and create reusable chart templates. Helm also provides a variety of pre-built charts for popular software.

Launchpad uses Helm to deploy packages (Helm Charts) into your cluster.

Helmfile

Helmfile is a tool for managing multiple Helm charts together. It allows you to define a set of Helm releases in a single file, and then use one command to install, upgrade, or delete all of those releases at once. This makes it easy to manage complex, multi-chart applications. Helmfile is written in Go and is easy to install and use.

Launchpad uses Helmfile to declare and manage sets of related Helm releases.
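
As a minimal sketch (the release name, namespace, and values file here are illustrative), a helmfile.yaml declares a set of releases that a single command will reconcile:

repositories:
- name: graphops
  url: https://graphops.github.io/launchpad-charts

releases:
- name: erigon
  namespace: eth-mainnet
  chart: graphops/erigon
  values:
  - erigon-values.yaml

Running helmfile apply would then install or upgrade every release in the file at once.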

Kustomize

Kustomize lets you customize raw, template-free YAML files for multiple purposes, leaving the original YAML untouched and usable as is. It is used by helmfile for some of its features.
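
For illustration, a minimal kustomization.yaml (file names hypothetical) that layers a patch over an untouched base manifest:

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization

resources:
- deployment.yaml

patches:
- path: increase-replicas.yaml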

Kubectl

Kubectl is the command-line interface for Kubernetes that allows you to deploy, scale, and manage applications on a Kubernetes cluster. It provides a simple, easy-to-use command-line interface for performing common Kubernetes tasks such as creating and managing pods, services, and deployments.

Launchpad uses Kubectl to interact with your Kubernetes cluster.

Launchpad Documentation

Launchpad is a comprehensive toolkit designed for running a Graph Protocol Indexer on Kubernetes, aimed at providing the fastest route to production deployments of multi-chain indexing software stacks with robust security and performance defaults.

Launchpad is suitable for environments ranging from a single node cluster to large scale multi-region clusters. Launchpad is also comprised of an opinionated set of tools that run on your local machine, that are layered to offer a declarative workflow for managing your deployment stack.

Key components of Launchpad include the Launchpad Starter (graphops/launchpad-starter), which serves as the initial setup point for new deployments; Launchpad Charts (graphops/launchpad-charts), a collection of Helm Charts for blockchains and web3 applications; Launchpad Namespaces (graphops/launchpad-namespaces), which are preconfigured Kubernetes Namespaces that utilize Helmfile for enhanced management; and Launchpad Taskfiles (graphops/launchpad-taskfiles), a collection of Tasks defined with Taskfile.

Here's a guide to help you navigate this documentation based on the information you're seeking:

First steps

Are you new to Launchpad or to Kubernetes? Here's a high-level overview of how this documentation is organised, to help you know where to look for the information you need:

Getting help

Having trouble? We'd like to help!

Getting Involved

Launchpad is a collaborative effort to create the best UX for Graph Protocol Indexers on Kubernetes. As such, contributors are highly appreciated and welcome. Visit the GitHub repos' contribution guidance to contribute code to Launchpad Charts or Launchpad Namespaces.

You can also get involved by simply attending our biweekly Launchpad Office Hours (LOH) community call on Discord. You can access previous LOH recordings here.

Frequently Asked Questions (FAQs)

Here are answers to some commonly asked questions. If you have a question that is not covered here, feel free to ask.


Table of Contents


Do I need a server for launchpad-starter?

Q: Do I need a server for launchpad-starter?

A: No! The Client Side Tooling that comes with Launchpad should be run on your local machine. These tools are only used to instruct your cluster what to do.


When you setup postgres, how do you configure the zfs storage parameters?

Q: When you setup postgres, how do you configure the zfs storage parameters (eg the block size, compression, etc) ?

A: Persistent workloads consume Persistent Volumes that use some specific StorageClass (an abstraction). Storage Providers in Kubernetes (like openebs/zfs-localpv) do the operational work of "implementing" those Storage Classes. It is the StorageClass object/resource that carries that particular ZFS setup, controlled by its parameters. Here's an example of a ZFS StorageClass that sets some parameters:

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  annotations:
    meta.helm.sh/release-name: openebs-zfs-storageclass
    meta.helm.sh/release-namespace: storage
  labels:
    app.kubernetes.io/managed-by: Helm
    launchpad.graphops.xyz/layer: base
    launchpad.graphops.xyz/namespace: storage
  name: openebs-zfs-localpv-compressed-128k
parameters:
  compression: "on"
  fstype: zfs
  poolname: zpool
  recordsize: "128k"
provisioner: zfs.csi.openebs.io
reclaimPolicy: Delete
volumeBindingMode: WaitForFirstConsumer
allowVolumeExpansion: true
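
Workloads then opt into that ZFS configuration simply by requesting the class in a PersistentVolumeClaim; a minimal sketch (name and size illustrative):

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-data
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: openebs-zfs-localpv-compressed-128k
  resources:
    requests:
      storage: 100Gi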

Is there a way to inject a pretuned postgres config into the chart?

Q: Is there a way to inject a pretuned postgres config into the chart? Or is that a post deployment step?

A: Yes. The resource-injector chart allows us to inject a pre-tuned postgres database definition. That postgresql resource is a CRD (Custom Resource Definition) consumed by the postgres-operator, which does the whole operational work of creating the database, setting up users, and so on; replication and backups are also a possibility. It really allows a great deal of flexibility in what the database setup looks like!
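
For a rough idea of what such an injected resource can look like, here is a minimal sketch of a postgresql custom resource in the style of the Zalando postgres-operator (all names and values are illustrative; consult the operator's documentation for the authoritative schema):

apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: graph-primary-subgraph-data
  namespace: graph-arbitrum-sepolia
spec:
  teamId: graph
  numberOfInstances: 2
  volume:
    size: 500Gi
    storageClass: openebs-zfs-localpv-compressed-128k
  postgresql:
    version: "15"
    parameters:
      # Pre-tuned settings injected with the cluster definition (illustrative)
      shared_buffers: "4GB"
      max_connections: "400"
      random_page_cost: "1.1"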


Why are my stateful pods in Pending state and their expected pvc showing WaitForFirstConsumer errors?

Q: Why are my stateful pods in Pending state and their expected pvc showing WaitForFirstConsumer errors?

Normal  WaitForPodScheduled  26m (x19 over 31m)   persistentvolume-controller  waiting for pod kube-prometheus-stack-grafana-75b74df8fb-2vwbr to be scheduled
Normal  WaitForPodScheduled  47s (x102 over 26m)  persistentvolume-controller  waiting for pod kube-prometheus-stack-grafana-75b74df8fb-2vwbr to be scheduled

or

Normal  WaitForFirstConsumer  6m52s                   persistentvolume-controller  waiting for first consumer to be created before binding     

A: volumeBindingMode: WaitForFirstConsumer, although needed for both openebs-rawfile-localpv and openebs-zfs-localpv, seems to misbehave when a storageClass is set as the cluster default (i.e. its definition carries the annotation storageclass.kubernetes.io/is-default-class: "true"). Making sure there is no default storageClass should fix this issue.
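
To check for a default StorageClass and clear the annotation (the patch command follows the upstream Kubernetes documentation; replace <name> with the offending class):

kubectl get storageclass
kubectl patch storageclass <name> -p '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "false"}}}'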


Do I need to use Cilium for Launchpad?

Q: Do I need a specific CNI (Cilium, Calico etc) in order to use Launchpad?

A: The Launchpad stack will work regardless of the CNI used and, in more general terms, should work with all Kubernetes clusters, so you can customize your cluster how you prefer. In our Kubernetes guide we use Cilium due to its use of eBPF technology. This advanced approach offers a significant boost in efficiency, especially noticeable when managing a large number of nodes. It scales well and ensures lower latency, which is crucial for high-performance requirements. While Calico does enjoy a broader base of community support and is a strong choice with its iptables routing, Cilium has the upper advantage due to its performance and its more expansive set of features.

It's important to acknowledge that while Cilium has better performance and features than Calico, it is a bit trickier to set up. Our decision isn't influenced by Launchpad; it's purely a preference based on the operational benefits that Cilium brings to our infrastructure.


How active is the Launchpad project?

Q: How often is the Launchpad project updated?

A: The GraphOps team actively maintains the Launchpad project as it is integral to their indexing infrastructure. For details on how new versions of applications (e.g. Erigon, graph-node, etc.) are integrated into Launchpad Charts and Launchpad Namespaces, please refer to our Release Channels documentation. Additionally, you can learn about our criteria for supporting different Launchpad Namespaces by visiting the Supported Namespaces page. These resources provide a comprehensive view of our update frequency and decision-making processes regarding the inclusion of new features and applications.


I'm not ready to use Launchpad but I use Kubernetes

Q: Is this project relevant to me if I use Kubernetes to manage blockchain infrastructure?

A: Absolutely, the Launchpad project is designed with modularity at its core, making it highly adaptable for users who aren't ready to fully implement all of its components. You can benefit from using our Launchpad Charts with Helm to manage specific components of your blockchain infrastructure independently. Additionally, our charts are compatible with GitOps workflows, allowing you to integrate them seamlessly into your existing management practices. For further insights into how you can leverage the modular aspects of our project, please visit our Modularity documentation.

Need More Help?

If your question is not answered here, you can message us on the #kubernetes-launchpad channel on graphprotocol Discord or you can open an issue on our launchpad-namespaces or launchpad-charts repos.

Introduction

Launchpad is a toolkit for running a Graph Protocol Indexer on Kubernetes. It aims to provide the fastest path to production multi-chain indexing, with sane security and performance defaults. It should work well whether you have a single node cluster or twenty. It comprises an opinionated set of tools on your local machine, layered over one another to provide a declarative workflow to manage your deployment stack.

There are four major components to be aware of:

  1. Launchpad Starter (graphops/launchpad-starter): A starting point for every new Launchpad deployment
  2. Launchpad Charts (graphops/launchpad-charts): A collection of Helm Charts for blockchains and web3 apps
  3. Launchpad Namespaces (graphops/launchpad-namespaces): A collection of preconfigured Kubernetes Namespaces using Helmfile
  4. Launchpad Taskfiles (graphops/launchpad-taskfiles): A collection of preconfigured Tasks using Taskfile

Launchpad components

Features

  • Actively maintained by GraphOps and contributors
  • An opinionated starter (launchpad-starter) to define and manage your stack in a declarative, version controlled manner
  • A collection of Helm Charts for deploying and monitoring blockchain nodes and Graph Protocol Indexers in Kubernetes, with P2P NodePort support
  • Preconfigured namespaces for core cluster functions (logging, monitoring, etc) and major blockchains
  • An automated dependency update pipeline for graphops/launchpad-charts and graphops/launchpad-namespaces

Are you interested in exploring Launchpad but not ready to adopt the entire stack? Explore our Modularity page to discover how you can selectively integrate elements of Launchpad, like launchpad-starter, launchpad-charts, and launchpad-namespaces, to fit your specific needs without committing to a full end-to-end implementation.

Next steps

  • Visit our Documentation Map for an overview of where to find all the information you need
  • Read the Prerequisites section to understand what you need to get started
  • Read the Quick Start guide to get up and running
  • Look at the repositories above on GitHub to understand how they work

Modularity

The full Launchpad stack contains:

  1. Launchpad Starter (graphops/launchpad-starter): A starting point for new Launchpad deployments
  2. Launchpad Charts (graphops/launchpad-charts): A collection of Helm Charts for blockchains and web3 apps
  3. Launchpad Namespaces (graphops/launchpad-namespaces): A collection of preconfigured Kubernetes Namespaces using Helmfile

We have designed Launchpad with modularity in mind, so users can pick which elements of the stack to utilise in their own infrastructure. Below you can find some options.

Using launchpad-starter

Using launchpad-starter as a starter repo for your own IaC (Infrastructure as Code) repo is the recommended approach. launchpad-starter comes with a sane set of defaults and leverages Helmfile to declaratively specify and orchestrate releases of software in your Kubernetes cluster.

See our Quick Start guide and the launchpad-starter repo for more information.

Using launchpad-namespaces without launchpad-starter

As a user, you have the flexibility to choose whether or not to utilize the launchpad-starter repository.

If you decide not to use it, you can create your own repository that includes a straightforward helmfile.yaml file, which will orchestrate the execution of various launchpad-namespaces that align with your specific requirements. An illustrative example can be found in sample.helmfile.yaml.

By opting out of launchpad-starter, you are essentially choosing not to leverage:

  • Taskfile definitions that encompass commonly utilized tasks
  • The automated process that installs all essential local tool dependencies on your personal machine
  • The regularly refreshed sample.helmfile.yaml configuration

Using launchpad-charts without launchpad-namespaces or launchpad-starter

Users also have the choice to utilise launchpad-charts on its own.

For example, if you wanted to run one of our charts manually without utilising helmfile:

helm repo add graphops https://graphops.github.io/launchpad-charts
helm install erigon graphops/erigon
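
If you need to customize the release, one common pattern (the values file name is illustrative) is to export the chart's defaults and install with your edited copy:

helm show values graphops/erigon > erigon.values.yaml
helm install erigon graphops/erigon --values erigon.values.yaml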

Another option could be to utilise ArgoCD as a GitOps continuous delivery tool for managing Kubernetes applications. In this case the user would not need launchpad-starter or launchpad-namespaces and instead could use the launchpad-charts in conjunction with ArgoCD. An example of how to configure Argo with helm-charts can be found here.

Prerequisites

You will need some things to use Launchpad for your infrastructure:

A basic understanding of infrastructure

We expect that you are familiar with infrastructure basics, including:

  • Linux
  • Networking, DNS
  • SSH and authentication
  • Storage fundamentals
  • Basic system administration

A basic, functional knowledge of git

The Launchpad stack advocates for declarative, version controlled infrastructure. This means the declarative state of your infrastructure will be committed into a private git repo as it evolves over time. You will need to be able to perform basic git workflows like:

  • Staging files (e.g. git add .)
  • Committing changes and pushing code (e.g. git push origin main)
  • Viewing the repo history (e.g. git show, git log, or using GitHub)

More advanced users will benefit from understanding how to pull and rebase, but this is not a requirement.

A basic understanding of operating a Graph Protocol Indexer

We will assume a basic understanding of the Graph Protocol Indexing stack, as well as some of the operational requirements of Indexing.

See Other Resources for links to helpful resources.

A client machine

Launchpad comes with a series of tools that should run on a client device. This is most likely your local machine. These tools should not run on your servers. Instead, they help you instruct your cluster of servers to do what you want.

Currently, Launchpad comes with support for Linux and macOS clients. Windows is currently not supported, though you may be able to use Launchpad via the Windows Subsystem for Linux.

Knowledge of Kubernetes and a Kubernetes cluster

The Launchpad project requires a certain level of familiarity with Kubernetes and its intricacies. The extent of this Kubernetes expertise depends on your choice of cluster. Opting for a managed cluster from a leading Cloud Provider requires less intensive Kubernetes knowledge, as operating such a cluster is more straightforward, necessitating only a fundamental grasp of different Kubernetes resource types.

However, it's essential to note that managed clusters can be very costly when running blockchains. In contrast, selecting a self-managed cluster demands a deeper understanding, encompassing all components necessary for cluster provisioning and management. Regardless of your choice, you'll need to create a Kubernetes cluster.

For a detailed exploration of setting up a Kubernetes cluster yourself, please refer to our Kubernetes guide. If you choose to set up a self-managed cluster, you might consider using Fedora CoreOS as one of the possible options, detailed in our Fedora CoreOS guide, among other methods.

Operational knowledge of Helm

Launchpad operates in tandem with Helm and Helm Charts. However, there's no need to worry if you're new to Helm or chart authoring; we've got you covered. Launchpad leverages a combination of widely used and publicly available charts (e.g. grafana/helm-charts), along with our in-house helm-charts, launchpad-charts. This ensures a seamless experience without the need for in-depth Helm expertise.

In addition, we have abstracted some of the Helm usage behind tasks (e.g. task releases:apply or task releases:delete), as outlined in our Quick Start guide. As such, all you need is a basic understanding of Helm's core functions and release management. Writing helm-charts is not a prerequisite for most users, as we provide the necessary charts to streamline your experience.

Willingness to learn and contribute

Launchpad is a collaborative effort to create the best UX for Graph Protocol Indexers on Kubernetes. The Launchpad stack provides an opinionated set of defaults and recipes for success, but to be an advanced operator you will need to learn Kubernetes and many of the other tools in the stack. With Launchpad, you have guard rails to guide you in your journey towards mastering operating your Indexer on Kubernetes.

Please contribute back when you are able!

Quick Start

We have designed Launchpad to be modular so that you can implement the whole project or parts of it as best suits your needs. Checkout this page for more info about the modularity of Launchpad.

Make sure you have all the Prerequisites before starting.

To start, jump to the relevant section based on how you're using the project:

Using Launchpad end to end

This section takes you through steps of getting started using all aspects of the Launchpad project.

Install Taskfile

Launchpad has a large number of tooling dependencies that will run on your local machine. The most important dependency is Taskfile.

Follow the installation instructions for your environment and install Taskfile on your local machine before continuing.

Use launchpad-starter for your new infra repo

Next, we are going to create the repository that will contain your new infrastructure's configuration.

First, prepare a new empty repository to hold your infrastructure repo. This could be a new repository on GitHub, GitLab, BitBucket, etc.

Next, we're going to clone launchpad-starter, and then replace the existing origin remote with your new remote repository. This allows us to retain the commit history of launchpad-starter. A shared commit history will make future rebases against the upstream launchpad-starter much easier.

# Clone the starter into my-new-infra and cd into it
git clone https://github.com/graphops/launchpad-starter my-new-infra
cd my-new-infra

# Set your own remote as origin
git remote remove origin
git remote add origin git@github.com:you/your-infra.git

# Push to your new repo
git push origin main

All work on your infrastructure will take place in this new repo. We recommend carefully version controlling all changes you make to your infrastructure configuration.

Setup the launchpad dependencies

Next, we should install all of the local tooling dependencies (like Helm or Kubectl) that we will need.

We can easily do that by running the launchpad:update-deps command.

# You may need to use sudo for this command
task launchpad:update-deps

Connect your Local environment to your Kubernetes cluster

To connect your local machine to a Kubernetes cluster, you can follow these general steps:

Get Cluster Configuration: Make sure your kubeconfig has been added to your ~/.kube/config file. If you don't have this file, you may need to ask the administrator who created the cluster for the configuration.

Verify Configuration: Open the config file in a text editor to verify that it contains the correct cluster details, including server URL, certificates, and context information.

Switch Context if working with multiple Kubernetes clusters: A context in Kubernetes is a combination of a cluster, a user, and a namespace. Use the kubectl config use-context command to set your desired context. For example:

kubectl config use-context <context-name>
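
If you are unsure which contexts exist in your kubeconfig, you can list them first:

kubectl config get-contexts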

Test Connection: Run a simple kubectl command to test if your local machine can connect to the cluster:

kubectl get pods

This command should list the pods in the default namespace of your cluster.

Remember that each cluster might have specific setup steps or requirements, especially if it's managed by a cloud provider. Always refer to the documentation provided by the cluster administrator or the cloud provider for detailed instructions on connecting your local machine to the cluster.

🎉 Milestone: Local environment configured!

  • We now have our own private git repo containing the declarative configuration for our cluster deployments
  • We have installed all the tooling dependencies on our local machine, which will be used to control the cluster
  • Next: Copy sample.helmfile.yaml to helmfile.yaml and edit it to select which Namespaces you would like to deploy on your Kubernetes cluster

Customize your helmfiles

To get started with Helmfile, if you don’t already have a helmfile.yaml, you can begin by copying the provided sample configuration file named sample.helmfile.yaml:

cp sample.helmfile.yaml helmfile.yaml

After copying, open helmfile.yaml in your preferred text editor to make necessary modifications. Within this file, you will find a helmfiles: section which organizes deployment configurations by namespace through multiple helmfile paths:

helmfiles:
- path: namespaces/storage.yaml
- path: namespaces/sealed-secrets.yaml
- path: namespaces/postgres-operator.yaml
- path: namespaces/ingress.yaml
- path: namespaces/monitoring.yaml
- path: namespaces/eth-sepolia.yaml
- path: namespaces/eth-mainnet.yaml
- path: namespaces/arbitrum-sepolia.yaml
- path: namespaces/graph-arbitrum-sepolia.yaml

This structure allows you to manage deployments modularly. You can add or remove entries in this list to include new namespaces or exclude those you no longer need. Each path points to a specific helmfile that defines resources to be deployed within that namespace. For instance, looking at namespaces/storage.yaml:

helmfiles:
- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-stable/latest
  selectorsInherited: true
  values:
  - helmDefaults:
      <<: *helmDefaults

In the example above, values can be set to override the default configurations in a given Namespace, allowing for customization according to specific requirements. Refer to Namespaces documentation available here for more examples on how to configure them, or to see which ones are available: Namespaces.

Syncing your helmfile.yaml with the cluster

You can list all the releases present in the helmfile.yaml, and their labels, by running task releases:list:

NAME                      NAMESPACE          ENABLED  INSTALLED  LABELS                                                                                 CHART                                        VERSION
openebs                   storage            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage            openebs/openebs                              3.8.0
openebs-zfs-localpv       storage            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage            openebs-zfs-localpv/zfs-localpv              2.3.0
openebs-zfs-storageclass  storage            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage            graphops/resource-injector                   0.2.0
openebs-zfs-snapclass     storage            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:storage            graphops/resource-injector                   0.2.0
postgres-operator         postgres-operator  true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:postgres-operator  postgres-operator-charts/postgres-operator   1.10.0
ingress-nginx             ingress            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress            ingress-nginx/ingress-nginx                  4.7.1
cert-manager              ingress            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress            jetstack/cert-manager                        v1.12.3
cert-manager-resources    ingress            true     true       launchpad.graphops.xyz/layer:base,launchpad.graphops.xyz/namespace:ingress            graphops/resource-injector                   0.2.0
sealed-secrets            sealed-secrets     true     true       launchpad.graphops.xyz/namespace:sealed-secrets                                        sealed-secrets/sealed-secrets                2.1

First, update the Helmfile configuration for the base namespaces. You will likely need to configure storage and ingress settings in their respective files, namespaces/storage.yaml and namespaces/ingress.yaml, by customizing them with your specific values.

In particular, the storage namespace may be a requirement even for other base namespaces, so let's install that one first by running:

task releases:apply -- launchpad.graphops.xyz/namespace=storage

Next, let's go ahead and install all the remaining cluster services. You will be prompted to install each namespace, with a summary of changes to be made.

task releases:apply -- monitoring
task releases:apply -- storage
task releases:apply -- sealed-secrets
task releases:apply -- postgres-operator
task releases:apply -- ingress

🎉 Milestone: Kubernetes and core systems running!

  • We connected to our hosts, configured them, and installed Kubernetes
  • We installed core cluster services like Prometheus, Grafana, Loki and others
  • Next: Deploy blockchain nodes and the Graph Indexing stack
tip

You can now use task indexer:forward-grafana to securely access your remote cluster's Grafana instance at http://localhost:3001

Deploy blockchain namespaces as desired

note

If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).

Launchpad comes with Namespace definitions for a number of blockchain networks, including Ethereum Mainnet, Ethereum Sepolia Testnet, Gnosis Chain Mainnet, Polygon Mainnet, Arbitrum One, Arbitrum Sepolia, Celo Mainnet and others. Using those Namespaces, you can easily deploy blockchain nodes for the networks you want to index into your cluster.

(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia

Make sure that your helmfile.yaml includes a path directing to namespaces/arbitrum-sepolia.yaml. Afterward, carefully examine the settings within namespaces/arbitrum-sepolia.yaml to confirm they are accurate and align with your specific needs:

helmfiles:
- path: git::https://github.com/graphops/launchpad-namespaces.git@arbitrum/helmfile.yaml?ref=arbitrum-canary/latest
  selectorsInherited: true
  values:
  - flavor: sepolia
    helmDefaults:
      <<: *helmDefaults
    arbitrum-nitro:
      values:
        nitro:
          config:
            chain: 421614
            parentChainUrl: <<your-l1-chain-url>> ## if set up with default ethereum ns values this would be http://proxyd-proxyd.eth-sepolia:8545
            parentChainBeaconUrl: <<your-l1-consensus-layer-url>> ## if set up with default ethereum ns values this would be http://nimbus.eth-sepolia:5052

Deploy by syncing your cluster with the declarative helmfile.yaml:

task releases:apply -- arbitrum-sepolia

Install the Graph Arbitrum Sepolia Indexer Stack

Make sure that your helmfile.yaml includes a path directing to namespaces/graph-arbitrum-sepolia.yaml. Afterward, carefully examine the settings within namespaces/graph-arbitrum-sepolia.yaml to confirm they are accurate and align with your specific needs.

helmfiles:
- path: git::https://github.com/graphops/launchpad-namespaces.git@graph/helmfile.yaml?ref=graph-canary/latest
  selectorsInherited: true
  values:
  - helmDefaults:
      <<: *helmDefaults
    flavor: "arbitrum-sepolia"
  - graph-network-indexer:
      values:
        indexerDefaults:
          config:
            indexer-address: "<<your indexer arbitrum address>>"
        indexerAgent:
          config:
            public-indexer-url: "<<your public index URL>>"
    graph-operator-mnemonic:
      values:
        resources:
          ### RECOMMENDED, safe to commit
          sealed-secret:
            apiVersion: bitnami.com/v1alpha1
            kind: SealedSecret
            metadata:
              name: graph-operator-mnemonic
              namespace: graph-arbitrum-sepolia
            spec:
              template:
                metadata:
                  name: graph-operator-mnemonic
                  namespace: graph-arbitrum-sepolia
                type: Opaque
              encryptedData:
                mnemonic: <<your encrypted mnemonic>> # Generate a SealedSecret encryptedData key with the "utils:seal-secrets" task, e.g.: task utils:seal-secrets -- -n graph-arbitrum-sepolia -s graph-operator-mnemonic -k mnemonic -v "your mnemonic words"
    graph-database:
      values:
        resources:
          postgres-cr-primary-subgraph-data:
            spec:
              volume:
                storageClass: "<<your storage class>>"
          postgres-cr-indexer-metadata:
            spec:
              volume:
                storageClass: "<<your storage class>>"

Proceed to deploy:

task releases:apply -- graph-arbitrum-sepolia

🎉 Milestone: Graph Indexer running and accessible

  • We (optionally) configured and deployed blockchain nodes into our cluster
  • We configured and deployed the Graph Indexing stack into our cluster
  • Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests

Updates

Updating launchpad-namespace changes into your stack

As new versions of key components in the stack are released, we will update launchpad-namespaces's templated definitions and the various release streams available. You can selectively inherit these updates with ease by changing the git ref as a means to track what release stream you may want, or to pin to any particular major, minor or patch version.

following latest:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-latest, or alternatively: ?ref=storage-stable/latest. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-latest

following a specific major version:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-v1. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1

following a specific minor version:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-v1.2. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2

pinning to an exact version:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-v1.2.2. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-v1.2.2

following the latest canary:

Your ?ref= would look like this, for the storage namespace: ?ref=storage-canary/latest. The path for this Namespace, under helmfiles, would then look like:

- path: git::https://github.com/graphops/launchpad-namespaces.git@storage/helmfile.yaml?ref=storage-canary/latest

We would recommend that you either follow the latest stable releases, or pin to a specific version.

note

For full implementation details and other comprehensive notes about launchpad-namespaces please visit the github repo.

Pulling in starter changes

From time to time, you may want to update your infra repo with the latest changes from our starter.

Launchpad comes with a built-in task to do this, but it does require you to handle any rebase conflicts:

task launchpad:pull-upstream-starter
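If the task stops on rebase conflicts, they are resolved with the usual git workflow; a minimal sketch:

# inspect the conflicted files
git status
# after fixing the conflicts, stage them and continue
git add <conflicted-files>
git rebase --continue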

Using Helmfile and Launchpad Charts

This guide will cover two primary ways to deploy blockchain-related resources in Kubernetes using Launchpad charts: deploying all components at once using Helmfile and deploying individual components directly using Helm charts.

Prerequisites

Before proceeding with this guide, make sure the following tools are installed on your local machine:

  • Helm: The package manager for Kubernetes, essential for managing and deploying applications.
  • Helmfile: A tool to help streamline the use of Helm charts, enabling better management of Helm chart configurations.
  • Helm-diff: A Helm plugin that helps visualize differences between your Helmfile configurations and what is actually deployed in your cluster. This plugin is a dependency for effectively using Helmfile.
  • (Optional) Kustomize: A tool for customizing Kubernetes configurations beyond what is available with Helm, useful for more complex deployment scenarios.

This guide assumes you are familiar with basic Helm and Helmfile operations.


Deploying using Launchpad-charts directly

If you prefer to use individual components of Launchpad, such as Launchpad Charts, you can add the Launchpad Helm repository and install charts directly:

helm repo add graphops https://graphops.github.io/launchpad-charts
helm install my-release graphops/<chart-name> --values <your-values-override.yaml>
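To see which charts are available in the repository before installing, the standard Helm discovery commands apply:

helm repo update
helm search repo graphops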

Key Consideration

Before proceeding, it is important to note that most Kubernetes clusters do not come pre-configured with a Container Storage Interface (CSI) for handling storage volumes. This guide relies on the ability to create storage volumes. It is also necessary to have an Ingress controller installed and configured, as it is essential for managing traffic to and from your applications.

Deploying using Helmfile

For a comprehensive deployment, managing all related Helm releases and their values via a single Helmfile offers simplicity and maintainability. This method is particularly effective when deploying complex stacks.

Deploy blockchain namespaces as desired

note

If you have existing external blockchain nodes that you would like to use instead of deploying them into your cluster, you can skip this section, but make sure that you can access those nodes securely (e.g. via an internal network, or using HTTPS and authentication).

(optional, arbitrum-sepolia) Install Arbitrum Nitro and Proxyd for Arbitrum Sepolia

The following helmfile.yaml provides an example configuration for deploying Arbitrum Nitro on the Arbitrum Sepolia network. For an easier setup process, we recommend utilizing the Launchpad Arbitrum namespace, which includes most of the necessary configurations pre-defined for your convenience.

# helmfile.yaml
repositories:
  - name: graphops
    url: https://graphops.github.io/launchpad-charts

releases:
  - name: arbitrum-nitro
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/arbitrum-nitro
    version: 0.3.4
    values:
      - nitro:
          config:
            chain: 421614 # determines Arbitrum network - 421614 Sepolia
            parentChainUrl: http://your-eth-sepolia-url:8545 ## changeme
            parentChainBeaconUrl: http://your-eth-consensus-node-url:5052 ## changeme

          volumeClaimSpec:
            resources:
              requests:
                # -- The amount of disk space to provision for Arbitrum Nitro
                storage: 1Ti
            # -- The storage class to use when provisioning a persistent volume for Arbitrum-Nitro
            storageClassName: openebs-rawfile-localpv # change me as needed

          restoreSnapshot:
            enabled: false

          extraLabels:
            app.kubernetes.io/workload-type: blockchain-stateful
            app.kubernetes.io/blockchain: arbitrum-nitro

        # if using Prometheus for monitoring:
        prometheus:
          serviceMonitors:
            enabled: true

  - name: proxyd-nitro
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/proxyd
    version: 0.5.3
    values:
      - backends:
          arbitrum-nitro:
            enabled: true
            # -- Define the RPC URL for the backend
            rpcUrl: http://arbitrum-nitro:8547
            # -- Define the WS URL for the backend
            wsUrl: ws://arbitrum-nitro:8548
            # -- Define additional configuration keys for the backend (see [proxyd config](https://github.com/ethereum-optimism/optimism/blob/5d309e6a6d5e1ef6a88c1ce827b7e6d47f033bbb/proxyd/example.config.toml#L47))
            extraConfig:
              consensus_skip_peer_count: true
            # -- Define which backend groups the backend is part of
            groups:
              - main

        # if using Prometheus and Grafana for monitoring:
        prometheus:
          serviceMonitors:
            enabled: true

        grafana:
          dashboards: true

Deploy by syncing your cluster with the declarative helmfile.yaml:

helmfile -f path/to/helmfile.yaml sync
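Since helm-diff is installed as a prerequisite, you can also preview what a sync would change before applying it:

helmfile -f path/to/helmfile.yaml diff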

Install the Graph Arbitrum Sepolia Indexer Stack

This section of the guide does not include the setup for the subgraph-data and indexer-metadata PostgreSQL databases necessary for graph-node and indexer-agent. You are encouraged to explore managed solutions, use Bitnami's chart, or deploy Zalando's Operator, either as part of the Launchpad Namespaces (which include a ready-to-use Postgres setup) or independently.

Include the necessary configurations for graph-node and indexer-agent in your helmfile.yaml as shown in the previous sections, adjusting PostgreSQL references and other settings to fit your specific requirements.

releases:
  - name: graph-node
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/graph-node
    version: 0.5.3
    values:
      # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-node
      - graphNodeDefaults:
          env:
            # Graph Node configuration
            IPFS: "https://ipfs.network.thegraph.com"
            GRAPH_ALLOW_NON_DETERMINISTIC_FULLTEXT_SEARCH: "true"
            # Database configuration
            PRIMARY_SUBGRAPH_DATA_PGHOST: <your-subgraph-data-postgresql-host> ## change me
            PRIMARY_SUBGRAPH_DATA_PGPORT: 5432
            PRIMARY_SUBGRAPH_DATA_PGDATABASE: <your-subgraph-data-postgresql-db> ## change me

          # Database sensitive/secret information
          secretEnv:
            PRIMARY_SUBGRAPH_DATA_PGUSER:
              secretName: <your-secret-containing-subgraph-data-postgresql-username>
              key: username
            PRIMARY_SUBGRAPH_DATA_PGPASSWORD:
              secretName: <your-secret-containing-subgraph-data-postgresql-password>
              key: password

        graphNodeGroups:
          index:
            replicaCount: 1 # scale me
          query:
            replicaCount: 1 # scale me

        chains:
          mainnet:
            enabled: true
            shard: primary
            provider:
              - label: eth-mainnet
                url: <your-eth-mainnet-RPC> ## change me
                features: [archive, traces]

          arbitrum-sepolia:
            enabled: true
            shard: primary
            provider:
              - label: arbitrum-sepolia
                url: http://proxyd-proxyd.arbitrum-sepolia:8545
                features: [archive, traces]

        # if using Prometheus and Grafana for monitoring:
        prometheus:
          serviceMonitors:
            enabled: true

        grafana:
          dashboards: true
          datasources: true

  - name: graph-network-indexer
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/graph-network-indexer
    version: 0.2.5
    values:
      # This is a values.yaml override file for https://github.com/graphops/launchpad-charts/tree/main/charts/graph-network-indexer
      - indexerDefaults:
          config:
            ethereum: "http://proxyd-proxyd.arbitrum-sepolia:8545"
            ethereum-network: "arbitrum-sepolia"
            network-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia"
            graph-node-query-endpoint: "http://graph-node-query:8000"
            graph-node-status-endpoint: "http://graph-node-block-ingestor:8030/graphql"
            postgres-host: "<your-indexer-metadata-postgresql-host>" ## change me
            postgres-database: "<your-indexer-metadata-postgresql-db>" ## change me

        indexerAgent:
          config:
            collect-receipts-endpoint: "https://gateway-testnet-arbitrum.network.thegraph.com/collect-receipts"
            network-subgraph-deployment: "QmT8UDGK7zKd2u2NQZwhLYHdA4KM55QsivkE3ouCuX6fEj" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md
            epoch-subgraph-endpoint: "https://api.thegraph.com/subgraphs/name/graphprotocol/arbitrum-sepolia-ebo" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md
            epoch-subgraph-deployment: "QmTpu2mVquoMpr4SWSM77nGkU3tcUS1Bhk1sVHpjDrAUAx"
            graph-node-admin-endpoint: "http://graph-node-block-ingestor:8020"
            public-indexer-url: "<your-public-indexer-url>" ## change me
            index-node-ids: "graph-node-index-0" # if more than one graph-node index, specify as comma delimited list ie "graph-node-index-0, graph-node-index-1"

          secretEnv:
            INDEXER_AGENT_MNEMONIC:
              secretName: <your-secret-containing-your-graph-operator-mnemonic>
              key: mnemonic
            INDEXER_AGENT_POSTGRES_USERNAME:
              secretName: <your-secret-containing-indexer-metadata-postgresql-username>
              key: username
            INDEXER_AGENT_POSTGRES_PASSWORD:
              secretName: <your-secret-containing-indexer-metadata-postgresql-password>
              key: password

        indexerService:
          replicas: 1 # scale me

          config:
            client-signer-address: "0xe1EC4339019eC9628438F8755f847e3023e4ff9c" # find at https://github.com/graphprotocol/indexer/blob/main/docs/networks.md

          secretEnv:
            INDEXER_SERVICE_MNEMONIC:
              secretName: <your-secret-containing-your-graph-operator-mnemonic>
              key: mnemonic
            INDEXER_SERVICE_POSTGRES_USERNAME:
              secretName: <your-secret-containing-indexer-metadata-postgresql-username>
              key: username
            INDEXER_SERVICE_POSTGRES_PASSWORD:
              secretName: <your-secret-containing-indexer-metadata-postgresql-password>
              key: password

        # if using Prometheus and Grafana for monitoring:
        prometheus:
          serviceMonitors:
            enabled: true

        grafana:
          dashboards: true

  - name: subgraph-radio
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/subgraph-radio
    version: 0.2.8
    values:
      - env:
          GRAPH_NODE_STATUS_ENDPOINT: http://graph-node-block-ingestor:8030/graphql
          INDEXER_MANAGEMENT_SERVER_ENDPOINT: http://graph-network-indexer-agent:8000
          GRAPHCAST_NETWORK: "testnet"
          REGISTRY_SUBGRAPH: https://api.thegraph.com/subgraphs/name/hopeyen/graphcast-registry-arb-se
          NETWORK_SUBGRAPH: https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum-sepolia
        secretEnv:
          MNEMONIC:
            secretName: <your-secret-containing-your-graph-operator-mnemonic>
            key: mnemonic

  - name: graph-toolbox
    namespace: arbitrum-sepolia
    createNamespace: true
    chart: graphops/graph-toolbox
    version: 0.1.0
    values:
      - config:
          graphNode:
            # -- URL to Graph Node Admin API
            adminApiUrl: http://graph-node-block-ingestor:8020
            existingConfigMap:
              # -- The name of the ConfigMap that contains your Graph Node config.toml
              configMapName: graph-node-config
              # -- The name of the data key in the ConfigMap that contains your config.toml
              configFileKey: config.toml
          indexer:
            # -- URL to Indexer Agent Management Server
            indexerAgentManagementUrl: http://graph-network-indexer-agent:8000

        aliases:
          graphman: graphman --config /graphman-config/config.toml
          indexer: graph-indexer indexer
          psql-primary-subgraph-data: >
            PGPASSWORD=$PRIMARY_SUBGRAPH_DATA_PGPASSWORD psql -w -U $PRIMARY_SUBGRAPH_DATA_PGUSER -d "host=$PRIMARY_SUBGRAPH_DATA_PGHOST port=$PRIMARY_SUBGRAPH_DATA_PGPORT dbname=$PRIMARY_SUBGRAPH_DATA_PGDATABASE"
          psql-indexer-metadata: >
            PGPASSWORD=$INDEXER_METADATA_PGPASSWORD psql -w -U $INDEXER_METADATA_PGUSER -d "host=$INDEXER_METADATA_PGHOST port=$INDEXER_METADATA_PGPORT dbname=$INDEXER_METADATA_PGDATABASE"

        env:
          PRIMARY_SUBGRAPH_DATA_PGHOST: <your-subgraph-data-postgresql-host> ## change me
          PRIMARY_SUBGRAPH_DATA_PGPORT: 5432
          PRIMARY_SUBGRAPH_DATA_PGDATABASE: <your-subgraph-data-postgresql-db> ## change me
          INDEXER_METADATA_PGHOST: <your-indexer-metadata-postgresql-host> ## change me
          INDEXER_METADATA_PGPORT: 5432
          INDEXER_METADATA_PGDATABASE: <your-indexer-metadata-postgresql-db> ## change me

        secretEnv:
          PRIMARY_SUBGRAPH_DATA_PGUSER:
            secretName: <your-secret-containing-subgraph-data-postgresql-username> ## change me
            key: username
          PRIMARY_SUBGRAPH_DATA_PGPASSWORD:
            secretName: <your-secret-containing-subgraph-data-postgresql-password> ## change me
            key: password
          INDEXER_METADATA_PGUSER:
            secretName: <your-secret-containing-indexer-metadata-postgresql-username> ## change me
            key: username
          INDEXER_METADATA_PGPASSWORD:
            secretName: <your-secret-containing-indexer-metadata-postgresql-password> ## change me
            key: password

Proceed to deploy:

helmfile -f path/to/helmfile.yaml sync

🎉 Milestone: Graph Indexer running and accessible

Once your deployments are successfully applied, your Graph Indexer should be operational, with blockchain nodes (if deployed) and the Graph Indexing stack running in your Kubernetes cluster.

  • We (optionally) configured and deployed blockchain nodes into our cluster
  • We configured and deployed the Graph Indexing stack into our cluster
  • Next: Use the remote-toolbox to allocate to subgraphs and begin serving requests

Release Channels

Due to the intricate nature of managing indexing operations for multiple blockchains and their associated dependencies, the Launchpad project is a complex system with numerous interdependencies.

For a reminder of the various components within Launchpad and their intricate connections, we recommend revisiting our Intro.

This guide offers a comprehensive walkthrough, outlining the automated and manual steps required to introduce a new version release of an application (e.g. Erigon) into the launchpad-charts repository as a canary release, and ultimately to transition it to a stable state within its designated launchpad-namespace, such as Ethereum.

The diagram below provides a visual representation illustrating the interdependence and impact of various components and workflows.

Release Channels Flow

From new version to launchpad-namespaces stable

Below you can find a more comprehensive breakdown of the process, divided into automated workflows within launchpad-charts and launchpad-namespaces, as well as manual operator steps. This process guides the transition of a new application version from the initial launchpad-charts canary release to its eventual stability within the corresponding launchpad-namespaces. For this walkthrough we will use Erigon as an example.

launchpad-charts

  • On each run, the bot looks up Erigon tags and, upon finding a new version, opens a PR into launchpad-charts/charts/erigon
  • The new PR triggers a workflow that publishes a new pre-release into the repo.
  • Another workflow runs and adds the newly released canary chart to the canary Helm repo index

launchpad-namespaces

  • On each run, the bot checks for new chart releases and, upon finding one, pushes an update branch and opens a new PR to namespaces
  • The bot runs again, auto-merges the PR and creates a tag
  • A workflow runs and updates the semver tags

operator

  • Tests the new canary chart release to verify it is working properly; if it is, adds a commit to the PR to set the stable chart release version. Following the merge of this PR, the new stable chart release is automatically issued in draft mode. This step provides the operator with an opportunity to review and manually publish the final release, ensuring precise control and quality assurance in the deployment process.
  • Runs task releases:apply -- eth-sepolia, which should pick up changes from the latest ethereum canary tag containing the new erigon canary chart version (after renovate has run and picked those up, which it does in 15m intervals).
  • If the previous task runs successfully and workloads appear healthy, the operator updates their helmfile reference to ethereum-canary/latest for the eth-mainnet namespace and runs task releases:apply -- eth-mainnet.
  • If task releases:apply -- eth-mainnet succeeds and all workloads are healthy, the operator manually tags the ethereum namespace as stable.
note

Manually tagging a namespace as stable is an intentional process. Our aim is to ensure that workloads undergo comprehensive testing before being tagged as stable, which signals to users that they are ready to run on mainnet.

Alongside the ability to choose between canary or stable releases based on user risk preferences, we've also enabled the capability to manually override a specific chart version during namespace deployment.

- path: git::https://github.com/graphops/launchpad-namespaces.git@ethereum/helmfile.yaml?ref=ethereum-canary/latest
  selectorsInherited: true
  values:
    - helmDefaults:
        <<: *helmDefaults
      flavor: "sepolia"
      erigon:
        chartVersion: "0.8.1" # to override the chart version the namespace is set up with
        values:
          statefulNode:
            jwt:
              existingSecret:
                name: jwt
                key: jwt
      nimbus:
        values:
          nimbus:
            jwt:
              existingSecret:
                name: jwt
                key: jwt

Similarly to being able to override chartVersion, users have the ability to override chartUrl to specify a self-maintained chart, or a chart maintained by a different organisation.
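As a sketch, and assuming chartUrl sits alongside chartVersion in the namespace values (the URL below is purely hypothetical), such an override could look like:

erigon:
  chartUrl: "https://your-org.example.com/charts/erigon-0.8.1.tgz" # hypothetical URL to a self-maintained chart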


Supported Namespaces

Launchpad includes a number of prepackaged Kubernetes namespaces (see Launchpad Namespaces repo), which in turn reference Helm Charts in the Launchpad Charts repository, as well as third-party Charts. GraphOps maintains support for these namespaces, meaning that we:

  • Track upstream releases and test them
  • Move these releases through canary and stable release channels for both launchpad-charts and launchpad-namespaces
  • Evolve the Launchpad stack to meet the evolving operational needs of these applications
  • Offer support for operators experiencing challenges with these namespaces

This strategy is rooted in GraphOps' active usage of these namespaces and the applications within them. For more details on how a new application makes it from a canary release all the way to a stable launchpad-namespace, please check out our release-channels guide.

We welcome third-party contributors to add support for additional namespaces and applications.

Using custom releases and deploying sets of applications not defined in launchpad-namespaces

Launchpad's architecture is designed to be highly flexible and does not constrain you to deploying launchpad-namespaces.

To incorporate releases not covered within a namespace, you can utilize the helmfile.yaml that you generated during the Quick Start process.

For instance, if you required the implementation of kafka-operator for specific workloads, you would add the following code to the repositories and releases sections:

repositories:
  - name: strimzi
    url: https://strimzi.io/charts/

releases:
  - name: strimzi
    namespace: kafka
    createNamespace: true
    chart: strimzi/strimzi-kafka-operator
    missingFileHandler: Warn
    values:
      - watchAnyNamespace: true
note

If you're considering the integration of a blockchain that currently falls outside the scope of Launchpad's Supported Namespaces, it's worth noting that including a new release in your helmfile.yaml might require the extra step of creating a custom Helm Chart. While certain publicly available charts (e.g. Teku, Lighthouse) might be regularly maintained by external contributors, you might encounter cases where other charts are not readily supported.


Arbitrum Archive Mainnet Node Guide

Introduction

This guide provides an end-to-end walkthrough for setting up an Indexer on the Graph Protocol Mainnet for the Arbitrum One network. It details the steps for deploying both Arbitrum Classic and Arbitrum Nitro.

Architecture Overview

Arbitrum Nitro includes a built-in proxy that automatically redirects queries for blocks older than its genesis to the Arbitrum Classic node.

Setup Environment

This guide assumes operation within a Kubernetes cluster:

  • For setups using Launchpad, follow the steps outlined here.
  • For setups using Helm only, refer to the instructions here.

Prerequisites

Before you begin, ensure you have the following:

  • An ethereum-mainnet RPC endpoint.
  • CPU: 4 Cores / 8 Threads.
  • RAM: 16 GiB.
  • Storage: 3 TiB NVMe SSD.

Kubernetes Cluster Using Launchpad

Ensure all Launchpad Prerequisites are met before proceeding.

Initial Configuration

  1. Confirm your cluster is operational by consulting our Quick Start guide.
  2. In your private infra repo, pull in the latest launchpad-starter changes:
task launchpad:pull-upstream-starter

Data Restoration and Configuration

  1. Blockchain node data snapshot: The Arbitrum-One namespace contains default configurations for both Arbitrum Classic and Arbitrum Nitro to download data from a snapshot. The following snapshots are set by default:

# Arbitrum Classic
restoreSnapshot:
  enabled: true
  snapshotUrl: https://snapshot.arbitrum.foundation/arb1/classic-archive.tar

# Arbitrum Nitro
restoreSnapshot:
  enabled: true
  snapshotUrl: https://snapshot.arbitrum.foundation/arb1/nitro-archive.tar

You can overwrite both of these values in <your-private-copy-of-launchpad-starter>/namespaces/arbitrum-one.yaml, for example:
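A minimal sketch of such an override, assuming the same values nesting as elsewhere in this guide and using placeholder snapshot URLs:

arbitrum-classic:
  values:
    arbitrum:
      restoreSnapshot:
        enabled: true
        snapshotUrl: https://your-snapshot-mirror.example.com/classic-archive.tar # hypothetical mirror
arbitrum-nitro:
  values:
    nitro:
      restoreSnapshot:
        enabled: true
        snapshotUrl: https://your-snapshot-mirror.example.com/nitro-archive.tar # hypothetical mirror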

  2. Connect to an eth-mainnet RPC node: Both Arbitrum Classic and Arbitrum Nitro connect to L1, and recent versions of Arbitrum Nitro require a connection to a beacon chain RPC:

arbitrum-classic:
  values:
    arbitrum:
      config:
        parentChainUrl: http://your-eth-mainnet-url:8545 ## changeme
arbitrum-nitro:
  values:
    nitro:
      config:
        parentChainUrl: http://your-eth-mainnet-url:8545 ## changeme
        parentChainBeaconUrl: http://your-eth-consensus-node-url:5052 ## changeme
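With the configuration in place, deploy by syncing your cluster, assuming the namespace file is named arbitrum-one:

task releases:apply -- arbitrum-one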

Deploying with helm in a Kubernetes Cluster Outside Launchpad

You can find blockchain-related Helm packages here.

Deploy Arbitrum Classic

We'll first deploy Arbitrum Classic as Arbitrum Nitro needs to connect to the Classic endpoint.

  1. Prepare your configuration file, arbitrum-classic.yaml, with the necessary RPC URL and optional snapshot URL:

arbitrum:
  config:
    parentChainUrl: http://your-eth-mainnet-url:8545 ## changeme
  restoreSnapshot:
    enabled: true
    snapshotUrl: https://a-link-to-your-snapshot-archive.tar.gz ## specify only if overriding default
  2. Add the Helm repository and deploy:

helm repo add graphops https://graphops.github.io/launchpad-charts
helm install --dry-run arbitrum-classic graphops/arbitrum-classic --namespace arbitrum-one --values arbitrum-classic.yaml

Deploy Arbitrum Nitro

  1. Prepare your configuration file, arbitrum-nitro.yaml, with the necessary RPC URLs, classic node URL and optional snapshot URL:

nitro:
  config:
    parentChainUrl: http://your-eth-mainnet-url:8545 ## changeme
    parentChainBeaconUrl: http://your-eth-consensus-node-url:5052 ## changeme
    classicUrl: http://arbitrum-classic:8547/ # replace `arbitrum-classic` with the name of your arbitrum-classic release deployed at the previous step
  restoreSnapshot:
    enabled: true
    snapshotUrl: https://a-link-to-your-snapshot-archive.tar.gz ## specify only if overriding default
  2. Deploy using helm:

helm install --dry-run arbitrum-nitro graphops/arbitrum-nitro --namespace arbitrum-one --values arbitrum-nitro.yaml

Celo Archive Mainnet Node Guide

Introduction

This guide is intended to be an end-to-end walkthrough of running a Celo Archive Mainnet Node in an existing Kubernetes cluster.

Sync Duration

Sync times are reported to be in the range of 4 days on dedicated hardware.

Setup Environment

This guide assumes operation within a Kubernetes cluster:

  • For setups using Launchpad, follow the steps outlined here.
  • For setups using Helm only, refer to the instructions here.

Prerequisites

For a Celo workload you will need:

  • CPU: 4 Cores / 8 Threads
  • RAM: 16 GiB
  • Storage: 3 TiB NVMe SSD

If running a Kubernetes cluster using Launchpad

All the Launchpad Prerequisites apply if running a Kubernetes cluster using Launchpad, so be sure to read them first. This guide can be used with existing Kubernetes clusters as well.

  1. Confirm your cluster is operational by consulting our Quick Start guide.

  2. In your private infra repo, pull in the latest launchpad-starter changes:

task launchpad:pull-upstream-starter
  3. Check default values and update as needed in <your-private-copy-of-launchpad-starter>/helmfiles/namespaces/celo-mainnet.yaml

  4. Deploy the celo-mainnet namespace:

task releases:apply -- celo-mainnet

Deploying with helm in a Kubernetes cluster outside Launchpad

You can find blockchain-related Helm packages here.

  1. Prepare your configuration file, celo-mainnet.yaml, to override chart default values as necessary. Example:
celo:
  extraArgs:
    - --verbosity 3
    - --syncmode full
    - --gcmode archive
    - --txlookuplimit=0
    - --cache.preimages
    - --http.corsdomain=*
    - --ws # enable ws
    - --http.api=eth,net,web3,debug,admin,personal
  2. Add the Helm repository and deploy:

helm repo add graphops https://graphops.github.io/launchpad-charts
helm install --dry-run celo graphops/celo --namespace celo-mainnet --values celo-mainnet.yaml

Deploying a Monitoring stack with HA

Prerequisites

  • A fully functional working Kubernetes cluster
  • Two object storage buckets: one for Logs data, used by Loki, and one for Metrics data, used by Thanos

Configuring Loki for HA

Launchpad uses the loki-distributed release for setting up Loki, which can be configured according to its values interface (as seen here).

Note: The example setups we'll show will be based on an architecture that makes use of the following components: querier, distributor, ingester, queryFrontend, gateway, compactor, ruler, indexGateway. Different architectures are possible so adjust to your needs as necessary.

For an HA setup, deploying several components with multiple replicas each, loki-distributed values can be set like in the following example snippet:

querier:
  replicas: 2
  maxUnavailable: 1
distributor:
  replicas: 3
  maxUnavailable: 2
ingester:
  replicas: 3
  maxUnavailable: 2
queryFrontend:
  replicas: 2
  maxUnavailable: 1
gateway:
  replicas: 2
  maxUnavailable: 1
compactor:
  kind: Deployment
  replicas: 1
  enabled: true
ruler:
  enabled: true
  replicas: 2
  maxUnavailable: 1
indexGateway:
  enabled: true
  replicas: 2
  maxUnavailable: 1
loki:
  structuredConfig:
    ruler:
      ring:
        kvstore:
          store: memberlist
    ingester:
      lifecycler:
        ring:
          replication_factor: 2

Note: If you use a compactor, only one will run at a time, and it's not critical, so you don't really need more than one instance of it.

Besides increasing the number of replicas, the ingester replication_factor is of particular relevance, as the Distributor will distribute the write load to multiple ingesters and will require a quorum of them (replication_factor / 2 + 1, using integer division; e.g. 2 of 3 ingesters when replication_factor is 3) to have acknowledged the write. For lowering the chances of losing logs, a replication_factor of at least two should be used (Loki's default is 3).

Loki's storage fundamentally requires object storage, regardless of whether HA is used or if there's more than one replica for any component, as multiple components need to share this storage.

Object storage can be setup as shown in the following snippet:

loki:
  structuredConfig:
    storage_config:
      tsdb_shipper:
        active_index_directory: /var/loki/data/tsdb-index
        cache_location: /var/loki/data/tsdb-cache
        index_gateway_client:
          # only applicable if using microservices where index-gateways are independently deployed.
          # This example is using kubernetes-style naming.
          server_address: dns:///loki-loki-distributed-index-gateway.monitoring.svc.cluster.local:9095
        shared_store: s3
      aws:
        bucketnames: <<bucket>>
        endpoint: <<endpoint>>
        region: <<region>>
        access_key_id: "${S3_ACCESS_KEY_ID}"
        secret_access_key: "${S3_SECRET_ACCESS_KEY}"
        insecure: false
        sse_encryption: false
        s3forcepathstyle: true
    schema_config:
      configs:
        # New TSDB schema below
        - from: "2024-01-01"
          index:
            period: 24h
            prefix: index_
          object_store: s3
          schema: v12
          store: tsdb
    query_scheduler:
      # the TSDB index dispatches many more, but each individually smaller, requests.
      # We increase the pending request queue sizes to compensate.
      max_outstanding_requests_per_tenant: 32768
    querier:
      max_concurrent: 16
    compactor:
      shared_store: s3
querier:
  extraArgs:
    - -config.expand-env=true
  extraEnv:
    - name: S3_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_ACCESS_KEY_ID
    - name: S3_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_SECRET_ACCESS_KEY
distributor:
  extraArgs:
    - -config.expand-env=true
  extraEnv:
    - name: S3_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_ACCESS_KEY_ID
    - name: S3_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_SECRET_ACCESS_KEY
ingester:
  extraArgs:
    - -config.expand-env=true
  extraEnv:
    - name: S3_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_ACCESS_KEY_ID
    - name: S3_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_SECRET_ACCESS_KEY
compactor:
  extraArgs:
    - -config.expand-env=true
  extraEnv:
    - name: S3_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_ACCESS_KEY_ID
    - name: S3_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: <<bucket-secret>>
          key: S3_SECRET_ACCESS_KEY

Here we are setting up object storage in the structuredConfig section, but to keep the credentials secret we add env vars (sourced from secrets) to several components, together with an extra command line argument, -config.expand-env=true, which makes it possible to use env vars in the structuredConfig section. With that argument, the components will replace values such as ${S3_ACCESS_KEY_ID} with the corresponding env var value when processing the config.

Besides setting up object storage, we're also configuring the TSDB index schema as a substitute for the default boltdb-shipper; it is a more recent and more efficient alternative. Doing so is not mandatory, but recommended.

Putting it all together and adding a few more standard options such as persistence (PVC) to some components, and enabling ServiceMonitor and Prometheus Rules, a Launchpad Monitoring namespace helmfile.yaml Loki config could look like:

helmfiles:
  - path: git::https://github.com/graphops/launchpad-namespaces.git@monitoring/helmfile.yaml?ref=monitoring-stable/latest
    selectorsInherited: true
    values:
      - features: [metrics, logs]
        loki:
          values:
            loki:
              structuredConfig:
                ingester:
                  # Disable chunk transfer which is not possible with statefulsets
                  # and unnecessary for boltdb-shipper
                  max_transfer_retries: 0
                  chunk_idle_period: 1h
                  chunk_target_size: 1536000
                  max_chunk_age: 1h
                storage_config:
                  tsdb_shipper:
                    active_index_directory: /var/loki/data/tsdb-index
                    cache_location: /var/loki/data/tsdb-cache
                    index_gateway_client:
                      # only applicable if using microservices where index-gateways are independently deployed.
                      # This example is using kubernetes-style naming.
                      server_address: dns:///loki-loki-distributed-index-gateway.monitoring.svc.cluster.local:9095
                    shared_store: s3
                  aws:
                    bucketnames: <<bucket>>
                    endpoint: <<endpoint>>
                    region: <<region>>
                    access_key_id: "${S3_ACCESS_KEY_ID}"
                    secret_access_key: "${S3_SECRET_ACCESS_KEY}"
                    insecure: false
                    sse_encryption: false
                    s3forcepathstyle: true
                schema_config:
                  configs:
                    # New TSDB schema below
                    - from: "2024-01-01"
                      index:
                        period: 24h
                        prefix: index_
                      object_store: s3
                      schema: v12
                      store: tsdb
                query_scheduler:
                  # the TSDB index dispatches many more, but each individually smaller, requests.
                  # We increase the pending request queue sizes to compensate.
                  max_outstanding_requests_per_tenant: 32768
                querier:
                  # Each `querier` component process runs a number of parallel workers to process queries simultaneously.
                  # You may want to adjust this up or down depending on your resource usage
                  # (more available cpu and memory can tolerate higher values and vice versa),
                  # but we find the most success running at around `16` with tsdb
                  max_concurrent: 16
                compactor:
                  shared_store: s3
                ruler:
                  ring:
                    kvstore:
                      store: memberlist
                  rule_path: /tmp/loki/scratch
                  alertmanager_url: http://kube-prometheus-stack-alertmanager:9093
                  external_url: <<your alertmanager external URL>>

            querier:
              replicas: 2
              maxUnavailable: 1
              extraArgs:
                - -config.expand-env=true
              extraEnv:
                - name: S3_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_ACCESS_KEY_ID
                - name: S3_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_SECRET_ACCESS_KEY
            distributor:
              replicas: 3
              maxUnavailable: 2
              extraArgs:
                - -config.expand-env=true
              extraEnv:
                - name: S3_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_ACCESS_KEY_ID
                - name: S3_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_SECRET_ACCESS_KEY
            ingester:
              replicas: 3
              maxUnavailable: 2
              persistence:
                enabled: true
                inMemory: false
                claims:
                  - name: data
                    size: 10Gi
              extraArgs:
                - -config.expand-env=true
              extraEnv:
                - name: S3_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_ACCESS_KEY_ID
                - name: S3_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_SECRET_ACCESS_KEY
            queryFrontend:
              replicas: 2
              maxUnavailable: 1
            gateway:
              replicas: 2
              maxUnavailable: 1
            compactor:
              kind: Deployment
              replicas: 1
              enabled: true
              extraArgs:
                - -config.expand-env=true
              extraEnv:
                - name: S3_ACCESS_KEY_ID
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_ACCESS_KEY_ID
                - name: S3_SECRET_ACCESS_KEY
                  valueFrom:
                    secretKeyRef:
                      name: <<bucket-secret>>
                      key: S3_SECRET_ACCESS_KEY
            ruler:
              enabled: true
              replicas: 2
              maxUnavailable: 1
            indexGateway:
              enabled: true
              replicas: 2
              maxUnavailable: 1
            serviceMonitor:
              enabled: true
            prometheusRule:
              enabled: true
              namespace: monitoring

We've also set up the Ruler with Alertmanager's endpoint (Alertmanager can be deployed by the Monitoring Namespace as well, as will be seen in the Metrics section).

Setting up a Prometheus Stack with HA

Thanos

For an HA Prometheus Stack we'll need Thanos, which is not yet part of the Monitoring Namespace, so we'll start by going over how to deploy it with Launchpad.

Thanos requires object storage, so a bucket (and credentials) will be needed. To deploy Thanos we're going to use Bitnami's thanos chart, and we'll deploy it with Launchpad as in the following example helmfile:

repositories:
  - name: bitnami
    url: https://charts.bitnami.com/bitnami

releases:
  - name: thanos
    namespace: monitoring
    createNamespace: true
    chart: bitnami/thanos
    version: ~12.20
    missingFileHandler: Warn
    values:
      - existingObjstoreSecret: <<thanos-objstore-secret>>
        query:
          replicaCount: 2
          dnsDiscovery:
            sidecarsService: "prometheus-operated"
            sidecarsNamespace: "monitoring"
          replicaLabel:
            - prometheus_replica
        queryFrontend:
          enabled: true
          replicaCount: 2
        compactor:
          enabled: true
          persistence:
            enabled: true
          retentionResolutionRaw: 30d
          retentionResolution5m: 30d
          retentionResolution1h: 10y
        storegateway:
          enabled: true
          replicaCount: 2
          persistence:
            enabled: true
        metrics:
          enabled: true
          serviceMonitor:
            enabled: true
          prometheusRule:
            enabled: true

Warning: Never try to run more than one instance of compactor. If your object storage does not support locking, it will lead to error states.

Here we added the bitnami repository and a release that deploys the Thanos chart from it. From the values used in this example, notice the query.dnsDiscovery and query.replicaLabel keys, as those values need to match the ones used by the Thanos Prometheus sidecar, deployed in the kube-prometheus-stack release with the Monitoring Namespace.

There is one extra thing needed for Thanos: a secret with the bucket credentials, referred to previously as <<thanos-objstore-secret>>. That secret needs to have a key called objstore.yml, and its value should be YAML with keys like these:

type: S3
config:
  endpoint: <<endpoint>>
  bucket: <<bucket name>>
  bucket_lookup_type: path
  insecure: false
  access_key: <<access_key>>
  secret_key: <<secret_key>>

bucket_lookup_type can be auto, path or virtual_host; you would want to use path for Ceph Object Storage. You can check all the available options here.
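If you are not using Sealed Secrets, such a secret can also be created directly with kubectl, assuming the objstore.yml file above is saved in your working directory:

kubectl create secret generic thanos-objstore-secret --from-file=objstore.yml=./objstore.yml --namespace monitoring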

Adding a secret like that with Launchpad and Sealed Secrets can be done with a release like so:

releases:
  - name: thanos-objstore-secret
    namespace: monitoring
    chart: graphops/resource-injector
    values:
      - resources:
          thanos-objstore-secret:
            apiVersion: bitnami.com/v1alpha1
            kind: SealedSecret
            metadata:
              name: thanos-objstore-secret
              namespace: monitoring
            spec:
              encryptedData:
                objstore.yml: <<SealedSecrets Encrypted Data>>

The last remaining required Thanos component, the Prometheus sidecar, will be deployed with kube-prometheus-stack, so keep reading.

Prometheus Stack

There are three components we want to focus on: Prometheus, Grafana and Alertmanager. We'll start with adjusting Alertmanager's config for HA, which is the simplest:

alertmanager:
  alertmanagerSpec:
    replicas: 3

This will change our setup to a 3-replica Alertmanager, and that's all that's required.

For Grafana we have the added requirement of changing from the default embedded SQLite database to a shared database like Postgres.

So let's start by adding a release to create a Postgres database with the Postgres-Operator (from the Launchpad Postgres-Operator Namespace). As an example:

releases:
  - name: grafana-database
    namespace: monitoring
    createNamespace: true
    chart: graphops/resource-injector
    version: 0.2.0
    missingFileHandler: Warn
    values:
      - resources:
          grafana-database:
            apiVersion: "acid.zalan.do/v1"
            kind: postgresql
            metadata:
              name: grafana-database
            spec: # the postgresql CRD nests the fields below under spec
              teamId: "pg"
              numberOfInstances: 2
              users:
                grafana:
                  - superuser
                  - createdb
              enableMasterLoadBalancer: false
              enableReplicaLoadBalancer: false
              enableConnectionPooler: false
              enableReplicaConnectionPooler: false
              databases:
                grafana: grafana
              postgresql:
                version: "15"
                parameters: {}
              volume:
                size: 1Gi
                storageClass: <<your_storage_class>>
              resources:
                requests:
                  cpu: 250m
                  memory: 1Gi
                limits:
                  cpu: 1000m
                  memory: 4Gi
              patroni:
                initdb:
                  encoding: "UTF8"
                  locale: "C"
                pg_hba:
                  - local all all trust
                  - hostssl all +zalandos 127.0.0.1/32 pam
                  - host all all 127.0.0.1/32 md5
                  - hostssl all +zalandos ::1/128 pam
                  - host all all ::1/128 md5
                  - local replication standby trust
                  - hostssl replication standby all md5
                  - hostnossl all all all md5
                  - hostssl all +zalandos all pam
                  - hostssl all all all md5
              podAnnotations:
                coa.zalan.do/auto-create-database: "true"

Having that database, adjusting the Grafana values setup can be achieved like so:

grafana:
  replicas: 2
  envValueFrom:
    DATABASE_PASSWORD:
      secretKeyRef:
        name: grafana.grafana-database.credentials.postgresql.acid.zalan.do
        key: password
  sidecar:
    datasources:
      url: http://thanos-query-frontend:9090
      createPrometheusReplicasDatasources: false
  grafana.ini:
    database:
      type: postgres
      host: grafana-database.monitoring.svc:5432
      name: grafana
      user: grafana
      password: "$__env{DATABASE_PASSWORD}"

Finally, we need to adjust Prometheus to increase replicas and use Thanos sidecar.

A workable set of values for accomplishing that looks like:

prometheus:
  prometheusSpec:
    replicas: 2
    shards: 1
    thanos:
      objectStorageConfig:
        existingSecret:
          name: <<thanos-objstore-secret>>
          key: objstore.yml
    replicaExternalLabelName: prometheus_replica
  thanosService:
    enabled: true
  thanosServiceMonitor:
    enabled: true

Notice the sidecar will be configured to use the same secret provisioned before for Thanos, and the replicaExternalLabelName matches the value used before as well.

Taking all of this together, here's an example of a helmfile that deploys Thanos and kube-prometheus-stack, setting the most important values for HA:

repositories:
  - name: bitnami
    url: https://charts.bitnami.com/bitnami

helmfiles:
  - path: git::https://github.com/graphops/launchpad-namespaces.git@monitoring/helmfile.yaml?ref=monitoring-stable/latest
    selectorsInherited: true
    values:
      - helmDefaults:
          <<: *helmDefaults
        features: [metrics, logs]
        kube-prometheus-stack:
          values:
            kube-prometheus-stack:
              alertmanager:
                alertmanagerSpec:
                  replicas: 3
              grafana:
                replicas: 2
                envValueFrom:
                  DATABASE_PASSWORD:
                    secretKeyRef:
                      name: grafana.grafana-database.credentials.postgresql.acid.zalan.do
                      key: password
                sidecar:
                  datasources:
                    url: http://thanos-query-frontend:9090
                    createPrometheusReplicasDatasources: false
                grafana.ini:
                  database:
                    type: postgres
                    host: grafana-database.monitoring.svc:5432
                    name: grafana
                    user: grafana
                    password: "$__env{DATABASE_PASSWORD}"
              prometheus:
                prometheusSpec:
                  replicas: 2
                  shards: 1
                  thanos:
                    objectStorageConfig:
                      existingSecret:
                        name: <<thanos-objstore-secret>>
                        key: objstore.yml
                  replicaExternalLabelName: prometheus_replica
                thanosService:
                  enabled: true
                thanosServiceMonitor:
                  enabled: true

releases:
  - name: thanos-objstore-secret
    namespace: monitoring
    chart: graphops/resource-injector
    values:
      - resources:
          thanos-objstore-secret:
            apiVersion: bitnami.com/v1alpha1
            kind: SealedSecret
            metadata:
              name: thanos-objstore-secret
              namespace: monitoring
            spec:
              encryptedData:
                objstore.yml: <<SealedSecrets Encrypted Data>>

  - name: thanos
    namespace: monitoring
    createNamespace: true
    chart: bitnami/thanos
    version: ~12.20
    missingFileHandler: Warn
    values:
      - existingObjstoreSecret: <<thanos-objstore-secret>>
        query:
          replicaCount: 2
          dnsDiscovery:
            sidecarsService: "prometheus-operated"
            sidecarsNamespace: "monitoring"
          replicaLabel:
            - prometheus_replica
        queryFrontend:
          enabled: true
          replicaCount: 2
        compactor:
          enabled: true
          persistence:
            enabled: true
          retentionResolutionRaw: 30d
          retentionResolution5m: 30d
          retentionResolution1h: 10y
        storegateway:
          enabled: true
          replicaCount: 2
          persistence:
            enabled: true
        metrics:
          enabled: true
          serviceMonitor:
            enabled: true
          prometheusRule:
            enabled: true

  - name: grafana-database
    namespace: monitoring
    createNamespace: true
    chart: graphops/resource-injector
    version: 0.2.0
    missingFileHandler: Warn
    values:
      - resources:
          grafana-database:
            apiVersion: "acid.zalan.do/v1"
            kind: postgresql
            metadata:
              name: grafana-database
            spec: # the postgresql CRD nests the fields below under spec
              teamId: "pg"
              numberOfInstances: 2
              users:
                grafana:
                  - superuser
                  - createdb
              enableMasterLoadBalancer: false
              enableReplicaLoadBalancer: false
              enableConnectionPooler: false
              enableReplicaConnectionPooler: false
              databases:
                grafana: grafana
              postgresql:
                version: "15"
                parameters: {}
              volume:
                size: 1Gi
                storageClass: <<your_storage_class>>
              resources:
                requests:
                  cpu: 250m
                  memory: 1Gi
                limits:
                  cpu: 1000m
                  memory: 4Gi
              patroni:
                initdb:
                  encoding: "UTF8"
                  locale: "C"
                pg_hba:
                  - local all all trust
                  - hostssl all +zalandos 127.0.0.1/32 pam
                  - host all all 127.0.0.1/32 md5
                  - hostssl all +zalandos ::1/128 pam
                  - host all all ::1/128 md5
                  - local replication standby trust
                  - hostssl replication standby all md5
                  - hostnossl all all all md5
                  - hostssl all +zalandos all pam
                  - hostssl all all all md5
              podAnnotations:
                coa.zalan.do/auto-create-database: "true"
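Assuming this helmfile is wired into your stack as the monitoring namespace file, deploying follows the same pattern as the other namespaces; alternatively, you can sync it directly with helmfile -f path/to/helmfile.yaml sync:

task releases:apply -- monitoring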


Overview of High Availability in PostgreSQL

One of the prerequisites of running an indexer stack is currently using PostgreSQL as a database for storing indexer metadata and subgraph data. To ensure redundancy of data and operations, and to enable systems to continue functioning despite individual component failures, we want to account for the following areas as they relate to running PostgreSQL:

  • Automatic Failover: the ability to automatically switch operations to standby replicas if the primary database server fails, ensuring service continuity.

  • Data Integrity and Consistency: the ability to maintain data integrity and consistency across primary and replica servers, even in failover scenarios, through WAL and Transaction Management

  • Scalability: the ability to support scalability, allowing databases to handle increased loads by distributing read queries across replicas.

  • Disaster Recovery: planning for data backups, restores and PITR (point-in-time recovery), ensuring that data is replicated to geographically diverse locations, protecting against site-wide failures.

  • Monitoring and Health Checks: continuous monitoring of database health and performance metrics to detect and address issues before they lead to downtime.

This guide takes an indexer through the different steps needed to configure HA in PostgreSQL, utilising the postgres-operator and graph namespaces as starting points.

Prerequisites

  • A fully functional working Kubernetes cluster.
  • Object storage buckets for WAL (Write-Ahead Logs) archiving and base backups.

Configuring Postgresql with Zalando's Operator

Launchpad leverages the Zalando postgres-operator for seamless creation and management of PostgreSQL databases within Kubernetes, facilitating highly-available clusters with Patroni.

Following the deployment of the postgres-operator namespace, you're set to initiate PostgreSQL database creation.

Scalability

The graph namespace is preconfigured to deploy one PostgreSQL replica for subgraph-data and one for indexer-metadata. To scale the number of replicas, simply modify the numberOfInstances attribute in your Helmfile. Example:

graph-database:
  values:
    resources:
      postgres-cr-primary-subgraph-data:
        spec:
          numberOfInstances: 3

This configuration initiates a primary instance for handling writes and reads, alongside two read-only replicas. The failover protocol, orchestrated by Patroni, promotes a replica to the primary role in the event of the primary node's failure.

Given that graph-node query nodes require write permissions to run SQL migrations, the intended way to use read-only replicas with graph-node is the following:

store:
  primary:
    enabled: true
    connection: "postgresql://${PRIMARY_SUBGRAPH_DATA_PGUSER}:${PRIMARY_SUBGRAPH_DATA_PGPASSWORD}@${PRIMARY_SUBGRAPH_DATA_PGHOST}:${PRIMARY_SUBGRAPH_DATA_PGPORT}/${PRIMARY_SUBGRAPH_DATA_PGDATABASE}"
    weight: 0
  "primary.replicas.repl1":
    enabled: true
    connection: "postgresql://${PRIMARY_SUBGRAPH_DATA_PGUSER}:${PRIMARY_SUBGRAPH_DATA_PGPASSWORD}@${PRIMARY_SUBGRAPH_DATA_PGHOST_REPL}:${PRIMARY_SUBGRAPH_DATA_PGPORT}/${PRIMARY_SUBGRAPH_DATA_PGDATABASE}"
    weight: 1

The above ensures that write requests will be handled by the primary instance and read requests will be handled by replica instances.
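For reference, the Zalando operator exposes replicas through a dedicated -repl Service alongside the primary Service, so the replica host env var could be set along these lines (assuming a cluster named primary-subgraph-data running in a namespace named graph):

env:
  PRIMARY_SUBGRAPH_DATA_PGHOST_REPL: primary-subgraph-data-repl.graph.svc.cluster.local # assumed cluster and namespace names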

danger

Before setting up your graph-node query nodes with read-only replicas, beware that there is an ongoing issue that makes query results non-deterministic, because the read replicas can trail behind the main database, usually by up to 30s.

The data between servers is replicated through WAL (Write-Ahead Log) streaming replication, which allows changes to be streamed in real time or near real time to replicas. By first recording changes in the WAL instead of directly applying every change to the disk immediately, PostgreSQL prioritises data safety, enforces atomic writes and minimizes I/O operations. Another benefit of using WAL is that in case of system failure, the database can be rebuilt from the latest available base backup by replaying the WAL files. Note that a base backup is a full copy of the database cluster's data files, taken at a specific point in time, which serves as a starting point for both recovery and replication processes.

Implementing WAL Archiving and Base Backups

By archiving the WAL data we can support reverting to any time instant covered by the available WAL data: we simply install a prior base backup of the database, and replay the WAL just as far as the desired time.

While archive_mode is enabled by default, it requires specific configurations to be functional, including setting AWS_ENDPOINT and providing valid bucket credentials. Verify archive_mode using patronictl edit-config within your database pod (an example command follows the note below). To configure AWS_ENDPOINT for archiving and backups, create a postgres-env-config ConfigMap:

kind: ConfigMap
apiVersion: v1
metadata:
  name: postgres-env-config
  namespace: postgres-operator # any namespace can be used
data:
  AWS_ENDPOINT: http://your-object-storage-endpoint.com
  AWS_S3_FORCE_PATH_STYLE: "true" # needed if your object storage solution uses path style bucket naming convention instead of DNS ie. Ceph
  USE_WALG_BACKUP: "true"
  USE_WALG_RESTORE: "true"
  WALG_DISABLE_S3_SSE: "true"
  BACKUP_NUM_TO_RETAIN: "4"
  BACKUP_SCHEDULE: "00 02 * * sun"
  WAL_BUCKET_SCOPE_PREFIX: ""
  WAL_BUCKET_SCOPE_SUFFIX: ""
note

When using object storage for WAL archiving, by default the PostgreSQL Operator expects the bucket endpoint to follow the DNS-style naming convention. If you're using object storage that follows the path-style naming convention for buckets (i.e. Ceph), you need to pass AWS_S3_FORCE_PATH_STYLE: "true" to the postgres-operator configmap.
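For example, to run that archive_mode check you can exec into one of the database pods (the pod name below assumes a cluster named primary-subgraph-data):

kubectl exec -it primary-subgraph-data-0 -n <your-namespace> -- patronictl edit-config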

Ensure your databases are equipped with the necessary credentials for WAL file and base backup storage:

graph-database:
  values:
    resources:
      postgres-cr-primary-subgraph-data:
        spec:
          env:
            - name: WAL_S3_BUCKET
              value: <name-of-your-bucket>
            - name: AWS_ACCESS_KEY_ID
              valueFrom:
                secretKeyRef:
                  name: subgraph-database-bucket-secret
                  key: AWS_ACCESS_KEY_ID
            - name: AWS_SECRET_ACCESS_KEY
              valueFrom:
                secretKeyRef:
                  name: subgraph-database-bucket-secret
                  key: AWS_SECRET_ACCESS_KEY
            - name: BACKUP_NUM_TO_RETAIN
              value: "2"
            - name: BACKUP_SCHEDULE
              value: '00 00 * * sun'
tip

Important mentions about WAL files that can impact the availability of your cluster:

  • Should a replica experience a failure, it's important to note that WAL files will be retained and not deleted until either the replica has been successfully recovered, or removed. This retention policy is crucial for ensuring data integrity and consistency across the database cluster. However, it can lead to rapid disk space consumption, posing a risk of exhausting available storage.

  • Whenever taking a base backup (new replica, standby, etc.), WAL files accumulate at a fast pace and can exhaust disk space. Beware of that.
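To confirm that base backups are actually landing in your bucket, you can list them from inside a database pod; Spilo keeps the WAL-G credentials under /run/etc/wal-e.d/env, so a check could look like this (pod name assumed as before):

kubectl exec -it primary-subgraph-data-0 -n <your-namespace> -- envdir /run/etc/wal-e.d/env wal-g backup-list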

Setting up Clones and Standby Clusters

A Clone is a point-in-time copy of the production database that one would use for testing, development, or major upgrades; think of it as an independent staging area. The postgres-operator allows for two ways to create clones:

  • Clone from an S3 bucket (recommended)
  • Clone directly from a live instance

To clone from an S3 bucket you need to define a new PostgreSQL CRD resource with spec.clone section defined. Example:

apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: primary-subgraph-data-clone
spec:
  clone:
    # can be found in the metadata.uid of the source cluster Postgresql resource def
    uid: "efd12e58-5786-11e8-b5a7-06148230260c"
    # name of the cluster being cloned
    cluster: "primary-subgraph-data"
    # the new cluster will be cloned using the latest backup available before the timestamp
    timestamp: "2024-04-12T12:40:33+00:00"
    # the below s3_ parameters are required only when using non AWS S3 object storage
    s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal"
    s3_endpoint: <your-s3-endpoint>
    s3_force_path_style: true
  env:
    - name: CLONE_AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: primary-subgraph-data-bucket-secret
          key: AWS_ACCESS_KEY_ID
    - name: CLONE_AWS_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: primary-subgraph-data-bucket-secret
          key: AWS_SECRET_ACCESS_KEY

Cloning directly from your source DB cluster is done via pg_basebackup. To use this feature, simply define your clone's PostgreSQL CRD as above and leave out the timestamp field from the clone section.

info

The operator will connect to the service of the source cluster by name. If the cluster is called test, then the connection string will look like host=test port=5432, which means that you can clone only from clusters within the same namespace.

To set up a new standby or clone PostgreSQL instance that streams from a live instance, you must ensure that the new instance has the correct credentials. This involves copying the credentials from the source cluster's secrets to successfully bootstrap the standby cluster or clone.

A Standby Cluster is a cluster that first clones a database and then keeps replicating changes from it. It can exist in a different location than its source database but, unlike with cloning, the PostgreSQL version must be the same between the source and target clusters. A Standby Cluster is a great way to ensure you have a disaster recovery plan if your main database fails.

Similarly to cloning, you can start a Standby Cluster by streaming changes from archived WAL files or directly from your primary database.

To start a cluster as a standby from archived WAL files, add the following standby section in the Postgres CR definition:

apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: primary-subgraph-data-standby
spec:
  standby:
    s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
    s3_endpoint: <your-s3-endpoint>
    # optional, but needed if your object storage uses the path-style bucket
    # naming convention (e.g. Ceph) instead of DNS-style
    s3_force_path_style: true
  env:
    - name: STANDBY_AWS_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: primary-subgraph-data-bucket-secret
          key: AWS_ACCESS_KEY_ID
    - name: STANDBY_AWS_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: primary-subgraph-data-bucket-secret
          key: AWS_SECRET_ACCESS_KEY

To start a cluster as a standby from a remote primary, add the following standby options to the PostgreSQL CRD definition instead of the s3_* options:

spec:
  standby:
    standby_host: "<your-source-db-host>.<namespace>"
    standby_port: "5433"
note

For standby clusters, specifying both S3 storage and a remote live cluster as data sources is not possible; attempting to configure both simultaneously will result in an error. You must choose either S3 or a remote live cluster as the source, but not both.

Promoting a standby cluster to a database cluster

To promote a standby cluster to a proper database cluster, you have to ensure it stops replicating changes from the source and starts accepting writes instead. To promote, remove the standby section from the postgres cluster manifest. A rolling update will be triggered, removing the STANDBY_* environment variables from the pods, followed by a Patroni config update that promotes the cluster.

Monitoring

By default the postgres-operator does not come set up with any monitoring capabilities. However, to enable metric collection for database performance and WAL performance, the following exporters can be used:

  • quay.io/prometheuscommunity/postgres-exporter

  • ghcr.io/thedatabaseme/wal-g-exporter

note

The above-mentioned exporters are not the only ones that can be used; there is a large variety of PostgreSQL exporters to pick from. However, the wal-g-exporter specifically has been chosen because it is designed to work well with Zalando's Spilo image.

To enable the use of the above exporters, pass the following configuration to the postgres-operator so that your PostgreSQL databases run with exporter sidecars:

postgres-operator:
  values:
    configGeneral:
      sidecars:
        - name: wal-g-exporter
          image: ghcr.io/thedatabaseme/wal-g-exporter:0.3.1
          imagePullPolicy: IfNotPresent
          env:
            - name: HTTP_PORT
              value: "9351"
            - name: PGUSER
              value: "$(POSTGRES_USER)"
            - name: PGPASSWORD
              value: "$(POSTGRES_PASSWORD)"
          ports:
            - name: wal-g-exporter
              containerPort: 9351
              protocol: TCP
        - name: exporter
          image: quay.io/prometheuscommunity/postgres-exporter:v0.15.0
          ports:
            - name: pg-exporter
              containerPort: 9187
              protocol: TCP
          resources:
            requests:
              cpu: 50m
              memory: 200M
          env:
            - name: CLUSTER_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.labels['cluster-name']
            - name: DATA_SOURCE_NAME
              value: "postgresql://$(POSTGRES_USER):$(POSTGRES_PASSWORD)@localhost:5432/postgres?sslmode=disable"

Additionally, the wal-g-exporter relies on having all the needed WAL-G environment variables in an envdir under /run/etc/wal-e.d/env. You therefore need to add the following spec to each postgresql CR definition so that this path is shared with the exporter sidecars:

apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: <your-db-name>
spec:
  additionalVolumes:
    - name: walg
      mountPath: /run/etc
      targetContainers:
        - postgres
        - wal-g-exporter
      volumeSource:
        emptyDir: {}
  # the rest of your spec goes here

Once you've updated your postgres-operator spec and the CR of each of your PostgreSQL databases, it's time to add PodMonitors so that Prometheus can track and collect the metrics (the pg-exporter and wal-g-exporter ports from the sidecars above, plus the Patroni REST API on port 8008). Example:

apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  name: <your-database-name>
  namespace: <your-database-namespace>
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: postgresql
      app.kubernetes.io/instance: <your-database-name>
  podMetricsEndpoints:
    - port: pg-exporter
      path: /metrics
      scrapeTimeout: "60s"
      honorLabels: true
    - port: wal-g-exporter
      path: /metrics
      scrapeTimeout: "60s"
      honorLabels: true
    - targetPort: 8008
      path: /metrics
      scrapeTimeout: "10s"
      honorLabels: true
  namespaceSelector:
    matchNames:
      - <your-database-namespace>

Useful commands

When using Zalando's operator, the pods running the PostgreSQL database come with useful scripts and CLIs preinstalled, as they run the Spilo image. One such tool is patronictl, which is used to manage and interact with your database cluster.

You can use patronictl reinit <cluster_name> <member_name> in the event one of the cluster members needs to be reinitialized after falling out of sync with the primary due to corruption or other issues that prevent it from catching up through normal replication. It can also be used to clean up a member that has been problematic, or to refresh its data completely for consistency checks. This command resets a replica by wiping its existing data and then resynchronizing it from the current leader or another specified member of the cluster.


Introduction

It's an exciting time to be participating in The Graph ecosystem! During Graph Day 2022 Yaniv Tal announced the sunsetting of the hosted service, a moment The Graph ecosystem has been working towards for many years.

To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the Migration Infrastructure Providers (MIPs) program.

The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer.

The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to migration grants for subgraph developers using multi-chain subgraphs.

Useful Resources


MIPs FAQs

1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed?

Yes, it is indeed.

For context, the arbitration charter (learn more about the charter here) specifies the methodology for generating a POI for a failed subgraph.

A community member, SunTzu, has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo here.

2. Which chain will the MIPs program incentivise first?

The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given the ease of running nodes, Indexer readiness, its alignment with The Graph, and its adoption within web3.

3. How will new chains be added to the MIPs program?

New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will first be supported on the testnet and, subsequently, a GIP will be passed to support each chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support.

4. How will we know when the network is ready for a new chain?

The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs.

5. How are rewards divided per chain?

Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network.

6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that?

You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See The MIPs notion page to learn more about the phases.

7. When will rewards be distributed?

MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle.

8. How does scoring work?

Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on:

Subgraph Coverage

  • Are you providing maximal support for subgraphs per chain?

  • During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support.

Quality Of Service

  • Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)?

  • Is the Indexer supporting dapp developers being reactive to their needs?

  • Is the Indexer allocating efficiently, contributing to the overall health of the network?

Community Support

  • Is the Indexer collaborating with fellow Indexers to help them get set up for multi-chain?

  • Is the Indexer providing feedback to core devs throughout the program, or sharing information with Indexers in the Forum?

9. How will the Discord role be assigned?

Moderators will assign the roles in the next few days.

10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards?

Yes, this is actually expected of you. Several phases are on Görli and one is on mainnet.

11. At what point do you expect participants to add a mainnet deployment?

There will be a requirement to have a mainnet Indexer during phase 3. More information on this will be shared on this notion page soon.

12. Will rewards be subject to vesting?

The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement.

13. For teams with more than one member, will all the team members be given a MIPs Discord role?

Yes

14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet?

Yes

15. During the MIPs program, will there be a period to dispute invalid POI?

To be decided. Please return to this page periodically for more details, or, if your request is urgent, email info@thegraph.foundation.

17. Can we combine two vesting contracts?

No. The options are: you can delegate one to the other, or run two separate indexers.

18. KYC Questions?

Please email info@thegraph.foundation

19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready?

Yes

20. Do you have any recommendations on regions?

We do not give recommendations on regions. When picking locations, you might want to think about where the major markets for cryptocurrencies are.

21. What is “handler gas cost”?

It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains.
