diff --git a/packages/nextra-theme/package.json b/packages/nextra-theme/package.json index b9e48cf37a2a..ab364a838e72 100644 --- a/packages/nextra-theme/package.json +++ b/packages/nextra-theme/package.json @@ -25,8 +25,8 @@ "typecheck": "tsc --noEmit" }, "peerDependencies": { - "@edgeandnode/gds": "3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", - "@edgeandnode/go": "4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", + "@edgeandnode/gds": "3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", + "@edgeandnode/go": "4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", "@emotion/react": "^11.11", "next": "^13", "next-seo": "^6", @@ -43,8 +43,8 @@ "react-use": "^17.4.2" }, "devDependencies": { - "@edgeandnode/gds": "3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", - "@edgeandnode/go": "4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", + "@edgeandnode/gds": "3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", + "@edgeandnode/go": "4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", "@emotion/react": "^11.11.1", "@types/lodash": "^4.14.202", "@types/react": "^18.2.45", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 11e576a36b2e..41b810547f69 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -73,11 +73,11 @@ importers: version: 17.4.2(react-dom@18.2.0)(react@18.2.0) devDependencies: '@edgeandnode/gds': - specifier: 3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11 - version: 3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) + specifier: 3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b + version: 3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) '@edgeandnode/go': - specifier: 4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11 - version: 4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) + specifier: 4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b + version: 4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) '@emotion/react': specifier: ^11.11.1 version: 11.11.1(@types/react@18.2.45)(react@18.2.0) @@ -164,11 +164,11 @@ importers: specifier: ^5.29.1 version: 5.29.1(hardhat@2.19.3) '@edgeandnode/gds': - specifier: 3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11 - version: 
3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) + specifier: 3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b + version: 3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) '@edgeandnode/go': - specifier: 4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11 - version: 4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) + specifier: 4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b + version: 4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) '@emotion/react': specifier: ^11.11.1 version: 11.11.1(@types/react@18.2.45)(react@18.2.0) @@ -2016,8 +2016,8 @@ packages: - supports-color dev: true - /@edgeandnode/gds@3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1): - resolution: {integrity: sha512-nOjpDQ8+mE+pkR4j3tOWfPhVIavZAJpvis1h7zdbDj+P4EXTLMaDGqUCdTLKExfhc8N7b0N4yS/k+gG+ONmW8A==} + /@edgeandnode/gds@3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1): + resolution: {integrity: sha512-J3iuL3r12vmzLHmFdtZzigpDMJrVhVxmtutIglI4I4XlgOXi+f8DxOBrlK0DIqOl1GEzR3PcVIFMpr361OSBeg==} peerDependencies: '@emotion/react': ^11 dayjs: ^1.11 @@ -2082,11 +2082,11 @@ packages: - prop-types - utf-8-validate - /@edgeandnode/go@4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1): - resolution: {integrity: sha512-R4xI5Y7p17OYn7OU3wjZrs/9FQWEJ/p5cX0kruZhMrKS99SePWRhaC/sBV/j/0qYc6AmEsZc4qg1bU7kzEaq8Q==} + /@edgeandnode/go@4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@edgeandnode/common@5.29.1)(@edgeandnode/gds@3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b)(@emotion/react@11.11.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(next@14.0.4)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1): + resolution: {integrity: 
sha512-bx/Nisv4e6TzzNO3yPG71/gRiN4D7NgDwa950iM3CDFWaMX782/tUOgdlwuzEME6iT5C9XQsu/QknC4DTWEBpQ==} peerDependencies: - '@edgeandnode/common': ^5.29.0 - '@edgeandnode/gds': ^3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11 + '@edgeandnode/common': ^5.29.1 + '@edgeandnode/gds': ^3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b '@emotion/react': ^11.11 next: ^13 react: ^18 @@ -2097,7 +2097,7 @@ packages: optional: true dependencies: '@edgeandnode/common': 5.29.1(hardhat@2.19.3) - '@edgeandnode/gds': 3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) + '@edgeandnode/gds': 3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b(@emotion/react@11.11.1)(@theme-ui/core@0.16.1)(@types/react-dom@18.2.18)(@types/react@18.2.45)(dayjs@1.11.10)(hardhat@2.19.3)(next@14.0.4)(prop-types@15.8.1)(react-dom@18.2.0)(react@18.2.0)(theme-ui@0.16.1) '@emotion/react': 11.11.1(@types/react@18.2.45)(react@18.2.0) '@radix-ui/react-dialog': 1.0.5(@types/react-dom@18.2.18)(@types/react@18.2.45)(react-dom@18.2.0)(react@18.2.0) '@radix-ui/react-navigation-menu': 1.1.4(@types/react-dom@18.2.18)(@types/react@18.2.45)(react-dom@18.2.0)(react@18.2.0) @@ -10893,7 +10893,7 @@ packages: human-signals: 4.3.1 is-stream: 3.0.0 merge-stream: 2.0.0 - npm-run-path: 5.1.0 + npm-run-path: 5.2.0 onetime: 6.0.0 signal-exit: 3.0.7 strip-final-newline: 3.0.0 @@ -10908,7 +10908,7 @@ packages: human-signals: 5.0.0 is-stream: 3.0.0 merge-stream: 2.0.0 - npm-run-path: 5.1.0 + npm-run-path: 5.2.0 onetime: 6.0.0 signal-exit: 4.1.0 strip-final-newline: 3.0.0 @@ -14609,8 +14609,8 @@ packages: path-key: 3.1.1 dev: true - /npm-run-path@5.1.0: - resolution: {integrity: sha512-sJOdmRGrY2sjNTRMbSvluQqg+8X7ZK61yvzBEIDhz4f8z1TZFYABsqjjCBd/0PUNE9M6QDgHJXQkGUEm7Q+l9Q==} + /npm-run-path@5.2.0: + resolution: {integrity: sha512-W4/tgAXFqFA0iL7fk0+uQ3g7wkL8xJmx3XdK0VGb4cHW//eZTtKGvFBBoRKVTpY7n6ze4NL9ly7rgXcHufqXKg==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} dependencies: path-key: 4.0.0 diff --git a/website/i18n.ts b/website/i18n.ts index af101249268f..4a99926e9642 100644 --- a/website/i18n.ts +++ b/website/i18n.ts @@ -1,10 +1,12 @@ import { Locale, NestedStrings, Translations, useI18n as _useI18n } from '@edgeandnode/gds' import ar from '@/pages/ar/translations' +import cs from '@/pages/cs/translations' import de from '@/pages/de/translations' import en from '@/pages/en/translations' import es from '@/pages/es/translations' import fr from '@/pages/fr/translations' +import ha from '@/pages/ha/translations' import hi from '@/pages/hi/translations' import it from '@/pages/it/translations' import ja from '@/pages/ja/translations' @@ -13,20 +15,24 @@ import mr from '@/pages/mr/translations' import nl from '@/pages/nl/translations' import pl from '@/pages/pl/translations' import pt from '@/pages/pt/translations' +import ro from '@/pages/ro/translations' import ru from '@/pages/ru/translations' import sv from '@/pages/sv/translations' import tr from '@/pages/tr/translations' import uk from '@/pages/uk/translations' import ur from '@/pages/ur/translations' import vi from '@/pages/vi/translations' +import yo from '@/pages/yo/translations' import zh from '@/pages/zh/translations' const appLocales = [ Locale.ARABIC, + // Locale.CZECH, // Locale.GERMAN, 
Locale.ENGLISH, Locale.SPANISH, // Locale.FRENCH, + // Locale.HAUSA, Locale.HINDI, // Locale.ITALIAN, Locale.JAPANESE, @@ -35,12 +41,14 @@ const appLocales = [ // Locale.DUTCH, // Locale.POLISH, Locale.PORTUGUESE, + // Locale.ROMANIAN, Locale.RUSSIAN, - // Locale.SWEDISH, - // Locale.TURKISH, + Locale.SWEDISH, + Locale.TURKISH, // Locale.UKRAINIAN, Locale.URDU, // Locale.VIETNAMESE, + // Locale.YORUBA, Locale.CHINESE, ] as const @@ -52,21 +60,14 @@ export const supportedLocales = appLocales as Mutable export type AppLocale = (typeof supportedLocales)[number] -export type AppTranslations = Translations & { - [key in AppLocale]: { - global: NestedStrings - index: NestedStrings - docsearch: NestedStrings - supportedNetworks: NestedStrings - } -} - export const translations = { ar, + cs, de, en, es, fr, + ha, hi, it, ja, @@ -75,13 +76,24 @@ export const translations = { nl, pl, pt, + ro, ru, sv, tr, uk, ur, vi, + yo, zh, -} satisfies AppTranslations +} satisfies Translations & { + [key in AppLocale]: { + global: NestedStrings + index: NestedStrings + docsearch: NestedStrings + supportedNetworks: NestedStrings + } +} + +export type AppTranslations = typeof translations -export const useI18n = () => _useI18n() +export const useI18n = () => _useI18n() diff --git a/website/package.json b/website/package.json index ee485db19776..5fd08d4fa00d 100644 --- a/website/package.json +++ b/website/package.json @@ -14,8 +14,8 @@ }, "dependencies": { "@edgeandnode/common": "^5.29.1", - "@edgeandnode/gds": "3.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", - "@edgeandnode/go": "4.0.0-global-header-1703167287784-4d7f05b34403f749b2baf2c11af2882b2acb4b11", + "@edgeandnode/gds": "3.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", + "@edgeandnode/go": "4.0.0-global-header-1703180036960-6b16dd059ed284aceacf266e91ef74c6e7a1ee3b", "@emotion/react": "^11.11.1", "@graphprotocol/nextra-theme": "workspace:*", "mixpanel-browser": "^2.48.1", diff --git a/website/pages/ar/arbitrum/arbitrum-faq.mdx b/website/pages/ar/arbitrum/arbitrum-faq.mdx index 059561d24711..2d3f7ee483d5 100644 --- a/website/pages/ar/arbitrum/arbitrum-faq.mdx +++ b/website/pages/ar/arbitrum/arbitrum-faq.mdx @@ -6,7 +6,7 @@ Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitru ## لماذا يقوم The Graph بتطبيق حل L2؟ -من خلال توسيع نطاق TheGrraph في L2، يمكن للمشاركين في الشبكة توقع ما يلي: +By scaling The Graph on L2, network participants can expect: - Upwards of 26x savings on gas fees @@ -31,7 +31,7 @@ The Graph community decided to move forward with Arbitrum last year after the ou ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## بصفتي مطور subgraph ، أو مستهلك بيانات ، أو مفهرس ، أو مسنق ، أو مفوض ، ماذا علي أن أفعل الآن؟ +## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx index 535e741cfba6..ce87a0888ae8 100644 --- a/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ar/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: الأسئلة الشائعة حول أدوات النقل L2 --- -> لم يتم إصدار أدوات نقل L2 حتى الآن. من المتوقع أن تكون متاحة في صيف عام 2023. 
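The `website/i18n.ts` change above swaps the explicit `AppTranslations` annotation for a `satisfies` check and then derives `AppTranslations` from `typeof translations`. A minimal standalone sketch of that pattern is shown below; the `NestedStrings` shape and locale keys here are illustrative stand-ins, not the real `@edgeandnode/gds` types.

```typescript
// Illustrative only: stand-in types for the `satisfies` pattern used in website/i18n.ts.
type NestedStrings = { [key: string]: string | NestedStrings }
type AppLocale = 'en' | 'ar'

const translations = {
  en: { global: { title: 'The Graph Docs' } },
  ar: { global: { title: 'وثائق The Graph' } },
} satisfies { [locale in AppLocale]: { global: NestedStrings } }

// `satisfies` validates the object against the constraint without widening it,
// so the derived type still knows exactly which locales and keys exist.
type AppTranslations = typeof translations
const title: string = translations.en.global.title
```

The diff keeps the same `satisfies` validation but now derives `AppTranslations` from the value instead of declaring it up front, so the exported type reflects the translation objects that are actually registered.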
+## عام -## ما هي أدوات النقل L2؟ +### ما هي أدوات النقل L2؟ -جعل The Graph تكلفة المشاركة في الشبكة أرخص بمقدار 26 مرة للمساهمين من خلال نشر البروتوكول على منصة Arbitrum One. تم إنشاء أدوات التحويل إلى L2 بواسطة المطورين الأساسيين لتسهيل الانتقال إلى L2. لكل مشارك في البروتوكول، سيتم مشاركة مجموعة من أدوات التحويل لتجربة سلسة عند الانتقال إلى L2، مما تجنب فترات الذوبان أو الاضطرار إلى سحب GRT يدويًا. ستتطلب هذه الأدوات منك اتباع مجموعة محددة من الخطوات اعتمادًا على الدور الذي تلعبه داخل The Graph وما تقوم بنقله إلى L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### هل يمكنني استخدام نفس المحفظة التي استخدمها في Ethereum mainnet؟ إذا كنت تستخدم محفظة [EOA] \(https://ethereum.org/en/developers/docs/accounts/#types-of-account) ، فيمكنك استخدام نفس العنوان. إذا كانت محفظة Ethereum mainnet الخاصة بك عبارة عن عقد (مثل multisig) ، فيجب عليك تحديد [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) حيث سيتم إرسال التحويل الخاص بك. يرجى التحقق من العنوان بعناية لأن أي تحويلات إلى عنوان غير صحيح يمكن أن تؤدي إلى خسارة غير قابلة للرجوع. إذا كنت ترغب في استخدام multisig على L2 ، فتأكد من نشر عقد multisig على Arbitrum One. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). 
+ +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there to help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## نقل الـ Subgraph (الرسم البياني الفرعي) -## كيفكيف أقوم بتحويل الـ subgraph الخاص بي؟ +### كيف أقوم بتحويل الـ subgraph الخاص بي؟ + + لنقل الـ subgraph الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: @@ -28,227 +52,281 @@ title: الأسئلة الشائعة حول أدوات النقل L2 5. جدث عنوان URL للاستعلام (مستحسن) -\\ \* لاحظ أنه يجب عليك تأكيد النقل في غضون 7 أيام وإلا فقد يتم فقد الـ subgraph الخاص بك. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كان هناك أي مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## من أين يجب أن أبدأ التحويل ؟ +### من أين يجب أن أبدأ التحويل ؟ يمكنك بدء عملية النقل من [Subgraph Studio] \(https://thegraph.com/studio/) ، [Explorer ،] \(https://thegraph.com/explorer) أو من أي صفحة تفاصيل subgraph. انقر فوق الزر "Transfer Subgraph" في صفحة تفاصيل الرسم الـ subgraph لبدء النقل. -## كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي +### كم من الوقت سأنتظر حتى يتم نقل الـ subgraph الخاص بي يستغرق وقت النقل حوالي 20 دقيقة. يعمل جسر Arbitrum في الخلفية لإكمال نقل الجسر تلقائيًا. في بعض الحالات ، قد ترتفع تكاليف الغاز وستحتاج إلى تأكيد المعاملة مرة أخرى.
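If you prefer to check the retryable ticket behind a pending transfer programmatically instead of through https://retryable-dashboard.arbitrum.io/, the sketch below outlines the general approach. It is a non-authoritative example: it assumes the `@arbitrum/sdk` v3 interfaces (`L1TransactionReceipt`, `L1ToL2MessageStatus`, `redeem()`) together with ethers v5, and uses placeholder RPC URLs and environment variables, so verify the exact API against the Arbitrum SDK docs before relying on it.

```typescript
// Sketch only - assumes @arbitrum/sdk v3 with ethers v5; RPC URLs and env vars are placeholders.
import { providers, Wallet } from 'ethers'
import { L1TransactionReceipt, L1ToL2MessageStatus } from '@arbitrum/sdk'

async function checkTransferTicket(l1TxHash: string): Promise<void> {
  const l1Provider = new providers.JsonRpcProvider(process.env.L1_RPC_URL)
  const l2Provider = new providers.JsonRpcProvider(process.env.ARBITRUM_RPC_URL)
  // Manual redemption needs a wallet holding a little ETH bridged to Arbitrum One.
  const l2Wallet = new Wallet(process.env.PRIVATE_KEY!, l2Provider)

  // Wrap the L1 transfer-tool transaction and look up its L1->L2 message (the retryable ticket).
  const l1Receipt = new L1TransactionReceipt(await l1Provider.getTransactionReceipt(l1TxHash))
  const [message] = await l1Receipt.getL1ToL2Messages(l2Wallet)

  const status = await message.status()
  if (status === L1ToL2MessageStatus.REDEEMED) {
    console.log('Ticket already redeemed - the transfer reached L2.')
  } else if (status === L1ToL2MessageStatus.FUNDS_DEPOSITED_ON_L2) {
    // Auto-redeem failed; retry manually before the ~7-day window closes.
    const redeemTx = await message.redeem()
    await redeemTx.wait()
    console.log('Ticket redeemed manually.')
  } else {
    console.log('Ticket status code:', status)
  }
}
```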
-## هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ +### هل سيظل الـ subgraph قابلاً للاكتشاف بعد أن أنقله إلى L2؟ سيكون الـ subgraph الخاص بك قابلاً للاكتشاف على الشبكة التي تم نشرها عليها فقط. على سبيل المثال ، إذا كان الـ subgraph الخاص بك موجودًا على Arbitrum One ، فيمكنك العثور عليه فقط في Explorer على Arbitrum One ولن تتمكن من العثور عليه على Ethereum. يرجى التأكد من تحديد Arbitrum One في مبدل الشبكة في أعلى الصفحة للتأكد من أنك على الشبكة الصحيحة. بعد النقل ، سيظهر الـ L1 subgraph على أنه مهمل. -## هل يلزم نشر الـ subgraph الخاص بي لنقله؟ +### هل يلزم نشر الـ subgraph الخاص بي لنقله؟ للاستفادة من أداة نقل الـ subgraph ، يجب أن يكون الرسم البياني الفرعي الخاص بك قد تم نشره بالفعل على شبكة Ethereum الرئيسية ويجب أن يكون لديه إشارة تنسيق مملوكة للمحفظة التي تمتلك الرسم البياني الفرعي. إذا لم يتم نشر الرسم البياني الفرعي الخاص بك ، فمن المستحسن أن تقوم ببساطة بالنشر مباشرة على Arbitrum One - ستكون رسوم الغاز أقل بكثير. إذا كنت تريد نقل رسم بياني فرعي منشور ولكن حساب المالك لا يملك إشارة تنسيق عليه ، فيمكنك الإشارة بمبلغ صغير (على سبيل المثال 1 GRT) من ذلك الحساب ؛ تأكد من اختيار إشارة "auto-migrating". -## ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد أن النقل إلى Arbitrum؟ +### ماذا يحدث لإصدار Ethereum mainnet للرسم البياني الفرعي الخاص بي بعد أن النقل إلى Arbitrum؟ بعد نقل الرسم البياني الفرعي الخاص بك إلى Arbitrum ، سيتم إهمال إصدار Ethereum mainnet. نوصي بتحديث عنوان URL للاستعلام في غضون 48 ساعة. ومع ذلك ، هناك فترة سماح تحافظ على عمل عنوان URL للشبكة الرئيسية الخاصة بك بحيث يمكن تحديث أي دعم dapp لجهة خارجية. -## بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ +### بعد النقل ، هل أحتاج أيضًا إلى إعادة النشر على Arbitrum؟ بعد فترة النقل البالغة 20 دقيقة ، ستحتاج إلى تأكيد النقل لإكمال النقل ، أداة النقل ستوجهك للقيام بذلك. سيستمر دعم L1 endpoint الخاص بك خلال فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديثه عندما يكون ذلك مناسبًا لك. -## هل سيكون هناك وقت تعطل للـ endpoint الخاصة بي أثناء إعادة النشر؟ +### Will my endpoint experience downtime while re-publishing? -هنا يجب ألا يكون هناك وقت تعطل عند استخدام أداة النقل لنقل الرسم البياني الفرعي الخاص بك إلى L2 ، ستكون L1 endpoint مدعومة أثناء فترة النقل وفترة السماح. من المستحسن أن تقوم بتحديث endpoint الخاصة بك عندما يكون ذلك مناسبًا لك. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ +### هل يتم نشر وتخطيط الإصدار بنفس الطريقة في الـ L2 كما هو الحال في شبكة Ethereum Ethereum mainnet؟ -نعم. عند النشر في Subgraph Studio تأكد من تحديد Arbitrum One كشبكتك المنشورة. سيتوفر في الاستوديو أحدث endpoint والتي تشير إلى أحدث إصدار محدث من الرسم البياني الفرعي. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ +### هل سينتقل تنسيق الـ subgraph مع الـ subgraph ؟ -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. 
+إذا اخترت إشارة الترحيل التلقائي auto-migrating ، فسيتم نقل 100٪ من التنسيق مع الرسم البياني الفرعي الخاص بك إلى Arbitrum One. سيتم تحويل كل إشارة التنسيق الخاصة بالرسم الفرعي إلى GRT في وقت النقل ، وسيتم استخدام GRT المقابل لإشارة التنسيق الخاصة بك لصك الإشارة على L2 subgraph. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +يمكن للمنسقين الآخرين اختيار ما إذا كانوا سيسحبون أجزاء من GRT ، أو ينقلونه أيضًا إلى L2 لإنتاج إشارة على نفس الرسم البياني الفرعي. -## هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ +### هل يمكنني إعادة الرسم البياني الفرعي الخاص بي إلى Ethereum mainnet بعد أن أقوم بالنقل؟ بمجرد النقل ، سيتم إهمال إصدار شبكة Ethereum mainnet للرسم البياني الفرعي الخاص بك. إذا كنت ترغب في العودة إلى mainnet ، فستحتاج إلى إعادة النشر (redeploy) والنشر مرة أخرى على mainnet. ومع ذلك ، لا يُنصح بشدة بالتحويل مرة أخرى إلى شبكة Ethereum mainnet حيث سيتم في النهاية توزيع مكافآت الفهرسة بالكامل على Arbitrum One. -## لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ +### لماذا أحتاج إلى Bridged ETH لإكمال النقل؟ يتم دفع رسوم الغاز في Arbitrum One باستخدام ETHbridged ETH (ETH الذي تم ربطه بـ Arbitrum One). ومع ذلك ، فإن رسوم الغاز أقل بكثير عند مقارنتها بشبكة Ethereum mainnet. -## Curation Signal(إشارة التنسيق) +## Delegation(التفويض) -## How do I transfer my curation? +### كيف أنقل تفويضي؟ -To transfer your curation, you will need to complete the following steps: + -1. ابدأ نقل الإشارة على شبكة Ethereum mainnet +لنقل تفويضك ، ستحتاج إلى إكمال الخطوات التالية: -2. Specify an L2 Curator address\* +1. ابدأ نقل التفويض على شبكة Ethereum mainnet +2. انتظر 20 دقيقة للتأكيد +3. قم بتأكيد نقل التفويض على Arbitrum -3. انتظر 20 دقيقة للتأكيد +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\\ \* إذا لزم الأمر - تستخدم عنوان عقد. +### ماذا يحدث لمكافآتي إذا بدأت عملية تحويل وكان لا يزال التخصيص مفتوحا على Ethereum mainnet؟ -## How will I know if the subgraph I curated has moved to L2? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### ماذا يحدث إذا كان المفهرس الذي أفوضه حاليًا غير موجود في Arbitrum One؟ -## What if I do not wish to move my curation to L2? +لن يتم تمكين أداة النقل L2 إلا إذا قام المفهرس الذي فوضته بتحويل حصته إلى Arbitrum. -عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. 
وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. +### هل يملك المفوضين خيارا للتفويض إلى مفهرس آخر؟ -## How do I know my curation successfully transferred? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. +### ماذا لو لم أتمكن من العثور على المفهرس الذي قمت بالتوفيض إليه في L2؟ -## Can I transfer my curation on more than one subgraph at a time? +ستكتشف أداة النقل L2 المفهرس الذي قمت بالتفويض إليه مسبقًا تلقائيًا. -لا يوجد خيار كهذا حالياً. +### هل سأكون قادرًا على المزج والمطابقة أو "نشر" تفويضي عبر مفهرس جديد أو مفهرسين جدد بدلاً من المفهرس السابق؟ -## Indexer Stake(حصة المفهرس) +ستعمل أداة نقل L2 دائمًا على نقل التفويض إلى نفس المفهرس الذي فوضته سابقًا. وبمجرد الانتقال إلى L2 ، يمكنك إلغاء التفويض وانتظار فترة الذوبان ، وتحديد ما إذا كنت ترغب في تقسيم التفويض الخاص بك. -## كيف يمكنني تحويل حصتي إلى Arbitrum؟ +### هل أنا خاضع لفترة الانتظار أم يمكنني السحب فورًا بعد استخدام أداة نقل التفويض L2؟ -لتحويل حصتك ، ستحتاج إلى إكمال الخطوات التالية: +تتيح لك أداة النقل الانتقال على الفور إلى L2. إذا كنت ترغب في إلغاء التفويض ، فسيتعين عليك انتظار فترة الذوبان. ومع ذلك ، إذا قام المفهرس بتحويل جميع حصته إلى L2 ، فيمكنك السحب على شبكة Ethereum mainnet فورا. -1. ابدأ تحويل الحصص على شبكة Ethereum mainnet +### هل يمكن أن تتأثر مكافآتي سلبًا إذا لم أحول تفويضي؟ -2. انتظر 20 دقيقة للتأكيد +من المتوقع أن تنتقل جميع مشاركات الشبكة إلى Arbitrum One في المستقبل. -3. Confirm stake transfer on Arbitrum +### كم من الوقت يستغرق استكمال نقل تفويضي إلى L2؟ -\\ \* لاحظ أنه يجب عليك تأكيد التحويل في غضون 7 أيام وإلا قد تفقد حصتك. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كانت هناك أي مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/graphprotocol). +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## هل سيتم تحويل حصتي بالكامل؟ +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? -يمكنك اختيار مقدار حصتك المراد تحويلها. إذا اخترت تحويل حصتك بالكامل مرة واحدة ، فستحتاج إلى إغلاق أي تخصيصات مفتوحة أولاً. +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -إذا كنت تخطط لنقل أجزاء من حصتك في معاملات متعددة ، فيجب عليك دائمًا تحديد نفس عنوان المستفيد. 
+### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -ملاحظة: يجب أن تفي بالحد الأدنى من متطلبات الحصة على L2 في المرة الأولى التي تستخدم فيها أداة التحويل. يجب أن يرسل المفهرسون 100 ألف GRT كحد أدنى (عند استدعاء هذه الوظيفة في المرة الأولى). في حالة ترك جزء من الحصة على L1 ، يجب أن يكون أيضًا أكثر من 100 ألف GRT كحد أدنى وأن يكون كافيًا (جنبًا إلى جنب مع التفويضات) لتغطية مخصصاتك المفتوحة. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## كم من الوقت لدي لتأكيد تحويل حصتي إلى Arbitrum؟ +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\\ _ \\ _ \\ \* يجب تأكيد معاملتك لإتمام تحويل الحصة على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقدان الحصة. +### هل هناك ضريبة على التفويض؟ -## What if I have open allocations? +لا ، يتم تفويض التوكن المستلمة على L2 إلى المفهرس المحدد نيابة عن المفوض المحدد دون فرض ضريبة التفويض. -إذا كنت لا ترسل كل حصصك، فإن أداة نقل L2 ستتحقق من أن الحد الأدنى 100 ألف GRT لا يزال في شبكة Ethereum mainnet وأن حصتك المتبقية وتفويضك كافيان لتغطية أي تخصيصات مفتوحة. قد تحتاج إلى إغلاق التخصيصات المفتوحة إذا كان رصيد GRT الخاص بك لا يغطي الحد الأدنى + المخصصات المفتوحة. +### Will my unrealized rewards be transferred when I transfer my delegation? -## باستخدام أدوات النقل ، هل من الضروري الانتظار 28 يومًا لإلغاء الحصة في Ethereum mainnet قبل التحويل؟ +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -لا ، يمكنك تحويل حصتك إلى L2 على الفور ، ولا داعي لإلغاء حصتك والانتظار قبل استخدام أداة التحويل. لا يسري الانتظار لمدة 28 يومًا إلا إذا كنت ترغب في سحب الحصة إلى محفظتك ، على شبكة Ethereum mainnet أو L2. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## كم من الوقت سيستغرق تحويل حصتي؟ +### Is moving delegations to L2 mandatory? Is there a deadline? -ستستغرق أداة النقل L2 حوالي 20 دقيقة لإكمال تحويل حصتك. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. 
+​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -لا ، لكي يقوم المفوضون بنقل GRT المفوضة إلى Arbitrum ، يجب أن يكون المفهرس الذي يتم التفويض إليه نشطًا في L2. +### I don't see a button to transfer my delegation. Why is that? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation(التفويض) +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + +## Curation Signal(إشارة التنسيق) + +### كيف أنقل إشارة التنسيق الخاص بي؟ + +لنقل التنسيق الخاص بك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ نقل الإشارة على شبكة Ethereum mainnet -## How do I transfer my delegation? +2. حدد عنوان L2 للمنسق \\ \* -To transfer your delegation, you will need to complete the following steps: +3. انتظر 20 دقيقة للتأكيد + +\\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. + +### كيف سأعرف ما إذا كان الرسم البياني الفرعي الذي قمت بعمل إشارة تنسيق عليه قد انتقل إلى L2؟ -1. Initiate delegation transfer on Ethereum mainnet +عند عرض صفحة تفاصيل الرسم البياني الفرعي ، ستعلمك لافتة بأنه تم نقل هذا الرسم البياني الفرعي. يمكنك اتباع التعليمات لنقل إشارة التنسيق الخاص بك. يمكنك أيضًا العثور على هذه المعلومات في صفحة تفاصيل الرسم البياني الفرعي لأي رسم بياني فرعي تم نقله. + +### ماذا لو كنت لا أرغب في نقل إشارة التنسيق الخاص بي إلى L2؟ + +عندما يتم إهمال الرسم البياني الفرعي ، يكون لديك خيار سحب الإشارة. وبالمثل ، إذا انتقل الرسم البياني الفرعي إلى L2 ، فيمكنك اختيار سحب الإشارة في شبكة Ethereum الرئيسية أو إرسال الإشارة إلى L2. + +### كيف أعرف أنه تم نقل إشارة التنسيق بنجاح؟ + +يمكن الوصول إلى تفاصيل الإشارة عبر Explorer بعد حوالي 20 دقيقة من بدء أداة النقل للـ L2. 
+ +### هل يمكنني نقل إشاة التنسيق الخاص بي على أكثر من رسم بياني فرعي في وقت واحد؟ + +لا يوجد خيار كهذا حالياً. + +## Indexer Stake(حصة المفهرس) + +### كيف يمكنني تحويل حصتي إلى Arbitrum؟ + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + + +لتحويل حصتك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ تحويل الحصص على شبكة Ethereum mainnet 2. انتظر 20 دقيقة للتأكيد -3. Confirm delegation transfer on Arbitrum +3. Confirm stake transfer on Arbitrum -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## ماذا يحدث لمكافآتي إذا بدأت عملية تحويل وكان لا يزال التخصيص مفتوحا على Ethereum mainnet؟ +### هل سيتم تحويل حصتي بالكامل؟ -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +يمكنك اختيار مقدار حصتك المراد تحويلها. إذا اخترت تحويل حصتك بالكامل مرة واحدة ، فستحتاج إلى إغلاق أي تخصيصات مفتوحة أولاً. -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +إذا كنت تخطط لنقل أجزاء من حصتك في معاملات متعددة ، فيجب عليك دائمًا تحديد نفس عنوان المستفيد. + +ملاحظة: يجب أن تفي بالحد الأدنى من متطلبات الحصة على L2 في المرة الأولى التي تستخدم فيها أداة التحويل. يجب أن يرسل المفهرسون 100 ألف GRT كحد أدنى (عند استدعاء هذه الوظيفة في المرة الأولى). في حالة ترك جزء من الحصة على L1 ، يجب أن يكون أيضًا أكثر من 100 ألف GRT كحد أدنى وأن يكون كافيًا (جنبًا إلى جنب مع التفويضات) لتغطية مخصصاتك المفتوحة. -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +### كم من الوقت لدي لتأكيد تحويل حصتي إلى Arbitrum؟ -## Do Delegators have the option to delegate to another Indexer? +\\ _ \\ _ \\ \* يجب تأكيد معاملتك لإتمام تحويل الحصة على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقدان الحصة. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### ماذا لو كان لدي تخصيصات مفتوحة؟ -## ماذا لو لم أتمكن من العثور على المفهرس الذي قمت بالتوفيض إليه في L2؟ +إذا كنت لا ترسل كل حصصك، فإن أداة نقل L2 ستتحقق من أن الحد الأدنى 100 ألف GRT لا يزال في شبكة Ethereum mainnet وأن حصتك المتبقية وتفويضك كافيان لتغطية أي تخصيصات مفتوحة. 
قد تحتاج إلى إغلاق التخصيصات المفتوحة إذا كان رصيد GRT الخاص بك لا يغطي الحد الأدنى + المخصصات المفتوحة. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### باستخدام أدوات النقل ، هل من الضروري الانتظار 28 يومًا لإلغاء الحصة في Ethereum mainnet قبل التحويل؟ -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +لا ، يمكنك تحويل حصتك إلى L2 على الفور ، ولا داعي لإلغاء حصتك والانتظار قبل استخدام أداة التحويل. لا يسري الانتظار لمدة 28 يومًا إلا إذا كنت ترغب في سحب الحصة إلى محفظتك ، على شبكة Ethereum mainnet أو L2. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### كم من الوقت سيستغرق تحويل حصتي؟ -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +ستستغرق أداة النقل L2 حوالي 20 دقيقة لإكمال تحويل حصتك. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### هل يجب أن أقوم بالفهرسة على Arbitrum قبل أن أنقل حصتي؟ -## Can my rewards be negatively impacted if I do not transfer my delegation? +يمكنك تحويل حصتك بشكل فعال أولاً قبل إعداد الفهرسة ، ولكن لن تتمكن من المطالبة بأي مكافآت على L2 حتى تقوم بتخصيصها لـ subgraphs على L2 وفهرستها وعرض POIs. -It is anticipated that all network participation will move to Arbitrum One in the future. +### هل يستطيع المفوضون نقل تفويضهم قبل نقل indexing stake الخاص بي؟ -## How long does it take to complete the transfer of my delegation to L2? +لا ، لكي يقوم المفوضون بنقل GRT المفوضة إلى Arbitrum ، يجب أن يكون المفهرس الذي يتم التفويض إليه نشطًا في L2. -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. 
The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -## Is there any delegation tax? +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -## Vesting Contract Transfer +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -## How do I transfer my vesting contract? +## نقل Vesting Contract(عقد الاستحقاق) -To transfer your vesting, you will need to complete the following steps: +### كيف أنقل عقد الاستحقاق الخاص بي؟ -1. Initiate the vesting transfer on Ethereum mainnet +لتحويل استحقاقك ، ستحتاج إلى إكمال الخطوات التالية: + +1. ابدأ تحويل الاستحقاق على شبكة Ethereum mainnet 2. انتظر 20 دقيقة للتأكيد -3. Confirm vesting transfer on Arbitrum +3. قم بالتأكيد على نقل الاستحقاق على Arbitrum + +### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا جزئيًا فقط؟ -## How do I transfer my vesting contract if I am only partially vested? + -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) 2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. 3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. -4. Withdraw any remaining ETH from the transfer tool contract +4. سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) + +### كيف يمكنني تحويل عقد الاستحقاق الخاص بي إذا كنت مخولًا بالكامل؟ -## How do I transfer my vesting contract if I am fully vested? + -For those that are fully vested, the process is similar: +بالنسبة لمن يتمتعون بكامل الصلاحيات ، فإن العملية مماثلة: -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +1. قم بإيداع بعض ETH في عقد أداة النقل (يمكن أن تساعد واجهة المستخدم في تقدير مبلغ معقول) -2. Set your L2 address with a call to the transfer tool contract +2. قم بتعيين عنوان L2 الخاص بك من خلال استدعاء عقد أداة النقل -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +3. أرسل حصتك / تفويضك إلى L2 من خلال وظائف أداة التحويل "locked" في L1 Staking contract. -4. Withdraw any remaining ETH from the transfer tool contract +4. سحب أي ETH متبقي من عقد أداة النقل(transfer tool contract) -## Can I transfer my vesting contract to Arbitrum? +### هل يمكنني نقل عقد الاستحقاق الخاص بي إلى Arbitrum؟ -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +يمكنك تحويل رصيد GRT الخاص بعقد الاستحقاق الخاص بك إلى عقد استحقاق آخر على L2. هذا شرط أساسي لنقل الحصة أو التفويض من عقد الاستحقاق الخاص بك إلى L2. 
يجب أن يحتوي عقد الاستحقاق على مبلغ غير صفري من GRT (يمكنك تحويل مبلغ صغير إليه مثل 1 GRT إذا لزم الأمر). -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +عندما تقوم بتحويل GRT من عقد الاستحقاق L1 الخاص بك إلى L2 ، يمكنك اختيار المبلغ الذي تريد إرساله ويمكنك القيام بذلك عدة مرات. سيتم بدء عقد الاستحقاق على L2 في المرة الأولى التي تقوم فيها بتحويل GRT. تتم عمليات النقل باستخدام أداة النقل(Transfer Tool) التي ستكون مرئية في ملف تعريف Explorer الخاص بك عند الاتصال بحساب عقد الاستحقاق. @@ -256,27 +334,27 @@ When you transfer GRT from your L1 vesting contract to L2, you can choose the am إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. -## أنا أستخدم عقد الاستحقاق الخاص بي للقيام بالتخزين (staking) في mainnet. هل يمكنني تحويل حصتي إلى Arbitrum؟ +### أنا أستخدم عقد الاستحقاق الخاص بي للقيام بالتخزين (staking) في mainnet. هل يمكنني تحويل حصتي إلى Arbitrum؟ نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل الحصة بحيث تكون مملوكة لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعض رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك تحويل حصتك إلى أي عنوان على L2 ، ولكن يجب عليك تعيينها مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### أنا أستخدم عقد الاستحقاق الخاص بي للتفويض على mainnet. هل يمكنني نقل تفويضاتي إلى Arbitrum؟ -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +نعم ، ولكن إذا كان عقدك لا يزال مستحقًا ، فيمكنك فقط نقل التفويض بحيث يكون مملوكًا لعقد الاستحقاق L2 الخاص بك. يجب أولاً تهيئة عقد L2 هذا عن طريق تحويل بعضا من رصيد GRT باستخدام أداة تحويل عقد الاستحقاق في Explorer. إذا كان عقدك مخولًا بالكامل ، فيمكنك نقل تفويضك إلى أي عنوان في L2 ، ولكن يجب عليك تعيينه مسبقًا وإيداع بعض ETH لأداة التحويل L2 لدفع ثمن غاز L2. -## هل يمكنني تحديد مستفيد مختلف لعقد الاستحقاق الخاص بي على L2؟ +### هل يمكنني تحديد مستفيد مختلف لعقد الاستحقاق الخاص بي على L2؟ نعم ، في المرة الأولى التي تقوم فيها بتحويل رصيد وإعداد عقد استحقاق L2 ، يمكنك تحديد مستفيد من L2. تأكد من أن هذا المستفيد عبارة عن محفظة يمكنها إجراء المعاملات على Arbitrum One ، يجب أن تكون EOA أو multisig تم نشرها على Arbitrum One. -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +إذا كان عقدك مخولًا بالكامل ، فلن تقوم بإعداد عقد استحقاق على L2 ؛ بدلاً من ذلك ، ستقوم بتعيين عنوان محفظة L2 وستكون هذه هي المحفظة المستلمة لحصتك أو تفويضك في Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### عقدي مخول بالكامل. 
هل يمكنني نقل حصتي أو تفويضي إلى عنوان آخر ليس عقداً استحقاقيا على L2؟ -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +نعم. إذا لم تقم بتحويل أي رصيد من عقود الاستحقاق إلى L2 ، وكان عقد الاستحقاق الخاص بك مخولًا بالكامل ، فلا يجب عليك تحويل عقد الاستحقاق الخاص بك إلى L2. بدلاً من ذلك ، يمكنك استخدام أدوات التحويل لتعيين عنوان محفظة L2 ، وتحويل حصتك أو تفويضك مباشرةً إلى هذه المحفظة العادية على L2. -This allows you to transfer your stake or delegation to any L2 address. +هذا يسمح لك بتحويل حصتك أو تفويضك إلى أي عنوان L2. -## عقد الاستحقاق الخاص بي لا يزال مستحقًا. كيف أقوم بتحويل رصيد عقد الاستحقاق الخاص بي إلى L2؟ +### عقد الاستحقاق الخاص بي لا يزال مستحقًا. كيف أقوم بتحويل رصيد عقد الاستحقاق الخاص بي إلى L2؟ تنطبق هذه الخطوات فقط إذا كان عقدك لا يزال مستحقًا ، أو إذا كنت قد استخدمت هذه العملية من قبل عندما كان عقدك لا يزال مستحقًا. @@ -296,20 +374,38 @@ This allows you to transfer your stake or delegation to any L2 address. \\ \* إذا لزم الأمر -أنت تستخدم عنوان عقد. -\\ _ \\ _ \\ _ \\ _ يجب تأكيد معاملتك لإتمام تحويل الرصيد على Arbitrum. يجب إكمال هذه الخطوة في غضون 7 أيام وإلا فقد يتم فقد الرصيد. في معظم الحالات ، سيتم تشغيل هذه الخطوة تلقائيًا ، ولكن قد تكون هناك حاجة إلى تأكيد يدوي إذا كان هناك ارتفاع في أسعار الغاز على Arbitrum. إذا كانت هناك أية مشكلة أثناء هذه العملية ، فستكون هناك موارد للمساعدة: اتصل بالدعم على support@thegraph.com أو على [Discord] \(https://discord.gg/graphprotocol). +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. 
​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. -## هل يمكنني إرجاع عقد الاستحقاق إلى L1؟ +### هل يمكنني إرجاع عقد الاستحقاق إلى L1؟ ليست هناك حاجة للقيام بذلك لأن عقد الاستحقاق الخاص بك لا يزال في L1. عندما تستخدم أدوات التحويل ، فأنت تقوم فقط بإنشاء عقد جديد في L2 مرتبط بعقد الاستحقاق L1 الخاص بك ، ويمكنك إرسال GRT ذهابًا وإيابًا بينهما. -## لماذا أحتاج إلى تغيير عقد الاستحقاق الخاص بي من البداية؟ +### لماذا أحتاج إلى تغيير عقد الاستحقاق الخاص بي من البداية؟ -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +يجب عليك إعداد عقد استحقاق L2 حتى يتمكن هذا الحساب من امتلاك حصتك أو تفويضك في L2. وإلا ، لن يكون هناك وسيلة لك لنقل الحصة / التفويض إلى L2 دون "الهروب" من عقد الاستحقاق. -## ماذا يحدث إذا حاولت سحب عقدي عندما لم يتم تنفيذه بالكامل؟هل هذا ممكن؟ +### ماذا يحدث إذا حاولت سحب عقدي عندما لم يتم تنفيذه بالكامل؟هل هذا ممكن؟ هذا ليس احتمال. يمكنك إعادة الأموال إلى L1 وسحبها هناك. -## ماذا لو لم أرغب في نقل عقد الاستحقاق الخاص بي إلى L2؟ +### ماذا لو لم أرغب في نقل عقد الاستحقاق الخاص بي إلى L2؟ -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +يمكنك الاستمرار في التخزين / التفويض على L1. بمرور الوقت ، قد ترغب في التفكير في الانتقال إلى L2 لتمكين المكافآت هناك حيث يتوسع البروتوكول في Arbitrum. لاحظ أن أدوات التحويل هذه مخصصة لمنح العقود المسموح لها بالمشاركة والتفويض في البروتوكول. إذا كان عقدك لا يسمح بالتخزين أو التفويض ، أو كان قابلاً للإلغاء ، فلا توجد أداة نقل متاحة. ستظل قادرًا على سحب GRT من L1 عندما يكون ذلك متاحًا. diff --git a/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx index 6ac37ef04e01..6b4031767fb4 100644 --- a/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ar/arbitrum/l2-transfer-tools-guide.mdx @@ -2,23 +2,23 @@ title: دليل أدوات نقل(Transfer Tools) L2 --- -> لم يتم إصدار أدوات نقل الطبقة الثانية L2 حتى الآن. من المتوقع أن تكون متاحة في صيف عام 2023. - جعل The Graph من السهل الانتقال إلى L2 على Arbitrum One. لكل مشارك في البروتوكول ، توجد مجموعة من أدوات نقل L2 لجعل النقل إلى L2 سلسًا لجميع المشاركين في الشبكة. ستطلب منك هذه الأدوات اتباع مجموعة محددة من الخطوات بناءً على ما تقوم بنقله. بعض الأسئلة المتكررة حول هذه الأدوات تمت الإجابة عليها في [الأسئلة الشائعة حول أدوات نقل الطبقة الثانية] \(/arbitrum/l2-transfer-tools-faq). تحتوي الأسئلة الشائعة على تفسيرات متعمقة لكيفية استخدام الأدوات وكيفية عملها والأمور التي يجب وضعها في الاعتبار عند إستخدامها. 
## كيف تنقل الغراف الفرعي الخاص بك إلى شبكة آربترم (الطبقة الثانية) -## فوائد نقل الرسوم البيانية الفرعية الخاصة بك + + +## فوائد نقل الغراف الفرعي الخاصة بك -مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. ترث آربترم، سلسلة كتل من الطبقة الثانية أو "L2"، الأمان من سلسلة إيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ جذري. +مجتمع الغراف والمطورون الأساسيون كانوا [يستعدون] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) للإنتقال إلى آربترم على مدى العام الماضي. وتعتبر آربترم سلسلة كتل من الطبقة الثانية أو "L2"، حيث ترث الأمان من سلسلة الإيثيريوم ولكنها توفر رسوم غازٍ أقل بشكلٍ كبير. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +عندما تقوم بنشر أو ترقية الغرافات الفرعية الخاصة بك إلى شبكة الغراف، فأنت تتفاعل مع عقودٍ ذكيةٍ في البروتوكول وهذا يتطلب دفع رسوم الغاز باستخدام عملة الايثيريوم. من خلال نقل غرافاتك الفرعية إلى آربترم، فإن أي ترقيات مستقبلية لغرافك الفرعي ستتطلب رسوم غازٍ أقل بكثير. الرسوم الأقل، وكذلك حقيقة أن منحنيات الترابط التنسيقي على الطبقة الثانية مستقيمة، تجعل من الأسهل على المنسِّقين الآخرين تنسيق غرافك الفرعي، ممّا يزيد من مكافآت المفهرِسين على غرافك الفرعي. هذه البيئة ذات التكلفة-الأقل كذلك تجعل من الأرخص على المفهرسين أن يقوموا بفهرسة وخدمة غرافك الفرعي. سوف تزداد مكافآت الفهرسة على آربترم وتتناقص على شبكة إيثيريوم الرئيسية على مدى الأشهر المقبلة، لذلك سيقوم المزيد والمزيد من المُفَهرِسين بنقل ودائعهم المربوطة وتثبيت عملياتهم على الطبقة الثانية. ## فهم ما يحدث مع الإشارة وغرافك الفرعي على الطبقة الأولى وعناوين مواقع الإستعلام -لنقل الرسم البياني الفرعي إلى Arbitrum يتم استخدام جسر Arbitrum GRT ، والذي يستخدم بدوره جسر Arbitrum الأصلي ، لإرسال الرسم البياني الفرعي إلى L2. سيؤدي "النقل" إلى إهمال الرسم البياني الفرعي على الشبكة الرئيسية وإرسال المعلومات لإعادة إنش اء الرسم البياني الفرعي على L2 باستخدام الجسر. وسيشمل أيضًا GRT hالذي تم استخدامه للإشارة ، والذي يجب أن يكون أكثر من صفر حتى يقبل الجسر النقل. +عند نقل سبجراف إلى Arbitrum، يتم استخدام جسر Arbitrum GRT، الذي بدوره يستخدم جسر Arbitrum الأصلي، لإرسال السبجراف إلى L2. سيؤدي عملية "النقل" إلى إهمال السبجراف على شبكة الإيثيريوم الرئيسية وإرسال المعلومات لإعادة إنشاء السبجراف على L2 باستخدام الجسر. ستتضمن أيضًا رصيد GRT المرهون المرتبط بمالك السبجراف، والذي يجب أن يكون أكبر من الصفر حتى يقبل الجسر النقل. عندما تختار نقل الرسم البياني الفرعي ، سيؤدي ذلك إلى تحويل جميع إشارات التنسيق الخاصة بالرسم الفرعي إلى GRT. هذا يعادل "إهمال" الرسم البياني الفرعي على الشبكة الرئيسية. سيتم إرسال GRT المستخدمة لعملية التنسيق الخاصة بك إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي ، حيث سيتم استخدامها لإنتاج الإشارة نيابة عنك. 
@@ -30,7 +30,7 @@ When you publish or upgrade your subgraph to The Graph Network, you're interacti ## اختيار محفظة L2 الخاصة بك -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +عندما قمت بنشر subgraph الخاص بك على الشبكة الرئيسية ، فقد استخدمت محفظة متصلة لإنشاء subgraph ، وتمتلك هذه المحفظة NFT الذي يمثل هذا subgraph ويسمح لك بنشر التحديثات. عند نقل الرسم البياني الفرعي إلى Arbitrum ، يمكنك اختيار محفظة مختلفة والتي ستمتلك هذا الـ subgraph NFT على L2. @@ -82,7 +82,7 @@ When you published your subgraph on mainnet, you used a connected wallet to crea في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز L2 المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى الرسم البياني الفرعي في عقود Arbitrum. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على Arbitrum إلى فشل هذا التنفيذ التلقائي. في هذه الحالة ، ستكون "التذكرة" التي ترسل مخططك الفرعي إلى L2 معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. -في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة L2 تحتوي بعضاً من ETH على Arbitrum ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Confirm Transfer" لإعادة محاولة المعاملة. +في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة على L2 تحتوي بعضاً من ETH على Arbitrum ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Confirm Transfer" لإعادة محاولة المعاملة. ![تأكيد النقل إلى L2](/img/confirmTransferToL2.png) @@ -110,17 +110,17 @@ When you published your subgraph on mainnet, you used a connected wallet to crea عندما ينقل مالك الرسم البياني الفرعي رسمًا فرعيًا إلى Arbitrum ، يتم تحويل كل إشارات الرسم البياني الفرعي لـ GRT في نفس الوقت. ينطبق هذا على الإشارة "التي تم ترحيلها تلقائيًا" ، وهي الإشارة التي لا ترتبط بنسخة محددة أو نشر محدد للرسم البياني الفرعي ولكنها تتبع أحدث إصدار من الرسم البياني الفرعي. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +هذا التحويل من الإشارة لـ GRT هو نفس ما سيحدث إذا قام مالك الرسم البياني الفرعي بإيقاف الرسم البياني الفرعي في L1. عندما يتم إيقاف الرسم البياني الفرعي أو نقله ، يتم "حرق" جميع إشارات التنسيق في وقت واحد (باستخدام منحنى ربط التنسيق) ويتم الاحتفاظ بـ GRT الناتج بواسطة عقد GNS الذكي (هذا هو العقد الذي يعالج ترقيات الرسم البياني الفرعي ويعالج الإشارة التي تم ترحيلها تلقائيًا). وبالتالي ، فإن كل منسق(Curator) على هذا الرسم البياني الفرعي لديه الحق في المطالبة بـ GRT تتناسب مع كمية الأسهم التي يمتلكها في الرسم البياني الفرعي. جزء من تلك الـ GRT يتم إرسالها لمالك الرسم البياني الفرعي إلى L2 جمباً إلى جمب مع الرسم البياني الفرعي. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. 
+في هذه المرحلة ، لن يتراكم أي رسوم استعلام أخرى على الـ GRT المنسقة ، لذلك يمكن للمنسقين اختيار سحب GRT أو نقلها إلى نفس الرسم البياني الفرعي على L2 ، حيث يمكن استخدامها لاصدار إشارة تنسيق جديدة. لا داعي للعجلة للقيام بذلك حيث يمكن أن يتم الاحتفاظ بـ GRT إلى أجل غير مسمى ويحصل الجميع على كمية تتناسب مع أسهمهم ، بغض النظر عن وقت قيامهم بذلك. ## اختيار محفظة L2 الخاصة بك -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +إذا قررت نقل GRT التي تم تنسيقها إلى L2 ، فيمكنك اختيار محفظة مختلفة والتي ستمتلك إشارة التنسيق على L2. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +إذا كنت تستخدم محفظة "عادية" مثل Metamask (حساب مملوك خارجيًا EOA ، محفظة ليست بعقد ذكي) ، فهذا اختياري ويوصى بالاحتفاظ بعنوان المنسق(Curator) نفسه كما في L1. إذا كنت تستخدم محفظة بعقد ذكي ، مثل multisig (على سبيل المثال Safe) ، فإن اختيار عنوان مختلف لمحفظة L2 أمر إلزامي ، حيث من المرجح أن هذا الحساب موجود فقط على mainnet ولن تكون قادرًا على إجراء المعاملات على Arbitrum باستخدام هذه المحفظة. إذا كنت ترغب في الاستمرار في استخدام محفظة عقد ذكية أو multisig ، فقم بإنشاء محفظة جديدة على Arbitrum واستخدم عنوانها كعنوان محفظة استلام على L2. @@ -128,11 +128,11 @@ If you're using a "regular" wallet like Metamask (an Externally Owned Account or ## إرسال التنسيق إلى L2: الخطوة 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +قبل البدء في عملية النقل ، يجب أن تقرر العنوان الذي سيمتلك التنسيق على L2 (راجع "اختيار محفظة L2 الخاصة بك" أعلاه) ، ويوصى بامتلاك بعضاً من ETH لرسوم الغاز الذي تم توصيله بالفعل على Arbitrum في حالة الحاجة إلى إعادة تنفيذ الرسالة على L2. يمكنك شراء ETH من بعض المنصات وسحبها مباشرة إلى Arbitrum ، أو يمكنك استخدام جسر Arbitrum لإرسال ETH من محفظة mainnet إلى L2: [bridge.arbitrum.io] \(http://bridge.arbitrum.io) - نظرًا لأن رسوم الغاز على Arbitrum منخفضة جدًا ، فإنك ستحتاج فقط إلى مبلغ صغير ، على سبيل المثال من المحتمل أن يكون 0.01 ETH أكثر من كافٍ. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +إذا تم نقل الرسم البياني الفرعي الذي قمت بتنسيقه إلى L2 ، فسترى رسالة على Explorer تخبرك بأنك تقوم بالتنسيق على رسم بياني فرعي تم نقله. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +عند الاطلاع على صفحة الرسم البياني الفرعي ، يمكنك اختيار سحب أو نقل التنسيق. سيؤدي النقر فوق "Transfer Signal to Arbitrum" إلى فتح أداة النقل. ![نقل الإشارة](/img/transferSignalL2TransferTools.png) @@ -140,7 +140,7 @@ When looking at the subgraph page, you can choose to withdraw or transfer the cu إذا قمت بتنفيذ هذه الخطوة ، ** تأكد من المتابعة حتى إكمال الخطوة 3 في أقل من 7 أيام ، وإلا فسيتم فقد إشارة GRT. 
** هذا بسبب كيفية عمل رسائل L1-L2 على Arbitrum: الرسائل التي يتم إرسالها عبر الجسر هي "retry-able tickets" يجب تنفيذها في غضون 7 أيام ، وقد يحتاج التنفيذ الأولي إلى إعادة المحاولة إذا كانت هناك ارتفاع في سعر الغاز على Arbitrum. -## Sending curation to L2: step 2 +## إرسال التنسيق إلى L2: الخطوة 2 البدء في عملية النقل: @@ -150,16 +150,16 @@ When looking at the subgraph page, you can choose to withdraw or transfer the cu بمجرد انتهاء وقت الانتظار ، ستحاول Arbitrum تنفيذ النقل تلقائيًا على عقود L2. -![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) +![إرسال إشارة التنسيق إلى L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## إرسال التنسيق إلى L2: الخطوة 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +في معظم الحالات ، سيتم تنفيذ هذه الخطوة تلقائيًا لأن غاز L2 المضمن في الخطوة 1 يجب أن يكون كافيًا لتنفيذ المعاملة التي تتلقى التنسيق في عقود Arbitrum. ومع ذلك ، في بعض الحالات ، من الممكن أن يؤدي ارتفاع أسعار الغاز على Arbitrum إلى فشل هذا التنفيذ التلقائي. في هذه الحالة ، ستكون "التذكرة" التي ترسل تنسيقك إلى L2 معلقة وتتطلب إعادة المحاولة في غضون 7 أيام. في هذا الحالة ، فستحتاج إلى الاتصال باستخدام محفظة على L2 تحتوي بعضاً من ETH على Arbitrum ، وتبديل شبكة محفظتك إلى Arbitrum ، والنقر فوق "Confirm Transfer" لإعادة محاولة المعاملة. ![أرسل إشارة إلى L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## سحب التنسيق الخاص بك على L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +إذا كنت تفضل عدم إرسال GRT إلى L2 ، أو كنت تفضل إنشاء جسر GRT يدويًا ، فيمكنك سحب GRT الذي استخدمته في التنسيق على L1. على اللافتة في صفحة الرسم البياني الفرعي ، اختر "Withdraw Signal" وقم بتأكيد المعاملة ؛ سيتم إرسال GRT إلى عنوان المنسق الخاص بك. diff --git a/website/pages/ar/billing.mdx b/website/pages/ar/billing.mdx index 8c99b8b5126d..763ebdbdaf2a 100644 --- a/website/pages/ar/billing.mdx +++ b/website/pages/ar/billing.mdx @@ -37,8 +37,12 @@ title: الفوترة ### إضافة GRT باستخدام محفظة تشفير + + > تمت كتابة هذا القسم بافتراض أن لديك بالفعل GRT في محفظتك المشفرة ، وأنت على شبكة Ethereum mainnet. إذا لم يكن لديك GRT ، فيمكنك التعرف على كيفية الحصول على GRT [ هنا ](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. انتقل إلى [ صفحة فوترة Subgraph Studio ](https://thegraph.com/studio/billing/). 2. انقر على زر "توصيل المحفظة" في الزاوية اليمنى العليا من الصفحة. ستتم إعادة توجيهك إلى صفحة اختيار المحفظة. حدد محفظتك وانقر على "توصيل". @@ -71,6 +75,8 @@ title: الفوترة ### إضافة GRT باستخدام محفظة متعددة التوقيع (multisig wallet) + + 1. انتقل إلى [ صفحة فوترة Subgraph Studio](https://thegraph.com/studio/billing/). 2. انقر على زر "توصيل المحفظة " في الزاوية اليمنى العليا من الصفحة. حدد محفظتك وانقر على "توصيل". إذا كنت تستخدم [ Gnosis-Safe ](https://gnosis-safe.io/) ، فستتمكن من توصيل multisig بالإضافة إلى محفظة التوقيع الخاصة بك. ثم قم بتوقيع الرسالة المرتبطة. 
هذا لن يكلف أي غاز. @@ -101,7 +107,7 @@ This section will show you how to get GRT to pay for query fees. ### Coinbase -سيكون هذا دليلًا تفصيليًا لشراء GRT على Coinbase. +This will be a step by step guide for purchasing GRT on Coinbase. 1. انتقل إلى [ Coinbase ](https://www.coinbase.com/) وأنشئ حسابًا. 2. بمجرد إنشاء حساب ، ستحتاج إلى التحقق من هويتك من خلال عملية تعرف على العميل المعروفة باسم KYC. هذه إجرائات روتينية لجميع منصات تداول العملات المشفرة المركزية أو المحافظ الخاصة. @@ -117,11 +123,11 @@ This section will show you how to get GRT to pay for query fees. - أدخل مبلغ GRT الذي تريد إرساله وعنوان المحفظة الذي تريد الإرسال إليه. - انقر على "متابعة" وقم بتأكيد معاملتك. -يرجى ملاحظة أنه بالنسبة لمبالغ الشراء الكبيرة ، قد يطلب منك Coinbase الانتظار من 7 إلى 10 أيام قبل تحويل المبلغ بالكامل إلى محفظة تشفير. -يمكنك معرفة المزيد حول الحصول على GRT على Coinbase [ هنا ](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i- buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -سيكون هذا دليلًا تفصيليًا لشراء GRT على Binance. +This will be a step by step guide for purchasing GRT on Binance. 1. انتقل إلى [ Binance ](https://www.binance.com/en) وأنشئ حسابًا. 2. بمجرد إنشاء حساب ، ستحتاج إلى التحقق من هويتك من خلال عملية تعرف باسم KYC (أو اعرف عميلك). هذا إجراء روتيني لجميع المنصات المركزية أو المحافظ الخاصه. @@ -137,11 +143,11 @@ This section will show you how to get GRT to pay for query fees. - أدخل كمية GRT الذي تريد إرساله وعنوان المحفظة الموجودة في القائمة البيضاء الذي تريد إرساله إليه. - انقر على "متابعة" وقم بتأكيد معاملتك. -يمكنك معرفة المزيد حول الحصول على GRT على Binance [ هنا ](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -هذه هي الطريقة التي يمكنك بها شراء GRT على Uniswap. +This is how you can purchase GRT on Uniswap. 1. انتقل إلى [ Uniswap ](https://app.uniswap.org/#/swap) وقم بتوصيل محفظتك. 2. حدد التوكن الذي ترغب في استبداله. حدد ETH. @@ -151,8 +157,52 @@ This section will show you how to get GRT to pay for query fees. 5. انقر على زر "مبادلة". 6. قم بتأكيد المعاملة في محفظتك وانتظر حتى تتم المعالجة. -يمكنك التعرف على المزيد حول الحصول على GRT على Uniswap [ هنا ](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. انتقل إلى [ Coinbase ](https://www.coinbase.com/) وأنشئ حسابًا. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. 
Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - انقر على "متابعة" وقم بتأكيد معاملتك. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. انتقل إلى [ Binance ](https://www.binance.com/en) وأنشئ حسابًا. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - انقر على "متابعة" وقم بتأكيد معاملتك. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## جسر Arbitrum -تم تصميم عقد الفوترة فقط لربط GRT من شبكة Ethereum mainnet إلى شبكة Arbitrum. إذا كنت ترغب في نقل GRT من Arbitrum مرة أخرى إلى Ethereum mainnet ، فستحتاج إلى استخدام [ Arbitrum Bridge ](https://bridge.arbitrum.io/؟l2ChainId=42161). +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/ar/chain-integration-overview.mdx b/website/pages/ar/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/ar/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). 
+- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. 
+ +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ar/cookbook/arweave.mdx b/website/pages/ar/cookbook/arweave.mdx index 696f94fbdf23..24eb6fe6bdda 100644 --- a/website/pages/ar/cookbook/arweave.mdx +++ b/website/pages/ar/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -46,7 +46,7 @@ The requirements for Arweave subgraphs are covered by the [existing documentatio This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. -أثناء تطوير الـ subgraph ، هناك أمران رئيسيان: +During subgraph development there are two key commands: ``` $ graph codegen # generates types from the schema file identified in the manifest @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ar/cookbook/grafting.mdx b/website/pages/ar/cookbook/grafting.mdx index b2b81368f2a1..3ceb3758235c 100644 --- a/website/pages/ar/cookbook/grafting.mdx +++ b/website/pages/ar/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/ar/cookbook/near.mdx b/website/pages/ar/cookbook/near.mdx index cb79d02771e8..40e13f93cbd1 100644 --- a/website/pages/ar/cookbook/near.mdx +++ b/website/pages/ar/cookbook/near.mdx @@ -41,7 +41,7 @@ title: بناء Subgraphs على NEAR **AssemblyScript Mappings:** [AssemblyScript code](/developing/assemblyscript-api) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. -أثناء تطوير الـ subgraph ، هناك أمران رئيسيان: +During subgraph development there are two key commands: ```bash $ graph codegen # generates types from the schema file identified in the manifest @@ -277,7 +277,7 @@ accounts: ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). 
Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## المراجع diff --git a/website/pages/ar/cookbook/upgrading-a-subgraph.mdx b/website/pages/ar/cookbook/upgrading-a-subgraph.mdx index 208d84e7a49e..e093ce7b1c5f 100644 --- a/website/pages/ar/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ar/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### المتطلبات الأساسية - You have already deployed a subgraph on the hosted service. -- القراف الفرعي يقوم بفهرسة سلسلة متوفرة (أو متوفرة في النسخة التجريبية) على شبكة القراف. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. @@ -19,7 +19,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t > You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). -1. احصل على أحدث إصدار من graph-cli المثبت: +1. Get the latest version of the graph-cli installed: ```sh npm install -g @graphprotocol/graph-cli @@ -153,8 +153,8 @@ If you would like to update an existing subgraph on the network, you can do this graph deploy --studio ``` -3. اختبر الإصدار الجديد في Subgraph Studio من خلال الاستعلام في الـ playground -4. انشر الإصدار الجديد على شبكة The Graph. تذكر أن هذا يتطلب غاز (كما هو موضح في القسم أعلاه). +3. Test the new version in the Subgraph Studio by querying in the playground +4. Publish the new version on The Graph Network. Remember that this requires gas (as described in the section above). ### Owner Update Fee: Deep Dive @@ -164,23 +164,23 @@ An update requires GRT to be migrated from the old version of the subgraph to th The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. -كمثال على ذلك ، هذه هي الحالة فقط إذا كان الـ subgraph الخاص بك يتم تنسيقه بشكل نشط: +Let's make an example, this is only the case if your subgraph is being actively curated on: -- تتم الإشارة بـ 100،000 GRT باستخدام الترحيل التلقائي في v1 لـ subgraph +- 100,000 GRT is signaled using auto-migrate on v1 of a subgraph - Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned - The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. _While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ -### الحفاظ على إصدار مستقر من Subgraph +### Maintaining a Stable Version of a Subgraph If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. 
Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. -### تحديث البيانات الوصفية (Metadata) لـ Subgraph +### Updating the Metadata of a Subgraph -يمكنك تحديث البيانات الوصفية لـ subgraphs الخاص بك دون الحاجة إلى نشر إصدار جديد. تتضمن البيانات الوصفية اسم الـ subgraph والصورة والوصف و URL لموقع الويب و URL كود المصدر والفئات. يمكن للمطورين القيام بذلك عن طريق تحديث تفاصيل الـ subgraph الخاصة بهم في Subgraph Studio حيث يمكنك تعديل جميع الحقول الملائمة. +You can update the metadata of your subgraphs without having to publish a new version. The metadata includes the subgraph name, image, description, website URL, source code URL, and categories. Developers can do this by updating their subgraph details in the Subgraph Studio where you can edit all applicable fields. Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. diff --git a/website/pages/ar/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ar/deploying/deploying-a-subgraph-to-studio.mdx index c82b2baa4813..d0b78a80f940 100644 --- a/website/pages/ar/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ar/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: نشر Subgraph في Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/ar/deploying/hosted-service.mdx b/website/pages/ar/deploying/hosted-service.mdx index 5870269c55d1..2dc9334a219c 100644 --- a/website/pages/ar/deploying/hosted-service.mdx +++ b/website/pages/ar/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. 
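To make the Gravatar example above concrete, here is a minimal sketch of what the handler for `NewGravatar` events described in that paragraph could look like. The import paths, the generated `NewGravatar` event class, and the `Gravatar` field names are assumptions based on the description and on typical `graph codegen` output, not code taken from this diff.

```typescript
// Hypothetical generated types: the event class would come from the Gravity ABI,
// the entity class from schema.graphql (paths are illustrative).
import { NewGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleNewGravatar(event: NewGravatar): void {
  // Use the emitted gravatar id as the entity ID
  let gravatar = new Gravatar(event.params.id.toHex())
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  // Write the entity to the Graph Node store
  gravatar.save()
}
```

A matching `handleUpdatedGravatar` would load the existing entity by the same ID, overwrite the changed fields, and save it again.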
+### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the contract address in the subgraph.yaml file to the address of the Proxy contract. You can use the command below; the angle-bracket placeholders stand for your own values. + +```sh +graph init \ + --product hosted-service \ + --from-contract <implementation-contract-address> \ + <github-username>/<subgraph-name> [<directory>] +``` + +## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ar/deploying/subgraph-studio.mdx b/website/pages/ar/deploying/subgraph-studio.mdx index 8bc6e4c311d3..020b33dfeb62 100644 --- a/website/pages/ar/deploying/subgraph-studio.mdx +++ b/website/pages/ar/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. سجّل الدخول باستخدام محفظتك - يمكنك القيام بذلك عبر MetaMask أو WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## كيف تنشئ الـ Subgraph الخاص بك في Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -هذا أفضل جزء! عندما تنشئ subgraph لأول مرة ، سيتم توجيهك لملء: - -- اسم الـ Subgraph الخاص بك -- صورة -- الوصف -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- الموقع إلكتروني + ## توافق الـ Subgraph مع شبكة The Graph diff --git a/website/pages/ar/developing/creating-a-subgraph.mdx b/website/pages/ar/developing/creating-a-subgraph.mdx index 6c14c2c2c543..464e74166db3 100644 --- a/website/pages/ar/developing/creating-a-subgraph.mdx +++ b/website/pages/ar/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: الإدخالات الهامة لتحديث manifest هي: -- `description`: وصف يمكن قراءته لماهية الـ subgraph. يتم عرض هذا الوصف بواسطة Graph Explorer عند نشر الـ subgraph على الـ Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. - `features`: a list of all used [feature](#experimental-features) names. - `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. + +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store.
The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: ملف ABI واحد أو أكثر لعقد المصدر بالإضافة إلى العقود الذكية الأخرى والتي تتفاعل معها من داخل الـ mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### الفلاتر المدعومة +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### دالة الـ Mapping The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting على Subgraphs موجودة +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. 
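Since the hunk above only shows the manifest changes and the chain-side handler, here is a hedged sketch of what the `handleMetadata` file data source handler itself might look like, assuming a `TokenMetadata` entity with `description` and `image` string fields (illustrative names, not taken from this diff):

```typescript
import { Bytes, dataSource, json } from '@graphprotocol/graph-ts'
import { TokenMetadata } from '../generated/schema'

export function handleMetadata(content: Bytes): void {
  // The entity ID is the CID (or Arweave transaction ID) the data source was created with
  let tokenMetadata = new TokenMetadata(dataSource.stringParam())
  const value = json.fromBytes(content).toObject()
  if (value) {
    const description = value.get('description')
    const image = value.get('image')
    if (description && image) {
      tokenMetadata.description = description.toString()
      tokenMetadata.image = image.toString()
    }
  }
  tokenMetadata.save()
}
```

The handler only ever sees the file contents, so any link back to the parent `Token` entity has to go through the shared ID, as the surrounding text explains.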
diff --git a/website/pages/ar/developing/developer-faqs.mdx b/website/pages/ar/developing/developer-faqs.mdx index da3006ee099a..94efea905584 100644 --- a/website/pages/ar/developing/developer-faqs.mdx +++ b/website/pages/ar/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/ar/developing/graph-ts/api.mdx b/website/pages/ar/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..e7893f15fbb3 --- /dev/null +++ b/website/pages/ar/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +هذه الصفحة توثق APIs المضمنة التي يمكن استخدامها عند كتابة subgraph mappings. يتوفر نوعان من APIs خارج الصندوق: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. 
+ +## مرجع API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- الأوامر الأساسية منخفضة المستوى للترجمة بين أنظمة الأنواع المختلفة مثل Ethereum و JSON و GraphQL و AssemblyScript. + +### إصدارات + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| الاصدار | ملاحظات الإصدار | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### الأنواع المضمنة (Built-in) + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +'import { ByteArray } from '@graphprotocol/graph-ts +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +'import { BigDecimal } from '@graphprotocol/graph-ts +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +'import { BigInt } from '@graphprotocol/graph-ts +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. 
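Before the full `BigInt` method list below, here is a small, hedged sketch of how `BigInt` and `BigDecimal` are commonly combined in mappings, for example to turn a raw `uint256` token amount into a human-readable decimal. The helper name and the 18-decimals figure are illustrative only.

```typescript
import { BigDecimal, BigInt } from '@graphprotocol/graph-ts'

// Convert a raw on-chain amount into a decimal value, where `decimals` is the
// token's decimal count (18 is just a common example, not a given).
function toDecimal(rawAmount: BigInt, decimals: u8): BigDecimal {
  let scale = BigInt.fromI32(10).pow(decimals).toBigDecimal()
  return rawAmount.toBigDecimal().div(scale)
}

// Inside a handler this might be used as:
// let humanAmount = toDecimal(event.params.value, 18)
```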
+ +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. + +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +'import { TypedMap } from '@graphprotocol/graph-ts +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +'import { Bytes } from '@graphprotocol/graph-ts +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
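Ahead of the full `Bytes` method list that continues below, here is a short, hedged sketch of a typical `ByteArray` round trip: hashing some bytes and turning the result into a hex string, for example to use as an entity ID. The hex input is purely illustrative.

```typescript
import { ByteArray, crypto } from '@graphprotocol/graph-ts'

export function exampleId(): string {
  let data = ByteArray.fromHexString('0x0102030405') // illustrative bytes
  let hash = crypto.keccak256(data) // returns a ByteArray
  return hash.toHexString() // '0x...' hex string, usable e.g. as an entity ID
}
```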
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and thus supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - returns a new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - returns a new `Bytes` consisting of `this` directly followed by the byte representation of `other` + +#### العنوان + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following methods on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – creates an `Address` from `b`, which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### مخزن API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows entities to be loaded from, saved to, and removed from the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### إنشاء الكيانات + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities.
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### تحميل الكيانات من المخزن + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created within a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a `Transaction` entity from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entities associated with a given `Holder` entity: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### تحديث الكيانات الموجودة + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straightforward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ...
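+
+// A hedged addition to the sketch above: whichever of the two approaches is used,
+// calling save() persists the entity; if one with the same ID already exists,
+// the fields set here are merged into it
+transfer.save()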
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### إزالة الكيانات من المخزن + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### API إيثيريوم + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding of Ethereum data. + +#### دعم أنواع الإيثيريوم + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### الأحداث وبيانات الكتلة/ الإجراء + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of.
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### الوصول إلى حالة العقد الذكي Smart Contract + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### معالجة الاستدعاءات المعادة + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
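+
+The `try_` pattern composes naturally with entity updates. The sketch below reuses the generated `ERC20Contract` binding and the `Transfer` event from the example above, and assumes the ABI exposes `symbol()` (so that codegen also produces a `try_symbol()` variant); the `Token` entity with a string `id` and an optional `symbol` field is a hypothetical schema addition used only for illustration:
+
+```typescript
+import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
+import { Token } from '../generated/schema' // assumes a Token entity exists in the schema
+
+export function handleTransfer(event: TransferEvent): void {
+  // Bind the contract that emitted the event
+  let contract = ERC20Contract.bind(event.address)
+
+  // Key the assumed Token entity by the contract address
+  let token = new Token(event.address.toHexString())
+
+  // try_symbol() returns a result wrapper instead of aborting the handler on revert
+  let symbolResult = contract.try_symbol()
+  if (symbolResult.reverted) {
+    token.symbol = 'unknown'
+  } else {
+    token.symbol = symbolResult.value
+  }
+
+  token.save()
+}
+```
+
+Written this way, a reverted call results in a stored fallback value rather than a failed subgraph.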
+ +#### تشفير/فك تشفير ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array<ethereum.Value> = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- A more [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message. +- `log.info(fmt: string, args: Array<string>): void` - logs an informational message. +- `log.warning(fmt: string, args: Array<string>): void` - logs a warning. +- `log.error(fmt: string, args: Array<string>): void` - logs an error message. +- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### تسجيل قيمة واحدة أو أكثر + +##### تسجيل قيمة واحدة + +In the example below, the string value "A" is passed into an array to become `['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### تسجيل إدخال واحد من مصفوفة موجودة + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +##### تسجيل إدخالات متعددة من مصفوفة موجودة + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### تسجيل إدخال محدد من مصفوفة موجودة + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### تسجيل معلومات الحدث + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id.toString()) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`; it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`; it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above) + +### مرجع تحويلات الأنواع + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String
(hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### البيانات الوصفية لمصدر البيانات + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### الكيان و DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ar/developing/graph-ts/common-issues.mdx b/website/pages/ar/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..b8af1bf4d691 --- /dev/null +++ b/website/pages/ar/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: مشاكل شائعة في أسمبلي سكريبت (AssemblyScript) +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- متغيرات الفئات الخاصة (Private) في [AssembyScript] \(https://www.assemblyscript.org/status.html#language-features) غير مفروضة بشكل كامل. ليس هناك طريقة لحماية متغيرات الفئات من التعديل المباشر من كائن الفئة. +- لا يتم توريث النطاق في [دوال الإغلاق](https://www.assemblyscript.org/status.html#on-closures)، أي لا يمكن استخدام المتغيرات المعلنة خارج دوال الإغلاق. 
الشرح في [ النقاط الهامة للمطورين #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/ar/developing/supported-networks.mdx b/website/pages/ar/developing/supported-networks.mdx index b8fe4a4e2b58..8df633d1d26c 100644 --- a/website/pages/ar/developing/supported-networks.mdx +++ b/website/pages/ar/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/ar/firehose.mdx b/website/pages/ar/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/ar/firehose.mdx +++ b/website/pages/ar/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. 
Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ar/glossary.mdx b/website/pages/ar/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/ar/glossary.mdx +++ b/website/pages/ar/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. 
-- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). 
This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. + -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. + -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. + - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. + @@ -66,7 +62,7 @@ title: Glossary + - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. + +-- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. + - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. + @@ -80,10 +76,10 @@ title: Glossary + - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. + +-- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network-related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. + - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. 
- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/ar/graphcast.mdx b/website/pages/ar/graphcast.mdx index d1de65171b9a..4965e86446ab 100644 --- a/website/pages/ar/graphcast.mdx +++ b/website/pages/ar/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/ar/index.json b/website/pages/ar/index.json index dcb5b59f7bb9..358d7708f4e8 100644 --- a/website/pages/ar/index.json +++ b/website/pages/ar/index.json @@ -23,8 +23,8 @@ "description": "استخدم Studio لإنشاء subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "ترحيل ال Subgraphs إلى شبكة TheGraph" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "الخدمة المستضافة (Hosted Service)", - "description": "قم بإنشاء واستكشاف ال Subgraphs على ال Hosted service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "الشبكات المدعومة", - "description": "يدعم TheGraph الشبكات التالية على شبكة TheGraph و Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "الخدمة المستضافة", - "betaWarning": "الشبكة في مرحلة beta. استخدم بحذر." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ar/mips-faqs.mdx b/website/pages/ar/mips-faqs.mdx index 97c4cd412c37..dfbc9049c656 100644 --- a/website/pages/ar/mips-faqs.mdx +++ b/website/pages/ar/mips-faqs.mdx @@ -1,9 +1,11 @@ --- -title: الاسئلة الشائعة حول MIPs +title: MIPs FAQs --- ## مقدمة +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! 
During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). @@ -15,18 +17,18 @@ The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to r ### Useful Resources - [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [كيف تصبح مفهرسًا فعالًا على شبكة الغراف] \(https://thegraph.com/blog/how-to-become-indexer/) -- [مركز معرفة المفهرس:ستجد هناك معلومات حول المفهرسين] \(https://thegraph.academy/indexers/) -- [مُحسِّن التخصيص] \(https://github.com/graphprotocol/allocationopt.jl) -- [أدوات تحسين التخصيص] \(https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) +- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) +- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) ### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -نعم ، يمكنك ذلك. +Yes, it is indeed. For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -قام أحد أعضاء المجتمع ،ويدعى [SunTzu] \(https://github.com/suntzu93) ، بإنشاء نص برمجي يقوم بتنفيذ هذه العملية وفقًا لمنهجية مخطط التحكيم. يمكنك الاطلاع عليها من [here](https://github.com/suntzu93/get_valid_poi_subgraph). +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). ### 2. Which chain will the MIPs program incentivise first? diff --git a/website/pages/ar/network/benefits.mdx b/website/pages/ar/network/benefits.mdx index 13a4da8d42e6..a54a14a768c9 100644 --- a/website/pages/ar/network/benefits.mdx +++ b/website/pages/ar/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. 
Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/ar/network/explorer.mdx b/website/pages/ar/network/explorer.mdx index ff709fc8f4b0..30ef685d8b1c 100644 --- a/website/pages/ar/network/explorer.mdx +++ b/website/pages/ar/network/explorer.mdx @@ -74,7 +74,7 @@ If you want to learn more about the Curator role, you can do so by visiting the يلعب المفوضون دورا رئيسيا في الحفاظ على الأمن واللامركزية في شبكة The Graph. يشاركون في الشبكة عن طريق تفويض (أي ، "Staking") توكن GRT إلى مفهرس واحد أو أكثر. بدون المفوضين، من غير المحتمل أن يربح المفهرسون مكافآت ورسوم مجزية. لذلك ، يسعى المفهرسون إلى جذب المفوضين من خلال منحهم جزءا من مكافآت الفهرسة ورسوم الاستعلام التي يكسبونها. -يقوم المفوضون بدورهم باختيار المفهرسين بناء على عدد من المتغيرات المختلفة ، مثل الأداء السابق ، ومعدلات مكافأة الفهرسة ، واقتطاع رسوم الاستعلام query fee cuts. يمكن أن تلعب السمعة داخل المجتمع دورا في هذا! يوصى بالتواصل مع المفهرسين المختارين عبر [ The Graph's Discord ](https://discord.gg/graphprotocol) أو [ منتدى The Graph ](https://forum.thegraph.com/)! +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! ![صورة المستكشف 7](/img/Delegation-Overview.png) diff --git a/website/pages/ar/network/indexing.mdx b/website/pages/ar/network/indexing.mdx index 9f3d7ecd476c..abe53eae2f89 100644 --- a/website/pages/ar/network/indexing.mdx +++ b/website/pages/ar/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Allocations are continuously accruing rewards while they're active and allocated ### ما هي خصومات رسوم الاستعلام ومتى يتم توزيعها؟ -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. 
+Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? 
@@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- ** مُطالب به ** - هي الحالة النهائية للتخصيص ؛ وهي التي سلكت مجراها كمخصصة نشطة ، وتم توزيع جميع المكافآت المؤهلة وتمت المطالبة بخصومات رسوم الاستعلام. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/ar/new-chain-integration.mdx b/website/pages/ar/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/ar/new-chain-integration.mdx +++ b/website/pages/ar/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new with Graph Node must be built. 
Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/ar/operating-graph-node.mdx b/website/pages/ar/operating-graph-node.mdx index ac8e5046131d..646ec2d5dffd 100644 --- a/website/pages/ar/operating-graph-node.mdx +++ b/website/pages/ar/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). 
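As a quick sanity check of the JSON-RPC capabilities discussed above (archive access via EIP-1898 block-hash parameters, and `trace_filter` support for `callHandlers` and call-filtered `blockHandlers`) before pointing Graph Node at a node, the sketch below sends a few raw JSON-RPC calls. It is an illustrative sketch rather than official tooling: the endpoint URL and the zero address are placeholders, `requests` is assumed to be installed, and a node that lacks one of these capabilities will simply return a JSON-RPC error object for that call.

```python
import requests  # assumed: pip install requests

RPC_URL = "http://localhost:8545"  # placeholder: your node's JSON-RPC endpoint


def rpc(method: str, params: list):
    """Send one JSON-RPC call and return the parsed response body."""
    payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
    return requests.post(RPC_URL, json=payload, timeout=10).json()


# Basic liveness check: fetch the chain head.
head = rpc("eth_getBlockByNumber", ["latest", False])["result"]
print("chain head:", int(head["number"], 16))

# EIP-1898: historical state addressed by block *hash* rather than number.
eip1898 = rpc(
    "eth_getBalance",
    ["0x0000000000000000000000000000000000000000", {"blockHash": head["hash"]}],
)
print("EIP-1898 block-hash params supported:", "error" not in eip1898)

# trace_filter: needed for callHandlers and call-filtered blockHandlers.
traces = rpc("trace_filter", [{"fromBlock": head["number"], "toBlock": head["number"]}])
print("trace_filter supported:", "error" not in traces)
```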
### IPFS Nodes diff --git a/website/pages/ar/publishing/publishing-a-subgraph.mdx b/website/pages/ar/publishing/publishing-a-subgraph.mdx index 1fce064a595e..89aec5bee958 100644 --- a/website/pages/ar/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ar/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -للحصول على إرشادات حول كيفية نشر subgraph على الشبكة اللامركزية ، راجع [ هذا الفيديو ](https://youtu.be/HfDgC2oNnwo؟t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ar/querying/querying-the-hosted-service.mdx b/website/pages/ar/querying/querying-the-hosted-service.mdx index 937537ae30b9..5cac9c5c607d 100644 --- a/website/pages/ar/querying/querying-the-hosted-service.mdx +++ b/website/pages/ar/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -مع نشر الـ subgraph ، قم بزيارة [Hosted Service](https://thegraph.com/hosted-service/) لفتح واجهة [GraphiQL](https://github.com/graphql/graphiql) حيث يمكنك استكشاف GraphQL API المنشور لـ subgraph عن طريق إصدار الاستعلامات وعرض المخطط. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ An example is provided below, but please see the [Query API](/querying/graphql-a } ``` -## Using The Hosted Service +## Using the hosted service -يُعد Graph Explorer و GraphQL playground الخاص به طريقة مفيدة لاستكشاف والاستعلام عن الـ subgraphs المنشورة على Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. بعض الميزات الرئيسية مفصلة أدناه: diff --git a/website/pages/ar/querying/querying-with-python.mdx b/website/pages/ar/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/ar/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. 
The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/ar/quick-start.mdx b/website/pages/ar/quick-start.mdx new file mode 100644 index 000000000000..08bde551c494 --- /dev/null +++ b/website/pages/ar/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: بداية سريعة +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network (optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract (optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio <SUBGRAPH_SLUG> +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what data sources your subgraph will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your data sources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio <DEPLOY_KEY> +$ graph deploy --studio <SUBGRAPH_SLUG> +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as: `v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` +
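If you would rather run the health check above from a script than from the GraphiQL Playground, a minimal sketch follows. The status endpoint shown (`https://api.thegraph.com/index-node/graphql`) and the deployment ID are assumptions for illustration; substitute whichever status endpoint backs your playground and your own deployment ID, and `requests` is assumed to be installed.

```python
import requests  # assumed: pip install requests

# Assumed status endpoint; replace with the one backing your GraphiQL Playground.
STATUS_ENDPOINT = "https://api.thegraph.com/index-node/graphql"
DEPLOYMENT_ID = "Qm..."  # your subgraph's deployment ID (see Details in Subgraph Studio)

# A trimmed-down version of the indexingStatuses query shown above.
QUERY = """
{
  indexingStatuses(subgraphs: ["%s"]) {
    synced
    health
    fatalError { message }
    chains { network latestBlock { number } chainHeadBlock { number } }
  }
}
""" % DEPLOYMENT_ID

response = requests.post(STATUS_ENDPOINT, json={"query": QUERY}, timeout=10)
statuses = response.json()["data"]["indexingStatuses"]

for status in statuses:
    print("health:", status["health"], "| synced:", status["synced"])
    if status["fatalError"]:
        print("fatal error:", status["fatalError"]["message"])
```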
+## 7. Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have your API key, you can query your subgraph from your dapp via the free, rate-limited temporary query URL, which can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ar/substreams.mdx b/website/pages/ar/substreams.mdx index 08312a8e84b8..86f612820182 100644 --- a/website/pages/ar/substreams.mdx +++ b/website/pages/ar/substreams.mdx @@ -2,8 +2,43 @@ title: متعدد-السلاسل --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3.
**The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ar/sunrise.mdx b/website/pages/ar/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ar/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. 
+ +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? 
+ +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. 
+ +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ar/tokenomics.mdx b/website/pages/ar/tokenomics.mdx index fd5054c9e9d0..009dccf5a950 100644 --- a/website/pages/ar/tokenomics.mdx +++ b/website/pages/ar/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat إنه مشابه لنموذج B2B2C ، إلا أنه مدعوم بشبكة لا مركزية من المشاركين. يعمل المشاركون في الشبكة معًا لتوفير البيانات للمستخدمين النهائيين مقابل مكافآت GRT. GRT هو أداة العمل الذي ينسق بين موفري البيانات والمستهلكين. تعمل GRT كأداة مساعدة للتنسيق بين موفري البيانات والمستهلكين داخل الشبكة وتحفيز المشاركين في البروتوكول على تنظيم البيانات بشكل فعال. -باستخدام The Graph ، يمكن للمستخدمين الوصول بسهولة إلى بيانات البلوكتشين، والدفع فقط مقابل المعلومات المحددة التي يحتاجون إليها. يتم استخدام The Graph بواسطة العديد من [ التطبيقات الشائعة ](https://thegraph.com/explorer) في نظام web3 البيئي اليوم. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. يقوم الغراف بفهرسة بيانات blockchain بنفس طريقة فهرسة Google للويب. في الواقع ، ربما كنت تستخدم الغراف بالفعل دون أن تدرك ذلك. إذا كنت قد شاهدت الواجهة الأمامية لـ dapp الذي يحصل على بياناته من subgraph! ، فقد استعلمت عن البيانات من ال subgraph! @@ -75,7 +75,7 @@ Curators pay a 1% curation tax when they curate a new subgraph. This curation ta يمكن للمفهرسين ربح مكافآت GRT بطريقتين: -1. رسوم الاستعلام: المطورون أو المستخدمون يدفعون GRT مقابل استعلامات الsubgraph. يتم إيداع رسوم الاستعلام في حوض الخصم وتوزيعها على المفهرسين. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. مكافآت الفهرسة: يتم توزيع 3% من الإصدار السنوي على المفهرسين بناءً على عدد الsubgraphs التي يقومون بفهرستها. هذه المكافآت تشجع المفهرسين على فهرسة الsubgraphs ، أحيانًا قبل البدء بفرض الرسوم على الاستعلامات ،يقوم المفهرسون بتجميع وتقديم أدلة فهرسة (POIs) للتحقق من دقة فهرسة البيانات التي قاموا بفهرستها. diff --git a/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..43f96152931a 100644 --- a/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/cs/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. 
These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). + +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. 
The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## Subgraph Transfer -## How do I transfer my subgraph? +### How do I transfer my subgraph? + + To transfer your subgraph, you will need to complete the following steps: @@ -30,55 +54,147 @@ To transfer your subgraph, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Where should I initiate my transfer from? You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. -## How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -## Will my subgraph still be discoverable after I transfer it to L2? +### Will my subgraph still be discoverable after I transfer it to L2? Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. -## Does my subgraph need to be published to transfer it? +### Does my subgraph need to be published to transfer it? 
To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. -## After I transfer, do I also need to re-publish on Arbitrum? +### After I transfer, do I also need to re-publish on Arbitrum? After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Will my subgraph's curation move with my subgraph? If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my subgraph back to Ethereum mainnet after I transfer? Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. 
However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Why do I need bridged ETH to complete my transfer? Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. 
Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. 
+ +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curation Signal -## How do I transfer my curation? +### How do I transfer my curation? To transfer your curation, you will need to complete the following steps: @@ -90,25 +206,29 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -## How will I know if the subgraph I curated has moved to L2? +### How will I know if the subgraph I curated has moved to L2? When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. -## What if I do not wish to move my curation to L2? +### What if I do not wish to move my curation to L2? When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. -## How do I know my curation successfully transferred? +### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -## Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one subgraph at a time? There is no bulk transfer option at this time. ## Indexer Stake -## How do I transfer my stake to Arbitrum? +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + To transfer your stake, you will need to complete the following steps: @@ -120,7 +240,7 @@ To transfer your stake, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? 
+### Will all of my stake transfer? You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. @@ -128,91 +248,45 @@ If you plan on transferring parts of your stake over multiple transactions, you Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. -## How much time do I have to confirm my stake transfer to Arbitrum? +### How much time do I have to confirm my stake transfer to Arbitrum? \*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. -## What if I have open allocations? +### What if I have open allocations? If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. -## How long will it take to transfer my stake? +### How long will it take to transfer my stake? It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. -## Do I have to index on Arbitrum before I transfer my stake? +### Do I have to index on Arbitrum before I transfer my stake? You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. -## Can Delegators move their delegation before I move my indexing stake? +### Can Delegators move their delegation before I move my indexing stake? No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -## Delegation - -## How do I transfer my delegation? - -To transfer your delegation, you will need to complete the following steps: - -1. Initiate delegation transfer on Ethereum mainnet - -2. Wait 20 minutes for confirmation - -3. 
Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? - -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. - -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -## What if I can't find the Indexer I'm delegating to on L2? +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -The L2 transfer tool will automatically detect the Indexer you previously delegated to. - -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? - -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. - -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? - -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? - -It is anticipated that all network participation will move to Arbitrum One in the future. - -## How long does it take to complete the transfer of my delegation to L2? - -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. 
If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? - -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -## Is there any delegation tax? - -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## Vesting Contract Transfer -## How do I transfer my vesting contract? +### How do I transfer my vesting contract? To transfer your vesting, you will need to complete the following steps: @@ -222,7 +296,9 @@ To transfer your vesting, you will need to complete the following steps: 3. Confirm vesting transfer on Arbitrum -## How do I transfer my vesting contract if I am only partially vested? +### How do I transfer my vesting contract if I am only partially vested? + + 1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) @@ -232,7 +308,9 @@ To transfer your vesting, you will need to complete the following steps: 4. Withdraw any remaining ETH from the transfer tool contract -## How do I transfer my vesting contract if I am fully vested? +### How do I transfer my vesting contract if I am fully vested? + + For those that are fully vested, the process is similar: @@ -244,7 +322,7 @@ For those that are fully vested, the process is similar: 4. Withdraw any remaining ETH from the transfer tool contract -## Can I transfer my vesting contract to Arbitrum? +### Can I transfer my vesting contract to Arbitrum? You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). @@ -256,27 +334,27 @@ Please note that you will not be able to release/withdraw GRT from the L2 vestin If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. 
If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. @@ -298,18 +376,36 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. 
+ +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/cs/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. 
These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/cs/billing.mdx b/website/pages/cs/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/cs/billing.mdx +++ b/website/pages/cs/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). 
+ - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/cs/chain-integration-overview.mdx b/website/pages/cs/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/cs/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. 
Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. 
For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/cs/cookbook/arweave.mdx b/website/pages/cs/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/cs/cookbook/arweave.mdx +++ b/website/pages/cs/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/cs/cookbook/grafting.mdx b/website/pages/cs/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/cs/cookbook/grafting.mdx +++ b/website/pages/cs/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. 
While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/cs/cookbook/near.mdx b/website/pages/cs/cookbook/near.mdx index 879e8e5c15aa..304e1202e278 100644 --- a/website/pages/cs/cookbook/near.mdx +++ b/website/pages/cs/cookbook/near.mdx @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/cs/cookbook/upgrading-a-subgraph.mdx b/website/pages/cs/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/cs/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/cs/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/cs/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. 
+> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/cs/deploying/hosted-service.mdx b/website/pages/cs/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/cs/deploying/hosted-service.mdx +++ b/website/pages/cs/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/cs/deploying/subgraph-studio.mdx b/website/pages/cs/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/cs/deploying/subgraph-studio.mdx +++ b/website/pages/cs/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/cs/developing/creating-a-subgraph.mdx b/website/pages/cs/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/cs/developing/creating-a-subgraph.mdx +++ b/website/pages/cs/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. 
This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. 
This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/cs/developing/developer-faqs.mdx b/website/pages/cs/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/cs/developing/developer-faqs.mdx +++ b/website/pages/cs/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. 
This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/cs/developing/graph-ts/api.mdx b/website/pages/cs/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/cs/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
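Before the full `Bytes` API below, here is a brief sketch of how the `BigInt` and `BigDecimal` helpers listed above are often combined in a mapping. The 18-decimal scale and the `toDecimal` helper name are assumptions for illustration, not part of `graph-ts`.

```typescript
import { BigDecimal, BigInt } from '@graphprotocol/graph-ts'

// Convert a raw token amount (assumed here to use 18 decimals) into a
// BigDecimal suitable for human-readable entity fields.
// `toDecimal` is a hypothetical helper name, not a graph-ts built-in.
function toDecimal(raw: BigInt): BigDecimal {
  let scale = BigInt.fromI32(10).pow(18).toBigDecimal()
  return raw.toBigDecimal().div(scale)
}
```

In a handler, something like `toDecimal(event.params.amount)` could then be assigned to a `BigDecimal` field on an entity.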
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+```
+
+It is also possible to unset properties with one of the following two instructions:
+
+```typescript
+transfer.from.unset()
+transfer.from = null
+```
+
+This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`.
+
+Updating array properties is a little more involved, as getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field.
+
+```typescript
+// This won't work
+entity.numbers.push(BigInt.fromI32(1))
+entity.save()
+
+// This will work
+let numbers = entity.numbers
+numbers.push(BigInt.fromI32(1))
+entity.numbers = numbers
+entity.save()
+```
+
+#### Removing entities from the store
+
+There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`:
+
+```typescript
+import { store } from '@graphprotocol/graph-ts'
+...
+let id = event.transaction.hash
+store.remove('Transfer', id)
+```
+
+### Ethereum API
+
+The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding of Ethereum data.
+
+#### Support for Ethereum Types
+
+As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder.
+
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them.
+
+The following example illustrates this. Given a subgraph schema like
+
+```graphql
+type Transfer @entity {
+  id: Bytes!
+  from: Bytes!
+  to: Bytes!
+  amount: BigInt!
+}
+```
+
+and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity:
+
+```typescript
+let id = event.transaction.hash
+let transfer = new Transfer(id)
+transfer.from = event.params.from
+transfer.to = event.params.to
+transfer.amount = event.params.amount
+transfer.save()
+```
+
+#### Events and Block/Transaction Data
+
+Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`):
+
+```typescript
+class Event {
+  address: Address
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string | null
+  block: Block
+  transaction: Transaction
+  parameters: Array<EventParam>
+  receipt: TransactionReceipt | null
+}
+
+class Block {
+  hash: Bytes
+  parentHash: Bytes
+  unclesHash: Bytes
+  author: Address
+  stateRoot: Bytes
+  transactionsRoot: Bytes
+  receiptsRoot: Bytes
+  number: BigInt
+  gasUsed: BigInt
+  gasLimit: BigInt
+  timestamp: BigInt
+  difficulty: BigInt
+  totalDifficulty: BigInt
+  size: BigInt | null
+  baseFeePerGas: BigInt | null
+}
+
+class Transaction {
+  hash: Bytes
+  index: BigInt
+  from: Address
+  to: Address | null
+  value: BigInt
+  gasLimit: BigInt
+  gasPrice: BigInt
+  input: Bytes
+  nonce: BigInt
+}
+
+class TransactionReceipt {
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  blockHash: Bytes
+  blockNumber: BigInt
+  cumulativeGasUsed: BigInt
+  gasUsed: BigInt
+  contractAddress: Address
+  logs: Array<Log>
+  status: BigInt
+  root: Bytes
+  logsBloom: Bytes
+}
+
+class Log {
+  address: Address
+  topics: Array<Bytes>
+  data: Bytes
+  blockHash: Bytes
+  blockNumber: Bytes
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string
+  removed: bool | null
+}
+```
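+
+As a quick illustration of how these fields are typically read inside a handler, here is a minimal, hypothetical sketch that reuses the `TransferEvent` alias from the earlier examples:
+
+```typescript
+export function handleTransfer(event: TransferEvent): void {
+  // Block and transaction metadata are available on every event
+  let blockNumber = event.block.number // BigInt
+  let timestamp = event.block.timestamp // BigInt
+  let sender = event.transaction.from // Address
+}
+```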
+
+#### Access to Smart Contract State
+
+The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block.
+
+A common pattern is to access the contract from which an event originates. This is achieved with the following code:
+
+```typescript
+// Import the generated contract class and generated Transfer event class
+import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
+// Import the generated entity class
+import { Transfer } from '../generated/schema'
+
+export function handleTransfer(event: TransferEvent): void {
+  // Bind the contract to the address that emitted the event
+  let contract = ERC20Contract.bind(event.address)
+
+  // Access state variables and functions by calling them
+  let erc20Symbol = contract.symbol()
+}
+```
+
+`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type.
+
+As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables, a method with the same name is created automatically.
+
+Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address.
+
+#### Handling Reverted Calls
+
+If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method:
+
+```typescript
+let gravity = Gravity.bind(event.address)
+let callResult = gravity.try_gravatarToOwner(gravatar)
+if (callResult.reverted) {
+  log.info('gravatarToOwner reverted', [])
+} else {
+  let owner = callResult.value
+}
+```
+
+Note that a Graph Node connected to a Geth or Infura client may not detect all reverts. If you rely on this, we recommend using a Graph Node connected to a Parity client.
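+
+Building on the `ERC20Contract` example above, a handler can also fall back to a default value instead of only logging when a call reverts. This is a sketch under the assumption that the generated binding exposes a `try_symbol()` counterpart to `symbol()`, as `graph codegen` does for read-only functions:
+
+```typescript
+let contract = ERC20Contract.bind(event.address)
+
+// Use a placeholder symbol if the call reverts
+let erc20Symbol = 'unknown'
+let symbolResult = contract.try_symbol()
+if (!symbolResult.reverted) {
+  erc20Symbol = symbolResult.value
+}
+```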
+
+#### Encoding/Decoding ABI
+
+Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module.
+
+```typescript
+import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts'
+
+let tupleArray: Array<ethereum.Value> = [
+  ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')),
+  ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)),
+]
+
+let tuple = tupleArray as ethereum.Tuple
+
+let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))!
+
+let decoded = ethereum.decode('(address,uint256)', encoded)
+```
+
+For more information:
+
+- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types)
+- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi)
+- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72).
+
+### Logging API
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+```
+
+The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments.
+
+The `log` API includes the following functions:
+
+- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message.
+- `log.info(fmt: string, args: Array<string>): void` - logs an informational message.
+- `log.warning(fmt: string, args: Array<string>): void` - logs a warning.
+- `log.error(fmt: string, args: Array<string>): void` - logs an error message.
+- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph.
+
+The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value, and so on.
+
+```typescript
+log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string'])
+```
+
+#### Logging one or more values
+
+##### Logging a single value
+
+In the example below, the string value "A" is passed into an array to become `['A']` before being logged:
+
+```typescript
+let myValue = 'A'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A"
+  log.info('My value is: {}', [myValue])
+}
+```
+
+##### Logging a single entry from an existing array
+
+In the example below, only the first value of the argument array is logged, despite the array containing three values.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A" (Even though three values are passed to `log.info`)
+  log.info('My value is: {}', myArray)
+}
+```
+
+##### Logging multiple entries from an existing array
+
+Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Logging a specific entry from an existing array
+
+To display a specific value in the array, the indexed value must be provided.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My third value is C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Logging event information
+
+The example below logs the block number, block hash and transaction hash from an event:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+Given an IPFS hash or path, reading a file from IPFS is done as follows:
+
+```typescript
+// Put this inside an event handler in the mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// that include files in directories are also supported
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.
+
+On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed.
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:
+
+- `crypto.keccak256(input: ByteArray): ByteArray`
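+
+As a brief sketch of how this might be used, the example below hashes the UTF-8 bytes of a string and renders the digest as a hexadecimal string; it assumes the `ByteArray` returned by `keccak256` exposes the same `toHexString()` helper as `Bytes`:
+
+```typescript
+import { ByteArray, crypto } from '@graphprotocol/graph-ts'
+
+// Keccak-256 hash of a UTF-8 encoded string
+let digest = crypto.keccak256(ByteArray.fromUTF8('hello'))
+let digestHex = digest.toHexString()
+```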
+
+### JSON API
+
+```typescript
+import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+```
+
+JSON data can be parsed using the `json` API:
+
+- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence
+- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`; it returns an error variant if the parsing failed
+- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String`
+- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`; it returns an error variant if the parsing failed
+
+The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value:
+
+```typescript
+let value = json.fromBytes(...)
+if (value.kind == JSONValueKind.BOOL) {
+  ...
+}
+```
+
+In addition, there is a method to check if the value is `null`:
+
+- `value.isNull(): boolean`
+
+When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods:
+
+- `value.toBool(): boolean`
+- `value.toI64(): i64`
+- `value.toF64(): f64`
+- `value.toBigInt(): BigInt`
+- `value.toString(): string`
+- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above)
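+
+Putting these pieces together, a minimal sketch of parsing a JSON object might look as follows. It assumes `data` holds `Bytes` obtained elsewhere (for example from `ipfs.cat`) and that the `Result` returned by `try_fromBytes` exposes an `isOk` flag alongside `value`:
+
+```typescript
+let parsed = json.try_fromBytes(data)
+if (parsed.isOk && parsed.value.kind == JSONValueKind.OBJECT) {
+  let obj = parsed.value.toObject()
+  let title = obj.get('title')
+  if (title != null && !title.isNull()) {
+    let titleString = title.toString()
+  }
+}
+```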
+
+### Type Conversions Reference
+
+| Source(s)            | Destination          | Conversion function          |
+| -------------------- | -------------------- | ---------------------------- |
+| Address              | Bytes                | none                         |
+| Address              | String               | s.toHexString()              |
+| BigDecimal           | String               | s.toString()                 |
+| BigInt               | BigDecimal           | s.toBigDecimal()             |
+| BigInt               | String (hexadecimal) | s.toHexString() or s.toHex() |
+| BigInt               | String (unicode)     | s.toString()                 |
+| BigInt               | i32                  | s.toI32()                    |
+| Boolean              | Boolean              | none                         |
+| Bytes (signed)       | BigInt               | BigInt.fromSignedBytes(s)    |
+| Bytes (unsigned)     | BigInt               | BigInt.fromUnsignedBytes(s)  |
+| Bytes                | String (hexadecimal) | s.toHexString() or s.toHex() |
+| Bytes                | String (unicode)     | s.toString()                 |
+| Bytes                | String (base58)      | s.toBase58()                 |
+| Bytes                | i32                  | s.toI32()                    |
+| Bytes                | u32                  | s.toU32()                    |
+| Bytes                | JSON                 | json.fromBytes(s)            |
+| int8                 | i32                  | none                         |
+| int32                | i32                  | none                         |
+| int32                | BigInt               | BigInt.fromI32(s)            |
+| uint24               | i32                  | none                         |
+| int64 - int256       | BigInt               | none                         |
+| uint32 - uint256     | BigInt               | none                         |
+| JSON                 | boolean              | s.toBool()                   |
+| JSON                 | i64                  | s.toI64()                    |
+| JSON                 | u64                  | s.toU64()                    |
+| JSON                 | f64                  | s.toF64()                    |
+| JSON                 | BigInt               | s.toBigInt()                 |
+| JSON                 | string               | s.toString()                 |
+| JSON                 | Array                | s.toArray()                  |
+| JSON                 | Object               | s.toObject()                 |
+| String               | Address              | Address.fromString(s)        |
+| Bytes                | Address              | Address.fromBytes(s)         |
+| String               | BigInt               | BigInt.fromString(s)         |
+| String               | BigDecimal           | BigDecimal.fromString(s)     |
+| String (hexadecimal) | Bytes                | ByteArray.fromHexString(s)   |
+| String (UTF-8)       | Bytes                | ByteArray.fromUTF8(s)        |
+
+### Data Source Metadata
+
+You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace:
+
+- `dataSource.address(): Address`
+- `dataSource.network(): string`
+- `dataSource.context(): DataSourceContext`
+
+### Entity and DataSourceContext
+
+The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields:
+
+- `setString(key: string, value: string): void`
+- `setI32(key: string, value: i32): void`
+- `setBigInt(key: string, value: BigInt): void`
+- `setBytes(key: string, value: Bytes): void`
+- `setBoolean(key: string, value: bool): void`
+- `setBigDecimal(key: string, value: BigDecimal): void`
+- `getString(key: string): string`
+- `getI32(key: string): i32`
+- `getBigInt(key: string): BigInt`
+- `getBytes(key: string): Bytes`
+- `getBoolean(key: string): boolean`
+- `getBigDecimal(key: string): BigDecimal`
+
+### DataSourceContext in Manifest
+
+The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`.
+
+Here is a YAML example illustrating the usage of various types in the `context` section:
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: ContractName
+    network: mainnet
+    context:
+      bool_example:
+        type: Bool
+        data: true
+      string_example:
+        type: String
+        data: 'hello'
+      int_example:
+        type: Int
+        data: 42
+      int8_example:
+        type: Int8
+        data: 127
+      big_decimal_example:
+        type: BigDecimal
+        data: '10.99'
+      bytes_example:
+        type: Bytes
+        data: '0x68656c6c6f'
+      list_example:
+        type: List
+        data:
+          - type: Int
+            data: 1
+          - type: Int
+            data: 2
+          - type: Int
+            data: 3
+      big_int_example:
+        type: BigInt
+        data: '1000000000000000000000000'
+```
+
+- `Bool`: Specifies a Boolean value (`true` or `false`).
+- `String`: Specifies a String value.
+- `Int`: Specifies a 32-bit integer.
+- `Int8`: Specifies an 8-bit integer.
+- `BigDecimal`: Specifies a decimal number. Must be quoted.
+- `Bytes`: Specifies a hexadecimal string.
+- `List`: Specifies a list of items. Each item needs to specify its type and data.
+- `BigInt`: Specifies a large integer value. Must be quoted due to its large size.
+
+This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs.
diff --git a/website/pages/cs/developing/graph-ts/common-issues.mdx b/website/pages/cs/developing/graph-ts/common-issues.mdx
new file mode 100644
index 000000000000..5b99efa8f493
--- /dev/null
+++ b/website/pages/cs/developing/graph-ts/common-issues.mdx
@@ -0,0 +1,8 @@
+---
+title: Common AssemblyScript Issues
+---
+
+There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debugging difficulty; however, being aware of them may help. The following is a non-exhaustive list of these issues:
+
+- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object.
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used.
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/cs/developing/supported-networks.mdx b/website/pages/cs/developing/supported-networks.mdx index 58ce56345f7c..cd82305bfce2 100644 --- a/website/pages/cs/developing/supported-networks.mdx +++ b/website/pages/cs/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/cs/firehose.mdx b/website/pages/cs/firehose.mdx new file mode 100644 index 000000000000..02f0d63c72db --- /dev/null +++ b/website/pages/cs/firehose.mdx @@ -0,0 +1,22 @@ +--- +title: Firehose +--- + +![Firehose Logo](/img/firehose-logo.png) + +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. + +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. 
+ +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/cs/glossary.mdx b/website/pages/cs/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/cs/glossary.mdx +++ b/website/pages/cs/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. 
Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. 
-- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network-related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). 
diff --git a/website/pages/cs/graphcast.mdx b/website/pages/cs/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/cs/graphcast.mdx +++ b/website/pages/cs/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/cs/index.json b/website/pages/cs/index.json index 9e28e13d5001..ddbbb68445fe 100644 --- a/website/pages/cs/index.json +++ b/website/pages/cs/index.json @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/cs/mips-faqs.mdx b/website/pages/cs/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/cs/mips-faqs.mdx +++ b/website/pages/cs/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
diff --git a/website/pages/cs/network/benefits.mdx b/website/pages/cs/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/cs/network/benefits.mdx +++ b/website/pages/cs/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/cs/network/delegating.mdx b/website/pages/cs/network/delegating.mdx index 61f9e3fa7785..4a6d6e00b73e 100644 --- a/website/pages/cs/network/delegating.mdx +++ b/website/pages/cs/network/delegating.mdx @@ -83,10 +83,6 @@ Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. -Get started delegating with this quickstart video: - - - ## Delegator FAQs and Bugs ### MetaMask "Pending Transaction" Bug diff --git a/website/pages/cs/network/indexing.mdx b/website/pages/cs/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/cs/network/indexing.mdx +++ b/website/pages/cs/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. 
The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? 
@@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/cs/new-chain-integration.mdx b/website/pages/cs/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/cs/new-chain-integration.mdx +++ b/website/pages/cs/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. 
Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/cs/operating-graph-node.mdx b/website/pages/cs/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/cs/operating-graph-node.mdx +++ b/website/pages/cs/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). 
### IPFS Nodes diff --git a/website/pages/cs/querying/querying-the-hosted-service.mdx b/website/pages/cs/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/cs/querying/querying-the-hosted-service.mdx +++ b/website/pages/cs/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/cs/querying/querying-with-python.mdx b/website/pages/cs/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/cs/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/cs/quick-start.mdx b/website/pages/cs/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/cs/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio and tested, and you are ready to put it into production, you can publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have an API key yet, you can query from your dapp via the free, rate-limited temporary query URL, which can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/cs/substreams.mdx b/website/pages/cs/substreams.mdx new file mode 100644 index 000000000000..2a06de8ac868 --- /dev/null +++ b/website/pages/cs/substreams.mdx @@ -0,0 +1,44 @@ +--- +title: Substreams +--- + +![Substreams Logo](/img/substreams-logo.png) + +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. + +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +(A minimal sketch of the `MyBlock` output type used in this example appears after this list.) + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example).
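+
+For context, `MyBlock` in the example above is the module's output type. In a real Substreams module, this type would be generated from a Protobuf definition rather than written by hand; the following is only a minimal, hypothetical sketch (field names assumed from the example) of what such a type might contain.
+
+```rust
+// Hypothetical sketch of the output type used in the example above.
+// In practice this struct would be generated from a .proto definition
+// (for example via prost), not written by hand.
+#[derive(Debug, Clone)]
+pub struct MyBlock {
+    /// Block number, copied from the block being processed.
+    pub number: u64,
+    /// Hex-encoded block hash.
+    pub hash: String,
+    /// Hex-encoded hash of the parent block.
+    pub parent_hash: String,
+}
+```
+
+Each field corresponds to one value extracted by `get_my_block`, and the sink chosen in step 4 receives a stream of these messages.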
+ +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/cs/sunrise.mdx b/website/pages/cs/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/cs/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. 
+ +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. 
+ +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. 
+ +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/cs/tokenomics.mdx b/website/pages/cs/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/cs/tokenomics.mdx +++ b/website/pages/cs/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/cs/translations.ts b/website/pages/cs/translations.ts new file mode 100644 index 000000000000..340f7eeea339 --- /dev/null +++ b/website/pages/cs/translations.ts @@ -0,0 +1,13 @@ +import supportedNetworks from './developing/supported-networks.json' +import docsearch from './docsearch.json' +import global from './global.json' +import index from './index.json' + +const translations = { + global, + index, + docsearch, + supportedNetworks, +} + +export default translations diff --git a/website/pages/de/arbitrum/arbitrum-faq.mdx b/website/pages/de/arbitrum/arbitrum-faq.mdx index 849d08c92b93..23fc14587397 100644 --- a/website/pages/de/arbitrum/arbitrum-faq.mdx +++ b/website/pages/de/arbitrum/arbitrum-faq.mdx @@ -1,78 +1,78 @@ --- -title: Arbitrum FAQ +title: Arbitrum-FAQ --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. 
+Klicken Sie [hier](#billing-on-arbitrum-faqs), wenn Sie zu den Arbitrum Billing FAQs springen möchten. -## Why is The Graph implementing an L2 Solution? +## Warum implementiert The Graph eine L2-Lösung? -By scaling The Graph on L2, network participants can expect: +Durch die Skalierung von The Graph auf L2 können die Netzwerkteilnehmer erwarten: -- Upwards of 26x savings on gas fees +- Bis zu 26-fache Einsparungen bei den Gebühren für Gas -- Faster transaction speed +- Schnellere Transaktionsgeschwindigkeit -- Security inherited from Ethereum +- Von Ethereum übernommene Sicherheit -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Die Skalierung der Smart Contracts des Protokolls auf L2 ermöglicht den Netzwerkteilnehmern eine häufigere Interaktion zu geringeren Kosten in Form von Gasgebühren. Zum Beispiel könnten Indexer Zuweisungen öffnen und schließen, um eine größere Anzahl von Subgraphen mit größerer Häufigkeit zu indexieren, Entwickler könnten Subgraphen mit größerer Leichtigkeit bereitstellen und aktualisieren, Delegatoren könnten GRT mit größerer Häufigkeit delegieren und Kuratoren könnten Signale zu einer größeren Anzahl von Subgraphen hinzufügen oder entfernen - Aktionen, die zuvor als zu kostenintensiv angesehen wurden, um sie häufig auszuführen. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +DieThe Graph-Community beschloss letztes Jahr nach dem Ergebnis der [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)-Diskussion, mit Arbitrum weiterzumachen. -## What do I need to do to use The Graph on L2? +## Was muss ich tun, um The Graph auf L2 zu nutzen? -Users bridge their GRT and ETH  using one of the following methods: +Die Benutzer überbrücken ihre GRT und ETH mit einer der folgenden Methoden: -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Die The Graph-Brücke auf Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +Um die Vorteile von The Graph auf L2 zu nutzen, verwenden Sie diesen Dropdown-Schalter, um zwischen den Ketten umzuschalten. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Dropdown-Schalter zum Aktivieren von Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## Was muss ich als Entwickler von Subgraphen, Datenkonsument, Indexer, Kurator oder Delegator jetzt tun? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. 
+Es besteht kein unmittelbarer Handlungsbedarf, jedoch werden die Netzwerkteilnehmer ermutigt, mit der Umstellung auf Arbitrum zu beginnen, um von den Vorteilen von L2 zu profitieren. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Kernentwicklerteams arbeiten an der Erstellung von L2-Transfer-Tools, die die Übertragung von Delegation, Kuration und Subgraphen auf Arbitrum erheblich erleichtern werden. Netzwerkteilnehmer können davon ausgehen, dass L2-Transfer-Tools bis zum Sommer 2023 verfügbar sein werden. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +Ab dem 10. April 2023 werden 5% aller Indexierungs-Rewards auf Arbitrum geprägt. Mit zunehmender Beteiligung des Netzwerks und der Zustimmung des Rates werden die Indexierungsprämien schrittweise von Ethereum auf Arbitrum und schließlich vollständig auf Arbitrum umgestellt. -## If I would like to participate in the network on L2, what should I do? +## Was muss ich tun, wenn ich am L2-Netz teilnehmen möchte? -Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Bitte helfen Sie [test the network](https://testnet.thegraph.com/explorer) auf L2 und berichten Sie über Ihre Erfahrungen in [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Sind mit der Skalierung des Netzes auf L2 irgendwelche Risiken verbunden? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Alle Smart Contracts wurden gründlich [audited] \(https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Alles wurde gründlich getestet, und es gibt einen Notfallplan, um einen sicheren und nahtlosen Übergang zu gewährleisten. Einzelheiten finden Sie [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Werden die bestehenden Subgraphen auf Ethereum weiterhin funktionieren? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Ja, die The Graph Netzwerk-Verträge werden parallel sowohl auf Ethereum als auch auf Arbitrum laufen, bis sie zu einem späteren Zeitpunkt vollständig auf Arbitrum umgestellt werden. -## Will GRT have a new smart contract deployed on Arbitrum? +## Wird GRT einen neuen Smart Contract auf Arbitrum bereitstellen? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). 
However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Ja, GRT hat einen zusätzlichen [Smart Contract auf Arbitrum] \(https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). Der Ethereum-Hauptnetz-[GRT-Vertrag](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) wird jedoch weiterhin funktionieren. -## Billing on Arbitrum FAQs +## Abrechnung auf Arbitrum FAQs -## What do I need to do about the GRT in my billing balance? +## Was muss ich mit den GRT in meinem Rechnungssaldo tun? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Nichts! Ihr GRT wurde sicher zu Arbitrum migriert und wird, während Sie dies lesen, zur Bezahlung von Abfragen verwendet. -## How do I know my funds have migrated securely to Arbitrum? +## Woher weiß ich, dass meine Guthaben sicher zu Arbitrum migriert sind? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Alle GRT-Abrechnungssalden wurden bereits erfolgreich auf Arbitrum migriert. Sie können den Abrechnungsvertrag auf Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a) einsehen. -## How do I know the Arbitrum bridge is secure? +## Woher weiß ich, dass die Arbitrum-Brücke sicher ist? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Die Brücke wurde [umfangreich geprüft] \(https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest), um die Sicherheit für alle Nutzer zu gewährleisten. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Was muss ich tun, wenn ich neue GRT von meiner Ethereum Mainnet Wallet hinzufüge? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Das Hinzufügen von GRT zu Ihrem Arbitrum-Abrechnungssaldo kann mit nur einem Klick in [Subgraph Studio] \(https://thegraph.com/studio/) erfolgen. Sie können Ihr GRT ganz einfach mit Arbitrum verbinden und Ihre API-Schlüssel in einer einzigen Transaktion füllen. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Besuchen Sie die [Abrechnungsseite] \(https://thegraph.com/docs/en/billing/) für detaillierte Anweisungen zum Hinzufügen, Abheben oder Erwerben von GRT. diff --git a/website/pages/de/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/de/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..47bc07a3d2a7 100644 --- a/website/pages/de/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/de/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: L2-Übertragungs-Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### Was sind L2-Transfer-Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. 
For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. -## Subgraph Transfer +### Kann ich dieselbe Wallet verwenden, die ich im Ethereum Mainnet benutze? -## How do I transfer my subgraph? +Wenn Sie eine [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) Wallet verwenden, können Sie dieselbe Adresse verwenden. Wenn Ihr Ethereum Mainnet Wallet ein Kontrakt ist (z.B. ein Multisig), dann müssen Sie eine [Arbitrum Wallet Adresse](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) angeben, an die Ihr Transfer gesendet wird. Bitte überprüfen Sie die Adresse sorgfältig, da Überweisungen an eine falsche Adresse zu einem dauerhaften Verlust führen können. Wenn Sie einen Multisig auf L2 verwenden möchten, stellen Sie sicher, dass Sie einen Multisig-Vertrag auf Arbitrum One einsetzen. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Was passiert, wenn ich meinen Transfer nicht innerhalb von 7 Tagen abschließe? -3. Confirm subgraph transfer on Arbitrum\* +Die L2-Transfer-Tools verwenden den nativen Mechanismus von Arbitrum, um Nachrichten von L1 nach L2 zu senden. Dieser Mechanismus wird "retryable ticket" genannt und wird von allen nativen Token-Bridges verwendet, einschließlich der Arbitrum GRT-Bridge. 
Sie können mehr über wiederholbare Tickets in den [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) lesen. -4. Finish publishing subgraph on Arbitrum +Wenn Sie Ihre Vermögenswerte (Subgraph, Anteil, Delegation oder Kuration) an L2 übertragen, wird eine Nachricht über die Arbitrum GRT-Brücke gesendet, die ein wiederholbares Ticket in L2 erstellt. Das Transfer-Tool beinhaltet einen gewissen ETH-Wert in der Transaktion, der verwendet wird, um 1) die Erstellung des Tickets und 2) das Gas für die Ausführung des Tickets in L2 zu bezahlen. Da jedoch die Gaspreise in der Zeit, bis das Zertifikat zur Ausführung in L2 bereit ist, schwanken können, ist es möglich, dass dieser automatische Ausführungsversuch fehlschlägt. Wenn das passiert, hält die Arbitrum-Brücke das wiederholbare Zertifikat für bis zu 7 Tage am Leben, und jeder kann versuchen, das Ticket erneut "einzulösen" (was eine Geldbörse mit etwas ETH erfordert, die mit Arbitrum verbunden ist). -5. Update Query URL (recommended) +Dies ist der so genannte "Bestätigungsschritt" in allen Übertragungswerkzeugen - er wird in den meisten Fällen automatisch ausgeführt, da die automatische Ausführung meist erfolgreich ist, aber es ist wichtig, dass Sie sich vergewissern, dass die Übertragung erfolgreich war. Wenn dies nicht gelingt und es innerhalb von 7 Tagen keine erfolgreichen Wiederholungsversuche gibt, verwirft die Arbitrum-Brücke das Ticket, und Ihre Assets (Subgraph, Pfahl, Delegation oder Kuration) gehen verloren und können nicht wiederhergestellt werden. Die Entwickler des Graph-Kerns haben ein Überwachungssystem eingerichtet, um diese Situationen zu erkennen und zu versuchen, die Tickets einzulösen, bevor es zu spät ist, aber es liegt letztendlich in Ihrer Verantwortung, sicherzustellen, dass Ihr Transfer rechtzeitig abgeschlossen wird. Wenn Sie Probleme mit der Bestätigung Ihrer Transaktion haben, wenden Sie sich bitte an [dieses Formular] \(https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) und die Entwickler des Kerns werden Ihnen helfen. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## Subgraph-Transfer + +### Wie übertrage ich meinen Subgraphen + + + +Um Ihren Subgraphen zu übertragen, müssen Sie die folgenden Schritte ausführen: + +1. Starten Sie den Transfer im Ethereum-Mainnet + +2. 20 Minuten auf Bestätigung warten + +3. Bestätigung der Übertragung von Subgraphen auf Arbitrum\* + +4. Veröffentlichung des Subgraphen auf Arbitrum beenden + +5. Abfrage-URL aktualisieren (empfohlen) \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Von wo aus soll ich meine Übertragung veranlassen? + +Sie können die Übertragung vom [Subgraph Studio] \(https://thegraph.com/studio/), vom [Explorer] \(https://thegraph.com/explorer) oder von einer beliebigen Subgraph-Detailseite aus starten. Klicken Sie auf die Schaltfläche "Subgraph übertragen" auf der Detailseite des Subgraphen, um die Übertragung zu starten. + +### Wie lange muss ich warten, bis mein Subgraph übertragen wird? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Die Übertragungszeit beträgt etwa 20 Minuten. Die Arbitrum-Brücke arbeitet im Hintergrund, um den Übergang automatisch abzuschließen. In einigen Fällen können die Gaskosten in die Höhe schnellen und Sie müssen die Transaktion erneut bestätigen. -## How long do I need to wait until my subgraph is transferred +### Wird mein Subgraph noch auffindbar sein, nachdem ich ihn auf L2 übertragen habe? -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +Ihr Subgraph ist nur in dem Netzwerk auffindbar, in dem er veröffentlicht ist. Wenn Ihr Subgraph zum Beispiel auf Arbitrum One ist, können Sie ihn nur im Explorer auf Arbitrum One finden und nicht auf Ethereum. Bitte vergewissern Sie sich, dass Sie Arbitrum One in der Netzwerkumschaltung oben auf der Seite ausgewählt haben, um sicherzustellen, dass Sie sich im richtigen Netzwerk befinden. Nach der Übertragung wird der L1-Subgraph als veraltet angezeigt. -## Will my subgraph still be discoverable after I transfer it to L2? +### Muss mein Subgraph ( Teilgraph ) veröffentlicht werden, um ihn zu übertragen? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Um das Subgraph-Transfer-Tool nutzen zu können, muss Ihr Subgraph bereits im Ethereum-Mainnet veröffentlicht sein und über ein Kurationssignal verfügen, das der Wallet gehört, die den Subgraph besitzt. Wenn Ihr Subgraph nicht veröffentlicht ist, empfehlen wir Ihnen, ihn einfach direkt auf Arbitrum One zu veröffentlichen - die damit verbundenen Gasgebühren sind erheblich niedriger. Wenn Sie einen veröffentlichten Subgraphen übertragen wollen, aber das Konto des Eigentümers kein Signal darauf kuratiert hat, können Sie einen kleinen Betrag (z.B. 1 GRT) von diesem Konto signalisieren; stellen Sie sicher, dass Sie ein "auto-migrating" Signal wählen. -## Does my subgraph need to be published to transfer it? +### Was passiert mit der Ethereum-Mainnet-Version meines Subgraphen, nachdem ich zu Arbitrum übergehe? 
-To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +Nach der Übertragung Ihres Subgraphen auf Arbitrum wird die Ethereum-Hauptnetzversion veraltet sein. Wir empfehlen Ihnen, Ihre Abfrage-URL innerhalb von 48 Stunden zu aktualisieren. Es gibt jedoch eine Schonfrist, die Ihre Mainnet-URL funktionsfähig hält, so dass jede Drittanbieter-Dapp-Unterstützung aktualisiert werden kann. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Muss ich nach der Übertragung auch auf Arbitrum neu veröffentlichen? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +Nach Ablauf des 20-minütigen Übertragungsfensters müssen Sie die Übertragung mit einer Transaktion in der Benutzeroberfläche bestätigen, um die Übertragung abzuschließen. Ihr L1-Endpunkt wird während des Übertragungsfensters und einer Schonfrist danach weiterhin unterstützt. Es wird empfohlen, dass Sie Ihren Endpunkt aktualisieren, wenn es Ihnen passt. -## After I transfer, do I also need to re-publish on Arbitrum? +### Will my endpoint experience downtime while re-publishing? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Will there be a down-time to my endpoint while re-publishing? +### Ist die Veröffentlichung und Versionierung auf L2 die gleiche wie im Ethereum-Mainnet? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Bewegt sich die Kuration meines Untergraphen ( Subgraphen ) mit meinem Untergraphen? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Wenn Sie die automatische Signalmigration gewählt haben, werden 100 % Ihrer eigenen Kuration mit Ihrem Subgraphen zu Arbitrum One übertragen. 
Alle Kurationssignale des Subgraphen werden zum Zeitpunkt des Transfers in GRT umgewandelt, und die GRT, die Ihrem Kurationssignal entsprechen, werden zum Prägen von Signalen auf dem L2-Subgraphen verwendet. -## Will my subgraph's curation move with my subgraph? +Andere Kuratoren können wählen, ob sie ihren Anteil an GRT zurückziehen oder ihn ebenfalls auf L2 übertragen, um das Signal auf demselben Untergraphen zu prägen. -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +### Kann ich meinen Subgraph nach dem Transfer zurück ins Ethereum Mainnet verschieben? -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Nach der Übertragung wird Ihre Ethereum-Mainnet-Version dieses Untergraphen veraltet sein. Wenn Sie zum Mainnet zurückkehren möchten, müssen Sie Ihre Version neu bereitstellen und zurück zum Mainnet veröffentlichen. Es wird jedoch dringend davon abgeraten, zurück ins Ethereum Mainnet zu wechseln, da die Indexierungsbelohnungen schließlich vollständig auf Arbitrum One verteilt werden. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Warum brauche ich überbrückte ETH, um meine Überweisung abzuschließen? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Gasgebühren auf Arbitrum One werden mit überbrückter ETH bezahlt (d.h. ETH, die zu Arbitrum One überbrückt wurde). Allerdings sind die Gasgebühren im Vergleich zum Ethereum Mainnet deutlich niedriger. -## Why do I need bridged ETH to complete my transfer? +## Delegation -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### Wie übertrage ich meine Delegation? -## Curation Signal + -## How do I transfer my curation? +Um Ihre Delegation zu übertragen, müssen Sie die folgenden Schritte ausführen: -To transfer your curation, you will need to complete the following steps: +1. Initiieren einer Delegationsübertragung im Ethereum-Mainnet +2. 20 Minuten auf Bestätigung warten +3. Bestätigung der Delegationsübertragung auf Arbitrum -1. Initiate signal transfer on Ethereum mainnet +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -2. Specify an L2 Curator address\* +### Was passiert mit meinen Rewards, wenn ich einen Transfer mit einer offenen Zuteilung im Ethereum Mainnet initiiere? -3. 
Wait 20 minutes for confirmation +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -\*If necessary - i.e. you are using a contract address. +### Was passiert, wenn der Indexer, an den ich derzeit delegiere, nicht auf Arbitrum One ist? -## How will I know if the subgraph I curated has moved to L2? +Das L2-Transfer-Tool wird nur aktiviert, wenn der Indexer, den Sie delegiert haben, seinen eigenen Anteil an Arbitrum übertragen hat. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Haben Delegatoren die Möglichkeit, an einen anderen Indexierer zu delegieren? -## What if I do not wish to move my curation to L2? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Was ist, wenn ich den Indexer, an den ich delegiere, auf L2 nicht finden kann? -## How do I know my curation successfully transferred? +Das L2-Übertragungstool erkennt automatisch den Indexer, an den Sie zuvor delegiert haben. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +### Kann ich meine Delegationen auf neue oder mehrere Indexer statt auf den vorherigen Indexer verteilen? -## Can I transfer my curation on more than one subgraph at a time? +Mit dem L2-Transfertool wird Ihre Delegation immer an denselben Indexer übertragen, an den Sie zuvor delegiert haben. Sobald Sie zu L2 umgezogen sind, können Sie die Delegation aufheben, die Auftauzeit abwarten und entscheiden, ob Sie Ihre Delegation aufteilen möchten. -There is no bulk transfer option at this time. +### Unterliege ich der Wartezeit oder kann ich sofort abheben, nachdem ich das L2 Delegationstool benutzt habe? -## Indexer Stake +Mit dem Transfer-Tool können Sie sofort zu L2 wechseln. Wenn Sie den Transfer wieder rückgängig machen möchten, müssen Sie die Sperrfrist abwarten. Wenn ein Indexer jedoch seinen gesamten Einsatz auf L2 übertragen hat, können Sie sofort auf dem Ethereum-Mainnet abheben. -## How do I transfer my stake to Arbitrum? +### Kann es sich negativ auf meine Prämien auswirken, wenn ich meine Delegation nicht übertrage? -To transfer your stake, you will need to complete the following steps: +Es wird davon ausgegangen, dass die gesamte Netzbeteiligung in Zukunft zu Arbitrum One wechseln wird. -1. Initiate stake transfer on Ethereum mainnet +### Wie lange dauert es, bis die Übertragung meiner Delegation auf L2 abgeschlossen ist? -2. Wait 20 minutes for confirmation +A 20-minute confirmation is required for delegation transfer. 
Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -3. Confirm stake transfer on Arbitrum +### Kann ich meine Delegation übertragen, wenn ich eine GRT Vesting Contract/Token Lock Wallet verwende? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Ja! Der Prozess ist ein wenig anders, weil Vesting-Verträge die ETH, die für die Bezahlung des L2-Gases benötigt werden, nicht weiterleiten können, also müssen Sie sie vorher einzahlen. Wenn Ihr Berechtigungsvertrag nicht vollständig freigeschaltet ist, müssen Sie außerdem zuerst einen Gegenkontrakt auf L2 initialisieren und können die Delegation dann nur auf diesen L2-Berechtigungsvertrag übertragen. Die Benutzeroberfläche des Explorers kann Sie durch diesen Prozess leiten, wenn Sie sich über die Vesting Lock Wallet mit dem Explorer verbunden haben. -## Will all of my stake transfer? +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +### Fällt eine Delegationssteuer an? -## How much time do I have to confirm my stake transfer to Arbitrum? +Nein. Auf L2 erhaltene Token werden im Namen des angegebenen Delegators an den angegebenen Indexierer delegiert, ohne dass eine Delegationssteuer erhoben wird. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### Will my unrealized rewards be transferred when I transfer my delegation? 
-## What if I have open allocations? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Is moving delegations to L2 mandatory? Is there a deadline? -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## How long will it take to transfer my stake? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## Do I have to index on Arbitrum before I transfer my stake? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +### I don't see a button to transfer my delegation. Why is that? -## Can Delegators move their delegation before I move my indexing stake? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. 
If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -## How do I transfer my delegation? +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -To transfer your delegation, you will need to complete the following steps: +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -1. Initiate delegation transfer on Ethereum mainnet +## Kurationssignal -2. Wait 20 minutes for confirmation +### Wie übertrage ich meine Kuration? -3. Confirm delegation transfer on Arbitrum +Um Ihre Kuration zu übertragen, müssen Sie die folgenden Schritte ausführen: -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +1. Signalübertragung im Ethereum-Mainnet einleiten -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +2. Spezifizieren Sie eine L2-Kurator-Adresse\* -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +3. 20 Minuten auf Bestätigung warten -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +\* Falls erforderlich - d.h. wenn Sie eine Vertragsadresse verwenden. -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +### Wie erfahre ich, ob der von mir kuratierte Subgraph nach L2 umgezogen ist? -## Do Delegators have the option to delegate to another Indexer? +Auf der Seite mit den Details der Subgraphen werden Sie durch ein Banner darauf hingewiesen, dass dieser Subgraph übertragen wurde. Sie können der Aufforderung folgen, um Ihre Kuration zu übertragen. Diese Information finden Sie auch auf der Seite mit den Details zu jedem verschobenen Subgraphen. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. 
+### Was ist, wenn ich meine Kuration nicht auf L2 verschieben möchte? -## What if I can't find the Indexer I'm delegating to on L2? +Wenn ein Subgraph veraltet ist, haben Sie die Möglichkeit, Ihr Signal zurückzuziehen. Wenn ein Subgraph nach L2 verschoben wurde, können Sie wählen, ob Sie Ihr Signal im Ethereum-Mainnet zurückziehen oder das Signal an L2 senden. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Woran erkenne ich, dass meine Kuration erfolgreich übertragen wurde? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Die Signaldetails werden etwa 20 Minuten nach dem Start des L2-Transfertools über den Explorer zugänglich sein. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### Kann ich meine Kuration auf mehr als einen Subgraphen zur gleichen Zeit übertragen? -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +Zurzeit gibt es keine Option für Massenübertragungen. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +## Indexer-Einsatz -## Can my rewards be negatively impacted if I do not transfer my delegation? +### Wie übertrage ich meine Anteile auf Arbitrum? -It is anticipated that all network participation will move to Arbitrum One in the future. +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. -## How long does it take to complete the transfer of my delegation to L2? + -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Um Ihren Einsatz zu übertragen, müssen Sie die folgenden Schritte ausführen: + +1. Initiieren Sie den Stake-Transfer im Ethereum-Mainnet + +2. 20 Minuten auf Bestätigung warten + +3. Bestätigen Sie die Übertragung von Anteilen auf Arbitrum + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Wird mein gesamter Einsatz übertragen? + +Sie können wählen, wie viel von Ihrem Einsatz Sie übertragen möchten. Wenn Sie Ihren gesamten Einsatz auf einmal übertragen möchten, müssen Sie zunächst alle offenen Zuteilungen schließen. -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? 
+Wenn Sie vorhaben, Teile Ihres Anteils über mehrere Transaktionen zu übertragen, müssen Sie immer dieselbe Adresse des Begünstigten angeben. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +Hinweis: Sie müssen die Mindestanforderungen für den Einsatz auf L2 erfüllen, wenn Sie das Übertragungswerkzeug zum ersten Mal verwenden. Indexierer müssen (beim ersten Aufruf dieser Funktion) den Mindestbetrag von 100k GRT übermitteln. Wenn Sie einen Teil des Einsatzes auf L1 belassen, muss dieser ebenfalls über dem Minimum von 100k GRT liegen und (zusammen mit Ihren Delegationen) ausreichen, um Ihre offenen Zuteilungen zu decken. -## Is there any delegation tax? +### Wie viel Zeit habe ich, um die Übertragung meiner Anteile auf Arbitrum zu bestätigen? -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +\*\*\* Sie müssen Ihre Transaktion bestätigen, um die Übertragung des Einsatzes auf Arbitrum abzuschließen. Dieser Schritt muss innerhalb von 7 Tagen abgeschlossen werden, sonst kann der Einsatz verloren gehen. -## Vesting Contract Transfer +### Was ist, wenn ich offene Zuteilungen habe? -## How do I transfer my vesting contract? +Wenn Sie nicht Ihren gesamten Einsatz senden, prüft das L2-Transfer-Tool, ob mindestens 100k GRT im Ethereum-Mainnet verbleiben und Ihr verbleibender Einsatz und Ihre Delegation ausreichen, um alle offenen Zuteilungen zu decken. Möglicherweise müssen Sie offene Zuteilungen schließen, wenn Ihr GRT-Guthaben die Mindestbeträge + offene Zuteilungen nicht abdeckt. -To transfer your vesting, you will need to complete the following steps: +### Muss man bei der Verwendung der Transfer-Tools 28 Tage warten, bis man im Ethereum-Mainnet unstake ist, bevor man transferieren kann? -1. Initiate the vesting transfer on Ethereum mainnet +Nein, Sie können Ihren Einsatz sofort auf L2 übertragen. Sie müssen den Einsatz nicht aufheben und warten, bevor Sie das Transfer-Tool verwenden. Die 28-tägige Wartezeit gilt nur, wenn Sie den Einsatz wieder auf Ihre Wallet, das Ethereum-Mainnet oder L2 auszahlen möchten. -2. Wait 20 minutes for confirmation +### Wie lange dauert es, bis mein Anteil übertragen wird? -3. Confirm vesting transfer on Arbitrum +Es dauert etwa 20 Minuten, bis das L2-Transfertool die Übertragung Ihres Einsatzes abgeschlossen hat. -## How do I transfer my vesting contract if I am only partially vested? +### Muss ich auf Arbitrum indexieren, bevor ich meinen Einsatz übertrage? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +Sie können Ihren Einsatz zuerst überweisen, bevor Sie die Indizierung einrichten, aber Sie können keine Belohnungen auf L2 beanspruchen, bevor Sie Subgraphen auf L2 zuweisen, sie indizieren und POIs präsentieren. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +### Können Delegatoren ihre Delegation verschieben, bevor ich meine Indizierungsbeteiligung verschiebe? -3. 
Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Nein, damit Delegatoren ihre delegierten GRT an Arbitrum übertragen können, muss der Indexer, an den sie delegieren, auf L2 aktiv sein. -4. Withdraw any remaining ETH from the transfer tool contract +### Kann ich meinen Einsatz übertragen, wenn ich einen GRT Sperrvertrag / ein Token Lock Wallet verwende? -## How do I transfer my vesting contract if I am fully vested? +Ja! Der Prozess ist ein wenig anders, weil Vesting-Verträge die ETH, die für die Bezahlung des L2-Gases benötigt werden, nicht weiterleiten können, so dass Sie sie vorher einzahlen müssen. Wenn Ihr Freizügigkeitsvertrag nicht vollständig freigeschaltet ist, müssen Sie außerdem zuerst einen Gegenkontrakt auf L2 initialisieren und können den Anteil nur auf diesen L2-Freizügigkeitsvertrag übertragen. Die Benutzeroberfläche des Explorers kann Sie durch diesen Prozess führen, wenn Sie sich mit dem Explorer über die Vesting Lock Wallet verbunden haben. -For those that are fully vested, the process is similar: +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -2. Set your L2 address with a call to the transfer tool contract +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -4. Withdraw any remaining ETH from the transfer tool contract +## Unverfallbare Vertragsübertragung -## Can I transfer my vesting contract to Arbitrum? +### Wie übertrage ich meinen Vesting-Vertrag? -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +Um Ihren Einsatz zu übertragen, müssen Sie die folgenden Schritte ausführen: -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +1. Initiieren Sie die Vesting-Übertragung im Ethereum-Mainnet -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +2. 20 Minuten auf Bestätigung warten -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +3. Bestätigen Sie die Übertragung von Anteilen auf Arbitrum -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. 
Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### Wie übertrage ich meinen unverfallbaren Vertrag, wenn ich nur teilweise unverfallbar bin? -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? + -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +1. Zahlen Sie etwas ETH in den Transfer-Tool-Vertrag ein (UI kann helfen, einen angemessenen Betrag zu schätzen) -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +2. Senden Sie einige gesperrte GRT über den Transfer-Tool-Vertrag an L2, um die L2-Freizügigkeitssperre zu initialisieren. Dadurch wird auch die Adresse des L2-Begünstigten festgelegt. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +3. Senden Sie Ihren Einsatz bzw. Ihre Delegation über die "gesperrten" Transfer-Tool-Funktionen im L1-Einsatzvertrag an L2. -## Can I specify a different beneficiary for my vesting contract on L2? +4. Restliche ETH aus dem Transfer-Tool-Vertrag abziehen -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### Wie übertrage ich meinen Freizügigkeitsvertrag, wenn ich eine vollständige Freizügigkeit erlangt habe? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. + -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +Für diejenigen, die voll berechtigt sind, ist das Verfahren ähnlich: -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +1. Zahlen Sie etwas ETH in den Transfer-Tool-Vertrag ein (UI kann helfen, einen angemessenen Betrag zu schätzen) -This allows you to transfer your stake or delegation to any L2 address. +2. Stellen Sie Ihre L2-Adresse mit einem Aufruf des Transfer-Tool-Vertrags ein -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +3. Senden Sie Ihren Einsatz bzw. Ihre Delegation über die "gesperrten" Transfer-Tool-Funktionen im L1-Einsatzvertrag an L2. 
-These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +4. Restliche ETH aus dem Transfer-Tool-Vertrag abziehen -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +### Kann ich meinen Vesting-Vertrag auf Arbitrum übertragen? -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +Sie können den GRT-Saldo Ihres Vesting-Vertrags auf einen Vesting-Vertrag in L2 übertragen. Dies ist eine Voraussetzung für die Übertragung von Anteilen oder Delegationen von Ihrem Freizügigkeitsvertrag auf L2. Der Vesting-Vertrag muss einen GRT-Betrag haben, der nicht Null ist (Sie können bei Bedarf einen kleinen Betrag wie 1 GRT auf ihn übertragen). -2. Revoke protocol access to the vesting contract (needed for the next step) +Wenn Sie GRT von Ihrem L1-Vesting-Vertrag auf L2 übertragen, können Sie den Betrag wählen, den Sie senden möchten, und Sie können dies so oft tun, wie Sie möchten. Der Vesting-Vertrag L2 wird initialisiert, wenn Sie das erste Mal GRT übertragen. -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Die Übertragungen erfolgen über ein Übertragungstool, das in Ihrem Explorer-Profil angezeigt wird, wenn Sie sich mit dem Vesting-Vertragskonto verbinden. -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +Bitte beachten Sie, dass Sie GRT aus dem Vesting-Vertrag L2 erst am Ende Ihres Vesting-Zeitraums freigeben/abziehen können, wenn Ihr Vertrag vollständig unverfallbar ist. Wenn Sie vor diesem Zeitpunkt GRT freigeben müssen, können Sie die GRT mit einem anderen, für diesen Zweck verfügbaren Transfertool zurück auf den L1-Freizügigkeitsvertrag übertragen. -5. Wait 20 minutes for confirmation +Wenn Sie noch kein Guthaben auf L2 übertragen haben und Ihr Vesting-Vertrag vollständig ausübbar ist, sollten Sie Ihren Vesting-Vertrag nicht auf L2 übertragen. Stattdessen können Sie die Transfer-Tools verwenden, um eine L2-Wallet-Adresse festzulegen und Ihren Einsatz oder Ihre Delegation direkt auf diese reguläre Wallet auf L2 zu übertragen. -6. Confirm the balance transfer on L2 +### Ich verwende meinen Vesting-Vertrag für Einsätze im Mainnet. Kann ich meinen Einsatz auf Arbitrum übertragen? -\*If necessary - i.e. you are using a contract address. +Ja, aber wenn Ihr Vertrag noch ausläuft, können Sie den Einsatz nur so übertragen, dass er zu Ihrem auslaufenden L2-Vertrag gehört. Sie müssen diesen L2-Vertrag zunächst initialisieren, indem Sie ein gewisses GRT-Guthaben mit dem Freizügigkeitsvertrags-Transfertool im Explorer übertragen. Wenn Ihr Vertrag vollständig unverfallbar ist, können Sie Ihren Einsatz auf eine beliebige Adresse in L2 übertragen, aber Sie müssen ihn vorher festlegen und einige ETH für das L2-Transfer-Tool hinterlegen, um das L2-Gas zu bezahlen. + +### Ich verwende meinen Freizügigkeitsvertrag, um im Mainnet zu delegieren. Kann ich meine Delegationen auf Arbitrum übertragen? + +Ja, aber wenn Ihr Vertrag noch unverfallbar ist, können Sie die Delegation nur so übertragen, dass sie im Besitz Ihres L2-Vesting-Vertrags ist. Sie müssen diesen L2-Vertrag zunächst initialisieren, indem Sie ein gewisses GRT-Guthaben mit dem Freizügigkeitsvertrags-Transfertool im Explorer übertragen. 
Wenn Ihr Vertrag vollständig unverfallbar ist, können Sie Ihre Delegation an eine beliebige Adresse in L2 übertragen, aber Sie müssen sie vorher festlegen und einige ETH für das L2-Transfer-Tool hinterlegen, um für L2-Gas zu bezahlen. + +### Kann ich einen anderen Empfänger für meinen Freizügigkeitsvertrag auf L2 angeben? + +Ja, wenn Sie zum ersten Mal ein Guthaben übertragen und Ihren L2-Freizügigkeitsvertrag einrichten, können Sie einen L2-Begünstigten angeben. Stellen Sie sicher, dass dieser Begünstigte eine Wallet ist, die Transaktionen auf Arbitrum One durchführen kann, d.h. es muss eine EOA oder eine Multisig sein, die auf Arbitrum One eingesetzt wird. + +Wenn Ihr Kontrakt vollständig ausübbar ist, werden Sie keinen ausübbaren Kontrakt auf L2 einrichten; stattdessen werden Sie eine L2-Wallet-Adresse festlegen, die die empfangende Wallet für Ihren Einsatz oder Ihre Delegation auf Arbitrum sein wird. + +### Mein Vertrag ist vollständig unverfallbar. Kann ich meinen Anteil oder meine Delegation auf eine andere Adresse übertragen, bei der es sich nicht um einen Vertrag mit Unverfallbarkeit L2 handelt? + +Ja. Wenn Sie noch kein Guthaben auf L2 übertragen haben und Ihr Vesting-Vertrag vollständig ausübbar ist, sollten Sie Ihren Vesting-Vertrag nicht auf L2 übertragen. Stattdessen können Sie die Transfer-Tools verwenden, um eine L2-Wallet-Adresse festzulegen und Ihren Einsatz oder Ihre Delegation direkt auf diese reguläre Wallet auf L2 zu übertragen. + +So können Sie Ihren Einsatz oder Ihre Delegation an eine beliebige L2-Adresse übertragen. + +### Mein Vesting-Vertrag ist noch unverfallbar. Wie übertrage ich mein Freizügigkeitsguthaben auf L2? + +Diese Schritte gelten nur, wenn Ihr Vertrag noch unverfallbar ist, oder wenn Sie dieses Verfahren schon einmal angewendet haben, als Ihr Vertrag noch unverfallbar war. + +Um Ihren Vesting-Vertrag auf L2 zu übertragen, senden Sie ein eventuelles GRT-Guthaben mit Hilfe der Transfer-Tools an L2, wodurch Ihr L2-Vesting-Vertrag initialisiert wird: + +1. Zahlen Sie einige ETH in den Transfer-Tool-Vertrag ein (diese werden zur Bezahlung von L2-Gas verwendet) + +2. Widerruf des Protokollzugriffs auf den Vesting-Vertrag (erforderlich für den nächsten Schritt) + +3. Geben Sie dem Protokoll Zugriff auf den Vesting-Vertrag (damit Ihr Vertrag mit dem Übertragungstool interagieren kann) + +4. Geben Sie eine Empfängeradresse L2\* an und initiieren Sie den Guthaben-Transfer im Ethereum-Mainnet + +5. 20 Minuten auf Bestätigung warten + +6. Bestätigen Sie die Übertragung des Saldos auf L2 + +\* Falls erforderlich - d.h. wenn Sie eine Vertragsadresse verwenden. \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. 
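As the next paragraph notes, one way out is to top the vesting contract up with a small amount of GRT from another wallet. The sketch below (ethers v6) shows what such a 1 GRT top-up could look like; the token address, RPC URL, and key handling are placeholders to verify and replace, not values taken from this FAQ.

```typescript
import { ethers } from "ethers";

// Illustrative only: send 1 GRT to a vesting contract so it holds a nonzero balance.
// Verify the GRT token address independently; the other values are placeholders.
const GRT_MAINNET = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7"; // assumed GRT token address
const VESTING_CONTRACT = "0xYourVestingContractAddress";          // hypothetical

const provider = new ethers.JsonRpcProvider("https://ethereum.example-rpc.com"); // placeholder RPC
const wallet = new ethers.Wallet(process.env.PRIVATE_KEY!, provider);

const grt = new ethers.Contract(
  GRT_MAINNET,
  ["function transfer(address to, uint256 amount) returns (bool)"], // standard ERC-20 transfer
  wallet
);

async function topUpVestingContract(): Promise<void> {
  const tx = await grt.transfer(VESTING_CONTRACT, ethers.parseUnits("1", 18)); // 1 GRT
  await tx.wait();
  console.log(`Sent 1 GRT to ${VESTING_CONTRACT} in tx ${tx.hash}`);
}

topUpVestingContract().catch(console.error);
```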
+ +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Kann ich meinen Vertrag mit unverfallbarer Anwartschaft zurück nach L1 verschieben? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +Das ist nicht nötig, da sich Ihr Vesting-Vertrag noch in L1 befindet. Wenn Sie die Übertragungstools verwenden, erstellen Sie einfach einen neuen Vertrag in L2, der mit Ihrem Vesting-Vertrag in L1 verbunden ist, und Sie können GRT zwischen den beiden Verträgen hin- und herschicken. -## Why do I need to move my vesting contract to begin with? +### Warum muss ich meinen Vesting-Vertrag zunächst einmal verschieben? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +Sie müssen einen Unverfallbarkeitsvertrag für L2 einrichten, damit dieses Konto Eigentümer Ihres Einsatzes oder Ihrer Delegation auf L2 werden kann. Andernfalls gibt es für Sie keine Möglichkeit, den Einsatz/die Delegation auf L2 zu übertragen, ohne den Vesting-Vertrag zu "umgehen". -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Was passiert, wenn ich versuche, meinen Vertrag auszuzahlen, obwohl er nur teilweise unverfallbar ist? Ist das möglich? -This is not a possibility. You can move funds back to L1 and withdraw them there. +Dies ist nicht möglich. Sie können Gelder zurück nach L1 verschieben und sie dort abheben. -## What if I don't want to move my vesting contract to L2? +### Was ist, wenn ich meinen Vertrag mit Unverfallbarkeit nicht auf L2 übertragen möchte? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. 
You will still be able to withdraw your GRT from L1 when available. +Sie können weiterhin auf L1 staken/delegieren. Im Laufe der Zeit können Sie in Erwägung ziehen, auf L2 zu wechseln, um dort Belohnungen zu ermöglichen, wenn das Protokoll auf Arbitrum skaliert. Beachten Sie, dass diese Transfer-Tools nur für Verträge gelten, die das Abstecken und Delegieren im Protokoll erlauben. Wenn Ihr Vertrag kein Staking oder Delegieren erlaubt oder widerrufbar ist, dann gibt es kein Transfer-Tool. Sie können Ihr GRT trotzdem von L1 abheben, wenn es verfügbar ist. diff --git a/website/pages/de/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/de/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..12a1554750db 100644 --- a/website/pages/de/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/de/arbitrum/l2-transfer-tools-guide.mdx @@ -2,123 +2,123 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +The Graph hat den Wechsel zu L2 auf Arbitrum One leicht gemacht. Für jeden Protokollteilnehmer gibt es eine Reihe von L2-Transfer-Tools, um den Transfer zu L2 für alle Netzwerkteilnehmer nahtlos zu gestalten. Je nachdem, was Sie übertragen möchten, müssen Sie eine bestimmte Anzahl von Schritten befolgen. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Einige häufig gestellte Fragen zu diesen Tools werden in den [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq) beantwortet. Die FAQs enthalten ausführliche Erklärungen zur Verwendung der Tools, zu ihrer Funktionsweise und zu den Dingen, die bei ihrer Verwendung zu beachten sind. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## So übertragen Sie Ihren Untergraphen auf Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Vorteile der Übertragung Ihrer Untergraphen -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +The Graph's Community und die Kernentwickler haben im letzten Jahr den Wechsel zu Arbitrum [vorbereitet] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). Arbitrum, eine Layer-2- oder "L2"-Blockchain, erbt die Sicherheit von Ethereum, bietet aber drastisch niedrigere Gasgebühren. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. 
Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Wenn Sie Ihren Subgraphen auf The Graph Network veröffentlichen oder aktualisieren, interagieren Sie mit intelligenten Verträgen auf dem Protokoll, und dies erfordert die Bezahlung von Gas mit ETH. Indem Sie Ihre Subgraphen zu Arbitrum verschieben, werden alle zukünftigen Aktualisierungen Ihres Subgraphen viel niedrigere Gasgebühren erfordern. Die niedrigeren Gebühren und die Tatsache, dass die Kurationsbindungskurven auf L2 flach sind, machen es auch für andere Kuratoren einfacher, auf Ihrem Subgraphen zu kuratieren, was die Belohnungen für Indexer auf Ihrem Subgraphen erhöht. Diese kostengünstigere Umgebung macht es auch für Indexer preiswerter, Ihren Subgraphen zu indizieren und zu bedienen. Die Belohnungen für die Indexierung werden in den kommenden Monaten auf Arbitrum steigen und auf dem Ethereum-Mainnet sinken, so dass immer mehr Indexer ihren Einsatz transferieren und ihre Operationen auf L2 einrichten werden. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Verstehen, was mit dem Signal, Ihrem L1-Subgraphen und den Abfrage-URLs geschieht -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Die Übertragung eines Subgraphen nach Arbitrum verwendet die Arbitrum GRT-Brücke, die wiederum die native Arbitrum-Brücke verwendet, um den Subgraphen nach L2 zu senden. Der "Transfer" löscht den Subgraphen im Mainnet und sendet die Informationen, um den Subgraphen auf L2 mit Hilfe der Brücke neu zu erstellen. Sie enthält auch die vom Eigentümer des Subgraphen signalisierte GRT, die größer als Null sein muss, damit die Brücke die Übertragung akzeptiert. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Wenn Sie sich für die Übertragung des Untergraphen entscheiden, wird das gesamte Kurationssignal des Untergraphen in GRT umgewandelt. Dies ist gleichbedeutend mit dem "Verwerfen" des Subgraphen im Mainnet. Die GRT, die Ihrer Kuration entsprechen, werden zusammen mit dem Subgraphen an L2 gesendet, wo sie für die Prägung von Signalen in Ihrem Namen verwendet werden. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Andere Kuratoren können wählen, ob sie ihren Anteil an GRT zurückziehen oder ihn ebenfalls an L2 übertragen, um das Signal auf demselben Untergraphen zu prägen. Wenn ein Subgraph-Eigentümer seinen Subgraph nicht an L2 überträgt und ihn manuell über einen Vertragsaufruf abmeldet, werden die Kuratoren benachrichtigt und können ihre Kuration zurückziehen. 
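The "fraction" each Curator can later withdraw or move is proportional to the curation shares they held when the subgraph's signal was burned into GRT. A small illustrative calculation with made-up numbers (plain arithmetic, not an on-chain call):

```typescript
// Illustrative arithmetic only: after a transfer or deprecation, all curation signal
// is burned into one pool of GRT, and each Curator's claim is proportional to the
// shares they held on the subgraph.
function curatorClaim(curatorShares: bigint, totalShares: bigint, burnedGrtWei: bigint): bigint {
  if (totalShares === 0n) return 0n;
  return (burnedGrtWei * curatorShares) / totalShares;
}

// Example: 250 of 1,000 shares -> 25% of the burned GRT.
const burned = 10_000n * 10n ** 18n; // 10,000 GRT expressed in wei (18 decimals)
console.log(curatorClaim(250n, 1_000n, burned)); // 2,500 GRT in wei
```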
-As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately.
+Sobald der Subgraph übertragen wurde, erhalten die Indexer keine Belohnungen mehr für die Indizierung des Subgraphen, da die gesamte Kuration in GRT umgewandelt wird. Es wird jedoch Indexer geben, die 1) übertragene Untergraphen für 24 Stunden weiter bedienen und 2) sofort mit der Indizierung des Untergraphen auf L2 beginnen. Da diese Indexer den Untergraphen bereits indiziert haben, sollte es nicht nötig sein, auf die Synchronisierung des Untergraphen zu warten, und es wird möglich sein, den L2-Untergraphen fast sofort abzufragen.
-Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible.
+Anfragen an den L2-Subgraphen müssen an eine andere URL gerichtet werden (an `arbitrum-gateway.thegraph.com`), aber die L1-URL wird noch mindestens 48 Stunden lang funktionieren. Danach wird das L1-Gateway (für eine gewisse Zeit) Anfragen an das L2-Gateway weiterleiten, was jedoch zu zusätzlichen Latenzzeiten führt. Es wird daher empfohlen, alle Anfragen so bald wie möglich auf die neue URL umzustellen.
-## Choosing your L2 wallet
+## Auswahl Ihrer L2-Wallet
-When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates.
+Als Sie Ihren Subgraphen im Mainnet veröffentlicht haben, haben Sie eine angeschlossene Wallet benutzt, um den Subgraphen zu erstellen, und diese Wallet besitzt die NFT, die diesen Subgraphen repräsentiert und Ihnen erlaubt, Updates zu veröffentlichen.
-When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2.
+Wenn man den Subgraphen zu Arbitrum überträgt, kann man eine andere Wallet wählen, die diesen Subgraphen NFT auf L2 besitzen wird.
-If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1.
+Wenn Sie eine "normale" Wallet wie MetaMask verwenden (ein Externally Owned Account oder EOA, d.h. eine Wallet, die kein Smart Contract ist), dann ist dies optional und es wird empfohlen, die gleiche Eigentümeradresse wie in L1 beizubehalten.
-If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph.
+Wenn Sie eine Smart-Contract-Wallet, wie z.B. eine Multisig (z.B. Safe), verwenden, dann ist die Wahl einer anderen L2-Wallet-Adresse zwingend erforderlich, da es sehr wahrscheinlich ist, dass dieses Konto nur im Mainnet existiert und Sie mit dieser Wallet keine Transaktionen auf Arbitrum durchführen können. Wenn Sie weiterhin eine Smart Contract Wallet oder Multisig verwenden möchten, erstellen Sie eine neue Wallet auf Arbitrum und verwenden Sie deren Adresse als L2-Besitzer Ihres Subgraphen. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Es ist sehr wichtig, eine Wallet-Adresse zu verwenden, die Sie kontrollieren und die Transaktionen auf Arbitrum durchführen kann. Andernfalls geht der Subgraph verloren und kann nicht wiederhergestellt werden.** -## Preparing for the transfer: bridging some ETH +## Vorbereitung der Übertragung: Überbrückung einiger ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Die Übertragung des Subgraphen beinhaltet das Senden einer Transaktion über die Brücke und das Ausführen einer weiteren Transaktion auf Arbitrum. Die erste Transaktion verwendet ETH im Mainnet und enthält einige ETH, um das Gas zu bezahlen, wenn die Nachricht auf L2 empfangen wird. Wenn dieses Gas jedoch nicht ausreicht, müssen Sie die Transaktion wiederholen und das Gas direkt auf L2 bezahlen (dies ist "Schritt 3: Bestätigen des Transfers" unten). Dieser Schritt **muss innerhalb von 7 Tagen nach Beginn der Überweisung** ausgeführt werden. Außerdem wird die zweite Transaktion ("Schritt 4: Beenden der Übertragung auf L2") direkt auf Arbitrum durchgeführt. Aus diesen Gründen benötigen Sie etwas ETH auf einer Arbitrum-Wallet. Wenn Sie ein Multisig- oder Smart-Contract-Konto verwenden, muss sich die ETH in der regulären (EOA-) Wallet befinden, die Sie zum Ausführen der Transaktionen verwenden, nicht in der Multisig-Wallet selbst. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. +Sie können ETH auf einigen Börsen kaufen und direkt auf Arbitrum abheben, oder Sie können die Arbitrum-Brücke verwenden, um ETH von einer Mainnet-Wallet zu L2 zu senden: [bridge.arbitrum.io](http://bridge.arbitrum.io). Da die Gasgebühren auf Arbitrum niedriger sind, sollten Sie nur eine kleine Menge benötigen. Es wird empfohlen, mit einem niedrigen Schwellenwert (z.B. 
0,01 ETH) zu beginnen, damit Ihre Transaktion genehmigt wird. -## Finding the subgraph Transfer Tool +## Suche nach dem Untergraphen Transfer Tool -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Sie finden das L2 Transfer Tool, wenn Sie die Seite Ihres Subgraphen in Subgraph Studio ansehen: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +Sie ist auch im Explorer verfügbar, wenn Sie mit der Wallet verbunden sind, die einen Untergraphen besitzt, und auf der Seite dieses Untergraphen im Explorer: ![Transferring to L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +Wenn Sie auf die Schaltfläche auf L2 übertragen klicken, wird das Übertragungstool geöffnet, mit dem Sie den Übertragungsvorgang starten können. -## Step 1: Starting the transfer +## Schritt 1: Starten der Übertragung -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Bevor Sie mit dem Transfer beginnen, müssen Sie entscheiden, welche Adresse den Subgraphen auf L2 besitzen wird (siehe "Wählen Sie Ihre L2-Wallet" oben), und es wird dringend empfohlen, einige ETH für Gas bereits auf Arbitrum zu überbrücken (siehe "Vorbereitung des Transfers: Überbrücken einiger ETH" oben). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Bitte beachten Sie auch, dass die Übertragung des Untergraphen ein Signal ungleich Null auf dem Untergraphen mit demselben Konto erfordert, das den Untergraphen besitzt; wenn Sie kein Signal auf dem Untergraphen haben, müssen Sie ein wenig Kuration hinzufügen (das Hinzufügen eines kleinen Betrags wie 1 GRT würde ausreichen). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +Nachdem Sie das Transfer-Tool geöffnet haben, können Sie die L2-Wallet-Adresse in das Feld "Empfänger-Wallet-Adresse" eingeben - **vergewissern Sie sich, dass Sie hier die richtige Adresse eingegeben haben**. Wenn Sie auf "Transfer Subgraph" klicken, werden Sie aufgefordert, die Transaktion auf Ihrer Wallet auszuführen (beachten Sie, dass ein gewisser ETH-Wert enthalten ist, um das L2-Gas zu bezahlen); dadurch wird der Transfer eingeleitet und Ihr L1-Subgraph außer Kraft gesetzt (siehe "Verstehen, was mit Signal, Ihrem L1-Subgraph und Abfrage-URLs passiert" weiter oben für weitere Details darüber, was hinter den Kulissen passiert). 
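Since a wrong receiving address cannot be recovered from, it can help to checksum-validate the L2 address before submitting and, if you plan to use a multisig, to confirm it actually has code deployed on Arbitrum One. A hedged ethers v6 sketch; the RPC URL and sample address are placeholders, not values from this guide.

```typescript
import { ethers } from "ethers";

// Illustrative pre-flight check for the "Receiving wallet address" field.
// The RPC URL is a placeholder; any Arbitrum One endpoint would do.
const arbitrum = new ethers.JsonRpcProvider("https://arbitrum-one.example-rpc.com");

async function checkReceivingAddress(input: string): Promise<void> {
  // Throws if the address is malformed or fails its EIP-55 checksum.
  const address = ethers.getAddress(input);

  // "0x" means no deployed code: an EOA (or a contract that does not exist on Arbitrum One).
  const code = await arbitrum.getCode(address);
  if (code === "0x") {
    console.log(`${address} has no code on Arbitrum One: fine for an EOA, wrong for a multisig.`);
  } else {
    console.log(`${address} is a contract deployed on Arbitrum One (e.g. a Safe).`);
  }
}

// Replace with the address you intend to enter in the transfer tool.
checkReceivingAddress("0x0000000000000000000000000000000000000000").catch(console.error);
```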
-If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Wenn Sie diesen Schritt ausführen, **vergewissern Sie sich, dass Sie bis zum Abschluss von Schritt 3 in weniger als 7 Tagen fortfahren, sonst gehen der Subgraph und Ihr Signal GRT verloren.** Dies liegt daran, wie L1-L2-Nachrichten auf Arbitrum funktionieren: Nachrichten, die über die Brücke gesendet werden, sind "wiederholbare Tickets", die innerhalb von 7 Tagen ausgeführt werden müssen, und die erste Ausführung muss möglicherweise wiederholt werden, wenn es Spitzen im Gaspreis auf Arbitrum gibt. ![Start the trnasfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Schritt 2: Warten, bis der Untergraph L2 erreicht hat -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Nachdem Sie die Übertragung gestartet haben, muss die Nachricht, die Ihren L1-Subgraphen an L2 sendet, die Arbitrum-Brücke durchlaufen. Dies dauert etwa 20 Minuten (die Brücke wartet darauf, dass der Mainnet-Block, der die Transaktion enthält, vor potenziellen Reorgs der Kette "sicher" ist). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Sobald diese Wartezeit abgelaufen ist, versucht Arbitrum, die Übertragung auf den L2-Verträgen automatisch auszuführen. ![Wait screen](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## Schritt 3: Bestätigung der Übertragung -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +In den meisten Fällen wird dieser Schritt automatisch ausgeführt, da das in Schritt 1 enthaltene L2-Gas ausreichen sollte, um die Transaktion auszuführen, die den Untergraphen auf den Arbitrum-Verträgen erhält. In einigen Fällen ist es jedoch möglich, dass ein Anstieg der Gaspreise auf Arbitrum dazu führt, dass diese automatische Ausführung fehlschlägt. In diesem Fall wird das "Ticket", das Ihren Subgraphen an L2 sendet, ausstehend sein und einen erneuten Versuch innerhalb von 7 Tagen erfordern. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Wenn dies der Fall ist, müssen Sie sich mit einer L2-Wallet verbinden, die etwas ETH auf Arbitrum hat, Ihr Wallet-Netzwerk auf Arbitrum umstellen und auf "Confirm Transfer" klicken, um die Transaktion zu wiederholen. ![Confirm the transfer to L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## Schritt 4: Abschluss der Übertragung auf L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. 
You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +Zu diesem Zeitpunkt wurden Ihr Subgraph und GRT auf Arbitrum empfangen, aber der Subgraph ist noch nicht veröffentlicht. Sie müssen sich mit der L2-Wallet verbinden, die Sie als empfangende Wallet gewählt haben, Ihr Wallet-Netzwerk auf Arbitrum umstellen und auf "Subgraph" veröffentlichen klicken. ![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) ![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Dadurch wird der Untergraph veröffentlicht, so dass Indexer, die auf Arbitrum arbeiten, damit beginnen können, ihn zu bedienen. Es wird auch ein Kurationssignal unter Verwendung der GRT, die von L1 übertragen wurden, eingeleitet. -## Step 5: Updating the query URL +## Schritt 5: Aktualisierung der Abfrage-URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Ihr Subgraph wurde erfolgreich zu Arbitrum übertragen! Um den Subgraphen abzufragen, wird die neue URL lauten: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Beachten Sie, dass die ID des Subgraphen auf Arbitrum eine andere sein wird als die, die Sie im Mainnet hatten, aber Sie können sie immer im Explorer oder Studio finden. Wie oben erwähnt (siehe "Verstehen, was mit Signal, Ihrem L1-Subgraphen und Abfrage-URLs passiert"), wird die alte L1-URL noch eine kurze Zeit lang unterstützt, aber Sie sollten Ihre Abfragen auf die neue Adresse umstellen, sobald der Subgraph auf L2 synchronisiert worden ist. -## How to transfer your curation to Arbitrum (L2) +## Wie Sie Ihre Kuration auf Arbitrum übertragen (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Verstehen, was mit der Kuration bei der Übertragung von Untergraphen auf L2 geschieht -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +Wenn der Eigentümer eines Untergraphen einen Untergraphen an Arbitrum überträgt, werden alle Signale des Untergraphen gleichzeitig in GRT konvertiert. Dies gilt für "automatisch migrierte" Signale, d.h. Signale, die nicht spezifisch für eine Subgraphenversion oder einen Einsatz sind, sondern der neuesten Version eines Subgraphen folgen. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). 
Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph.
+Diese Umwandlung von Signal in GRT entspricht dem, was passieren würde, wenn der Eigentümer des Subgraphen den Subgraphen in L1 verwerfen würde. Wenn der Subgraph veraltet oder übertragen wird, werden alle Kurationssignale gleichzeitig "verbrannt" (unter Verwendung der Kurationsbindungskurve) und das resultierende GRT wird vom GNS-Smart-Contract gehalten (das ist der Vertrag, der Subgraph-Upgrades und automatisch migrierte Signale handhabt). Jeder Kurator auf diesem Subgraphen hat daher einen Anspruch auf dieses GRT proportional zu der Menge an Anteilen, die er für den Subgraphen hatte.
-A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph.
+Ein Teil dieser GRT, der dem Eigentümer des Untergraphen entspricht, wird zusammen mit dem Untergraphen an L2 gesendet.
-At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.
+Zu diesem Zeitpunkt fallen für die kuratierten GRT keine Abfragegebühren mehr an. Kuratoren können daher wählen, ob sie ihre GRT abheben oder auf denselben Subgraphen auf L2 übertragen, wo sie zum Prägen neuer Kurationssignale verwendet werden können. Es besteht keine Eile, da die GRT unbegrenzt gehalten werden können und jeder einen Betrag proportional zu seinen Anteilen erhält, unabhängig davon, wann er dies tut.
-## Choosing your L2 wallet
+## Auswahl Ihrer L2-Wallet
-If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2.
+Wenn Sie sich entscheiden, Ihre kuratierten GRT auf L2 zu übertragen, können Sie eine andere Wallet wählen, die das Kurationssignal auf L2 besitzen wird.
 If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1.
@@ -148,7 +148,7 @@ Starting the transfer:
 After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs).
-Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts.
+Sobald diese Wartezeit abgelaufen ist, versucht Arbitrum, die Übertragung auf den L2-Verträgen automatisch auszuführen.
 ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png)
@@ -156,7 +156,7 @@ Once this wait time is over, Arbitrum will attempt to auto-execute the transfer
 In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days.
-If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction.
+Wenn dies der Fall ist, müssen Sie sich mit einer L2-Wallet verbinden, die etwas ETH auf Arbitrum hat, Ihr Wallet-Netzwerk auf Arbitrum umstellen und auf "Confirm Transfer" klicken, um die Transaktion zu wiederholen. ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) diff --git a/website/pages/de/billing.mdx b/website/pages/de/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/de/billing.mdx +++ b/website/pages/de/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). 
+ +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/de/chain-integration-overview.mdx b/website/pages/de/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/de/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. 
Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. 
diff --git a/website/pages/de/cookbook/arweave.mdx b/website/pages/de/cookbook/arweave.mdx index 15aaf1a38831..06ef4bbb413a 100644 --- a/website/pages/de/cookbook/arweave.mdx +++ b/website/pages/de/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -53,7 +53,7 @@ $ graph codegen # generates types from the schema file identified in the manifes $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -## Subgraph Manifest Definition +## Subgraf-Manifest-Definition The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -97,13 +97,13 @@ Arweave data sources support two types of handlers: > Note: [Bundlr](https://bundlr.network/) transactions are not supported yet. -## Schema Definition +## Schema-Definition Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). -## AssemblyScript Mappings +## AssemblyScript-Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -162,7 +162,7 @@ graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. -## Example Subgraphs +## Beispiele von Subgrafen Here is an example subgraph for reference: diff --git a/website/pages/de/cookbook/base-testnet.mdx b/website/pages/de/cookbook/base-testnet.mdx index b1e3a4fc7c6d..fd5481bbc752 100644 --- a/website/pages/de/cookbook/base-testnet.mdx +++ b/website/pages/de/cookbook/base-testnet.mdx @@ -65,7 +65,7 @@ The previous command creates a scaffold subgraph that you can use as a starting - Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in manifest file to deploy your subgraph on Base testnet. - Schema (schema.graphql) - The GraphQL schema defines what data you wish to retreive from the subgraph. -- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. +- AssemblyScript Mappings (mapping.ts) - Dies ist der Code, der die Daten aus Ihren Datenquellen in die im Schema definierten Entitäten übersetzt. If you want to index additional data, you will need extend the manifest, schema and mappings. diff --git a/website/pages/de/cookbook/cosmos.mdx b/website/pages/de/cookbook/cosmos.mdx index ef21e4bc0855..0978711d1b77 100644 --- a/website/pages/de/cookbook/cosmos.mdx +++ b/website/pages/de/cookbook/cosmos.mdx @@ -1,51 +1,51 @@ --- -title: Building Subgraphs on Cosmos +title: Erstellen von Subgrafen auf Cosmos --- -This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains. +Dieser Guide ist eine Einführung in das Erstellen von Subgrafen, die [Cosmos](https://docs.cosmos.network/)-basierte Blockchains indizieren. -## What are Cosmos subgraphs? +## Was sind Cosmos-Subgrafen? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph ermöglicht es Entwicklern, Blockchain-Ereignisse zu verarbeiten und die resultierenden Daten durch eine offene GraphQL-API, die als Subgraf bezeichnet wird, einfach verfügbar zu machen. [Graph Node](https://github.com/graphprotocol/graph-node) ist jetzt in der Lage, Cosmos-Ereignisse zu verarbeiten, was bedeutet, dass Cosmos-Entwickler jetzt Subgrafen erstellen können, um On-Chain-Ereignisse einfach zu indizieren. -There are four types of handlers supported in Cosmos subgraphs: +In Cosmos-Subgrafen werden vier Arten von Handlern unterstützt: -- **Block handlers** run whenever a new block is appended to the chain. -- **Event handlers** run when a specific event is emitted. -- **Transaction handlers** run when a transaction occurs. -- **Message handlers** run when a specific message occurs. +- **Block-Handler** werden ausgeführt, wenn ein neuer Block an die Kette angehängt wird. +- **Ereignis-Handler** werden ausgeführt, wenn ein bestimmtes Ereignis ausgegeben wird. 
+- **Transaktions-Handler** werden ausgeführt, wenn eine Transaktion stattfindet. +- **Message-Handler** werden ausgeführt, wenn eine bestimmte Nachricht auftritt. -Based on the [official Cosmos documentation](https://docs.cosmos.network/): +Basierend auf der [offiziellen Cosmos-Dokumentation](https://docs.cosmos.network/): -> [Events](https://docs.cosmos.network/main/core/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. +> [Events](https://docs.cosmos.network/main/core/events) sind Objekte, die Informationen über die Ausführung der Anwendung enthalten. Sie werden hauptsächlich von Dienstanbietern wie Block-Explorern und Wallets verwendet, um die Ausführung verschiedener Nachrichten und Indextransaktionen zu verfolgen. -> [Transactions](https://docs.cosmos.network/main/core/transactions) are objects created by end-users to trigger state changes in the application. +> [Transaktionen](https://docs.cosmos.network/main/core/transactions) sind Objekte, die von Endbenutzern erstellt werden, um Statusänderungen in der Anwendung auszulösen. -> [Messages](https://docs.cosmos.network/main/core/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. +> [Nachrichten](https://docs.cosmos.network/main/core/transactions#messages) sind modulspezifische Objekte, die Zustandsübergänge im Rahmen des Moduls, zu dem sie gehören, auslösen. -Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. +Obwohl mit einem Block-Handler auf alle Daten zugegriffen werden kann, ermöglichen andere Handler den Subgrafen-Entwicklern, Daten viel detaillierter zu verarbeiten. -## Building a Cosmos subgraph +## Erstellen eines Subgrafen auf Cosmos -### Subgraph Dependencies +### Subgraf-Abhängigkeiten -[graph-cli](https://github.com/graphprotocol/graph-cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. +[graph-cli](https://github.com/graphprotocol/graph-cli) ist ein CLI-Tool zum Erstellen und Bereitstellen von Subgrafen, Version `>=0.30.0` ist erforderlich, um mit Cosmos-Subgrafen zu arbeiten. -[graph-ts](https://github.com/graphprotocol/graph-ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. +[graph-ts](https://github.com/graphprotocol/graph-ts) ist eine Bibliothek mit subgrafspezifischen Typen, Version `>=0.27.0` ist erforderlich, um mit Cosmos-Subgrafen zu arbeiten. -### Subgraph Main Components +### Hauptkomponenten des Subgrafen -There are three key parts when it comes to defining a subgraph: +Bei der Definition eines Subgrafen gibt es drei Schlüsselelemente: -**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. +**subgraph.yaml**: eine YAML-Datei, die das Subgraf-Manifest enthält, das angibt, welche Ereignisse verfolgt und wie sie verarbeitet werden sollen. -**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. +**schema.graphql**: ein GraphQL-Schema, das definiert, welche Daten für Ihren Subgrafen gespeichert werden und wie sie durch GraphQL abgefragt werden.
-**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript)-Code, der Blockchain-Daten in die in Ihrem Schema definierten Entitäten übersetzt. -### Subgraph Manifest Definition +### Subgraf-Manifest-Definition -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: +Das Subgraf-Manifest (`subgraph.yaml`) identifiziert die Datenquellen für den Subgrafen, die relevanten Trigger und die Funktionen (`handlers`), die als Reaktion auf diese Trigger ausgeführt werden sollen. Unten finden Sie ein Beispiel für ein Subgraf-Manifest für einen Cosmos-Subgrafen: ```yaml specVersion: 0.0.5 @@ -77,15 +77,15 @@ dataSources: - Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). - The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. -### Schema Definition +### Schema-Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graph-ql-schema). +Die Schema-Definition beschreibt die Struktur der resultierenden Subgraf-Datenbank und die Beziehungen zwischen Entitäten. Dies ist von der ursprünglichen Datenquelle unabhängig. Weitere Details zur Subgraf-Schema-Definition finden Sie [hier](/developing/creating-a-subgraph/#the-graph-ql-schema). -### AssemblyScript Mappings +### AssemblyScript-Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Die Cosmos-Indizierung führt Cosmos-spezifische Datentypen in die [AssemblyScript-API](/developing/assemblyscript-api/) ein. ```tsx class Block { @@ -163,22 +163,22 @@ class Any { } ``` -Each handler type comes with its own data structure that is passed as an argument to a mapping function. +Jeder Handler-Typ verfügt über eine eigene Datenstruktur, die als Argument an eine Zuordnungsfunktion übergeben wird. -- Block handlers receive the `Block` type. -- Event handlers receive the `EventData` type. -- Transaction handlers receive the `TransactionData` type. -- Message handlers receive the `MessageData` type. +- Block-Handler erhalten den Typ `Block`. +- Event-Handler erhalten den Typ `EventData`. +- Transaktionshandler erhalten den Typ `TransactionData`. +- Message-Handler erhalten den Typ `MessageData`. -As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`).
+Als Teil von `MessageData` erhält der Message-Handler einen Transaktionskontext, der die wichtigste Information zu einer Transaktion enthält, die eine Nachricht einschließt. Der Transaktionskontext ist auch im Typ `EventData` verfügbar, aber nur, wenn das entsprechende Ereignis mit einer Transaktion verknüpft ist. Zusätzlich erhalten alle Handler eine Referenz auf einen Block (`HeaderOnlyBlock`). -You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Die vollständige Liste der Typen für die Cosmos-Integration finden Sie [hier](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). -### Message decoding +### Nachrichtendecodierung It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://developers.google.com/protocol-buffers/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Ein Beispiel zum Decodieren von Nachrichtendaten in einem Subgrafen finden Sie [hier](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). ## Creating and building a Cosmos subgraph @@ -188,15 +188,15 @@ The first step before starting to write the subgraph mappings is to generate the $ graph codegen ``` -Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: +Sobald die Mappings fertig sind, muss der Subgraf erstellt werden. Dieser Schritt hebt alle Fehler hervor, die das Manifest oder die Mappings haben könnten. Ein Subgraf muss erfolgreich erstellt werden, um auf dem Graph-Knoten bereitgestellt zu werden. Dies kann mit dem CLI-Befehl `build` erfolgen: ```bash $ graph build ``` -## Deploying a Cosmos subgraph +## Bereitstellen eines Subgrafen auf Cosmos -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: +Sobald Ihr Subgraf erstellt wurde, können Sie ihn mit dem CLI-Befehl `graph deploy` bereitstellen, nachdem Sie den CLI-Befehl `graph create` ausgeführt haben: **Hosted Service** @@ -220,17 +220,17 @@ graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost ## Querying a Cosmos subgraph -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +Der GraphQL-Endpunkt für Cosmos-Subgrafen wird durch die Schemadefinition mit der vorhandenen API-Schnittstelle bestimmt. Weitere Informationen finden Sie in der [GraphQL-API-Dokumentation](/querying/graphql-api/). -## Supported Cosmos Blockchains +## Unterstützte Cosmos-Blockchains ### Cosmos Hub -#### What is Cosmos Hub? +#### Was ist Cosmos Hub? -The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. 
You can visit the [official documentation](https://docs.cosmos.network/) for more information. +Die [Cosmos Hub Blockchain](https://hub.cosmos.network/) ist die erste Blockchain im [Cosmos](https://cosmos.network/)-Ökosystem. Weitere Informationen finden Sie in der [offiziellen Dokumentation](https://docs.cosmos.network/). -#### Networks +#### Netzwerke Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. @@ -242,18 +242,18 @@ Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testne [Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. -#### Networks +#### Netzwerke Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. -## Example Subgraphs +## Beispiele von Subgrafen -Here are some example subgraphs for reference: +Hier sind einige Beispiele von Subgrafen als Referenz: -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Beispiel für Blockfilterung](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Beispiel für Validator-Belohnungen](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Beispiel für Validator-Delegierungen](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) [Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/de/cookbook/grafting.mdx b/website/pages/de/cookbook/grafting.mdx index 54ad7a0eaff8..949f13ff0dc6 100644 --- a/website/pages/de/cookbook/grafting.mdx +++ b/website/pages/de/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. 
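+When you do later use grafting for a subsequent version, the configuration lives in the subgraph manifest. As a rough sketch (the deployment ID and block number below are placeholders, not values from this guide), a graft block in `subgraph.yaml` looks like this:
+
+```yaml
+# Sketch only: grafting is declared as a feature, and `graft` points at the base deployment.
+features:
+  - grafting
+graft:
+  base: Qm... # deployment ID of the stable base subgraph (placeholder)
+  block: 7345624 # block up to and including which the base data is reused (placeholder)
+```
+
+Graph Node copies the base subgraph's data up to and including that block and then continues indexing the new version from there.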
+ ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: @@ -32,7 +48,7 @@ Building subgraphs is an essential part of The Graph, described more in depth [h > Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). -## Subgraph Manifest Definition +## Subgraf-Manifest-Definition The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: diff --git a/website/pages/de/cookbook/near.mdx b/website/pages/de/cookbook/near.mdx index 879e8e5c15aa..71a5a578b8a1 100644 --- a/website/pages/de/cookbook/near.mdx +++ b/website/pages/de/cookbook/near.mdx @@ -48,7 +48,7 @@ $ graph codegen # generates types from the schema file identified in the manifes $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -### Subgraph Manifest Definition +### Subgraf-Manifest-Definition The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: @@ -92,13 +92,13 @@ NEAR data sources support two types of handlers: - `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/docs/concepts/account#subaccounts) must be added as independent data sources). -### Schema Definition +### Schema-Definition Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). -### AssemblyScript Mappings +### AssemblyScript-Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Die Handler für die Ereignisverarbeitung sind in [AssemblyScript](https://www.assemblyscript.org/) geschrieben. NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/assemblyscript-api). @@ -231,9 +231,9 @@ We will provide more information on running the above components soon. The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. -## Example Subgraphs +## Beispiele von Subgrafen -Here are some example subgraphs for reference: +Hier sind einige Beispiele von Subgrafen als Referenz: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? 
-If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/de/cookbook/substreams-powered-subgraphs.mdx b/website/pages/de/cookbook/substreams-powered-subgraphs.mdx index 5c74e7ed8485..6b84c84358c8 100644 --- a/website/pages/de/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/de/cookbook/substreams-powered-subgraphs.mdx @@ -2,7 +2,7 @@ title: Substreams-powered subgraphs --- -[Substreams](https://substreams.streamingfast.io/) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. +[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. ## Requirements @@ -22,7 +22,7 @@ graph init --from-example substreams-powered-subgraph ## Defining a Substreams package -A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](https://substreams.streamingfast.io/), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. 
To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): diff --git a/website/pages/de/cookbook/upgrading-a-subgraph.mdx b/website/pages/de/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/de/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/de/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/de/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/de/deploying/hosted-service.mdx b/website/pages/de/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/de/deploying/hosted-service.mdx +++ b/website/pages/de/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/de/deploying/subgraph-studio-faqs.mdx b/website/pages/de/deploying/subgraph-studio-faqs.mdx index 540c94480e59..3d3e858553bc 100644 --- a/website/pages/de/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/de/deploying/subgraph-studio-faqs.mdx @@ -2,30 +2,30 @@ title: Subgraph Studio-FAQs --- -## 1. What is Subgraph Studio? +## 1. Was ist Subgraph Studio? 
-[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. +[Subgraph Studio](https://thegraph.com/studio/) ist eine DApp zum Erstellen, Verwalten und Veröffentlichen von Subgrafen und API-Schlüsseln. -## 2. How do I create an API Key? +## 2. Wie erstelle ich einen API-Schlüssel? To create an API, navigate to the Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. -## 3. Can I create multiple API Keys? +## 3. Kann ich mehrere API-Schlüssel erstellen? Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). ## 4. How do I restrict a domain for an API Key? -After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. +Nachdem Sie einen API-Schlüssel erstellt haben, können Sie im Abschnitt Sicherheit die Domänen definieren, die einen bestimmten API-Schlüssel abfragen können. -## 5. Can I transfer my subgraph to another owner? +## 5. Kann ich meinen Subgrafen an einen anderen Eigentümer übertragen? -Yes, subgraphs that have been published to Mainnet can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. +Ja, Subgrafen, die auf Mainnet veröffentlicht wurden, können auf ein neues Wallet oder eine Multisig übertragen werden. Klicken Sie dazu auf der Detailseite des Subgrafen auf die drei Punkte neben der Schaltfläche „Veröffentlichen“ ('Publish') und wählen Sie „Inhaberschaft übertragen“ ('Transfer ownership'). -Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. +Beachten Sie, dass Sie den Subgrafen nach der Übertragung nicht mehr in Studio sehen oder bearbeiten können. ## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? -You can find the query URL of each subgraph in the Subgraph Details section of The Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in the Subgraph Studio. +Sie finden die Abfrage-URL jedes Subgrafen im Abschnitt „Subgraf-Details“ des Graph-Explorers. Wenn Sie auf die Schaltfläche „Abfrage“ (“Query”) klicken, werden Sie zu einem Bereich weitergeleitet, in dem Sie die Abfrage-URL des Subgrafen sehen können, an dem Sie interessiert sind. Sie können dann den Platzhalter `` mit dem API-Schlüssel, den Sie im Subgraph Studio nutzen möchten, ersetzen. -Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. +Denken Sie daran, dass Sie einen API-Schlüssel erstellen und jeden im Netzwerk veröffentlichten Subgrafen abfragen können, auch wenn Sie selbst einen Subgrafen erstellen. Diese Abfragen über den neuen API-Schlüssel sind wie alle anderen im Netzwerk kostenpflichtige Abfragen. 
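+As a rough illustration (the exact query URL for a given subgraph is shown in Graph Explorer, and the API key and subgraph ID below are placeholders), a paid query through the gateway is an ordinary GraphQL POST request:
+
+```bash
+# Placeholder values: substitute your own API key and the subgraph ID shown in Graph Explorer.
+curl -X POST \
+  -H 'Content-Type: application/json' \
+  -d '{ "query": "{ _meta { block { number } } }" }' \
+  'https://gateway.thegraph.com/api/<API-KEY>/subgraphs/id/<SUBGRAPH-ID>'
+```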
diff --git a/website/pages/de/deploying/subgraph-studio.mdx b/website/pages/de/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/de/deploying/subgraph-studio.mdx +++ b/website/pages/de/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/de/developing/creating-a-subgraph.mdx b/website/pages/de/developing/creating-a-subgraph.mdx index e05ef5070ba9..9211e777b458 100644 --- a/website/pages/de/developing/creating-a-subgraph.mdx +++ b/website/pages/de/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. 
| | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. 
+File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/de/developing/developer-faqs.mdx b/website/pages/de/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/de/developing/developer-faqs.mdx +++ b/website/pages/de/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. 
Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/de/developing/graph-ts/api.mdx b/website/pages/de/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..eaff2f178d6a --- /dev/null +++ b/website/pages/de/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. 
+- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versionen + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and thus supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Converts the string `hex`, which must consist of an even number of hexadecimal digits, to a `Bytes`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Converts `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - returns a new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - returns a new `Bytes` consisting of `this` directly followed by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following methods on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – creates an `Address` from `b`, which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows entities to be loaded from, saved to, and removed from the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities.
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created within a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0, the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using `loadInBlock` avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0, the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entities associated with a given `Holder` entity: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straightforward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ...
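+
+// Finally, write the changes back to the store, as in both update patterns above
+transfer.save()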
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// Das wird nicht funktionieren +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// Das wird funktionieren +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks, and the encoding/decoding of Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of.
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
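+
+Where a fallback value is acceptable, the `try_` result can be wrapped in a small helper so that the mapping code stays readable. The sketch below is illustrative only: it assumes the generated `ERC20Contract` class from the earlier example and the `try_symbol` variant that `graph codegen` creates alongside `symbol`.
+
+```typescript
+import { ERC20Contract } from '../generated/ERC20Contract/ERC20Contract'
+
+// Returns the token symbol, or a placeholder if the call reverts
+function fetchSymbol(contract: ERC20Contract): string {
+  let result = contract.try_symbol()
+  if (result.reverted) {
+    return 'UNKNOWN'
+  }
+  return result.value
+}
+```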
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array<ethereum.Value> = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from the arguments. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message. +- `log.info(fmt: string, args: Array<string>): void` - logs an informational message. +- `log.warning(fmt: string, args: Array<string>): void` - logs a warning. +- `log.error(fmt: string, args: Array<string>): void` - logs an error message. +- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become `['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +##### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/de/developing/graph-ts/common-issues.mdx b/website/pages/de/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/de/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/de/developing/supported-networks.mdx b/website/pages/de/developing/supported-networks.mdx index 58ce56345f7c..cd82305bfce2 100644 --- a/website/pages/de/developing/supported-networks.mdx +++ b/website/pages/de/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. 
diff --git a/website/pages/de/docsearch.json b/website/pages/de/docsearch.json index 8cfff967936d..9f300c69acb0 100644 --- a/website/pages/de/docsearch.json +++ b/website/pages/de/docsearch.json @@ -1,14 +1,14 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Suchen", + "buttonAriaLabel": "Suchen" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", + "resetButtonTitle": "Die Abfrage löschen", + "resetButtonAriaLabel": "Die Abfrage löschen", "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "cancelButtonAriaLabel": "Anuluj" }, "startScreen": { "recentSearchesTitle": "Recent", diff --git a/website/pages/de/firehose.mdx b/website/pages/de/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/de/firehose.mdx +++ b/website/pages/de/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/de/global.json b/website/pages/de/global.json index 6a3eb234bfce..bf08c67e0543 100644 --- a/website/pages/de/global.json +++ b/website/pages/de/global.json @@ -1,14 +1,14 @@ { "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! 
This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "expand": "Erweitern", + "previous": "Zurück", + "next": "Weiter", + "editPage": "Seite bearbeiten", + "pageSections": "Seitenabschnitte", + "linkToThisSection": "Link zu diesem Abschnitt", + "technicalLevelRequired": "Erforderliches technisches Niveau", + "notFoundTitle": "Ups! Diese Seite ging im Weltraum verloren...", + "notFoundSubtitle": "Überprüfen Sie, ob Sie die richtige Adresse verwenden, oder besuchen Sie unsere Website, indem Sie auf den unten stehenden Link klicken.", + "goHome": "Zurück zur Startseite", "video": "Video" } diff --git a/website/pages/de/glossary.mdx b/website/pages/de/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/de/glossary.mdx +++ b/website/pages/de/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. 
- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. 
+- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network-related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2).
diff --git a/website/pages/de/graphcast.mdx b/website/pages/de/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/de/graphcast.mdx +++ b/website/pages/de/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/de/index.json b/website/pages/de/index.json index 83b5aeda83d9..2526bc347927 100644 --- a/website/pages/de/index.json +++ b/website/pages/de/index.json @@ -7,7 +7,7 @@ "description": "Learn more about The Graph" }, "quickStart": { - "title": "Quick Start", + "title": "Schnellstart", "description": "Jump in and start with The Graph" }, "developerFaqs": { @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/de/managing/transferring-subgraph-ownership.mdx b/website/pages/de/managing/transferring-subgraph-ownership.mdx index 1ca1c621a9c9..171a13659f0a 100644 --- a/website/pages/de/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/de/managing/transferring-subgraph-ownership.mdx @@ -1,39 +1,39 @@ --- -title: Transferring Subgraph Ownership +title: Übertragung des Eigentums an Subgraphen --- -The Graph supports the transfer of the ownership of a subgraph. +The Graph unterstützt die Übertragung des Eigentums an einem Subgraphen. -When you deploy a subgraph to mainnet, an NFT will be minted to the address that deployed the subgraph. The NFT is based on a standard ERC721, so it can be easily transferred to different accounts. +Wenn Sie einen Subgraphen im Mainnet bereitstellen, wird eine NFT auf die Adresse gemint, die den Subgraphen bereitgestellt hat. 
Die NFT basiert auf einem Standard ERC721, sodass sie leicht auf verschiedene Konten übertragen werden kann. -Whoever owns the NFT controls the subgraph. If the owner decides to sell the NFT, or transfer it, they will no longer be able to make edits or updates to that subgraph on the network. +Wer den NFT besitzt, kontrolliert den Subgraphen. Wenn der Eigentümer beschließt, das NFT zu verkaufen oder zu übertragen, kann er keine Änderungen oder Aktualisierungen an diesem Subgraphen im Netzwerk mehr vornehmen. -In addition to adding more flexibility to the development lifecycle, this functionality makes certain use cases more convenient, such as moving your control to a multisig or a community member creating it on behalf of a DAO. +Diese Funktionalität erhöht nicht nur die Flexibilität im Entwicklungszyklus, sondern macht auch bestimmte Anwendungsfälle bequemer, z. B. das Verschieben Ihrer Kontrolle zu einer Multisig oder die Erstellung durch ein Community-Mitglied im Namen einer DAO. -## Viewing your subgraph as an NFT +## Betrachtung Ihres Subgraphen als NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like OpenSea: +Um Ihren Subgraphen als NFT zu sehen, können Sie einen NFT-Marktplatz wie OpenSea besuchen: ``` https://opensea.io/your-wallet-address ``` -Or a wallet explorer like **Rainbow.me**: +Oder ein Wallet-Explorer wie **Rainbow.me**: ``` https://rainbow.me/your-wallet-addres ``` -## Transferring ownership of a subgraph +## Übertragen des Eigentums an einem Subgraphen -To transfer ownership of a subgraph, you can use the UI built into Subgraph Studio: +Um das Eigentum an einem Subgraphen zu übertragen, können Sie die in Subgraph Studio integrierte Benutzeroberfläche verwenden: -![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) +![Subgraphen Eigentumsübertragung](/img/subgraph-ownership-transfer-1.png) -And then choose the address that you would like to transfer the subgraph to: +Wählen Sie dann die Adresse, an die Sie den Subgraphen übertragen möchten: -![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) +![Subgraphen Eigentumsübertragung](/img/subgraph-ownership-transfer-2.png) -You can also use the built-in UI of NFT marketplaces like OpenSea: +Sie können auch die integrierte Benutzeroberfläche von NFT-Marktplätzen wie OpenSea verwenden: -![Subgraph Ownership Trasfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) +![Subgraph-Eigentumsübertragung vom NFT-Marktplatz](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/de/mips-faqs.mdx b/website/pages/de/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/de/mips-faqs.mdx +++ b/website/pages/de/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
diff --git a/website/pages/de/network/benefits.mdx b/website/pages/de/network/benefits.mdx index 75a28299b366..cf9a39833987 100644 --- a/website/pages/de/network/benefits.mdx +++ b/website/pages/de/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/de/network/indexing.mdx b/website/pages/de/network/indexing.mdx index a1cf90098c5d..8d273d73853d 100644 --- a/website/pages/de/network/indexing.mdx +++ b/website/pages/de/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. 
It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -174,7 +174,7 @@ Note: To support agile scaling, it is recommended that query and indexing concer > Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Install prerequisites +#### Installieren Sie die Voraussetzungen - Google Cloud-SDK - Kubectl-Befehlszeilentool @@ -299,9 +299,9 @@ Deploy all resources with `kubectl apply -k $dir`. [Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. -#### Getting started from source +#### Einstieg in den Sourcecode -#### Install prerequisites +#### Installieren Sie die Voraussetzungen - **Rust** @@ -309,7 +309,7 @@ Deploy all resources with `kubectl apply -k $dir`. - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Zusätzliche Anforderungen für Ubuntu-Benutzer** - Um einen Graph-Knoten auf Ubuntu auszuführen, sind möglicherweise einige zusätzliche Pakete erforderlich. 
```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -317,7 +317,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Konfiguration -1. Start a PostgreSQL database server +1. Starten Sie einen PostgreSQL-Datenbankserver ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Klonen Sie das [Graph-Knoten](https://github.com/graphprotocol/graph-node)-Repo und erstellen Sie den Sourcecode durch Ausführen von `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Nachdem alle Abhängigkeiten eingerichtet sind, starten Sie den Graph-Knoten: ```sh cargo run -p graph-node --release -- \ @@ -336,7 +336,7 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Erste Schritte mit Docker #### Prerequisites @@ -344,7 +344,7 @@ cargo run -p graph-node --release -- \ #### Konfiguration -1. Clone Graph Node and navigate to the Docker directory: +1. Klonen Sie den Graph-Knoten und navigieren Sie zum Docker-Verzeichnis: ```sh git clone https://github.com/graphprotocol/graph-node @@ -357,7 +357,7 @@ cd graph-node/docker ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Starten Sie einen lokalen Graph-Knoten, der sich mit Ihrem Ethereum-Endpunkt verbindet: ```sh docker-compose up @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). 
-- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/de/new-chain-integration.mdx b/website/pages/de/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/de/new-chain-integration.mdx +++ b/website/pages/de/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is to develop a new Firehose for the chain in question and then integrate that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph.
Some options are below: diff --git a/website/pages/de/operating-graph-node.mdx b/website/pages/de/operating-graph-node.mdx index 6c6c7a640671..0e24f48252df 100644 --- a/website/pages/de/operating-graph-node.mdx +++ b/website/pages/de/operating-graph-node.mdx @@ -1,40 +1,40 @@ --- -title: Operating Graph Node +title: Betreiben eines Graph-Knotens --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node ist die Komponente, die Subgrafen indiziert und die resultierenden Daten zur Abfrage über eine GraphQL-API verfügbar macht. Als solches ist es für den Indexer-Stack von zentraler Bedeutung, und der korrekte Betrieb des Graph-Knotens ist entscheidend für den Betrieb eines erfolgreichen Indexers. -This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). +Dies bietet eine kontextbezogene Übersicht über Graph Node und einige der erweiterten Optionen, die Indexierern zur Verfügung stehen. Ausführliche Dokumentation und Anleitungen finden Sie im [Graph Node-Repository](https://github.com/graphprotocol/graph-node). ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) ist die Referenzimplementierung für die Indizierung von Subgrafen auf The Graph Network, die Verbindung zu Blockchain-Clients, die Indizierung von Subgrafen und die Bereitstellung indizierter Daten für Abfragen. -Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (und der gesamte Indexer-Stack) kann auf Bare-Metal oder in einer Cloud-Umgebung ausgeführt werden. Diese Flexibilität der zentralen Indizierungskomponente ist entscheidend für die Robustheit von The Graph Protocol. In ähnlicher Weise kann Graph Node [aus dem Sourcecode erstellt werden](https://github.com/graphprotocol/graph-node), oder Indexer können eines der [bereitgestellten Docker-Images](https://hub.docker.com/r/graphprotocol/graph-node) benutzen. -### PostgreSQL database -### PostgreSQL-Datenbank -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +Der Hauptspeicher für den Graph-Knoten, hier werden Subgraf-Daten sowie Metadaten zu Subgrafen und Subgraf-unabhängige Netzwerkdaten wie Block-Cache und eth_call-Cache gespeichert. -### Network clients +### Netzwerk-Clients In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple.
While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). -### IPFS Nodes +### IPFS-Knoten -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraf-Bereitstellungsmetadaten werden im IPFS-Netzwerk gespeichert. Der Graph-Knoten greift hauptsächlich während der Subgraf-Bereitstellung auf den IPFS-Knoten zu, um das Subgraf-Manifest und alle verknüpften Dateien abzurufen. Netzwerk-Indexierer müssen keinen eigenen IPFS-Knoten hosten, ein IPFS-Knoten für das Netzwerk wird unter https://ipfs.network.thegraph.com gehostet. -### Prometheus metrics server +### Prometheus-Metrikserver -To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. +Um Überwachung und Berichterstellung zu ermöglichen, kann Graph Node optional Metriken auf einem Prometheus-Metrikserver protokollieren. -### Getting started from source +### Einstieg in den Sourcecode -#### Install prerequisites +#### Installieren Sie die Voraussetzungen - **Rust** @@ -42,7 +42,7 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Zusätzliche Anforderungen für Ubuntu-Benutzer** - Um einen Graph-Knoten auf Ubuntu auszuführen, sind möglicherweise einige zusätzliche Pakete erforderlich. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Konfiguration -1. Start a PostgreSQL database server +1. Starten Sie einen PostgreSQL-Datenbankserver ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. 
Klonen Sie das [Graph-Knoten](https://github.com/graphprotocol/graph-node)-Repo und erstellen Sie den Sourcecode durch Ausführen von `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Nachdem alle Abhängigkeiten eingerichtet sind, starten Sie den Graph-Knoten: ```sh cargo run -p graph-node --release -- \ @@ -69,13 +69,13 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -### Getting started with Kubernetes +### Erste Schritte mit Kubernetes -A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +Eine vollständige Kubernetes-Beispielkonfiguration finden Sie im [Indexer-Repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Ports -When it is running Graph Node exposes the following ports: +Wenn es ausgeführt wird, stellt Graph Node die folgenden Ports zur Verfügung: | Port | Zweck | Routen | CLI-Argument | Umgebungsvariable | | --- | --- | --- | --- | --- | @@ -85,17 +85,17 @@ When it is running Graph Node exposes the following ports: | 8030 | Subgraf-Indizierungsstatus-API | /graphql | --index-node-port | - | | 8040 | Prometheus-Metriken | /metrics | --metrics-port | - | -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. +> **Wichtig**: Seien Sie vorsichtig, wenn Sie Ports öffentlich zugänglich machen - **Administrationsports** sollten gesperrt bleiben. Dies schließt den JSON-RPC-Endpunkt des Graph-Knotens ein. -## Advanced Graph Node configuration +## Erweiterte Graph-Knoten-Konfiguration -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +In seiner einfachsten Form kann Graph Node mit einer einzelnen Instanz von Graph Node, einer einzelnen PostgreSQL-Datenbank, einem IPFS-Knoten und den Netzwerk-Clients betrieben werden, die von den zu indizierenden Subgrafen benötigt werden. -This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. +Dieses Setup kann horizontal skaliert werden, indem mehrere Graph-Knoten und mehrere Datenbanken zur Unterstützung dieser Graph-Knoten hinzugefügt werden. Fortgeschrittene Benutzer möchten vielleicht einige der horizontalen Skalierungsfunktionen von Graph Node sowie einige der fortgeschritteneren Konfigurationsoptionen durch die Datei `config.toml` und die Umgebungsvariablen von Graph Node nutzen. ### `config.toml` -A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. +Eine [TOML](https://toml.io/en/)-Konfigurationsdatei kann verwendet werden, um komplexere Konfigurationen als die in der CLI bereitgestellten festzulegen. Der Speicherort der Datei wird mit dem Befehlszeilenschalter --config übergeben. > When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. 
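A minimal sketch of what this looks like in practice (the file name `config.toml` is only a placeholder; the IPFS endpoint is taken from the `cargo run` example above, and the full set of flags is documented in the Graph Node repository): the database, chain, and deployment settings move into the file, and the node is started with `--config` instead of the PostgreSQL flags.

```sh
cargo run -p graph-node --release -- \
  --config config.toml \
  --ipfs https://ipfs.network.thegraph.com
```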
diff --git a/website/pages/de/querying/querying-the-hosted-service.mdx b/website/pages/de/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/de/querying/querying-the-hosted-service.mdx +++ b/website/pages/de/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/de/querying/querying-with-python.mdx b/website/pages/de/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/de/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seamlessly save your data as CSVs for further analysis. diff --git a/website/pages/de/quick-start.mdx b/website/pages/de/quick-start.mdx new file mode 100644 index 000000000000..9b489cbe3056 --- /dev/null +++ b/website/pages/de/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Schnellstart +--- + +Diese Anleitung führt Sie schnell durch die Initialisierung, Erstellung und Bereitstellung Ihres Subgraphen im Subgraph Studio oder im [Hostingdienst](#hosted-service). + +Stellen Sie sicher, dass Ihr Subgraph Daten aus einem [unterstützten Netzwerk](/developing/supported-networks) indiziert. + +Bei der Erstellung dieses Leitfadens wird davon ausgegangen, dass Sie über Folgendes verfügen: + +- Eine Smart-Contract-Adresse im Netzwerk Ihrer Wahl +- GRT, um Ihren Subgraphen zu kuratieren +- Eine Krypto-Wallet + +## 1. Erstellen Sie einen Untergraphen in Subgraph Studio + +Gehen Sie zu [Subgraph Studio](https://thegraph.com/studio/) und verbinden Sie Ihre Wallet. + +Sobald die Verbindung hergestellt ist, können Sie auf "Subgraph erstellen" klicken. Wählen Sie das Netzwerk Ihrer Wahl und klicken Sie auf "Weiter". + +## 2. Installieren der Graph-CLI + +Die Graph-CLI ist in JavaScript geschrieben und Sie müssen entweder `npm` oder `yarn` installiert haben, um sie zu verwenden. + +Führen Sie einen der folgenden Befehle auf Ihrem lokalen Computer aus: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +Wenn Sie Ihren Untergraphen initialisieren, fragt das CLI-Tool Sie nach den folgenden Informationen: + +- Protokoll: Wählen Sie das Protokoll aus, von dem Ihr Untergraph (Subgraph) Daten indizieren soll. +- Subgraph slug: Erstellen Sie einen Namen für Ihren Subgraphen. Ihr Subgraph-Slug ist ein Identifikationsmerkmal für Ihren Subgraphen.
+- Verzeichnis zur Erstellung des Subgraphen: Wählen Sie Ihr lokales Verzeichnis +- Ethereum-Netzwerk (optional): Sie müssen ggf. angeben, von welchem EVM-kompatiblen Netzwerk Ihr Subgraph Daten indizieren soll. +- Vertragsadresse: Suchen Sie die Smart-Contract-Adresse, von der Sie Daten abfragen möchten +- ABI: Wenn die ABI nicht automatisch ausgefüllt wird, müssen Sie sie manuell in Form einer JSON-Datei eingeben. +- Startblock: Es wird empfohlen, den Startblock einzugeben, um Zeit zu sparen, während Ihr Subgraph die Blockchain-Daten indiziert. Sie können den Startblock finden, indem Sie den Block suchen, in dem Ihr Vertrag bereitgestellt wurde. +- Vertragsname: Geben Sie den Namen Ihres Vertrags ein +- Index contract events as entities (Vertragsereignisse als Entitäten): Es wird empfohlen, dies auf true (wahr) zu setzen, da es automatisch Zuordnungen zu Ihrem Subgraph für jedes emittierte Ereignis hinzufügt +- Einen weiteren Vertrag hinzufügen (optional): Sie können einen weiteren Vertrag hinzufügen + +Initialisieren Sie Ihren Subgraphen anhand eines bestehenden Vertrags, indem Sie den folgenden Befehl ausführen: + +```sh +graph init --studio <SUBGRAPH_SLUG> +``` + +Der folgende Screenshot zeigt ein Beispiel dafür, was Sie bei der Initialisierung Ihres Untergraphen (Subgraph) erwarten können: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Schreiben Sie Ihren Untergraphen + +Die vorangegangenen Befehle erstellen einen gerüstartigen Subgraphen, den Sie als Ausgangspunkt für den Aufbau Ihres Subgraphen verwenden können. Wenn Sie Änderungen an dem Subgraphen vornehmen, arbeiten Sie hauptsächlich mit den folgenden Dateien: + +- Manifest (subgraph.yaml) - Das Manifest definiert, welche Datenquellen Ihre Subgraphen indizieren werden. +- Schema (schema.graphql) - Das GraphQL-Schema definiert, welche Daten Sie aus dem Subgraph abrufen möchten. +- AssemblyScript Mappings (mapping.ts) - Dies ist der Code, der die Daten aus Ihren Datenquellen in die im Schema definierten Entitäten übersetzt. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Bereitstellung für das Subgraph Studio + +Sobald Ihr Subgraph geschrieben ist, führen Sie die folgenden Befehle aus: + +```sh +$ graph codegen +$ graph build +``` + +- Authentifizieren Sie Ihren Subgraphen und stellen Sie ihn bereit. Den Bereitstellungsschlüssel finden Sie auf der Seite "Subgraph" in Subgraph Studio. + +```sh +$ graph auth --studio <DEPLOY_KEY> +$ graph deploy --studio <SUBGRAPH_SLUG> +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as: `v1`, `version1`, `asdf`. + +## 6. Testen Sie Ihren Untergraphen (Subgraphen) + +Sie können Ihren Untergraphen testen, indem Sie eine Beispielabfrage im Playground-Bereich erstellen. + +In den Protokollen können Sie sehen, ob es Fehler in Ihrem Subgraphen gibt. Die Protokolle eines funktionierenden Subgraphen sehen wie folgt aus: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**).
The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Veröffentlichen Sie Ihren Subgraphen im dezentralen Netzwerk von The Graph + +Sobald Ihr Subgraph im Subgraph Studio bereitgestellt wurde, Sie ihn getestet haben und bereit sind, ihn in Betrieb zu nehmen, können Sie ihn im dezentralen Netzwerk veröffentlichen. + +Klicken Sie im Subgraph Studio auf Ihren Subgraphen. Auf der Seite des Subgraphen können Sie oben rechts auf die Schaltfläche "Veröffentlichen" klicken. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Bevor Sie Ihren Subgraphen abfragen können, müssen die Indexer damit beginnen, die Abfragen zu bedienen. + +Zum Zeitpunkt der Erstellung dieses Dokuments wird empfohlen, einen eigenen Subgraphen mit 10.000 GRT zu kuratieren, um sicherzustellen, dass er so schnell wie möglich indiziert und für Abfragen verfügbar ist. + +Um Gaskosten zu sparen, können Sie Ihren Subgraphen in der gleichen Transaktion kuratieren, in der Sie ihn veröffentlicht haben, indem Sie diese Schaltfläche auswählen, wenn Sie Ihren Subgraphen im dezentralen Netzwerk von The Graph veröffentlichen: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Abfrage Ihres Subgraphen + +Jetzt können Sie Ihren Subgraphen abfragen, indem Sie GraphQL-Abfragen an die Abfrage-URL Ihres Subgraphen senden, die Sie durch Klicken auf die Abfrage-Schaltfläche finden können. + +Wenn Sie Ihren API-Schlüssel nicht haben, können Sie von Ihrer App aus eine Abfrage über die kostenlose, zeitlich begrenzte Abfrage-URL durchführen, die für die Entwicklung und das Bereitstellen verwendet werden kann. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/de/substreams.mdx b/website/pages/de/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/de/substreams.mdx +++ b/website/pages/de/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast!
Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/de/sunrise.mdx b/website/pages/de/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/de/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)).
+ +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. 
By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? 
+ +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/de/tokenomics.mdx b/website/pages/de/tokenomics.mdx index 949796a99983..3f5c435e310b 100644 --- a/website/pages/de/tokenomics.mdx +++ b/website/pages/de/tokenomics.mdx @@ -1,110 +1,110 @@ --- -title: Tokenomics of The Graph Network -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token works. +title: Tokenomics des The Graph Netzwerks +description: The Graph Network wird durch leistungsstarke Tokenomics unterstützt. Hier ist, wie GRT, The Graph's eigener Work Utility Token funktioniert. --- -- GRT Token Address: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) +- GRT-Token-Adresse: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT-Token-Adresse auf Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -The Graph is a decentralized protocol that enables easy access to blockchain data. +The Graph ist ein dezentrales Protokoll, das einen einfachen Zugang zu Blockchain-Daten ermöglicht. -It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. +Es ähnelt einem B2B2C-Modell, nur dass es von einem dezentralen Netzwerk von Teilnehmern betrieben wird. Die Netzwerkteilnehmer arbeiten zusammen, um den Endnutzern Daten im Austausch für GRT-Belohnungen zur Verfügung zu stellen. GRT ist der Arbeits-Utility-Token, der Datenanbieter und -nutzer koordiniert. 
GRT dient als Dienstprogramm zur Koordinierung von Datenanbietern und -nachfragern innerhalb des Netzwerks und schafft Anreize für die Protokollteilnehmer, Daten effektiv zu organisieren. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +Durch die Verwendung von The Graph können Nutzer einfach auf Daten aus der Blockchain zugreifen und zahlen nur für die spezifischen Informationen, die sie benötigen. The Graph wird heute von vielen [populären Dapps](https://thegraph.com/explorer) im web3-Ökosystem verwendet. -The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! +The Graph indexiert Blockchain-Daten ähnlich wie Google das Web. Es kann sogar sein, dass Sie The Graph bereits nutzen, ohne es zu merken. Wenn Sie sich das Frontend einer Dapp angesehen haben, die ihre Daten aus einem Subgraph bezieht, haben Sie Daten aus einem Subgraph abgefragt! -The Graph plays a crucial role in making blockchain data more accessible and enabling a marketplace for its exchange. +The Graph spielt eine entscheidende Rolle, wenn es darum geht, Blockchain-Daten besser zugänglich zu machen und einen Marktplatz für deren Austausch zu schaffen. -## The Roles of Network Participants +## Die Rollen der Teilnehmer des Netzwerks -There are four primary network participants: +Es gibt vier primäre Netzwerkteilnehmer: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegatoren - Delegieren Sie GRT an Indexer & sichern Sie das Netzwerk -2. Curators - Find the best subgraphs for Indexers +2. Kuratoren - Finden Sie die besten Untergraphen für Indexer -3. Developers - Build & query subgraphs +3. Entwickler - Erstellen & Abfragen von Untergraphen -4. Indexers - Backbone of blockchain data +4. Indexer - Das Rückgrat der Blockchain-Daten -Fishermen and Arbitrators are also integral to the network’s success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +Fischer und Schiedsrichter tragen auch durch andere Beiträge zum Erfolg des Netzwerks bei und unterstützen die Arbeit der anderen Hauptakteure. Weitere Informationen über die Rollen im Netzwerk finden Sie in diesem Artikel[Lesen Sie diesen Artikel](https://thegraph.com/blog/the-graph-grt-token-economics/). -![Tokenomics diagram](/img/updated-tokenomics-image.png) +![Diagramm zur Tokenomik](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## Delegatoren (verdienen passiv GRT) -Indexers are delegated GRT by Delegators increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexer werden von Delegatoren mit GRT betraut, die den Anteil des Indexers an den Subgraphen im Netzwerk erhöhen. Im Gegenzug verdienen die Delegatoren einen Prozentsatz aller Abfragegebühren und Indexierungsbelohnungen von den Indexern. 
Jeder Indexer legt den Anteil, den er an die Delegatoren vergütet, selbständig fest, wodurch ein Wettbewerb zwischen den Indexern entsteht, um Delegatoren anzuziehen. Die meisten Indexierer bieten zwischen 9-12% jährlich. -For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1500 GRT in rewards annually. +Wenn zum Beispiel ein Delegator 15.000 GRT an einen Indexer delegiert, der 10 % anbietet, würde der Delegator jährlich ~1500 GRT an Belohnungen erhalten. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +Es gibt eine Delegationssteuer von 0,5 %, die jedes Mal erhoben wird, wenn ein Delegator GRT an das Netzwerk delegiert. Wenn ein Delegator beschließt, sein delegiertes GRT zurückzuziehen, muss er die 28-Epochen-Frist abwarten, in der die Bindung aufgehoben wird. Jede Epoche besteht aus 6.646 Blöcken, was bedeutet, dass 28 Epochen ungefähr 26 Tagen entsprechen. -If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. +Wenn Sie dies lesen, können Sie sofort Delegator werden, indem Sie auf die [Netzwerkteilnehmerseite](https://thegraph.com/explorer/participants/indexers) gehen und GRT an einen Indexer Ihrer Wahl delegieren. -## Curators (Earn GRT) +## Kuratoren (verdienen GRT) -Curators identify high-quality subgraphs, and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Kuratoren identifizieren qualitativ hochwertige Untergraphen und "kuratieren" sie (d.h. signalisieren GRT auf ihnen), um Kurationsanteile zu verdienen, die einen Prozentsatz aller zukünftigen Abfragegebühren garantieren, die durch den Untergraphen generiert werden. Obwohl jeder unabhängige Netzwerkteilnehmer ein Kurator sein kann, gehören die Entwickler von Subgraphen in der Regel zu den ersten Kuratoren für ihre eigenen Subgraphen, da sie sicherstellen wollen, dass ihr Subgraph indiziert wird. -As of December 2022, subgraph developers are encouraged to curate their subgraph with at least 10,000 GRT. However, this number may be impacted by network activity and community participation. +Ab Dezember 2022 werden die Entwickler von Subgraphen ermutigt, ihre Subgraphen mit mindestens 10.000 GRT zu kuratieren. Diese Zahl kann jedoch durch die Netzwerkaktivität und die Beteiligung der Community beeinflusst werden. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Kuratoren zahlen eine Kurationssteuer von 1%, wenn sie einen neuen Untergraphen kuratieren. Diese Kuratierungssteuer wird verbrannt, wodurch das Angebot an GRT sinkt. -## Developers +## Entwickler -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps.
Developers pay for queries they make in GRT, which is distributed to network participants. +Entwickler erstellen Subgraphen und fragen sie ab, um Blockchain-Daten abzurufen. Da Subgraphen quelloffen sind, können Entwickler bestehende Subgraphen abfragen, um Blockchain-Daten in ihre Dapps zu laden. Entwickler zahlen für Abfragen, die sie in GRT machen, das an die Netzwerkteilnehmer verteilt wird. -### Creating a subgraph +### Erstellung eines Untergraphen -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Entwickler können [einen Subgraph](/developing/creating-a-subgraph/) erstellen, um Daten auf der Blockchain zu indizieren. Subgraphen sind Anweisungen für Indexer darüber, welche Daten an Verbraucher geliefert werden sollen. -Once developers have built and tested their subgraph, they can [publish their subgraph](/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Sobald Entwickler ihren Subgraphen erstellt und getestet haben, können sie [ihren Subgraphen](/publishing/publishing-a-subgraph/) im dezentralen Netzwerk von The Graph veröffentlichen. -### Querying an existing subgraph +### Abfrage eines vorhandenen Untergraphen -Once a subgraph is [published](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Sobald ein Subgraph [im dezentralen Netzwerk von The Graph veröffentlicht](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) wurde, kann jeder einen API-Schlüssel erstellen, GRT zu seinem Guthaben hinzufügen und den Subgraph abfragen. -Subgraphs are [queried using GraphQL](/querying/querying-the-graph/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. +Subgraphen werden [mit GraphQL](/querying/querying-the-graph/) abgefragt, und die Abfragegebühren werden mit GRT in [Subgraph Studio](https://thegraph.com/studio/) bezahlt. Die Abfragegebühren werden an die Netzwerkteilnehmer auf der Grundlage ihrer Beiträge zum Protokoll verteilt. -1% of the query fees paid to the network are burned. +1 % der an das Netzwerk gezahlten Abfragegebühren werden verbrannt. -## Indexers (Earn GRT) +## Indexierer (verdienen GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexer sind das Rückgrat von The Graph. Sie betreiben unabhängige Hardware und Software, die das dezentrale Netzwerk von The Graph antreiben. Indexer liefern Daten an Verbraucher auf der Grundlage von Anweisungen von Untergraphen. -Indexers can earn GRT rewards in two ways: +Indexierer können auf zwei Arten GRT-Belohnungen verdienen: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Abfragegebühren: GRT, die von Entwicklern oder Nutzern für Abfragen von Subgraphen-Daten gezahlt werden. Die Abfragegebühren werden gemäß der exponentiellen Rabattfunktion (siehe GIP [hier](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)) direkt an die Indexierer verteilt. -2. 
Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. +2. Indexierungsprämien: Die jährliche Ausgabe von 3 % wird an die Indexierer auf der Grundlage der Anzahl der von ihnen indexierten Untergraphen verteilt. Diese Belohnungen sind ein Anreiz für Indexer, Untergraphen zu indexieren, gelegentlich bevor die Abfragegebühren beginnen, um Proofs of Indexing (POIs) zu sammeln und einzureichen, die bestätigen, dass sie Daten korrekt indexiert haben. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Jedem Untergraphen wird ein Teil der gesamten Netzwerk-Token-Ausgabe zugeteilt, basierend auf der Höhe des Kurationssignals des Untergraphen. Dieser Betrag wird dann an die Indexer auf der Grundlage ihres zugewiesenen Anteils an dem Subgraphen vergütet. -In order to run an indexing node, Indexers must stake 100,000 GRT or more with the network. Indexers are incentivized to stake GRT in proportion to the amount of queries they serve. +Um einen Indexierungs-Knoten zu betreiben, müssen Indexer 100.000 GRT oder mehr in das Netzwerk einbringen. Für Indexer besteht ein Anreiz, GRT im Verhältnis zur Anzahl der von ihnen bearbeiteten Abfragen einzusetzen. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial stake), they will not be able to use the additional GRT from Delegators until they increase their stake in the network. +Indexer können ihre GRT-Zuteilungen auf Untergraphen erhöhen, indem sie GRT-Delegierung von Delegatoren akzeptieren, und sie können bis zum 16-fachen ihres ursprünglichen Einsatzes akzeptieren. Wenn ein Indexer "überdelegiert" wird (d.h. mehr als das 16-fache seines ursprünglichen Einsatzes), kann er die zusätzlichen GRT von Delegatoren nicht nutzen, bis er seinen Einsatz im Netzwerk erhöht. -The amount of rewards an Indexer receives can vary based on the initial stake, accepted delegation, quality of service, and many more factors. The following chart is publicly available data from an active Indexer on The Graph's decentralized network. +Die Höhe der Belohnungen, die ein Indexer erhält, kann je nach anfänglichem Einsatz, akzeptierter Delegation, Qualität des Dienstes und vielen weiteren Faktoren variieren. Das folgende Diagramm ist ein öffentlich zugängliches Diagramm eines aktiven Indexers im dezentralen Netzwerk von The Graph. -### The Indexer stake & reward of allnodes-com.eth +### Der Indexer Einsatz & Belohnung von allnodes-com.eth -![Indexing stake and rewards](/img/indexing-stake-and-income.png) +![Indexierung von Einsatz und Belohnungen](/img/indexing-stake-and-income.png) -This data is from February 2021 to September 2022. +Diese Daten beziehen sich auf den Zeitraum von Februar 2021 bis September 2022. -> Please note, this will improve when the [Arbitrum migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) is complete, making gas costs a significantly lower burden for participating on the network. 
+> Bitte beachten Sie, dass sich diese Situation verbessern wird, wenn die [Arbitrum-Migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) abgeschlossen ist, so dass die Gaskosten für die Teilnehmer des Netzes eine deutlich geringere Belastung darstellen. -## Token Supply: Burning & Issuance +## Token-Versorgung: Burning & Ausgabe -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +Das anfängliche Token-Angebot beträgt 10 Milliarden GRT, mit einem Ziel von 3 % Neuemissionen pro Jahr, um Indexer für die Zuweisung von Anteilen an Subgraphen zu belohnen. Das bedeutet, dass das Gesamtangebot an GRT-Token jedes Jahr um 3 % steigen wird, da neue Token an Indexer für ihren Beitrag zum Netzwerk ausgegeben werden. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph ist mit mehreren Brennmechanismen ausgestattet, um die Ausgabe neuer Token auszugleichen. Ungefähr 1 % des GRT-Angebots wird jährlich durch verschiedene Aktivitäten im Netzwerk verbrannt, und diese Zahl steigt, da die Netzwerkaktivität weiter zunimmt. Zu diesen Burning-Aktivitäten gehören eine Delegationssteuer von 0,5 %, wenn ein Delegator GRT an einen Indexer delegiert, eine Kurationssteuer von 1 %, wenn Kuratoren ein Signal auf einem Untergraphen geben, und 1 % der Abfragegebühren für Blockchain-Daten. -![Total burned GRT](/img/total-burned-grt.jpeg) +![Verbrannte GRT insgesamt](/img/total-burned-grt.jpeg) -In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. +Zusätzlich zu diesen regelmäßig stattfindenden Burning-Aktivitäten verfügt der GRT-Token auch über einen Slashing-Mechanismus, um böswilliges oder unverantwortliches Verhalten von Indexern zu bestrafen. Wenn ein Indexer geslashed wird, werden 50% seiner Indexierungsbelohnungen für die Epoche verbrannt (während die andere Hälfte an den Fischer geht), und sein Eigenanteil wird um 2,5% gekürzt, wobei die Hälfte dieses Betrags verbrannt wird. Dies trägt dazu bei, dass Indexer einen starken Anreiz haben, im besten Interesse des Netzwerks zu handeln und zu dessen Sicherheit und Stabilität beizutragen. -## Improving the Protocol +## Verbesserung des Protokolls -The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. 
The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). +Das The Graph Network entwickelt sich ständig weiter, und es werden laufend Verbesserungen an der wirtschaftlichen Gestaltung des Protokolls vorgenommen, um allen Netzwerkteilnehmern die bestmögliche Erfahrung zu bieten. Der The Graph-Rat überwacht die Protokolländerungen, und die Mitglieder der Community sind aufgerufen, sich daran zu beteiligen. Beteiligen Sie sich an der Verbesserung des Protokolls im [The Graph Forum](https://forum.thegraph.com/). diff --git a/website/pages/en/quick-start.mdx b/website/pages/en/quick-start.mdx index c5211da0768a..c205b755317c 100644 --- a/website/pages/en/quick-start.mdx +++ b/website/pages/en/quick-start.mdx @@ -2,7 +2,7 @@ title: Quick Start --- -This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the hosted service. Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). @@ -14,7 +14,7 @@ This guide is written assuming that you have: ## 1. Create a subgraph on Subgraph Studio -Go to the Subgraph Studio [https://thegraph.com/studio/](https://thegraph.com/studio/) and connect your wallet. +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. @@ -165,4 +165,4 @@ Now, you can query your subgraph by sending GraphQL queries to your subgraph’s You can query from your dapp if you don't have your API key via the free, rate-limited temporary query URL that can be used for development and staging. -For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). +For more information about querying data from your subgraph, read more [here](/querying/querying-the-graph/). diff --git a/website/pages/es/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/es/arbitrum/l2-transfer-tools-faq.mdx index 8ee3c1a793a8..f52e0179c45b 100644 --- a/website/pages/es/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/es/arbitrum/l2-transfer-tools-faq.mdx @@ -2,314 +2,410 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet?
+For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. -## Subgraph Transfer +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### ¿Qué sucede si no completo mi transferencia en 7 días? + +Las Herramientas de Transferencia a L2 utilizan el mecanismo nativo de Arbitrum para enviar mensajes de L1 a L2. Este mecanismo se llama "ticket reintentable" y es utilizado por todos los puentes de tokens nativos, incluido el puente GRT de Arbitrum. Puedes obtener más información sobre los tickets reintentables en la [Documentación de Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +Cuando transfieres tus activos (subgrafo, stake, delegación o curación) a L2, se envía un mensaje a través del puente Arbitrum GRT que crea un ticket reintentable en L2. La herramienta de transferencia incluye un valor ETH en la transacción, que se utiliza para: 1) pagar la creación del ticket y 2) pagar por el gas para ejecutar el ticket en L2. Sin embargo, debido a que los precios del gas pueden variar durante el tiempo hasta que el ticket esté listo para ejecutarse en L2, es posible que este intento de autoejecución falle. Cuando eso sucede, el puente de Arbitrum mantendrá el ticket reintentable activo durante un máximo de 7 días, y cualquier persona puede intentar nuevamente "canjear" el ticket (lo que requiere una wallet con algo de ETH transferido a Arbitrum). + +Esto es lo que llamamos el paso de "Confirmar" en todas las herramientas de transferencia. En la mayoría de los casos, se ejecutará automáticamente, ya que la autoejecución suele ser exitosa, pero es importante que vuelvas a verificar para asegurarte de que se haya completado. Si no tiene éxito y no hay reintentos exitosos en 7 días, el puente de Arbitrum descartará el ticket, y tus activos (subgrafo, stake, delegación o curación) se perderán y no podrán recuperarse. 
Los core devs de The Graph tienen un sistema de monitoreo para detectar estas situaciones e intentar canjear los tickets antes de que sea demasiado tarde, pero en última instancia, es tu responsabilidad asegurarte de que tu transferencia se complete a tiempo. Si tienes problemas para confirmar tu transacción, por favor comunícate a través de [este formulario](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) y los core devs estarán allí para ayudarte. -## How do I transfer my subgraph? +### Comencé la transferencia de mi delegación/stake/curación y no estoy seguro de si se completó en L2, ¿cómo puedo confirmar que se transfirió correctamente? -To transfer your subgraph, you will need to complete the following steps: +Si no ves un banner en tu perfil que te pida completar la transferencia, es probable que la transacción haya llegado de manera segura a L2 y no sea necesario realizar ninguna otra acción. Si tienes dudas, puedes verificar si el Explorer muestra tu delegación, stake o curación en Arbitrum One. -1. Initiate the transfer on Ethereum mainnet +Si tienes el hash de la transacción en L1 (que puedes encontrar revisando las transacciones recientes en tu wallet), también puedes confirmar si el "ticket reintentable" que transportó el mensaje a L2 fue redimido aquí: https://retryable-dashboard.arbitrum.io/ - si la redención automática falló, también puedes conectar tu wallet allí y redimirlo. Puedes estar seguro de que los core devs también están monitoreando los mensajes que quedan atascados y tratarán de redimirlos antes de que expiren. + +## Transferencia de Subgrafo + +### ¿Cómo transfiero mi subgrafo? + + + +Para transferir tu subgrafo, tendrás que completar los siguientes pasos: + +1. Inicia la transferencia en Ethereum mainnet 2. Espera 20 minutos para la confirmación -3. Confirm subgraph transfer on Arbitrum\* +3. Confirma la transferencia del subgrafo en Arbitrum + +4. Termina de publicar el subgrafo en Arbitrum -4. Finish publishing subgraph on Arbitrum +5. Actualiza la URL de consulta (recomendado) -5. Update Query URL (recommended) +\*Ten en cuenta que debes confirmar la transferencia dentro de los 7 días, de lo contrario, es posible que se pierda tu subgrafo. En la mayoría de los casos, este paso se ejecutará automáticamente, pero puede ser necesaria una confirmación manual si hay un aumento en el precio del gas en Arbitrum. Si surgen problemas durante este proceso, habrá recursos disponibles para ayudarte: ponte en contacto con el soporte en support@thegraph.com o en [Discord](https://discord.gg/graphprotocol). -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### ¿Desde dónde debo iniciar mi transferencia? -## Where should I initiate my transfer from? +Puedes iniciar la transferencia desde el [Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) o desde cualquier página de detalles del subgrafo. Haz clic en el botón "Transferir Subgrafo" en la página de detalles del subgrafo para iniciar la transferencia. 
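The "Confirm" step described above is the redemption of Arbitrum's retryable ticket, and the FAQ points to https://retryable-dashboard.arbitrum.io/ for checking it manually. As a rough sketch of the same check done programmatically — assuming ethers v5 and the `@arbitrum/sdk` L1-to-L2 message helpers, with placeholder RPC URLs and transaction hash that are not part of the original docs — something like the following could be used:

```typescript
// Sketch only: assumes ethers v5 and @arbitrum/sdk's L1-to-L2 message helpers.
// RPC URLs and the transaction hash are placeholders.
import { providers } from 'ethers';
import { L1TransactionReceipt, L1ToL2MessageStatus } from '@arbitrum/sdk';

async function checkTransferTicket(l1TxHash: string): Promise<void> {
  const l1Provider = new providers.JsonRpcProvider('https://eth-mainnet.example.com');
  const l2Provider = new providers.JsonRpcProvider('https://arb1.example.com');

  // Wrap the receipt of the L1 transfer-tool transaction.
  const receipt = await l1Provider.getTransactionReceipt(l1TxHash);
  const l1Receipt = new L1TransactionReceipt(receipt);

  // Each retryable ticket created by that transaction appears as one L1 -> L2 message.
  const messages = await l1Receipt.getL1ToL2Messages(l2Provider);
  for (const message of messages) {
    const status = await message.status();
    if (status === L1ToL2MessageStatus.REDEEMED) {
      console.log('Ticket redeemed: the transfer completed on Arbitrum One.');
    } else {
      // Not redeemed yet (or failed): confirm the transfer in Explorer/Studio
      // before the 7-day redemption window closes.
      console.log('Ticket not redeemed yet, status code:', status);
    }
  }
}

checkTransferTicket('0x...').catch(console.error); // placeholder L1 transaction hash
```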
-You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +### ¿Cuánto tiempo tengo que esperar hasta que se transfiera mi subgrafo? -## How long do I need to wait until my subgraph is transferred +El tiempo de transferencia demora aproximadamente 20 minutos. El puente de Arbitrum está trabajando en segundo plano para completar la transferencia automáticamente. En algunos casos, los costos de gas pueden aumentar y necesitarás confirmar la transacción nuevamente. -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +### ¿Mi subgrafo seguirá siendo accesible después de transferirlo a L2? -## Will my subgraph still be discoverable after I transfer it to L2? +Tu subgrafo solo será accesible en la red donde esté publicado. Por ejemplo, si tu subgrafo está en Arbitrum One, solo podrás encontrarlo en el explorador de Arbitrum One y no podrás encontrarlo en Ethereum. Asegúrate de tener seleccionado Arbitrum One en el selector de redes en la parte superior de la página para asegurarte de estar en la red correcta. Después de la transferencia, el subgrafo en L1 aparecerá como obsoleto. -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +### ¿Es necesario publicar mi subgrafo para transferirlo? -## Does my subgraph need to be published to transfer it? +Para aprovechar la herramienta de transferencia de subgrafos, tu subgrafo debe estar ya publicado en la red principal de Ethereum y debe tener alguna señal de curación propiedad de la wallet que posee el subgrafo. Si tu subgrafo no está publicado, se recomienda que lo publiques directamente en Arbitrum One, ya que las tarifas de gas asociadas serán considerablemente más bajas. Si deseas transferir un subgrafo ya publicado pero la cuenta del propietario no ha curado ninguna señal en él, puedes señalizar una pequeña cantidad (por ejemplo, 1 GRT) desde esa cuenta; asegúrate de elegir la opción de señal "auto-migración". -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +### ¿Qué ocurre con la versión de Ethereum mainnet de mi subgrafo después de transferirlo a Arbitrum? -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +Tras transferir tu subgrafo a Arbitrum, la versión de Ethereum mainnet quedará obsoleta. Te recomendamos que actualices tu URL de consulta en un plazo de 48 horas. 
Sin embargo, existe un periodo de gracia que mantiene tu URL de mainnet en funcionamiento para que se pueda actualizar cualquier soporte de dapp de terceros. -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +### Después de la transferencia, ¿también tengo que volver a publicar en Arbitrum? -## After I transfer, do I also need to re-publish on Arbitrum? +Una vez transcurridos los 20 minutos de la ventana de transferencia, tendrás que confirmar la transferencia con una transacción en la interfaz de usuario para finalizar la transferencia, pero la herramienta de transferencia te guiará en este proceso. Tu endpoint L1 seguirá siendo compatible durante la ventana de transferencia y un período de gracia después. Te recomendamos que actualices tu endpoint cuando te resulte conveniente. -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +### ¿Experimentará mi endpoint una interrupción durante la republicación? -## Will there be a down-time to my endpoint while re-publishing? +Es poco probable, pero es posible experimentar una breve interrupción dependiendo de qué Indexadores estén respaldando el subgrafo en L1 y si continúan indexándolo hasta que el subgrafo esté completamente respaldado en L2. -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +### ¿Es lo mismo publicar y versionar en L2 que en Ethereum mainnet? -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +Sí. Asegúrate de seleccionar Arbitrum One como tu red para publicar cuando publiques en Subgraph Studio. En el Studio, estará disponible el último endpoint que apunta a la última versión actualizada del subgrafo. -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +### ¿Se moverá la curación de mi subgrafo junto con mi subgrafo? -## Will my subgraph's curation move with my subgraph? +Si has elegido auto-migrar la señal, el 100% de tu curación propia se moverá con tu subgrafo a Arbitrum One. Toda la señal de curación del subgrafo se convertirá a GRT en el momento de la transferencia, y el GRT correspondiente a tu señal de curación se utilizará para mintear señal en el subgrafo L2. -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Otros Curadores pueden elegir si retiran su fracción de GRT, o también la transfieren a L2 para mintear señal en el mismo subgrafo. 
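On the dapp side, the "update your query URL within 48 hours" recommendation above usually amounts to swapping a single endpoint constant; the GraphQL queries themselves do not change. A minimal sketch — the URLs and subgraph IDs below are placeholders, not real endpoints; the actual Arbitrum One query URL is shown in Subgraph Studio/Explorer after the transfer:

```typescript
// Previously: 'https://gateway.example.com/api/<API_KEY>/subgraphs/id/<L1_ID>' (deprecated after the grace period).
// Placeholder endpoint: substitute the Arbitrum One query URL shown for your subgraph in Subgraph Studio / Explorer.
const QUERY_URL = 'https://gateway-arbitrum.example.com/api/<API_KEY>/subgraphs/id/<L2_ID>';

async function querySubgraph<T>(query: string, variables: Record<string, unknown> = {}): Promise<T> {
  const res = await fetch(QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, variables }),
  });
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data as T;
}

// `_meta` is exposed by graph-node on every subgraph, so this works regardless of schema.
querySubgraph('{ _meta { block { number } } }').then(console.log).catch(console.error);
```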
-Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### ¿Puedo mover mi subgrafo de nuevo a Ethereum mainnet después de la transferencia? -## Can I move my subgraph back to Ethereum mainnet after I transfer? +Una vez transferida, la versión en Ethereum mainnet de este subgrafo quedará obsoleta. Si deseas regresar a mainnet, deberás volver a deployar y publicar en mainnet. Sin embargo, se desaconseja firmemente volver a transferir a Ethereum mainnet, ya que las recompensas por indexación se distribuirán eventualmente por completo en Arbitrum One. -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### ¿Por qué necesito ETH bridgeado para completar mi transferencia? -## Why do I need bridged ETH to complete my transfer? +Las tarifas de gas en Arbitrum One se pagan utilizando ETH bridgeado (es decir, ETH que ha sido transferido a Arbitrum One). Sin embargo, las tarifas de gas son significativamente más bajas en comparación con Ethereum mainnet. -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegación -## Curation Signal +### ¿Cómo transfiero mi delegación? -## How do I transfer my curation? + -To transfer your curation, you will need to complete the following steps: +Para transferir tu delegación, deberás seguir los siguientes pasos: -1. Initiate signal transfer on Ethereum mainnet +1. Inicia la transferencia de delegación en Ethereum mainnet +2. Espera 20 minutos para la confirmación +3. Confirma la transferencia de delegación en Arbitrum -2. Specify an L2 Curator address\* +\*\*\*\*Debes confirmar la transacción para completar la transferencia de la delegación en Arbitrum. Este paso debe completarse dentro de los 7 días o la delegación podría perderse. En la mayoría de los casos, este paso se ejecutará automáticamente, pero puede ser necesaria una confirmación manual si hay un aumento en el precio del gas en Arbitrum. Si surgen problemas durante este proceso, habrá recursos disponibles para ayudarte: contacta con el soporte en support@thegraph.com o en [Discord](https://discord.gg/graphprotocol). -3. Espera 20 minutos para la confirmación +### ¿Qué sucede con mis recompensas si inicio una transferencia con una allocation abierta en Ethereum mainnet? -\*If necessary - i.e. you are using a contract address. +Si el Indexador al que estás delegando aún está operando en L1, cuando transfieras a Arbitrum perderás cualquier recompensa de delegación de allocations abiertas en Ethereum mainnet. Esto significa que perderás las recompensas de, como máximo, el último período de 28 días. Si sincronizas la transferencia justo después de que el Indexador haya cerrado las allocations, puedes asegurarte de que sea la menor cantidad posible. Si tienes un canal de comunicación con tu(s) Indexador(es), considera discutir con ellos para encontrar el mejor momento para hacer la transferencia. -## How will I know if the subgraph I curated has moved to L2? +### ¿Qué sucede si el Indexador al que actualmente delego no está en Arbitrum One? 
-When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +La herramienta de transferencia a L2 solo estará habilitada si el Indexador al que has delegado ha transferido su stake propio a Arbitrum. -## What if I do not wish to move my curation to L2? +### ¿Los Delegadores tienen la opción de delegar a otro Indexador? -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +Si deseas delegar a otro Indexador, puedes transferir a ese mismo Indexador en Arbitrum, luego anular la delegación y esperar el período de desbloqueo. Después de esto, puedes seleccionar otro Indexador activo al que delegar. -## How do I know my curation successfully transferred? +### ¿Qué sucede si no puedo encontrar al Indexador al que estoy delegando en L2? -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +La herramienta de transferencia a L2 detectará automáticamente al Indexador al que previamente has delegado. -## Can I transfer my curation on more than one subgraph at a time? +### ¿Podré mezclar o distribuir mi delegación entre nuevos Indexadores o varios Indexadores en lugar del Indexador anterior? -There is no bulk transfer option at this time. +La herramienta de transferencia a L2 siempre trasladará tu delegación al mismo Indexador al que delegaste anteriormente. Una vez que te hayas trasladado a L2, puedes anular la delegación, esperar el período de desbloqueo y decidir si deseas dividir tu delegación. -## Indexer Stake +### ¿Estoy sujeto al período de espera o puedo retirarme inmediatamente después de usar la herramienta de transferencia de delegación a L2? -## How do I transfer my stake to Arbitrum? +La herramienta de transferencia te permite moverte de inmediato a L2. Si deseas anular la delegación, tendrás que esperar el período de desbloqueo. Sin embargo, si un Indexador ha transferido todo su stake a L2, podrás retirarla inmediatamente en Ethereum mainnet. -To transfer your stake, you will need to complete the following steps: +### ¿Mis recompensas pueden verse afectadas negativamente si no transfiero mi delegación? -1. Initiate stake transfer on Ethereum mainnet +Se espera que toda la participación en la red se traslade a Arbitrum One en el futuro. -2. Espera 20 minutos para la confirmación +### ¿Cuánto tiempo se tarda en completar la transferencia de mi delegación a L2? + +Se requiere una confirmación de 20 minutos para la transferencia de la delegación. Después del período de 20 minutos, debes regresar y completar el paso 3 del proceso de transferencia dentro de los 7 días. Si no lo haces, es posible que pierdas tu delegación. Ten en cuenta que en la mayoría de los casos, la herramienta de transferencia completará automáticamente este paso por ti. En caso de que ocurra un intento automático fallido, deberás completarlo manualmente. Si surgen problemas durante este proceso, no te preocupes, estaremos aquí para ayudarte: contáctanos en support@thegraph.com o en [Discord](https://discord.gg/vtvv7FP). + +### ¿Puedo transferir mi delegación si estoy usando un contrato de vesting o una wallet de bloqueo de tokens? + +¡Sí! 
El proceso es un poco diferente porque los contratos de vesting no pueden enviar el ETH necesario para pagar las tarifas de gas en L2, por lo que debes depositarlo con antelación. Si tu contrato de vesting aún no está completamente adquirido, también deberás inicializar un contrato de vesting equivalente en L2 y solo podrás transferir la delegación a este contrato de vesting en L2. La interfaz de usuario en Explorer te guiará a través de este proceso cuando te hayas conectado a Explorer usando la wallet con bloqueo por vesting. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -3. Confirm stake transfer on Arbitrum +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -## Will all of my stake transfer? +### ¿Existe algún impuesto a la delegación? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +No, no se cobra ningún impuesto de delegación. Los tokens recibidos en L2 se delegan al Indexador especificado en nombre del Delegador especificado sin cobrar ningún impuesto de delegación. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### ¿Se transferirán mis recompensas no realizadas cuando transfiera mi delegación? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +¡Sí! Las únicas recompensas que no se pueden transferir son las correspondientes a allocations abiertas, ya que estas no existirán hasta que el Indexador cierre las allocations (generalmente cada 28 días). Si has estado delegando durante un tiempo, es probable que estas sean solo una pequeña fracción de las recompensas. -## How much time do I have to confirm my stake transfer to Arbitrum? +A nivel de contrato inteligente, las recompensas no realizadas ya forman parte de tu saldo de delegación, por lo que se transferirán cuando transfieras tu delegación a L2. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### ¿Es obligatorio trasladar las delegaciones a L2? ¿Hay algún plazo límite? 
-## What if I have open allocations? +Trasladar la delegación a L2 no es obligatorio, pero las recompensas de indexación están aumentando en L2 siguiendo el calendario descrito en [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventualmente, si el Council sigue aprobando los aumentos, todas las recompensas se distribuirán en L2 y no habrá recompensas de indexación para Indexadores y Delegadores en L1. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +### Si estoy delegando a un Indexador que ya ha transferido su stake a L2, ¿dejo de recibir recompensas en L1? -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +Muchos Indexadores están transfiriendo su stake gradualmente, por lo que los Indexadores en L1 seguirán ganando recompensas y tarifas en L1, las cuales luego se comparten con los Delegadores. Una vez que un Indexador haya transferido todo su stake, dejarán de operar en L1, por lo que los Delegadores no recibirán más recompensas a menos que transfieran a L2. -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +Eventualmente, si el Consejo continúa aprobando aumentos en las recompensas de indexación en L2, todas las recompensas se distribuirán en L2 y no habrá recompensas de indexación para Indexers y Delegators en L1 -## How long will it take to transfer my stake? +### No veo un botón para transferir mi delegación. ¿Por qué sucede eso? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +Tu Indexer probablemente aún no ha utilizado las herramientas de transferencia de L2 para transferir la participación. -## Do I have to index on Arbitrum before I transfer my stake? +Si puedes contactar al Indexador, puedes animarlos a utilizar las Herramientas de Transferencia de L2 para que los Delegators puedan transferir sus delegaciones a la dirección de L2 del Indexer -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +### Mi Indexador también está en Arbitrum, pero no veo un botón para transferir la delegación en mi perfil. ¿Por qué sucede eso? -## Can Delegators move their delegation before I move my indexing stake? +Es posible que el Indexador haya configurado operaciones en L2, pero no haya utilizado las herramientas de transferencia de L2 para transferir la participación. Por lo tanto, los contratos inteligentes de L1 no tendrán información sobre la dirección de L2 del Indexador. Si puedes ponerte en contacto con el Indexador, puedes animarlos a utilizar la herramienta de transferencia para que los Delegadores puedan transferir sus delegaciones a la dirección de L2 del Indexador -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. 
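The partial-stake-transfer rules spelled out above (the first transfer must move at least the 100k GRT minimum, any stake left on L1 must also stay above that minimum, and the remaining stake plus delegation must still cover open allocations) reduce to a simple check. A hedged sketch of that validation with hypothetical numbers — the transfer tool performs the authoritative check on-chain:

```typescript
// Sketch of the L1-side validation described in the FAQ; the transfer tool
// enforces the real rules on-chain. All numbers are hypothetical.
const MIN_INDEXER_STAKE_GRT = 100_000;

interface IndexerPosition {
  selfStakeL1: number;     // Indexer self-stake currently on Ethereum mainnet
  delegatedL1: number;     // delegation currently on Ethereum mainnet
  openAllocations: number; // GRT allocated to open allocations on L1
}

function canPartiallyTransfer(position: IndexerPosition, amountToL2: number, firstTransfer: boolean): boolean {
  // The first transfer must move at least the 100k GRT minimum to L2.
  if (firstTransfer && amountToL2 < MIN_INDEXER_STAKE_GRT) return false;

  const remainingSelfStake = position.selfStakeL1 - amountToL2;
  // Any stake left on L1 must itself stay above the minimum...
  if (remainingSelfStake > 0 && remainingSelfStake < MIN_INDEXER_STAKE_GRT) return false;
  // ...and, together with delegation, still cover open allocations.
  return remainingSelfStake + position.delegatedL1 >= position.openAllocations;
}

console.log(canPartiallyTransfer({ selfStakeL1: 300_000, delegatedL1: 1_000_000, openAllocations: 900_000 }, 150_000, true)); // true
console.log(canPartiallyTransfer({ selfStakeL1: 300_000, delegatedL1: 0, openAllocations: 250_000 }, 150_000, true)); // false: open allocations not covered
```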
+### ¿Puedo transferir mi delegación a L2 si he iniciado el proceso de cancelación de la delegación y aún no lo he retirado? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +No. Si tu delegación está desbloqueandose, debes esperar los 28 días y retirarla. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +Los tokens que se están desdelegando están "bloqueados" y, por lo tanto, no se pueden transferir a L2. -## Delegation +## Señal de Curación + +### ¿Cómo transfiero mi curaduría? + +Para transferir tu curación, deberás completar los siguientes pasos: + +1. Iniciar la transferencia de señal en Ethereum mainnet + +2. Especifica una dirección L2 de Curador + +3. Espera 20 minutos para la confirmación -## How do I transfer my delegation? +\*Si es necesario - i.e. si estás utilizando una dirección de contrato. -To transfer your delegation, you will need to complete the following steps: +### ¿Cómo sabré si el subgrafo que he curado ha pasado a L2? -1. Initiate delegation transfer on Ethereum mainnet +Al ver la página de detalles del subgrafo, un banner te notificará que este subgrafo ha sido transferido. Puedes seguir la indicación para transferir tu curación. También puedes encontrar esta información en la página de detalles del subgrafo de cualquier subgrafo que se haya trasladado. + +### ¿Qué ocurre si no deseo trasladar mi curación a L2? + +Cuando un subgrafo queda obsoleto, tienes la opción de retirar tu señal. De manera similar, si un subgrafo se ha trasladado a L2, puedes elegir retirar tu señal en Ethereum mainnet o enviar la señal a L2. + +### ¿Cómo sé si mi curación se ha transferido correctamente? + +Los detalles de la señal serán accesibles a través del Explorer aproximadamente 20 minutos después de iniciar la herramienta de transferencia a L2. + +### ¿Puedo transferir mi curación en más de un subgrafo a la vez? + +En este momento no existe la opción de transferencia masiva. + +## Stake del Indexador + +### ¿Cómo transfiero mi stake a Arbitrum? + +> Aviso: Si en este momento estás retirando alguna porción de tus GRT de tu Indexador, no podrás utilizar las Herramientas de Transferencia de L2. + + + +Para transferir tu stake, deberás seguir los siguientes pasos: + +1. Inicia la transferencia de stake en Ethereum mainnet 2. Espera 20 minutos para la confirmación -3. Confirm delegation transfer on Arbitrum +3. Confirma la transferencia de stake en Arbitrum + +\*Ten en cuenta que debes confirmar la transferencia dentro de los 7 días; de lo contrario, tu participación podría perderse. En la mayoría de los casos, este paso se ejecutará automáticamente, pero puede ser necesaria una confirmación manual si hay un aumento en el precio del gas en Arbitrum. Si surgen problemas durante este proceso, habrá recursos disponibles para ayudarte: ponte en contacto con el soporte en support@thegraph.com o en [Discord](https://discord.gg/graphprotocol). -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### ¿Se transferirá todo mi stake? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Puedes elegir qué parte de tu stake deseas transferir. Si decides transferir todo tu stake de una vez, deberás cerrar primero las allocations abiertas. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +Si planeas transferir partes de tu stake en múltiples transacciones, siempre debes especificar la misma dirección de beneficiario. -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +Nota: Debes cumplir con los requisitos mínimos de participación en L2 la primera vez que utilices la herramienta de transferencia. Los Indexadores deben enviar un mínimo de 100,000 GRT (cuando llaman a esta función por primera vez). Si dejas una parte de la participación en L1, también debe superar el mínimo de 100,000 GRT y ser suficiente (junto con tus delegaciones) para cubrir tus asignaciones abiertas. -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +### ¿Cuánto tiempo tengo para confirmar la transferencia de mi stake a Arbitrum? -## Do Delegators have the option to delegate to another Indexer? +\*\*\* Debes confirmar tu transacción para completar la transferencia del stake en Arbitrum. Este paso debe completarse dentro de los 7 días, de lo contrario, el stake podría perderse. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### ¿Qué sucede si tengo allocations abiertas? -## What if I can't find the Indexer I'm delegating to on L2? +Si no estás enviando la totalidad de tu stake, la herramienta de transferencia a L2 validará que al menos el mínimo de 100.000 GRT permanezca en Ethereum mainnet y que tu stake y delegación restantes sean suficientes para cubrir cualquier allocation abierta. Es posible que debas cerrar allocations abiertas si tu saldo de GRT no cubre los mínimos + allocations abiertas. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### ¿Es necesario esperar 28 días para retirar el stake en Ethereum mainnet antes de realizar la transferencia utilizando las herramientas de transferencia? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +No, puedes transferir tu stake a L2 de inmediato, no es necesario retirarlo y esperar antes de utilizar la herramienta de transferencia. La espera de 28 días solo aplica si deseas retirar el stake de regreso a tu wallet, ya sea en Ethereum mainnet o en L2. 
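To make the "at most the last 28-day period" point above concrete: the delegation rewards a Delegator forfeits by transferring while the Indexer still has open L1 allocations are roughly whatever has accrued since those allocations were last closed. A purely illustrative sketch with made-up numbers:

```typescript
// Illustrative only: rough estimate of delegation rewards forfeited by
// transferring to L2 while the Indexer still has open L1 allocations.
const MAX_ALLOCATION_DAYS = 28; // allocations are typically closed at most every 28 days

function estimateForfeitedRewards(
  dailyDelegationRewardsGrt: number, // average GRT/day this delegation has been earning (hypothetical)
  daysSinceAllocationsClosed: number // how long the current allocations have been open
): number {
  const daysAtRisk = Math.min(daysSinceAllocationsClosed, MAX_ALLOCATION_DAYS);
  return dailyDelegationRewardsGrt * daysAtRisk;
}

// Transferring right after allocations close risks far less than transferring late in the cycle.
console.log(estimateForfeitedRewards(5, 1));  // ~5 GRT at risk
console.log(estimateForfeitedRewards(5, 27)); // ~135 GRT at risk
```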
-The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### ¿Cuánto tardaré en transferir mi stake? -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +La herramienta de transferencia L2 tardará aproximadamente 20 minutos en completar la transferencia de tu stake. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### ¿Tengo que indexar en Arbitrum antes de transferir mi stake? -## Can my rewards be negatively impacted if I do not transfer my delegation? +En efecto, puedes transferir tu stake primero antes de configurar la indexación de manera efectiva, pero no podrás reclamar ninguna recompensa en L2 hasta que asignes a subgrafos en L2, los indexes y presentes POIs. -It is anticipated that all network participation will move to Arbitrum One in the future. +### ¿Pueden los Delegadores trasladar su delegación antes de que yo traslade mi stake de Indexador? -## How long does it take to complete the transfer of my delegation to L2? +No, para que los Delegadores puedan transferir su GRT delegado a Arbitrum, el Indexador en el que delegan debe estar activo en L2. -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### ¿Puedo transferir mi stake si estoy utilizando un contrato de liberación gradual de GRT (GRT vesting contract) o una wallet de bloqueo de tokens? -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +¡Sí! El proceso es un poco diferente porque los contratos de vesting no pueden transferir el ETH necesario para pagar el gas en L2, por lo que debes depositarlo de antemano. Si tu contrato de vesting aún no ha sido completamente adquirido, también deberás inicializar un contrato de vesting equivalente en L2 y solo podrás transferir el stake a este contrato de vesting en L2. La interfaz de usuario en Explorer puede guiarte a través de este proceso cuando te hayas conectado a Explorer utilizando la wallet de bloqueo de vesting. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Ya tengo participación en L2. ¿Todavía necesito enviar 100,000 GRT cuando use las herramientas de transferencia por primera vez? -## Is there any delegation tax? +Sí. 
Los contratos inteligentes de L1 no tendrán conocimiento de tu participación en L2, por lo que requerirán que transfieras al menos 100,000 GRT cuando realices la transferencia por primera vez -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### ¿Puedo transferir mi participación a L2 si estoy en proceso de retirar GRT? -## Vesting Contract Transfer +No. Si alguna fracción de tu participación se encuentra desbloqueandose, debes esperar los 28 días y retirarla antes de poder transferir la participación. Los tokens que se están apostando están "bloqueados" y evitarán cualquier transferencia o participación en L2. -## How do I transfer my vesting contract? +## Transferencia de Contrato de Vesting -To transfer your vesting, you will need to complete the following steps: +### ¿Cómo transfiero mi contrato de vesting? -1. Initiate the vesting transfer on Ethereum mainnet +Para transferir tu contrato de vesting, deberás completar los siguientes pasos: + +1. Inicia la transferencia del vesting en Ethereum mainnet 2. Espera 20 minutos para la confirmación -3. Confirm vesting transfer on Arbitrum +3. Confirma la transferencia del contrato de vesting en Arbitrum + +### ¿Cómo transfiero mi contrato de vesting si solo está parcialmente adquirido? + + -## How do I transfer my vesting contract if I am only partially vested? +1. Deposita una cantidad de ETH en el contrato de la herramienta de transferencia (la interfaz de usuario puede ayudar a estimar una cantidad razonable) -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +2. Envía un poco de GRT bloqueado a través del contrato de la herramienta de transferencia, a L2 para inicializar el bloqueo de vesting en L2. Esto también establecerá su dirección de beneficiario en L2. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +3. Envía tu stake/delegación a L2 a través de las funciones de transferencia "bloqueada" en el contrato L1Staking. -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +4. Retira cualquier ETH restante del contrato de la herramienta de transferencia -4. Withdraw any remaining ETH from the transfer tool contract +### ¿Cómo transfiero mi contrato de vesting si está completamente adquirido? -## How do I transfer my vesting contract if I am fully vested? + -For those that are fully vested, the process is similar: +Para aquellos que están completamente adquiridos, el proceso es similar: -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +1. Deposita una cantidad de ETH en el contrato de la herramienta de transferencia (la interfaz de usuario puede ayudar a estimar una cantidad razonable) -2. Set your L2 address with a call to the transfer tool contract +2. Establece tu dirección de L2 con una llamada al contrato de la herramienta de transferencia -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +3. Envía tu stake/delegación a L2 a través de las funciones de transferencia "bloqueada" en el contrato de Staking de L1. -4. Withdraw any remaining ETH from the transfer tool contract +4. Retira cualquier ETH restante del contrato de la herramienta de transferencia -## Can I transfer my vesting contract to Arbitrum? 
+### ¿Puedo transferir mi contrato de vesting a Arbitrum? -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +Puedes transferir el saldo de GRT de tu contrato de vesting a un contrato de vesting en L2. Esto es un requisito previo para transferir el stake o la delegación de tu contrato de vesting a L2. El contrato de vesting debe contener una cantidad distinta de cero de GRT (puedes transferir una pequeña cantidad, como 1 GRT, si es necesario). -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +Cuando transfieres GRT desde tu contrato de vesting en L1 a L2, puedes elegir la cantidad que deseas enviar y puedes hacerlo tantas veces como desees. El contrato en L2 se inicializará la primera vez que transfieras GRT. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +Las transferencias se realizan utilizando una herramienta de transferencia que estará visible en tu perfil de Explorer cuando te conectes con la cuenta del contrato de vesting. -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Ten en cuenta que no podrás liberar/retirar GRT del contrato de vesting en L2 hasta el final de tu período de vesting, cuando tu contrato esté completamente adquirido. Si necesitas liberar GRT antes de ese momento, puedes transferir el GRT de regreso al contrato de vesting en L1 utilizando otra herramienta de transferencia disponible para ese propósito. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Si no has transferido ningún saldo del contrato de vesting a L2 y tu contrato de vesting está completamente adquirido, no debes transferir tu contrato de vesting a L2. En su lugar, puedes usar las herramientas de transferencia para establecer una dirección de wallet en L2 y transferir directamente tu stake o delegación a esta wallet regular en L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### Si estoy utilizando mi contrato de vesting para realizar stake en la red principal, ¿puedo transferir mi stake a Arbitrum? -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. 
+Sí, pero si tu contrato aún tiene vesting, solo puedes transferir el stake para que sea propiedad de tu contrato de vesting en L2. Debes inicializar primero este contrato en L2 transfiriendo un saldo de GRT usando la herramienta de transferencia de contrato de vesting en Explorer. Si tu contrato está completamente adquirido, puedes transferir tu stake a cualquier dirección en L2, pero debes configurarla con antelación y depositar algo de ETH para que la herramienta de transferencia en L2 pague por las tarifas de gas en L2. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### Si estoy utilizando mi contrato de vesting para delegar en mainnet, ¿puedo transferir mis delegaciones a Arbitrum? -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +Sí, pero si tu contrato aún tiene vesting, solo puedes transferir la delegación para que sea propiedad de tu contrato de vesting en L2. Debes inicializar primero este contrato en L2 transfiriendo un saldo de GRT usando la herramienta de transferencia de contrato de vesting en Explorer. Si tu contrato está completamente adquirido, puedes transferir tu delegación a cualquier dirección en L2, pero debes configurarla con antelación y depositar algo de ETH para que la herramienta de transferencia en L2 pague por las tarifas de gas en L2. -## Can I specify a different beneficiary for my vesting contract on L2? +### ¿Puedo especificar un beneficiario diferente para mi contrato de vesting en L2? -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +Sí, la primera vez que transfieras un saldo y configures tu contrato de vesting en L2, puedes especificar un beneficiario en L2. Asegúrate de que este beneficiario sea una wallet que pueda realizar transacciones en Arbitrum One, es decir, debe ser una cuenta de propietario externa (EOA) o un contrato de firma múltiple implementado en Arbitrum One. -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +Si tu contrato está completamente adquirido, no configurarás un contrato de vesting en L2; en su lugar, establecerás una dirección de wallet en L2 y esta será la wallet receptora para tu stake o delegación en Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### Mi contrato está completamente adquirido. ¿Puedo transferir mi stake o delegación a otra dirección que no es un contrato de vesting en L2? -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Si. 
Si no has transferido ningún saldo de contrato de vesting a L2 y tu contrato de vesting está completamente adquirido, no debes transferir tu contrato de vesting a L2. En su lugar, puedes utilizar las herramientas de transferencia para establecer una dirección de wallet en L2 y transferir directamente tu stake o delegación a esta wallet regular en L2. -This allows you to transfer your stake or delegation to any L2 address. +Esto te permite transferir tu stake o delegación a cualquier dirección en L2. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### Mi contrato de vesting todavía está en proceso de vesting, ¿cómo transfiero el saldo de mi contrato de vesting a L2? -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +Estos pasos solo se aplican si tu contrato todavía tiene vesting o si has utilizado este proceso antes cuando tu contrato todavía tenía vesting. -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +Para transferir tu contrato de vesting a L2, enviarás cualquier saldo de GRT a L2 utilizando las herramientas de transferencia, lo que inicializará tu contrato de vesting en L2: -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +1. Deposita algo de ETH en el contrato de la herramienta de transferencia (esto se usará para pagar el gas de L2) -2. Revoke protocol access to the vesting contract (needed for the next step) +2. Revoca el acceso del protocolo al contrato de vesting (necesario para el siguiente paso) -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +3. Otorga acceso del protocolo al contrato de vesting (permitirá que tu contrato interactúe con la herramienta de transferencia) -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +4. Especifica una dirección de beneficiario en L2\* e inicia la transferencia de saldo en Ethereum mainnet 5. Espera 20 minutos para la confirmación -6. Confirm the balance transfer on L2 +6. Confirma la transferencia de saldo en L2 + +\*Si es necesario - i.e. si estás utilizando una dirección de contrato. + +\*\*\*\*Debes confirmar tu transacción para completar la transferencia de saldo en Arbitrum. Este paso debe completarse en un plazo de 7 días, o el saldo podría perderse. En la mayoría de los casos, este paso se ejecutará automáticamente, pero puede ser necesaria una confirmación manual si hay un aumento en el precio del gas en Arbitrum. Si surgen problemas durante este proceso, habrá recursos disponibles para ayudarte: ponte en contacto con el soporte en support@thegraph.com o en [Discord](https://discord.gg/graphprotocol). + +### Mi contrato de bloqueo de participación muestra 0 GRT, por lo que no puedo transferirlo. ¿Por qué sucede esto y cómo puedo solucionarlo? + +Para inicializar tu contrato de bloqueo en L2, debes transferir una cantidad no nula de GRT a L2. Esto es requerido por el puente de GRT de Arbitrum que es utilizado por las Herramientas de Transferencia en L2. Los GRT deben provenir del saldo del contrato de bloqueo, por lo que no incluye GRT apostados o delegados. 
+ +Si has apostado o delegado todo tu GRT del contrato de bloqueo, puedes enviar manualmente una pequeña cantidad, como 1 GRT, a la dirección del contrato de bloqueo desde cualquier otro lugar (por ejemplo, desde otra billetera o desde una plataforma de intercambio) + +### Estoy utilizando un contrato de bloqueo para apostar o delegar, pero no veo un botón para transferir mi participación o delegación a L2, ¿qué debo hacer? + +Si tu contrato de bloqueo aún no ha terminado de bloquear, primero debes crear un contrato de bloqueo en L2 que recibirá tu participación o delegación en L2. Este contrato de bloqueo no permitirá liberar tokens en L2 hasta el final del período de bloqueo, pero te permitirá transferir GRT de vuelta al contrato de bloqueo en L1 para que sean liberados allí. + +Cuando estés conectado al contrato de bloqueo en el Explorador, deberías ver un botón para inicializar tu contrato de bloqueo en L2. Sigue ese proceso primero y luego verás los botones para transferir tu participación o delegación en tu perfil + +### Si inicializo mi contrato de bloqueo en L2, ¿esto transferirá automáticamente mi delegación a L2? -\*If necessary - i.e. you are using a contract address. +No, inicializar tu contrato de bloqueo en L2 es un requisito previo para transferir la participación o delegación desde el contrato de bloqueo, pero aún así necesitas hacer estas transferencias por separado. -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Verás un banner en tu perfil que te indicará que transfieras tu participación o delegación después de haber inicializado tu contrato de bloqueo en L2. -## Can I move my vesting contract back to L1? +### ¿Puedo trasladar mi contrato de bloqueo de nuevo a L1? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +No es necesario hacerlo porque tu contrato de vesting todavía está en L1. Cuando utilizas las herramientas de transferencia, simplemente creas un nuevo contrato en L2 que está conectado con tu contrato de vesting en L1, y puedes enviar GRT de un lado a otro entre los dos. -## Why do I need to move my vesting contract to begin with? +### ¿Por qué necesito mover mi contrato de vesting para empezar? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +Necesitas configurar un contrato de vesting en L2 para que esta cuenta pueda ser propietaria de tu stake o delegación en L2. De lo contrario, no habría forma de transferir el stake/delegación a L2 sin "escapar" del contrato de vesting. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### ¿Qué sucede si intento cobrar mi contrato cuando solo está parcialmente adquirido? ¿Es posible hacerlo? -This is not a possibility. You can move funds back to L1 and withdraw them there. +Esto no es posible. 
Puedes mover los fondos de regreso a L1 y retirarlos desde allí. -## What if I don't want to move my vesting contract to L2? +### ¿Qué sucede si no quiero trasladar mi contrato de vesting a L2? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +Puedes seguir realizando stake/delegando en L1. Con el tiempo, es posible que desees considerar el traslado a L2 para habilitar las recompensas allí a medida que el protocolo se escala en Arbitrum. Ten en cuenta que estas herramientas de transferencia son para contratos de vesting que están permitidos para staking y delegación en el protocolo. Si tu contrato no permite staking o delegación, o es revocable, entonces no hay disponible ninguna herramienta de transferencia. Aún podrás retirar tu GRT de L1 cuando esté disponible. diff --git a/website/pages/es/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/es/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..42806d672130 100644 --- a/website/pages/es/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/es/arbitrum/l2-transfer-tools-guide.mdx @@ -1,165 +1,165 @@ --- -title: L2 Transfer Tools Guide +title: Guía de las Herramientas de Transferencia a L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +The Graph ha facilitado la migración a L2 en Arbitrum One. Para cada participante del protocolo, existe un conjunto de herramientas de transferencia a L2 para que la migración sea fluida para todos los participantes de la red. Estas herramientas requerirán que sigas un conjunto específico de pasos dependiendo de lo que estés transfiriendo. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Algunas preguntas frecuentes sobre estas herramientas se responden en las [Preguntas Frecuentes de las Herramientas de Transferencia a L2](/arbitrum/l2-transfer-tools-faq). Las preguntas frecuentes contienen explicaciones detalladas sobre cómo utilizar las herramientas, cómo funcionan y aspectos a tener en cuenta al usarlas. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Cómo transferir tu subgrafo a Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Beneficios de transferir tus subgrafos -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +La comunidad de The Graph y los core devs se han [estado preparando](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) para migrar a Arbitrum durante el último año. 
Arbitrum, una blockchain de capa 2 o "L2", hereda la seguridad de Ethereum pero ofrece tarifas de gas considerablemente más bajas. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Cuando publicas o actualizas tus subgrafos en The Graph Network, estás interactuando con contratos inteligentes en el protocolo, lo cual requiere pagar por gas utilizando ETH. Al mover tus subgrafos a Arbitrum, cualquier actualización futura de tu subgrafo requerirá tarifas de gas mucho más bajas. Las tarifas más bajas, y el hecho de que las bonding curves de curación en L2 son planas, también facilitan que otros Curadores realicen curación en tu subgrafo, aumentando las recompensas para los Indexadores en tu subgrafo. Este contexto con tarifas más económicas también hace que sea más barato para los Indexadores indexar y servir tu subgrafo. Las recompensas por indexación aumentarán en Arbitrum y disminuirán en Ethereum mainnet en los próximos meses, por lo que cada vez más Indexadores transferirán su stake y establecerán sus operaciones en L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Transferir un subgrafo a Arbitrum utiliza el puente de GRT de Arbitrum, que a su vez utiliza el puente nativo de Arbitrum para enviar el subgrafo a L2. La "transferencia" deprecará el subgrafo en mainnet y enviará la información para recrear el subgrafo en L2 utilizando el puente. También incluirá el GRT señalizado del propietario del subgrafo, el cual debe ser mayor que cero para que el puente acepte la transferencia. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Cuando eliges transferir el subgrafo, esto convertirá toda la señal de curación del subgrafo a GRT. Esto equivale a "deprecar" el subgrafo en mainnet. El GRT correspondiente a tu curación se enviará a L2 junto con el subgrafo, donde se utilizarán para emitir señal en tu nombre. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. 
If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Otros Curadores pueden elegir si retirar su fracción de GRT o también transferirlo a L2 para emitir señal en el mismo subgrafo. Si un propietario de subgrafo no transfiere su subgrafo a L2 y lo depreca manualmente a través de una llamada de contrato, entonces los Curadores serán notificados y podrán retirar su curación. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Tan pronto como se transfiera el subgrafo, dado que toda la curación se convierte en GRT, los Indexadores ya no recibirán recompensas por indexar el subgrafo. Sin embargo, habrá Indexadores que 1) continuarán sirviendo los subgrafos transferidos durante 24 horas y 2) comenzarán inmediatamente a indexar el subgrafo en L2. Dado que estos Indexadores ya tienen el subgrafo indexado, no será necesario esperar a que se sincronice el subgrafo y será posible realizar consultas al subgrafo en L2 casi de inmediato. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Las consultas al subgrafo en L2 deberán realizarse a una URL diferente (en `arbitrum-gateway.thegraph.com`), pero la URL de L1 seguirá funcionando durante al menos 48 horas. Después de eso, la gateway de L1 redirigirá las consultas a la gateway de L2 (durante algún tiempo), pero esto agregará latencia, por lo que se recomienda cambiar todas las consultas a la nueva URL lo antes posible. -## Choosing your L2 wallet +## Elección de tu wallet en L2 -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +Cuando publicaste tu subgrafo en mainnet, utilizaste una wallet conectada para crear el subgrafo, y esta wallet es la propietaria del NFT que representa este subgrafo y te permite publicar actualizaciones. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +Al transferir el subgrafo a Arbitrum, puedes elegir una wallet diferente que será la propietaria del NFT de este subgrafo en L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Si estás utilizando una wallet "convencional" como MetaMask (una Cuenta de Propiedad Externa o EOA, es decir, una wallet que no es un contrato inteligente), esto es opcional y se recomienda para mantener la misma dirección del propietario que en L1. -If you're using a smart contract wallet, like a multisig (e.g. 
a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +Si estás utilizando una wallet de tipo smart contract, como una multisig (por ejemplo, una Safe), entonces elegir una dirección de wallet L2 diferente es obligatorio, ya que es muy probable que esta cuenta solo exista en mainnet y no podrás realizar transacciones en Arbitrum utilizando esta wallet. Si deseas seguir utilizando una wallet de tipo smart contract o multisig, crea una nueva wallet en Arbitrum y utiliza su dirección como propietario L2 de tu subgrafo. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Es muy importante utilizar una dirección de wallet que controles y que pueda realizar transacciones en Arbitrum. De lo contrario, el subgrafo se perderá y no podrá ser recuperado.** -## Preparing for the transfer: bridging some ETH +## Preparándose para la transferencia: bridgeando algo de ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Transferir el subgrafo implica enviar una transacción a través del puente y luego ejecutar otra transacción en Arbitrum. La primera transacción utiliza ETH en la red principal e incluye cierta cantidad de ETH para pagar el gas cuando se recibe el mensaje en L2. Sin embargo, si este gas es insuficiente, deberás volver a intentar la transacción y pagar el gas directamente en L2 (esto es "Paso 3: Confirmando la transferencia" que se describe a continuación). Este paso **debe ejecutarse dentro de los 7 días desde el inicio de la transferencia**. Además, la segunda transacción ("Paso 4: Finalizando la transferencia en L2") se realizará directamente en Arbitrum. Por estas razones, necesitarás tener algo de ETH en una billetera de Arbitrum. Si estás utilizando una cuenta de firma múltiple o un contrato inteligente, el ETH debe estar en la billetera regular (EOA) que estás utilizando para ejecutar las transacciones, no en la billetera de firma múltiple en sí misma. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. 
+Puedes comprar ETH en algunos exchanges y retirarlo directamente a Arbitrum, o puedes utilizar el puente de Arbitrum para enviar ETH desde una billetera en la red principal a L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Dado que las tarifas de gas en Arbitrum son más bajas, solo necesitarás una pequeña cantidad. Se recomienda que comiences con un umbral bajo (por ejemplo, 0.01 ETH) para que tu transacción sea aprobada. -## Finding the subgraph Transfer Tool +## Encontrando la herramienta de transferencia del subgrafo -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Puedes encontrar la herramienta de transferencia a L2 cuando estás viendo la página de tu subgrafo en Subgraph Studio: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +También está disponible en Explorer si estás conectado con la wallet que es propietaria de un subgrafo y en la página de ese subgrafo en Explorer: ![Transferring to L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +Al hacer clic en el botón "Transferir a L2" se abrirá la herramienta de transferencia donde puedes iniciar el proceso de transferencia. -## Step 1: Starting the transfer +## Paso 1: Iniciar la transferencia -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Antes de iniciar la transferencia, debes decidir qué dirección será la propietaria del subgrafo en L2 (ver "Elección de tu wallet en L2" anteriormente), y se recomienda encarecidamente tener ETH para gas ya transferido a Arbitrum (ver "Preparando para la transferencia: transferir ETH" anteriormente). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +También ten en cuenta que la transferencia del subgrafo requiere tener una cantidad distinta de cero de señal en el subgrafo con la misma cuenta que es propietaria del subgrafo; si no has emitido señal en el subgrafo, deberás agregar un poco de curación (añadir una pequeña cantidad como 1 GRT sería suficiente). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +Después de abrir la herramienta de transferencia, podrás ingresar la dirección de la wallet L2 en el campo "Dirección de la wallet receptora" - asegúrate de ingresar la dirección correcta aquí. 
Al hacer clic en "Transferir Subgrafo", se te pedirá que ejecutes la transacción en tu wallet (ten en cuenta que se incluye un valor de ETH para pagar el gas de L2); esto iniciará la transferencia y deprecará tu subgrafo de L1 (consulta "Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta" anteriormente para obtener más detalles sobre lo que ocurre detrás de escena). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Si ejecutas este paso, **asegúrate de completar el paso 3 en menos de 7 días, o el subgrafo y tu GRT de señal se perderán**. Esto se debe a cómo funciona la mensajería de L1 a L2 en Arbitrum: los mensajes que se envían a través del puente son "tickets reintentables" que deben ejecutarse dentro de los 7 días, y la ejecución inicial puede requerir un reintento si hay picos en el precio del gas en Arbitrum. ![Start the trnasfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Paso 2: Esperarando a que el subgrafo llegue a L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Después de iniciar la transferencia, el mensaje que envía tu subgrafo de L1 a L2 debe propagarse a través del puente de Arbitrum. Esto tarda aproximadamente 20 minutos (el puente espera a que el bloque de mainnet que contiene la transacción sea "seguro" para evitar posibles reorganizaciones de la cadena). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Una vez que finalice este tiempo de espera, Arbitrum intentará ejecutar automáticamente la transferencia en los contratos de L2. ![Wait screen](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## Paso 3: Confirmando la transferencia -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +En la mayoría de los casos, este paso se ejecutará automáticamente, ya que el gas de L2 incluido en el paso 1 debería ser suficiente para ejecutar la transacción que recibe el subgrafo en los contratos de Arbitrum. Sin embargo, en algunos casos, es posible que un aumento en el precio del gas en Arbitrum cause que esta autoejecución falle. En este caso, el "ticket" que envía tu subgrafo a L2 quedará pendiente y requerirá un reintento dentro de los 7 días. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. 
+Si este es el caso, deberás conectarte utilizando una wallet de L2 que tenga algo de ETH en Arbitrum, cambiar la red de tu wallet a Arbitrum y hacer clic en "Confirmar Transferencia" para volver a intentar la transacción. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![Confirmar la transferencia a L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## Paso 4: Finalizando la transferencia en L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +En este punto, tu subgrafo y GRT se han recibido en Arbitrum, pero el subgrafo aún no se ha publicado. Deberás conectarte utilizando la wallet de L2 que elegiste como la wallet receptora, cambiar la red de tu wallet a Arbitrum y hacer clic en "Publicar Subgrafo". -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publicar el subgrafo](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Espera a que el subgrafo este publicado](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Esto publicará el subgrafo para que los Indexadores que estén operando en Arbitrum puedan comenzar a servirlo. También se emitirá señal de curación utilizando los GRT que se transfirieron desde L1. -## Step 5: Updating the query URL +## Paso 5: Actualizando la URL de consulta -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +¡Tu subgrafo se ha transferido correctamente a Arbitrum! Para realizar consultas al subgrafo, la nueva URL será: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Ten en cuenta que el ID del subgrafo en Arbitrum será diferente al que tenías en mainnet, pero siempre podrás encontrarlo en Explorer o Studio. Como se mencionó anteriormente (ver "Comprensión de lo que sucede con la señal, tu subgrafo de L1 y las URL de consulta"), la antigua URL de L1 será compatible durante un corto período de tiempo, pero debes cambiar tus consultas a la nueva dirección tan pronto como el subgrafo se haya sincronizado en L2. -## How to transfer your curation to Arbitrum (L2) +## Cómo transferir tu curación a Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Comprensión de lo que sucede con la curación al transferir subgrafos a L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. 
+Cuando el propietario de un subgrafo transfiere un subgrafo a Arbitrum, toda la señal del subgrafo se convierte en GRT al mismo tiempo. Esto se aplica a la señal "migrada automáticamente", es decir, la señal que no está vinculada a una versión o deploy específico del subgrafo, sino que sigue la última versión del subgrafo. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +Esta conversión de señal a GRT es similar a lo que sucedería si el propietario del subgrafo deprecara el subgrafo en L1. Cuando el subgrafo se depreca o se transfiere, toda la señal de curación se "quema" simultáneamente (utilizando la bonding curve de curación) y el GRT resultante se mantiene en el contrato inteligente de GNS (que es el contrato que maneja las actualizaciones de subgrafos y la señal auto-migrada). Cada Curador en ese subgrafo, por lo tanto, tiene un reclamo sobre ese GRT proporcional a la cantidad de participaciones que tenían para el subgrafo. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +Una fracción de estos GRT correspondientes al propietario del subgrafo se envía a L2 junto con el subgrafo. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +En este punto, el GRT curado ya no acumulará más tarifas de consulta, por lo que los Curadores pueden optar por retirar su GRT o transferirlo al mismo subgrafo en L2, donde se puede utilizar para generar nueva señal de curación. No hay prisa para hacerlo, ya que el GRT se puede mantener indefinidamente y todos reciben una cantidad proporcional a sus participaciones, independientemente de cuándo lo hagan. -## Choosing your L2 wallet +## Elección de tu wallet en L2 -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +Si decides transferir tu GRT curado a L2, puedes elegir una wallet diferente que será propietaria de la señal de curación en L2. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +Si estás utilizando una wallet "regular" como Metamask (una cuenta de propietario externa o EOA, es decir, una wallet que no es un contrato inteligente), entonces esto es opcional y se recomienda mantener la misma dirección de Curador que en L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. 
If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +Si estás utilizando una billetera de contrato inteligente, como una multisig (por ejemplo, Safe), entonces elegir una dirección de wallet diferente en L2 es obligatorio, ya que es muy probable que esta cuenta solo exista en mainnet y no puedas realizar transacciones en Arbitrum utilizando esta wallet. Si deseas seguir utilizando una wallet de contrato inteligente o una multisig, crea una nueva wallet en Arbitrum y utiliza su dirección como la dirección de wallet receptora en L2. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**Es muy importante utilizar una dirección de wallet que controles y que pueda realizar transacciones en Arbitrum, ya que de lo contrario la curación se perderá y no podrá recuperarse.** -## Sending curation to L2: Step 1 +## Enviando curacion para L2: Paso 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +Antes de comenzar la transferencia, debes decidir qué dirección será la propietaria de la curación en L2 (ver "Elegir tu wallet en L2" arriba), y se recomienda tener algo de ETH para el gas ya bridgeado en Arbitrum en caso de que necesites volver a intentar la ejecución del mensaje en L2. Puedes comprar ETH en algunos exchanges y retirarlo directamente a Arbitrum, o puedes utilizar el puente de Arbitrum para enviar ETH desde una wallet en la red principal a L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - dado que las tarifas de gas en Arbitrum son muy bajas, es probable que solo necesites una pequeña cantidad, por ejemplo, 0.01 ETH será más que suficiente. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +Si un subgrafo al que has curado ha sido transferido a L2, verás un mensaje en Explorer que te indicará que estás curando hacia un subgrafo transferido. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +Cuando estás en la página del subgrafo, puedes elegir retirar o transferir la curación. Al hacer clic en "Transferir Señal a Arbitrum" se abrirá la herramienta de transferencia. -![Transfer signal](/img/transferSignalL2TransferTools.png) +![Transferir señal](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. 
+Después de abrir la Herramienta de Transferencia, es posible que se te solicite agregar algo de ETH a tu wallet si no tienes. Luego podrás ingresar la dirección de la wallet en L2 en el campo "Dirección de la wallet receptora" - **asegúrate de ingresar la dirección correcta aquí**. Al hacer clic en "Transferir Señal" se te pedirá que ejecutes la transacción en tu wallet (nota que se incluye un valor de ETH para pagar el gas de L2); esto iniciará la transferencia. -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Si ejecutas este paso, **asegúrate de proceder y completar el paso 3 en menos de 7 días, o tu GRT de señal se perderá**. Esto se debe a cómo funciona la mensajería entre L1 y L2 en Arbitrum: los mensajes que se envían a través del puente son "tickets reintentables" que deben ejecutarse dentro de los 7 días, y la ejecución inicial podría necesitar un reintento si hay aumentos en el precio del gas en Arbitrum. -## Sending curation to L2: step 2 +## Enviando la curación a L2: paso 2 -Starting the transfer: +Empezando la transferencia: -![Send signal to L2](/img/sendingCurationToL2Step2First.png) +![Enviando la curación a L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Después de iniciar la transferencia, el mensaje que envía tu curación de L1 a L2 debe propagarse a través del puente de Arbitrum. Esto toma aproximadamente 20 minutos (el puente espera a que el bloque de mainnet que contiene la transacción sea "seguro" y esté protegido de posibles reorganizaciones de la cadena). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Una vez que finalice este tiempo de espera, Arbitrum intentará ejecutar automáticamente la transferencia en los contratos de L2. -![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) +![Enviando la señal de curacion a L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## Enviando curacion a L2: Paso 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +En la mayoría de los casos, este paso se ejecutará automáticamente, ya que el gas de L2 incluido en el paso 1 debería ser suficiente para ejecutar la transacción que recibe la curación en los contratos de Arbitrum. Sin embargo, en algunos casos, es posible que un aumento repentino en los precios del gas en Arbitrum cause que esta ejecución automática falle. En este caso, el "ticket" que envía tu curación a L2 quedará pendiente y requerirá un reintento dentro de los 7 días. 
-If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Si este es el caso, deberás conectarte utilizando una wallet de L2 que tenga algo de ETH en Arbitrum, cambiar la red de tu wallet a Arbitrum y hacer clic en "Confirmar Transferencia" para volver a intentar la transacción. -![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) +![Enviar señal a L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## Retirando tu curacion en L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +Si prefieres no enviar tu GRT a L2, o prefieres bridgear GRT de forma manual, puedes retirar tu GRT curado en L1. En el banner en la página del subgrafo, elige "Retirar Señal" y confirma la transacción; el GRT se enviará a tu dirección de Curador. diff --git a/website/pages/es/billing.mdx b/website/pages/es/billing.mdx index 276a0c25cd0f..f11ddea84dc4 100644 --- a/website/pages/es/billing.mdx +++ b/website/pages/es/billing.mdx @@ -37,8 +37,12 @@ Mientras que el protocolo The Graph opera en Ethereum Mainnet, [el contrato de f ### Añadir GRT utilizando una wallet cripto + + > Esta sección está escrita asumiendo que ya tienes GRT en tu wallet de criptomonedas, y que estás en la mainnet de Ethereum. Si no tienes GRT, puedes aprender cómo conseguir GRT [aquí](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Haz clic en el botón "Conectar wallet" en la esquina superior derecha de la página. Serás redirigido a la página de selección de wallet. Selecciona tu wallet y haz clic en "Conectar". @@ -71,6 +75,8 @@ Mientras que el protocolo The Graph opera en Ethereum Mainnet, [el contrato de f ### Añadir GRT utilizando una wallet multisig + + 1. Ve a la [página de facturación de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Haz clic en el botón "Conectar Wallet" en la esquina superior derecha de la página. Selecciona tu monedero y haz clic en "Conectar". Si utilizas [Gnosis-Safe](https://gnosis-safe.io/), podrás conectar tu wallet multisig y tu wallet de firma. A continuación, firma el mensaje asociado. Esto no te costará gas. @@ -97,11 +103,11 @@ Mientras que el protocolo The Graph opera en Ethereum Mainnet, [el contrato de f ## Obtener GRT -Esta sección te mostrará cómo conseguir GRT para pagar las tarifas de consulta. +This section will show you how to get GRT to pay for query fees. ### Coinbase -Esta será una guía paso a paso para comprar GRT en Coinbase. +This will be a step by step guide for purchasing GRT on Coinbase. 1. Ingresa a [Coinbase](https://www.coinbase.com/) y crea una cuenta. 2. Una vez que hayas creado una cuenta, tendrás que verificar tu identidad a través de un proceso conocido como KYC (o Know Your Customer). Se trata de un procedimiento estándar para todos los exchanges centralizados o de custodia. @@ -117,11 +123,11 @@ Esta será una guía paso a paso para comprar GRT en Coinbase. - Ingresa la cantidad de GRT que deseas enviar y la dirección de la wallet a la que deseas enviarlo. 
- Haz clic en "Continuar" y confirma tu transacción. -Ten en cuenta que para importes de compra superiores, Coinbase puede exigirte que esperes entre 7 y 10 días antes de transferir el importe total a una wallet cripto. -Puedes obtener más información sobre cómo conseguir GRT en Coinbase [aquí](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -Esta será una guía paso a paso para la compra de GRT en Binance. +This will be a step by step guide for purchasing GRT on Binance. 1. Ir a [Binance](https://www.binance.com/en) y crear una cuenta. 2. Una vez que hayas creado una cuenta, tendrás que verificar tu identidad a través de un proceso conocido como KYC (o Know Your Customer). Se trata de un procedimiento estándar para todos los exchanges centralizados o de custodia. @@ -137,11 +143,11 @@ Esta será una guía paso a paso para la compra de GRT en Binance. - Ingresa la cantidad de GRT que deseas enviar y la dirección de la wallet de la lista de aprobación a la que deseas enviarlo. - Haz clic en "Continuar" y confirma tu transacción. -Puedes obtener más información sobre cómo obtener GRT en Binance [aquí](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -Así es como puedes comprar GRT en Uniswap. +This is how you can purchase GRT on Uniswap. 1. Entra en [Uniswap](https://app.uniswap.org/#/swap) y conecta tu wallet. 2. Selecciona el token desde el que desea intercambiar. Selecciona ETH. @@ -151,8 +157,52 @@ Así es como puedes comprar GRT en Uniswap. 5. Haz clic en "Swap". 6. Confirma la transacción en tu wallet y espera a que se procese. -Puedes obtener más información sobre cómo conseguir GRT en Uniswap [aquí](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Ingresa a [Coinbase](https://www.coinbase.com/) y crea una cuenta. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). 
+ - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Haz clic en "Continuar" y confirma tu transacción. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Ir a [Binance](https://www.binance.com/en) y crear una cuenta. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Haz clic en "Continuar" y confirma tu transacción. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Arbitrum Bridge -El contrato de facturación solo está diseñado para bridgear GRT de Ethereum mainnet a la red Arbitrum. Si deseas transferir tu GRT de Arbitrum a la red principal de Ethereum, deberás utilizar el puente de [Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161). +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/es/chain-integration-overview.mdx b/website/pages/es/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/es/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. 
Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). 
The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/es/cookbook/arweave.mdx b/website/pages/es/cookbook/arweave.mdx index 5f6549781a28..0d527063ab5b 100644 --- a/website/pages/es/cookbook/arweave.mdx +++ b/website/pages/es/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construyendo Subgrafos en Arweave --- -> El soporte de Arweave en Graph Node y en el Servicio Alojado está en fase beta: ¡póngase en contacto con nosotros en [Discord](https://discord.gg/graphprotocol) con cualquier pregunta sobre la construcción de subgrafos Arweave! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! En esta guía, aprenderás a construir y deployar subgrafos para indexar la blockchain de Arweave. @@ -83,7 +83,7 @@ dataSources: ``` - Los subgrafos de Arweave introducen un nuevo tipo de fuente de datos (`arweave`) -- La red debe corresponder a una red en el nodo de Graph de alojamiento. En el Servicio Alojado, la red principal de Arweave es `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Las fuentes de datos de Arweave introducen un campo opcional "source.owner", que es la clave pública de una billetera Arweave Las fuentes de datos de Arweave admiten dos tipos de handlers: @@ -150,9 +150,9 @@ Los handlers de bloques reciben un `Block`, mientras que las transacciones recib Escribir los mappings de un subgrafo de Arweave es muy similar a escribir los mappings de un subgrafo de Ethereum. Para obtener más información, haz clic [aquí](/developing/creating-a-subgraph/#write-mappings). -## Deployando un subgrafo de Arweave en el Servicio Alojado +## Deploying an Arweave Subgraph on the hosted service -Una vez que tu subgrafo ha sido creado en el panel de control de Hosted Service, puedes deployarlo utilizando el comando CLI `graph deploy`. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/es/cookbook/grafting.mdx b/website/pages/es/cookbook/grafting.mdx index f44d7ab9a5de..05f4203e46b7 100644 --- a/website/pages/es/cookbook/grafting.mdx +++ b/website/pages/es/cookbook/grafting.mdx @@ -24,6 +24,22 @@ Para más información, puedes consultar: En este tutorial vamos a cubrir un caso de uso básico. Reemplazaremos un contrato existente con un contrato idéntico (con una nueva dirección, pero el mismo código). Luego, haremos grafting del subgrafo existente en el subgrafo "base" que rastrea el nuevo contrato. 
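For readers of the grafting tutorial referenced in this hunk, the manifest change involved is small. The following is only an illustrative sketch (the deployment ID and block number are placeholders, not values from this repository): it shows the `graft` section that the new subgraph's `subgraph.yaml` would declare, assuming `specVersion` 0.0.4 or later.

```yaml
# subgraph.yaml of the new subgraph (illustrative placeholders only)
features:
  - grafting            # grafting must be declared as a feature
graft:
  base: Qm...           # deployment ID of the existing "base" subgraph
  block: 1502122        # reuse the base subgraph's data up to this block, then index normally
```

A Graph Node indexing the grafted subgraph needs the base deployment's data up to that block available locally before it can continue indexing past it.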
+## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Construcción de un subgrafo existente La construcción de subgrafos es una parte esencial de The Graph, descrita más a fondo [aquí](http://localhost:3000/en/cookbook/quick-start/). Para poder construir y deployar el subgrafo existente utilizado en este tutorial, se proporciona el siguiente repo: diff --git a/website/pages/es/cookbook/near.mdx b/website/pages/es/cookbook/near.mdx index ae01d2caf5b0..98469fce2bc0 100644 --- a/website/pages/es/cookbook/near.mdx +++ b/website/pages/es/cookbook/near.mdx @@ -277,7 +277,7 @@ La funcionalidad pendiente aún no es compatible con los subgrafos NEAR. Mientra ### Mi pregunta no ha sido respondida, ¿dónde puedo obtener más ayuda para crear subgrafos NEAR? -Si se trata de una pregunta general sobre el desarrollo de subgrafos, hay mucha más información en el resto de la [documentación para developers](/recetario/inicio rápido). De lo contrario, únete a [The Graph Protocol Discord](https://discord.gg/graphprotocol) y pregunta en el canal #near o envía un correo electrónico a near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Referencias diff --git a/website/pages/es/cookbook/upgrading-a-subgraph.mdx b/website/pages/es/cookbook/upgrading-a-subgraph.mdx index 6b2894ceb707..d0267b3b25af 100644 --- a/website/pages/es/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/es/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerrequisitos - Ya has deployado un subgrafo en el Servicio Alojado. -- El subgrafo está indexando una cadena disponible (o disponible en beta) en la red de The Graph. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. 
diff --git a/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx index 65ecbc8aff80..556143ce8fe4 100644 --- a/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/es/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deployar un Subgrafo en Subgraph Studio --- -> Asegúrate de que la red desde la que tu subgrafo está indexando datos sea [compatible](/developing/supported-chains) en la red descentralizada. +> Learn how you can deploy non-rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). Estos son los pasos para deployar tu subgrafo en Subgraph Studio: diff --git a/website/pages/es/deploying/hosted-service.mdx b/website/pages/es/deploying/hosted-service.mdx index c8536abee739..ac6b2dcda468 100644 --- a/website/pages/es/deploying/hosted-service.mdx +++ b/website/pages/es/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / El subgrafo de ejemplo se basa en el contrato Gravity de Dani Grant que administra los avatares de los usuarios y emite eventos `NewGravatar` o `UpdateGravatar` cada vez que se crean o actualizan avatares. El subgrafo maneja estos eventos escribiendo entidades `Gravatar` en el almacén de Graph Node y asegurándose de que se actualicen de acuerdo con los eventos. Continúa con el [manifiesto del subgrafo](/developing/creating-a-subgraph#the-subgraph-manifest) para comprender mejor a qué eventos de sus contratos inteligentes debe prestar atención, mappings y más. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service \ + --from-contract <IMPLEMENTATION_ADDRESS> \ + <GITHUB_USER>/<SUBGRAPH_NAME> [<DIRECTORY>] +``` + +## Supported Networks on the hosted service Puedes encontrar la lista de las redes admitidas [Aquí](/developing/supported-networks). diff --git a/website/pages/es/deploying/subgraph-studio.mdx b/website/pages/es/deploying/subgraph-studio.mdx index d231d6501f2e..2c69ba9a1d20 100644 --- a/website/pages/es/deploying/subgraph-studio.mdx +++ b/website/pages/es/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Consultar subgrafos genera tarifas de consulta, que se utilizan para recompensar 1. Inicia sesión con tu wallet - puedes hacerlo a través de MetaMask o WalletConnect 1. Una vez que te registres, verás tu clave única de deploy en la página de inicio de tu cuenta. Esto te permitirá publicar tus subgrafos o gestionar tus claves API + facturación. Tendrás una clave de deploy única que se puede volver a generar si crees que ha sido comprometida. -## Cómo Crear tu Subgrafo en Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -¡La mejor parte!
Cuando creas un subgrafo por primera vez, se te pedirá que lo rellenes: - -- El Nombre de tu Subgrafo -- Imagen -- Descripción -- Categorías (por ejemplo, `DeFi`, `NFTs`, `Gobernanza`) -- Sitio web + ## Compatibilidad de los Subgrafos con The Graph Network diff --git a/website/pages/es/developing/creating-a-subgraph.mdx b/website/pages/es/developing/creating-a-subgraph.mdx index e16ed2d5f05a..887345ba27a4 100644 --- a/website/pages/es/developing/creating-a-subgraph.mdx +++ b/website/pages/es/developing/creating-a-subgraph.mdx @@ -98,7 +98,7 @@ Para este subgrafo de ejemplo, `subgraph.yaml` es: ```yaml specVersion: 0.0.4 description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/example-subgraph +repository: https://github.com/graphprotocol/graph-tooling schema: file: ./schema.graphql dataSources: @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: Las entradas importantes a actualizar para el manifiesto son: -- `description`: una descripción legible para el ser humano de lo que es el subgrafo. Esta descripción es mostrada por The Graph Explorer cuando el subgrafo se deploya en el Servicio Alojado. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: la URL del repositorio donde se encuentra el manifiesto del subgrafo. También se muestra en The Graph Explorer. @@ -146,6 +154,10 @@ Las entradas importantes a actualizar para el manifiesto son: - `dataSources.source.startBlock`: el número opcional del bloque desde el que la fuente de datos comienza a indexar. En la mayoría de los casos, sugerimos utilizar el bloque en el que se creó el contrato. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: las entidades que la fuente de datos escribe en el almacén. El esquema de cada entidad se define en el archivo schema.graphql. - `dataSources.mapping.abis`: uno o más archivos ABI con nombre para el contrato fuente, así como cualquier otro contrato inteligente con el que interactúes desde los mappings. @@ -242,6 +254,7 @@ Admitimos los siguientes escalares en nuestra API GraphQL: | `String` | Escalar para valores `string`. Los caracteres nulos no son compatibles y se eliminan automáticamente. | | `Boolean` | Escalar para valores `boolean`. | | `Int` | La especificación GraphQL define `Int` con un tamaño de 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Números enteros grandes. Se utiliza para los tipos `uint32`, `int64`, `uint64`, ..., `uint256` de Ethereum. 
Nota: Todo por debajo de `uint32`, como `int32`, `uint24` o `int8` se representa como `i32`. | | `BigDecimal` | `BigDecimal` Decimales de alta precisión representados como un signo y un exponente. El rango de exponentes va de -6143 a +6144. Redondeado a 34 dígitos significativos. | @@ -770,6 +783,8 @@ Además de suscribirse a eventos del contracto o calls de funciones, un subgrafo ### Filtros admitidos +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Función mapeo La función de mapeo recibirá un `ethereum.Block` como único argumento. Al igual que las funciones de mapping para eventos, esta función puede acceder a entidades de subgrafos existentes en el almacén, llamar a contratos inteligentes y crear o actualizar entidades. @@ -934,6 +988,8 @@ Si el subgrafo encuentra un error esa consulta devolverá tanto los datos como u ### Grafting sobre subgrafos existentes +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + Cuando un subgrafo es deployado por primera vez, comienza a indexar eventos en el bloque génesis de la cadena correspondiente (o en el `startBlock` definido con cada fuente de datos) En algunas circunstancias, es beneficioso reutilizar los datos de un subgrafo existente y comenzar a indexar en un bloque mucho más tardío. Este modo de indexación se denomina _Grafting_. El Grafting es, por ejemplo, útil durante el desarrollo para superar rápidamente errores simples en los mappings, o para hacer funcionar temporalmente un subgrafo existente después de que haya fallado. Un subgrafo se graftea en un subgrafo base cuando el manifiesto de subgrafo `subgraph.yaml` contiene un bloque `graft` en el nivel superior: @@ -963,7 +1019,7 @@ El subgrafo grafteado puede utilizar un esquema GraphQL que no es idéntico al d ## File Data Source -Las fuentes de datos de archivos son una nueva funcionalidad de los subgrafos para acceder a datos off-chain durante la indexación de una forma robusta y ampliable, a partir de IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. 
> Esto también establece las bases para la indexación determinista de datos off-chain, así como la posible introducción de datos arbitrarios procedentes de HTTP. @@ -1030,7 +1086,7 @@ Si la relación es 1:1 entre la entidad padre y la entidad fuente de datos de ar > Puedes utilizar [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) para filtrar entidades padre en función de estas entidades nested. -#### Añadir una nueva fuente de datos con plantilla con `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` Esta es la fuente de datos que se generará cuando se identifique un archivo de interés. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { Ahora puedes crear fuentes de datos de archivos durante la ejecución de handlers basados en cadenas: - Importar la plantilla desde los `templates` autogenerados -- llamar a `TemplateName.create(cid: string)` desde dentro de una asignación, donde cid es un identificador de contenido IPFS válido +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Actualmente Graph Node soporta [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), e identificadores de contenido con directorios (por ejemplo, `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Ejemplo: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -Esto creará una nueva fuente de datos de archivo, que sondeará el punto final IPFS configurado del Graph Node, reintentando si no se encuentra. Cuando se encuentre el archivo, se ejecutará el handler de la fuente de datos del archivo. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. En este ejemplo se utiliza el CID como búsqueda entre la entidad `Token` principal y la entidad `TokenMetadata` resultante. diff --git a/website/pages/es/developing/developer-faqs.mdx b/website/pages/es/developing/developer-faqs.mdx index 5884fc292c98..6e995d99675d 100644 --- a/website/pages/es/developing/developer-faqs.mdx +++ b/website/pages/es/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Actualmente, el enfoque recomendado para una dapp es añadir la clave al frontend y exponerla a los usuarios finales. Dicho esto, puedes limitar esa clave a un nombre de host, como _yourdapp.io_ y subgrafo. La gateway se ejecuta actualmente por Edge & Node. 
Parte de la responsabilidad de un gateway es monitorear el comportamiento abusivo y bloquear el tráfico de clientes maliciosos. -## 25. ¿Dónde puedo encontrar mi subgrafo actual en el Servicio Alojado? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. ¿El Servicio Alojado comenzará a cobrar tarifas de consulta? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. ¿Cuándo se cerrará el Servicio Alojado? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/es/developing/graph-ts/api.mdx b/website/pages/es/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..61ce2ad0ffc6 --- /dev/null +++ b/website/pages/es/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +Esta página documenta qué API integradas se pueden usar al escribir mappings de subgrafos. Hay dos tipos de API disponibles listas para usar: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## Referencias de API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. 
+- Primitivas de bajo nivel para traducir entre los distintos tipos de sistemas, tales como, Ethereum, JSON, GraphQL y AssemblyScript. + +### Versiones + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Notas del lanzamiento | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Tipos Incorporados + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Dirección + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Almacén de API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creacion de entidades + +El siguiente es un patrón común para crear entidades a partir de eventos de Ethereum. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Cada entidad debe tener un identificador único para evitar colisiones con otras entidades. 
Es bastante común que los parámetros de los eventos incluyan un identificador único que pueda ser utilizado. Nota: El uso del hash de la transacción como ID asume que ningún otro evento en la misma transacción crea entidades con este hash como ID. + +#### Carga de entidades desde el store + +Si una entidad ya existe, se puede cargar desde el store con lo siguiente: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Buscando entidades creadas dentro de un bloque + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +La API de almacenamiento facilita la recuperación de entidades que se crearon o actualizaron en el bloque actual. Una situación típica para esto es cuando un handler crea una Transacción a partir de algún evento en la cadena, y un handler posterior quiere acceder a esta transacción si existe. En el caso de que la transacción no exista, el subgrafo tendrá que ir a la base de datos solo para averiguar que la entidad no existe; si el autor del subgrafo ya sabe que la entidad debe haber sido creada en el mismo bloque, el uso de loadInBlock evita este viaje de ida y vuelta a la base de datos. Para algunos subgrafos, estas búsquedas perdidas pueden contribuir significativamente al tiempo de indexación. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Usa la identidad de transferencia como se indica arriba +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Buscando entidades derivadas + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Actualización de las entidades existentes + +Hay dos maneras de actualizar una entidad existente: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. 
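As an illustrative sketch of the two approaches — reusing the `Transfer` entity from the earlier examples, with hypothetical helper names — the main question is whether the new values depend on the entity's previous state:

```typescript
import { BigInt, Bytes } from '@graphprotocol/graph-ts'
import { Transfer } from '../generated/schema'

// Approach 1: load, modify, save — needed when the update builds on existing data
export function incrementAmount(id: Bytes, delta: BigInt): void {
  let transfer = Transfer.load(id)
  if (transfer == null) {
    transfer = new Transfer(id)
    transfer.amount = BigInt.fromI32(0)
  }
  transfer.amount = transfer.amount.plus(delta)
  transfer.save()
}

// Approach 2: create and save — the fields written here are merged over any existing entity with this ID
export function setAmount(id: Bytes, amount: BigInt): void {
  let transfer = new Transfer(id)
  transfer.amount = amount
  transfer.save()
}
```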
+ +Cambiar las propiedades es sencillo en la mayoría de los casos, gracias a los definidores de propiedades generados: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... +``` + +También es posible desajustar las propiedades con una de las dos instrucciones siguientes: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Eliminar entidades del store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### API de Ethereum + +La API de Ethereum proporciona acceso a los contratos inteligentes, a las variables de estado públicas, a las funciones de los contratos, a los eventos, a las transacciones, a los bloques y a la codificación/decodificación de los datos de Ethereum. + +#### Compatibilidad con los tipos de Ethereum + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +El siguiente ejemplo lo ilustra. Dado un esquema de subgrafos como + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Eventos y datos de bloques/transacciones + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Acceso al Estado del Contrato Inteligente + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +Un patrón común es acceder al contrato desde el que se origina un evento. Esto se consigue con el siguiente código: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Cualquier otro contrato que forme parte del subgrafo puede ser importado desde el código generado y puede ser vinculado a una dirección válida. + +#### Tratamiento de las Llamadas Revertidas + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Ten en cuenta que un nodo Graph conectado a un cliente Geth o Infura puede no detectar todas las reversiones, si confías en esto te recomendamos que utilices un nodo Graph conectado a un cliente Parity. 
+ +#### Codificación/Descodificación ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +Para mas informacion: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### API de Registro + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Registro de uno o varios valores + +##### Registro de un solo valor + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Registro de una sola entrada de una matriz existente + +En el ejemplo siguiente, sólo se registra el primer valor de la matriz de argumentos, a pesar de que la matriz contiene tres valores. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Registro de múltiples entradas de una matriz existente + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Registro de una entrada específica de una matriz existente + +Para mostrar un valor específico en el array, se debe proporcionar el valor indexado. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Registro de información de eventos + +El ejemplo siguiente registra el número de bloque, el hash del bloque y el hash de la transacción de un evento: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### API IPFS + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Dado un hash o ruta de IPFS, la lectura de un archivo desde IPFS se realiza de la siguiente manera: + +```typescript +// Colocar esto dentro del gestor de eventos del mapeo +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Rutas como `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// que incluye documentos en directorios que son soportados +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id.toString()) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them.
The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### API Cripto + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### API JSON + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Referencias de Tipo de Conversiones + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String 
| BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Metadatos de la Fuente de Datos + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity y DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/es/developing/graph-ts/common-issues.mdx b/website/pages/es/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..99563f803d43 --- /dev/null +++ b/website/pages/es/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Problemas comunes de AssemblyScript +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/es/developing/supported-networks.json b/website/pages/es/developing/supported-networks.json index 5e12392b8c7d..a1f4760729cf 100644 --- a/website/pages/es/developing/supported-networks.json +++ b/website/pages/es/developing/supported-networks.json @@ -1,5 +1,5 @@ { - "network": "Network", + "network": "Red", "cliName": "CLI Name", "chainId": "Chain ID", "studioAndHostedService": "Studio and Hosted Service", diff --git a/website/pages/es/developing/supported-networks.mdx b/website/pages/es/developing/supported-networks.mdx index 32c071752e13..d5eb03190a1c 100644 --- a/website/pages/es/developing/supported-networks.mdx +++ b/website/pages/es/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node también puede indexar otros protocolos, a través de una integración Firehose. Se han creado integraciones de Firehose para redes basadas en NEAR, Arweave y Cosmos. 
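As a rough sketch of the self-hosted option mentioned above — the network name, RPC endpoint, and connection strings below are placeholders, not recommended values — an additional EVM chain can be wired into a self-run Graph Node via the `--ethereum-rpc` flag, which takes the form `NETWORK_NAME:[CAPABILITIES]:URL`:

```bash
# Hypothetical example: run Graph Node against a local archive node for a chain called "mychain"
graph-node \
  --postgres-url postgresql://graph:password@localhost:5432/graph-node \
  --ethereum-rpc mychain:archive,traces:http://localhost:8545 \
  --ipfs 127.0.0.1:5001
```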
diff --git a/website/pages/es/developing/unit-testing-framework.mdx b/website/pages/es/developing/unit-testing-framework.mdx index 98a28748114c..514f692ac6e9 100644 --- a/website/pages/es/developing/unit-testing-framework.mdx +++ b/website/pages/es/developing/unit-testing-framework.mdx @@ -103,13 +103,13 @@ graph test path/to/file.test.ts **Opciones:** ```sh --c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. --h, --help Show usage information --l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) --r, --recompile Forces tests to be recompiled --v, --version Choose the version of the rust binary that you want to be downloaded/used +-c, --coverage: Ejecuta las pruebas en modo de cobertura. +-d, --docker: Ejecuta las pruebas en un contenedor Docker (Nota: Por favor, ejecuta desde la carpeta raíz del subgrafo). +-f, --force: Binario: Vuelve a descargar el binario. Docker: Vuelve a descargar el archivo Docker y reconstruye la imagen Docker. +-h, --help: Muestra información de uso. +-l, --logs: Registra en la consola información sobre el sistema operativo, modelo de CPU y URL de descarga (para fines de depuración). +-r, --recompile: Fuerza a que las pruebas se recompilen. +-v, --version : Elije la versión del binario de Rust que deseas descargar/utilizar ``` ### Docker @@ -990,7 +990,7 @@ Tenga en cuenta que dataSourceMock.resetValues() se llama al final. Esto se debe ## Cobertura de prueba -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Usando **Matchstick**, los desarrolladores de subgrafos pueden ejecutar un script que calculará la cobertura de las pruebas unitarias escritas. The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. @@ -1029,7 +1029,7 @@ También puedes agregar un comando `coverage` personalizado a tu archivo `packag }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +Eso ejecutará la herramienta de cobertura y deberías ver algo como esto en la terminal: ```sh $ graph test -c diff --git a/website/pages/es/firehose.mdx b/website/pages/es/firehose.mdx index feb830c2a907..40528fbd0acc 100644 --- a/website/pages/es/firehose.mdx +++ b/website/pages/es/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose proporciona un enfoque basado en archivos y en streaming-first para procesar los datos de la blockchain. +![Firehose Logo](/img/firehose-logo.png) -Se han creado integraciones de Firehose para Ethereum (y muchas cadenas EVM), NEAR, Solana, Cosmos y Arweave, y se está trabajando en otras más. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. 
-Las integraciones de Graph Node se han creado para múltiples cadenas, por lo que los subgrafos pueden transmitir datos desde un Firehose para impulsar la indexación escalable y de alto rendimiento. Firehose también potencia [substreams](/substreams), una nueva tecnología de transformación creada por los core devs de The Graph. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visita la [firehose documentation](https://firehose.streamingfast.io/) para obtener más información. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Empezando + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/es/glossary.mdx b/website/pages/es/glossary.mdx index c44cd8fe94c2..836a92c487c3 100644 --- a/website/pages/es/glossary.mdx +++ b/website/pages/es/glossary.mdx @@ -12,7 +12,7 @@ title: Glosario - **Subgrafo (Subgraph)**: Una API personalizada basada en datos de la blockchain que puede consultarse mediante [GraphQL](https://graphql.org/). Los developers pueden crear, deployar y publicar subgrafos en la red descentralizada de The Graph. Luego, los Indexadores pueden empezar a indexar los subgrafos para que los consumidores de subgrafos puedan consultarlos. -- **Servicio Alojado (Hosted Service)**: Un servicio de plataforma temporal para construir y consultar subgrafos mientras la red descentralizada de The Graph madura su coste de servicio, calidad y experiencia del developer. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexadores (Indexers)**: Participantes de la red que ejecutan nodos de indexación para indexar datos de la blockchain y servir consultas GraphQL. @@ -24,6 +24,8 @@ title: Glosario - **Stake propio del Indexador (Indexer's Self Stake)**: La cantidad de GRT que los Indexadores depositan en stake para participar en la red descentralizada. El mínimo es de 100.000 GRT, y no hay límite superior. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegadores (Delegators)**: Participantes de la red que poseen GRT y delegan su GRT en Indexadores. Esto permite a los Indexadores aumentar su stake en los subgrafos de la red. A cambio, los Delegadores reciben una parte de las recompensas de indexación que reciben los Indexadores por procesar los subgrafos. 
- **Impuesto a la Delegación (Delegation Tax)**: Una tasa del 0,5% que pagan los Delegadores cuando delegan GRT en los Indexadores. El GRT utilizado para pagar la tasa se quema. @@ -38,27 +40,21 @@ title: Glosario - **Manifiesto del subgrafo (Subgraph Manifest)**: Un archivo JSON que describe el esquema GraphQL del subgrafo, las fuentes de datos y otros metadatos. [Aquí](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) tienes un ejemplo. -- **Fondo de reembolso (Rebate Pool)**: Medida de seguridad económica que retiene las tarifas de consulta pagadas por los consumidores de subgrafos hasta que puedan ser reclamadas por los Indexadores como reembolsos de tarifas de consulta. El GRT residual se quema. - -- **Época (Epoch)**: Unidad de tiempo en red. Una época equivale actualmente a 6.646 bloques o aproximadamente 1 día. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Asignación (Allocation)**: Un Indexador puede asignar su stake total de GRT (incluyendo el stake de los Delegadores) hacia subgrafos que hayan sido publicados en la red descentralizada de The Graph. Las allocations existen en una de cuatro fases. 1. **Activa (Active)**: Una allocation se considera activa cuando se crea on-chain. Esto se llama abrir una allocation, e indica a la red que el Indexador está indexando activamente y sirviendo consultas para un subgrafo en particular. Las allocations activas acumulan recompensas de indexación proporcionales a la señal del subgrafo y a la cantidad de GRT asignada. - 2. **Cerrada (Closed)**: Un Indexador puede reclamar las recompensas de indexación acumuladas en un subgrafo determinado enviando una Prueba de Indexación (POI) reciente y válida. Esto se conoce como cerrar una allocation. Una allocation debe haber estado abierta durante un mínimo de una época antes de poder cerrarse. El periodo máximo de allocation es de 28 épocas. Si un Indexador deja una allocation abierta más de 28 épocas, se denomina allocation obsoleta. Cuando una asignación está en estado **Cerrado**, un Fisherman aún puede abrir una disputa para impugnar a un Indexador por servir datos falsos. - - 3. **Finalizada (Finalized)**: El período de disputa ha finalizado y los Indexadores pueden solicitar la devolución de las tarifas de consulta. - - 4. **Reclamada (Claimed)**: La fase final de una allocation, se han distribuido todas las recompensas elegibles y se han reclamado sus reembolsos de tarifas de consulta. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: Una potente aplicación para crear, deployar y publicar subgrafos. -- **Percadores (Fishermen)**: Los participantes en la red pueden disputar las respuestas de consulta y los POI de los Indexadores. A esto se le llama ser un Fishermen (pescador). 
Una disputa resuelta a favor del Fishermen tiene como resultado una penalización económica para el Indexador, junto con un premio para el Fishermen, incentivando así la integridad del trabajo de indexación y consulta realizado por los Indexadores de la red. En la actualidad, la penalización (slashing) está fijada en el 2,5% del stake propio del Indexador, el 50% del GRT del slashing se destina al Fishermen y el otro 50% se quema. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Árbitros (Arbitrators)**: Los Árbitros son participantes de la red establecidos a través de gobernanza. Su función es decidir el resultado de las disputas sobre indexación y consultas. Su objetivo es maximizar la utilidad y fiabilidad de The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Recorte (Slashing)**: Los Indexadores pueden ver recortado su GRT en stake por proporcionar una prueba de indexación (POI) incorrecta o por servir datos inexactos. El porcentaje de slashing es un parámetro de protocolo fijado actualmente en el 2,5% del stake propia del Indexador. El 50% del GRT recortado va al Fishermen que disputó los datos inexactos o el POI incorrecto. El otro 50% se quema. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Recompensas de Indexación (Indexing Rewards)**: Las recompensas que reciben los Indexadores por indexar subgrafos. Las recompensas de indexación se distribuyen en GRT. @@ -66,7 +62,7 @@ title: Glosario - **GRT**: El token de utilidad de trabajo de The Graph. GRT ofrece incentivos económicos a los participantes en la red por contribuir a ella. -- **POI o Prueba de Indexación (Proof of Indexing)**: Cuando un Indexador cierra su allocation y quiere reclamar sus recompensas de Indexador acumuladas en un subgrafo dado, debe proporcionar una Prueba de Indexación (POI) válida y reciente. Los Fishermen pueden disputar la POI proporcionada por un Indexador. Una disputa resuelta a favor del Fishermen resultará en la eliminación del Indexador. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.
- **Graph Node**: Graph Node es el componente que indexa los subgrafos, y hace que los datos resultantes estén disponibles para su consulta a través de una API GraphQL. Como tal, es fundamental para el stack del Indexador, y el correcto funcionamiento de Graph Node es crucial para ejecutar un Indexador con éxito. @@ -80,10 +76,10 @@ title: Glosario - **Período de enfriamiento (Cooldown Period)**: El tiempo restante hasta que un Indexador que cambió sus parámetros de delegación pueda volver a hacerlo. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/es/graphcast.mdx b/website/pages/es/graphcast.mdx index 45d7d9f9e648..22db9de9b677 100644 --- a/website/pages/es/graphcast.mdx +++ b/website/pages/es/graphcast.mdx @@ -10,7 +10,7 @@ En la actualidad, el costo de transmitir información a otros participantes de l El Graphcast SDK (Kit de Desarrollo de Software) permite a los desarrolladores construir Radios, que son aplicaciones impulsadas por gossip que los Indexadores pueden utilizar con una finalidad específica. También queremos crear algunas Radios (o dar soporte a otros desarrolladores/equipos que deseen construir Radios) para los siguientes casos de uso: -- Cross-checking en tiempo real de la integridad de datos del subgrafo ([POI Radio] \(https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Llevar a cabo subastas y coordinar warp-syncing de datos de subgrafos, substreams y Firehose de otros Indexadores. - Autoinforme sobre análisis de consultas activas, incluidos volúmenes de consultas de subgrafos, volúmenes de tarifas, etc. - Generar informes propios sobre análisis del proceso de indexación, que incluyan período de indexación de subgrafos, costos de gas handler, indexación de errores encontrados, etc. 
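The Fishermen and Slashing entries in the glossary hunk above both describe the same split: a 2.5% penalty on the Indexer's self-stake, half awarded to the Fisherman and half burned. Taking the 100,000 GRT minimum self-stake quoted earlier in the glossary as a purely hypothetical input, the arithmetic works out as follows.

```sh
# Hypothetical figures for illustration; the 2.5% / 50% / 50% split comes from the glossary above.
SELF_STAKE=100000                      # Indexer self-stake in GRT (the protocol minimum)
SLASHED=$(( SELF_STAKE * 25 / 1000 ))  # 2.5% penalty -> 2500 GRT
TO_FISHERMAN=$(( SLASHED / 2 ))        # 50% bounty   -> 1250 GRT
BURNED=$(( SLASHED - TO_FISHERMAN ))   # 50% burned   -> 1250 GRT

echo "Slashed: $SLASHED GRT | Fisherman bounty: $TO_FISHERMAN GRT | Burned: $BURNED GRT"
```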
diff --git a/website/pages/es/index.json b/website/pages/es/index.json index 54eafbca6f71..c85fd14a2cd8 100644 --- a/website/pages/es/index.json +++ b/website/pages/es/index.json @@ -23,8 +23,8 @@ "description": "Utiliza Studio para crear subgrafos" }, "migrateFromHostedService": { - "title": "Migración desde el Servicio Alojado", - "description": "Migración de subgrafos a The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Servicio Alojado", - "description": "Crea y explora subgrafos en el servicio alojado" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Redes Admitidas", - "description": "The Graph admite las siguientes redes en The Graph Network y en el Servicio Alojado.", - "graphNetworkAndHostedService": "The Graph Network y el Servicio Alojado", - "hostedService": "Servicio Alojado", - "betaWarning": "En beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/es/mips-faqs.mdx b/website/pages/es/mips-faqs.mdx index 1386685b83f9..e0a60ea776d5 100644 --- a/website/pages/es/mips-faqs.mdx +++ b/website/pages/es/mips-faqs.mdx @@ -4,122 +4,124 @@ title: Preguntas Frecuentes sobre MIPs ## Introducción -Es un momento emocionante para participar en el ecosistema de The Graph. Durante el [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal anunció la [desaparición del Servicio Alojado](https://thegraph.com/blog/sunsetting-hosted-service/), un momento hacia el que el ecosistema de The Graph ha estado trabajando durante muchos años. +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! -Para apoyar el cese del Servicio Alojado y la migración de todas sus actividades a la red descentralizada, The Graph Foundation ha anunciado el programa de Proveedores de Infraestructura de Migración (MIPs, por sus siglas en inglés) (https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). +It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. -El programa MIPs es un programa de incentivos para Indexadores que les brinda recursos para indexar cadenas más allá de Ethereum mainnet y ayuda al protocolo The Graph a expandir la red descentralizada en una capa de infraestructura multi-chain. +To support the sunsetting of the hosted service and the migration of all of its activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). + +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer.
The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. -### Recursos útiles +### Useful Resources -- [Herramientas para Indexadores de Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [Cómo convertirte en un Indexador eficaz en The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) - [Indexer Knowledge Hub](https://thegraph.academy/indexers/) - [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) - [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -### 1. ¿Es posible generar una prueba de indexación (POI) válida aunque haya fallado un subgrafo? +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -Sí, así es. +Yes, it is indeed. -Para contextualizar, el estatuto de arbitraje, [obtenga más información sobre el estatuto aquí] \(https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), especifica la metodología para generar un POI para un subgrafo fallido. +For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -Un miembro de la comunidad, [SunTzu] \(https://github.com/suntzu93), ha creado un script para automatizar este proceso en cumplimiento de la metodología del estatuto de arbitraje. Consulte el repositorio [here] \(https://github.com/suntzu93/get_valid_poi_subgraph). +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). -### 2. ¿Qué cadena incentivará primero el programa MIP? +### 2. Which chain will the MIPs program incentivise first? -¡La primera cadena que se admitirá en la red descentralizada es Gnosis Chain! Anteriormente conocida como xDAI, Gnosis Chain es una cadena basada en EVM. Gnosis Chain fue seleccionada como la primera debido a su facilidad de uso para ejecutar nodos, preparación de Indexadores, alineación con The Graph y adopción dentro de web3. +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. -### 3. ¿Cómo se añadirán nuevas cadenas al programa MIP? +### 3. How will new chains be added to the MIPs program? -Se anunciarán nuevas cadenas durante el programa MIPs, según la preparación de los Indexadores, la demanda y el sentimiento de la comunidad. En primer lugar, las cadenas serán admitidas en la red de prueba y, posteriormente, se aprobará un GIP para admitir esa cadena en la mainnet. Los Indexadores que participen en el programa MIPs elegirán en qué cadenas están interesados en admitir y ganarán recompensas por cada cadena, además de ganar tarifas de consulta y recompensas de indexación en la red por servir subgrafos. 
Los participantes de MIPs serán evaluados según su desempeño, capacidad para satisfacer las necesidades de la red y el apoyo de la comunidad. +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. -### 4. ¿Cómo sabremos cuándo la red está preparada para una nueva cadena? +### 4. How will we know when the network is ready for a new chain? -The Graph Foundation supervisará las métricas de rendimiento de la calidad de servicio, el rendimiento de la red y los canales de la comunidad para evaluar mejor el grado de preparación. La prioridad es garantizar que la red satisfaga las necesidades de rendimiento para que las dapps multi-chain puedan migrar sus subgrafos. +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. -### 5. ¿Cómo se reparten las recompensas por cadena? +### 5. How are rewards divided per chain? -Dado que las cadenas varían en sus requisitos de sincronización de nodos, y difieren en volumen de consultas y adopción, las recompensas por cadena se decidirán al final del ciclo de esa cadena para garantizar que se recogen todos los comentarios y aprendizajes. Sin embargo, en todo momento, los Indexadores también podrán ganar tarifas de consulta y recompensas por indexación una vez que la cadena sea compatible con la red. +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. -### 6. ¿Necesitamos indexar todas las cadenas en el programa MIPs o podemos elegir solo una cadena e indexarla? +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? -Puedes indexar cualquier cadena que desees. El objetivo del programa MIPs es dotar a los Indexadores de las herramientas y conocimientos necesarios para indexar las cadenas que deseen y apoyar los ecosistemas web3 que les interesen. Sin embargo, para cada cadena, hay fases desde la testnet hasta la mainnet. Asegúrate de completar todas las fases para las cadenas que está indexando. Consulta [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) para saber más sobre las fases. +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. 
See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. -### 7. ¿Cuándo se distribuirán las recompensas? +### 7. When will rewards be distributed? -Las recompensas de los MIPs se distribuirán por cadena una vez que se cumplan las métricas de rendimiento y los subgrafos migrados sean compatibles con esos Indexadores. Presta atención a la información sobre las recompensas totales por cadena a mitad del ciclo de esa cadena. +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. -### 8. ¿Cómo funciona la puntuación? +### 8. How does scoring work? -Los Indexadores competirán por recompensas basadas en la puntuación obtenida a lo largo del programa en la tabla de clasificación. La puntuación del programa se basará en: +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: -**Cobertura del subgrafo** +**Subgraph Coverage** -- ¿Proporciona un soporte máximo de subgrafos por cadena? +- Are you providing maximal support for subgraphs per chain? -- Durante los MIPs, se espera que los grandes Indexadores realicen stake en más del 50% de los subgrafos por cadena que soportan. +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. -**Calidad del servicio** +**Quality Of Service** -- ¿Sirve el Indexador la cadena con una buena calidad de servicio (latencia, datos actuales, tiempo de actividad, etc.)? +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? -- ¿Apoya el Indexador a los desarrolladores de dapps siendo reactivo a sus necesidades? +- Is the Indexer supporting dapp developers being reactive to their needs? -¿Está el Indexador asignando eficientemente, contribuyendo a la salud general de la red? +Is Indexer allocating efficiently, contributing to the overall health of the network? -**Apoyo comunitario** +**Community Support** -- ¿Está colaborando el Indexador con otros Indexadores para ayudarles a prepararse para ser multi-chain? +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? -- ¿Está el Indexador proporcionando información a los core devs a través del programa o compartiendo información con los Indexadores en el Foro? +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? -### 9. ¿Cómo se asignará el rol en Discord? +### 9. How will the Discord role be assigned? -Los moderadores asignarán los roles en los próximos días. +Moderators will assign the roles in the next few days. -### 10. ¿Está bien iniciar el programa en una red de prueba y luego pasar a la mainnet? ¿Podrán identificar mi nodo y tenerlo en cuenta a la hora de distribuir las recompensas? +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? -Sí, de hecho se espera que lo hagas. Hay varias fases en Göerli y una en la mainnet. +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. -### 11. ¿En qué momento espera que los participantes añadan un deploy en la mainnet? +### 11. At what point do you expect participants to add a mainnet deployment? 
-Durante la fase 3 será necesario disponer de un indexador de la mainnet. Próximamente se publicará más información al respecto en esta página de notion (https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +There will be a requirement to have a mainnet indexer during phase 3. More information on this will be [shared in this notion page soon](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059). -### 12. ¿Las recompensas estarán sujetas a vesting? +### 12. Will rewards be subject to vesting? -El porcentaje que se distribuirá al final del programa estará sujeto a vesting. Para más información, consulta el Acuerdo de Indexación. +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. -### 13. Para los equipos con más de un miembro, ¿todos los miembros del equipo recibirán un rol de Discord de MIPs? +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -Si +Yes -### 14. ¿Es posible utilizar los tokens bloqueados del programa graph curator para participar en la red de pruebas de los MIPs? +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -Si +Yes -### 15. Durante el programa MIPs, ¿habrá un plazo para disputar los POI no válidos? +### 15. During the MIPs program, will there be a period to dispute invalid POI? -Por decidirse. Vuelve periódicamente a esta página para obtener más detalles al respecto o, si tu solicitud es urgente, envía un correo electrónico a info@thegraph.foundation +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation -### 17. ¿Podemos combinar dos contratos de vesting? +### 17. Can we combine two vesting contracts? -No. Las opciones son: puedes delegar uno en el otro o ejecutar dos indexadores distintos. +No. The options are: you can delegate one to the other or run two separate indexers. -### 18. ¿Preguntas sobre KYC? +### 18. KYC Questions? -Envía un correo electrónico a info@thegraph.foundation +Please email info@thegraph.foundation -### 19. No estoy preparado para indexar Gnosis Chain, ¿puedo saltar y empezar a indexar desde otra cadena cuando esté listo? +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -Si +Yes -### 20. ¿Existen regiones recomendadas para instalar los servidores? +### 20. Are there recommended regions to run the servers? -No damos recomendaciones sobre regiones. A la hora de elegir ubicaciones, quizá quieras pensar dónde están los principales mercados de criptomonedas. +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. -### 21. ¿Qué es el "coste del gas de handler"? +### 21. What is “handler gas cost”? -Es la medida determinista del coste de ejecución de un handler. Contrariamente a lo que podría sugerir su nombre, no está relacionado con el coste de gas en blockchains.
diff --git a/website/pages/es/network/benefits.mdx b/website/pages/es/network/benefits.mdx index d202d42c13ee..c0e6d9fedf40 100644 --- a/website/pages/es/network/benefits.mdx +++ b/website/pages/es/network/benefits.mdx @@ -14,7 +14,7 @@ Aqui hay un analisis: - Costo mensual un 60-98% menor - Costos de configuración de infraestructura de $0 - Tiempo de actividad superior -- Acceso a 438 Indexadores (y contando) +- Access to hundreds of independent Indexers around the world - Soporte técnico 24/7 por parte de la comunidad global ## Los beneficios explicados @@ -90,7 +90,7 @@ Tarifas de instalación cero. Comienza de inmediato sin costos generales ni de i ## Confiabilidad & Resistencia -La red descentralizada de The Graph brinda a los usuarios acceso a una redundancia geográfica que no existe cuando se hospeda un `graph-node`. Las consultas se atienden de manera confiable gracias a un tiempo de actividad superior al 99,9 %, logrado por 168 Indexadores (y contando) que aseguran la red a nivel mundial. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. En conclusión: The Graph Network es menos costoso, más fácil de usar y produce resultados superiores en comparación con ejecutar un `graph-node` localmente. diff --git a/website/pages/es/network/explorer.mdx b/website/pages/es/network/explorer.mdx index 9d1dc638ad0b..d19881edb1b8 100644 --- a/website/pages/es/network/explorer.mdx +++ b/website/pages/es/network/explorer.mdx @@ -74,7 +74,7 @@ Si deseas obtener más información sobre el rol de Curador, puedes hacerlo visi Los Delegadores juegan un rol esencial en la seguridad y descentralización que conforman la red de The Graph. Participan en la red delegando (es decir, "stakeado") tokens GRT a uno o varios Indexadores. Sin Delegadores, es menos probable que los Indexadores obtengan recompensas y tarifas significativas. Por lo tanto, los Indexadores buscan atraer Delegadores ofreciéndoles una parte de las recompensas de indexación y las tarifas de consulta que ganan. -Los Delegadores, a su vez, seleccionan a los Indexadores en función de una serie de diferentes parámetros, como el rendimiento que tenía ese indexador, las tasas de recompensa por indexación y los recortes compartidos de las tarifas de consulta. ¡La reputación dentro de la comunidad también puede influir en esto! Se recomienda conectarse con los Indexadores seleccionados a través del [Discord de The Graph](https://discord.gg/graphprotocol) o el [¡Foro de The Graph](https://forum.thegraph.com/)! +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! 
![Imagen de Explorer 7](/img/Delegation-Overview.png) diff --git a/website/pages/es/network/indexing.mdx b/website/pages/es/network/indexing.mdx index 42630e4f8e5a..b3ed8579fb0b 100644 --- a/website/pages/es/network/indexing.mdx +++ b/website/pages/es/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexación --- -Los Indexadores son operadores de nodos en The Graph Network que realizan stake de Graph Tokens (GRT) para proporcionar servicios de indexación y procesamiento de consultas. Por sus servicios, los Indexadores obtienen tarifas por consulta y recompensas de indexación. También obtienen ingresos de un Rebate Pool (Fondo de Reembolso) que se reparte entre todos los contribuyentes de la red de forma proporcional a su trabajo, siguiendo la Función Rebate de Cobb-Douglas. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. Los GRT que se depositan en stake en el protocolo está sujeto a un periodo de desbloqueo y puede incurrir en slashing (ser reducidos) si los Indexadores son maliciosos y sirven datos incorrectos a las aplicaciones o si indexan incorrectamente. Los Indexadores también obtienen recompensas por stake delegados de los Delegadores, para contribuir a la red. @@ -26,7 +26,7 @@ El stake mínimo para un Indexador es actualmente de 100.000 GRT. Las recompensas de indexación proceden de la inflación del protocolo, que se fija en un 3% anual de emisión. Se distribuyen entre los subgrafos en función de la proporción de todas las señales de curación en cada uno de ellos y, a luego, se distribuyen proporcionalmente a los Indexadores en función de su allocated stake en ese subgrafo. **Una allocation debe cerrarse con una prueba válida de indexación (POI) que cumpla las normas establecidas por el acta de arbitraje para poder optar a las recompensas.** -La comunidad ha creado numerosas herramientas para calcular recompensas; encontrarás una recopilación de ellas organizada en la colección [Community Guides](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). También puedes encontrar una lista actualizada de herramientas en los canales #Delegators e #Indexers del [Discord Server](https://discord.gg/graphprotocol). Aquí enlazamos un [recomended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrado con el stack de software del Indexador. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. ### ¿Qué es una prueba de indexación (POI)? @@ -81,17 +81,17 @@ Las disputas se podran visualizar en la interfaz correspondiente al perfil del I ### ¿Qué son los reembolsos de tarifas de consulta y cuándo se distribuyen? -El gateway recoge las tarifas de consulta cada vez que se cierra una allocation y se acumulan en el fondo de reembolso de tarifas de consulta del subgrafo. 
El fondo de reembolso está diseñado para animar a los Indexadores a asignar stake en proporción aproximada a la cantidad de tarifas de consulta que ganan para la red. La parte de las tarifas de consulta del fondo que se asigna a un Indexador concreto se calcula utilizando la Función de Producción Cobb-Douglas; la cantidad distribuida por Indexador es una función de sus contribuciones al fondo y de su allocation de stake en el subgrafo. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Una vez que se ha cerrado una allocation y ha pasado el periodo de disputa, el Indexador puede reclamar los reembolsos. Una vez reclamados, los reembolsos de las tarifas de consulta se distribuyen al Indexador y a sus Delegadores en función del recorte de la tarifas de consulta y de las proporciones del pool de delegaciones. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### ¿Qué es el recorte de la tarifa de consulta y el recorte de la recompensa de indexación? Los valores de los `QueryFeeCut` e `IndexingRewardCut` son parámetros de delegación que el Indexador debe establecer junto con cooldownBlocks para controlar la distribución de GRT entre el Indexador y sus Delegadores. Hecha un vistazo de los últimos pasos de [Staking in the Protocol](/network/indexing#stake-in-the-protocol) para obtener instrucciones sobre la configuración de los parámetros de delegación. -- **QueryFeeCut** - es el % de reembolso de tarifas de consulta acumuladas en un subgrafo que serán distribuidas al Indexador. Si esto se establece en 95%, el Indexador recibirá 95% de las tarifas de consulta del fondo de reembolso cuando se reclame una allocation, y el 5% restante irá a los Delegadores. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **IndexingRewardCut** - es el % de las recompensas de indexación acumuladas en un subgrafo que serán distribuidas al Indexador. Si esto se establece en 95%, el Indexador recibirá el 95% de las recompensas de indexación del fondo cuando se cierre una allocation y los Delegadores se repartirán el 5% restante. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### ¿Cómo saben los Indexadores qué subgrafos indexar? @@ -375,7 +375,7 @@ Para participar con éxito en la red se requiere una supervisión e interacción #### Comenzar -El agente Indexador y el servicio indexador deben ubicarse junto con su infraestructura Graph Node. 
Hay muchas formas de configurar entornos de ejecución virtual para tus componentes de Indexador; aquí explicaremos cómo ejecutarlos en baremetal utilizando paquetes o fuente NPM, o mediante kubernetes y docker en Google Cloud Kubernetes Engine. Si estos ejemplos de configuración no se traducen bien en tu infraestructura, es probable que haya una guía de la comunidad de referencia, ¡ven a saludar en [Discord](https://discord.gg/graphprotocol)! Recuerda [hacer stake en el protocolo](/indexing#stake-in-the-protocol) antes de iniciar tus componentes de Indexador! +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! #### Paquetes de NPM @@ -662,21 +662,21 @@ ActionType { Ejemplo de uso de la fuente: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Ten en cuenta que los tipos de acción admitidos para la gestión de la allocation tienen diferentes requisitos de entrada: @@ -798,8 +798,4 @@ Después de ser creada por un Indexador, una allocation saludable pasa por cuatr - **Cerrado**: Un Indexador puede cerrar una allocation una vez que haya pasado 1 ciclo ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) o su agente Indexador cerrará automáticamente la allocation después de **maxAllocationEpochs** (actualmente 28 días). Cuando una allocation se cierra con una prueba válida de indexación (POI), sus recompensas de indexación se distribuyen al Indexador y sus Delegadores (consulta "¿Cómo se distribuyen las recompensas?" a continuación para obtener más información). -- **Finalizada**: Una vez que se ha cerrado una allocation, hay un período de disputa después del cual la allocation se considera **finalizada** y los reembolsos de tarifas de consulta están disponibles para ser reclamados (claim()). 
El agente Indexador supervisa la red para detectar allocation **finalizadas** y las reclama si están por encima de un umbral configurable (y opcional), **—-allocation-claim-threshold**. - -- **Reclamado**: El estado final de una asignación; ha seguido su curso como una asignación activa, se han distribuido todas las recompensas elegibles y se han reclamado los reembolsos de las tarifas de consulta. - Se recomienda a los Indexadores que utilicen la funcionalidad de sincronización fuera de la cadena para sincronizar el deploy de subgrafos con el cabezal de la cadena antes de crear la allocation on-chain. Esta función es especialmente útil para subgrafos que pueden tardar más de 28 épocas en sincronizarse o que tienen algunas posibilidades de fallar de forma indeterminada. diff --git a/website/pages/es/new-chain-integration.mdx b/website/pages/es/new-chain-integration.mdx index c5934efa6f87..2719d8758bea 100644 --- a/website/pages/es/new-chain-integration.mdx +++ b/website/pages/es/new-chain-integration.mdx @@ -1,75 +1,75 @@ --- -title: Integrating New Networks +title: Integración de nuevas redes --- -Graph Node can currently index data from the following chain types: +El Graph Node actualmente puede indexar datos de los siguientes tipos de cadena: -- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) -- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) -- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) -- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) +- Ethereum, a través de EVM JSON-RPC y [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) +- NEAR, a través de [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, a través de [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, a través de [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) -If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. +Si estás interesado en alguna de esas cadenas, la integración es una cuestión de configuración y prueba de Graph Node. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +Si estás interesado en un tipo de cadena diferente, deberás crear una nueva integración con Graph Node. Nuestra recomendación es desarrollar un nuevo Firehose para la cadena en cuestión y luego integrar ese Firehose con Graph Node. Más información a continuación. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). +Si la cadena de bloques es equivalente a EVM y el cliente/nodo expone la EVM JSON-RPC API estándar, Graph Node debería poder indexar la nueva cadena. Para obtener más información, consulte [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions.
This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. -## Difference between EVM JSON-RPC & Firehose +## Diferencia entre EVM JSON-RPC y Firehose -While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. +Si bien los dos son adecuados para subgrafos, siempre se requiere un Firehose para los desarrolladores que quieran compilar con [Substreams](substreams/), como crear [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). Además, Firehose permite velocidades de indexación mejoradas en comparación con JSON-RPC. -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +Los nuevos integradores de cadenas EVM también pueden considerar el enfoque basado en Firehose, dados los beneficios de los substreams y sus enormes capacidades de indexación en paralelo. El soporte de ambos permite a los desarrolladores elegir entre crear substreams o subgrafos para la nueva cadena. -> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **NOTA**: Una integración basada en Firehose para cadenas EVM aún requerirá que los indexadores ejecuten el nodo RPC de archivo de la cadena para indexar correctamente los subgrafos. Esto se debe a la incapacidad de Firehose para proporcionar un estado de contrato inteligente al que normalmente se puede acceder mediante el método RPC `eth_call`. 
(Vale la pena recordar que eth_calls [no es una buena práctica para desarrolladores](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) --- -## Testing an EVM JSON-RPC +## Probando un EVM JSON-RPC -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: +Para que Graph Node pueda ingerir datos de una cadena EVM, el nodo RPC debe exponer los siguientes métodos EVM JSON RPC: - `eth_getLogs` -- `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): +- `eth_call` \_(para bloques históricos, con EIP-1898 - requiere nodo de archivo): - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, in a JSON-RPC batch request -- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ +- `eth_getTransactionReceipt`, en una solicitud por lotes JSON-RPC +- _`trace_filter`_ _(opcionalmente necesario para que Graph Node admita call handlers)_ -### Graph Node Configuration +### Configuración del Graph Node -**Start by preparing your local environment** +**Empiece por preparar su entorno local** 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) 2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. -3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ +2. Modifique [esta línea](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) para incluir el nuevo nombre de la red y la URL compatible con EVM JSON RPC + > No cambie el nombre de la var env. Debe seguir siendo "ethereum" incluso si el nombre de la red es diferente. +3. Ejecute un nodo IPFS o use el utilizado por The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Prueba la integración implementando localmente un subgrafo** -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) +1. Instale [graph-cli](https://github.com/graphprotocol/graph-cli) +2. Crea un subgrafo simple de prueba. Algunas opciones están a continuación: + 1. El contrato inteligente y el subgrafo [Gravatar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) preempaquetados son un buen comienzo + 2. Arranca un subgrafo local desde cualquier contrato inteligente existente o entorno de desarrollo de solidity [usando Hardhat con un plugin Graph](https://github.com/graphprotocol/hardhat-graph) +3.
Adapta el `subgraph.yaml` resultante cambiando [`dataSources.network`](http://dataSources.network) al mismo nombre que se pasó previamente a Graph Node. +4. Crea tu subgrafo en Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publica tu subgrafo en Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. +Graph Node debería sincronizar el subgrafo implementado si no hay errores. Dale tiempo para que se sincronice y luego envíe algunas queries GraphQL al punto final de la API impreso en los registros. --- -## Integrating a new Firehose-enabled chain +## Integración de una nueva cadena habilitada para Firehose -Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: +También es posible integrar una nueva cadena utilizando el enfoque Firehose. Actualmente, esta es la mejor opción para cadenas que no son EVM y un requisito para el soporte de substreams. La documentación adicional se centra en cómo funciona Firehose, agregando soporte de Firehose para una nueva cadena e integrándola con Graph Node. Documentos recomendados para integradores: -1. [General docs on Firehose](firehose/) +1. [Documentos generales sobre Firehose](firehose/) 2. [Adding Firehose support for a new chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) -3. [Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) +3. [Integración de Graph Node con una nueva cadena a través de Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/es/operating-graph-node.mdx b/website/pages/es/operating-graph-node.mdx index ae0092552801..759184d08cd7 100644 --- a/website/pages/es/operating-graph-node.mdx +++ b/website/pages/es/operating-graph-node.mdx @@ -22,7 +22,7 @@ Para indexar una red, Graph Node necesita acceso a un cliente de red a través d Mientras que algunos subgrafos pueden requerir solo un nodo completo, otros pueden tener features de indexación que requieren funcionalidades adicionales de RPC. Específicamente, los subgrafos que realicen `eth_calls` como parte de la indexación requerirán un nodo de archivo que admita el [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), y los subgrafos con` callHandlers` o `blockHandlers` con un filtro de `call` requerirán soporte de `trace_filter` ([consulta la documentación del módulo de trazas aquí](https://openethereum.github.io/JSONRPC-trace-module)). -**Próximamente: Network Firehoses**: un Firehose es un servicio gRPC que proporciona un flujo de bloques ordenado, pero consciente de bifurcaciones, desarrollado por los core devs de The Graph para mejorar el rendimiento de la indexación a escala. Actualmente no es un requisito para los Indexadores, pero se les anima a que se familiaricen con la tecnología antes de que la red sea totalmente compatible. Más información sobre Firehose [aquí](https://firehose.streamingfast.io/). 
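To make the `graph create` / `graph deploy` steps from the new-chain-integration walkthrough above a little more concrete, here is a hedged sketch of the same flow with example values filled in. The subgraph name and the `localhost:8020` admin endpoint are assumptions (8020 is only the customary default for a local Graph Node); the IPFS endpoint is the one mentioned in the environment-setup steps.

```sh
# Placeholder values; substitute your own subgraph name and endpoints.
SUBGRAPH_NAME=myorg/my-network-example
GRAPH_NODE_ENDPOINT=http://localhost:8020/
IPFS_ENDPOINT=https://api.thegraph.com/ipfs/

# Register the subgraph name on the local Graph Node, then deploy to it
graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT
graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT
```

If no errors appear, Graph Node syncs the deployment and the GraphQL endpoint printed in its logs can be queried as the page describes.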
+**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Nodos IPFS diff --git a/website/pages/es/publishing/publishing-a-subgraph.mdx b/website/pages/es/publishing/publishing-a-subgraph.mdx index 06528c5a27e3..dca0908133b1 100644 --- a/website/pages/es/publishing/publishing-a-subgraph.mdx +++ b/website/pages/es/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Una vez que tu subgrafo se haya [deployado en Subgraph Studio](/deploying/deploy La publicación de un subgrafo en la red descentralizada hace que esté disponible para que los [Curadores](/network/curating) comiencen a seleccionarlo y los [Indexadores](/network/indexing) comienzen a indexarlo. -Para ver un tutorial sobre cómo publicar un subgrafo en la red descentralizada, consulta [este video](https://youtu.be/HfDgC2oNnwo?t=580). + Puedes encontrar la lista de las redes admitidas [Aquí](/developing/supported-networks). diff --git a/website/pages/es/querying/querying-the-hosted-service.mdx b/website/pages/es/querying/querying-the-hosted-service.mdx index 6035e983bb6e..2ecdab47bfd3 100644 --- a/website/pages/es/querying/querying-the-hosted-service.mdx +++ b/website/pages/es/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Consultar el Servicio Alojado --- -Con el subgrafo deployado, visita el [Servicio alojado](https://thegraph.com/hosted-service/) para abrir una interfaz [GraphiQL](https://github.com/graphql/graphiql) donde puedes explorar la API GraphQL deployada para el subgrafo emitiendo consultas y viendo el esquema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. A continuación se proporciona un ejemplo, pero consulta la [Query API](/querying/graphql-api) para obtener una referencia completa sobre cómo consultar las entidades del subgrafo. @@ -19,9 +19,9 @@ Estas listas de consultas muestran todos los contadores que nuestro mapping ha c } ``` -## Utilización del Servicio Alojado +## Using the hosted service -The Graph Explorer y su playground GraphQL es una forma útil de explorar y consultar los subgrafos deployados en el Servicio Alojado. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. A continuación se detallan algunas de las principales características: diff --git a/website/pages/es/querying/querying-with-python.mdx b/website/pages/es/querying/querying-with-python.mdx new file mode 100644 index 000000000000..24026110bba0 --- /dev/null +++ b/website/pages/es/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! 
+ +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Empezando + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/es/quick-start.mdx b/website/pages/es/quick-start.mdx new file mode 100644 index 000000000000..e4f18f3bfaab --- /dev/null +++ b/website/pages/es/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Comienzo Rapido +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +Esta guía está escrita asumiendo que tú tienes: + +- Una dirección de un smart contract en la red de tu preferencia +- GRT para crear tu subgráfo +- Una wallet crypto + +## 1. Crea un subgrafo en el Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Una vez conectado, puedes comenzar presionando "Crea un subgrafo". Selecciona la red de tu elección y presiona continuar. + +## 2. Instala the graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. 
+ +En tu dispositivo, ejecuta alguno de los siguientes comandos: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Inicia tu subgrafo + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +Cuando inicies tu subgrafo, la herramienta CLI te preguntará por la siguiente información: + +- Protocol: elige el protocolo desde el cual tu subgrafo indexará datos +- Subgraph slug: crea un nombre para tu subgrafo. El slug de tu subgrafo es un identificador para el mismo. +- Directorio para crear el subgrafo: elige el directorio local de tu elección +- Red Ethereum (opcional): Es posible que debas especificar desde qué red compatible con EVM tu subgrafo indexará datos +- Dirección del contrato: Localiza la dirección del contrato inteligente del que deseas consultar los datos +- ABI: Si el ABI no se completa automáticamente, deberás ingresar los datos manualmente en formato JSON +- Start Block: se sugiere que ingreses el bloque de inicio para ahorrar tiempo mientras tu subgrafo indexa los datos de la blockchain. Puedes ubicar el bloque de inicio encontrando el bloque en el que se deployó tu contrato. +- Nombre del contrato: introduce el nombre de tu contrato +- Indexar eventos del contrato como entidades: se sugiere que lo establezcas en "verdadero" ya que automáticamente agregará mapeos a tu subgrafo para cada evento emitido +- Añade otro contrato(opcional): puedes añadir otro contrato + +Inicializa tu subgrafo a partir de un contrato existente ejecutando el siguiente comando: + +```sh +graph init --studio +``` + +Ve la siguiente captura para un ejemplo de que debes de esperar cuando inicializes tu subgrafo: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Escribe tu subgrafo + +Los comandos anteriores crean un subgrafo de andamio que puedes utilizar como punto de partida para construir tu subgrafo. Al realizar cambios en el subgrafo, trabajarás principalmente con tres archivos: + +- Manifest (subgraph.yaml) - El manifiesto define qué fuentes de datos indexarán tus subgrafos. +- Schema (schema.graphql) - El esquema GraphQL define los datos que deseas recuperar del subgrafo. +- AssemblyScript Mappings (mapping.ts) - Este es el código que traduce los datos de tus fuentes de datos a las entidades definidas en el esquema. + +Para obtener más información sobre cómo escribir tu subgrafo, consulta [Crear un subgrafo](/developing/creating-a-subgraph). + +## 5. Despliega en Subgraph Studio + +Una vez escrito tu subgrafo, ejecuta los siguientes comandos: + +```sh +$ graph codegen +$ graph build +``` + +- Autentica y deploya tu subgrafo. La clave para deployar se puede encontrar en la página de Subgraph en Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Prueba tu subgrafo + +Puedes probar tu subgrafo haciendo una consulta de muestra en la sección de "playground". + +Los registros te indicarán si hay algún error con tu subgrafo. 
Los registros de un subgrafo operativo se verán así: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publica tu subgrafo en la red descentralizada de the graph + +Una vez que tu subgrafo se haya deployado en Subgraph Studio, lo hayas probado y estés listo para ponerlo en producción, lo podrás publicar en la red descentralizada. + +En el subgraph studio, da click en tu subgrafo. En la pagina del subgrafo, podrás darle click en el boton de publicar que se encuentra en la parte superior derecha. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Antes de poder hacer consultas a tu subgrafo, los indexadores deben comenzar a responder a las consultas sobre él. Para simplificar este proceso, puedes curar tu propio subgrafo utilizando GRT. + +Al momento de escribir este artículo, se recomienda que cures tu propio subgrafo con 10,000 GRT para asegurarte de que esté indexado y disponible para consultas lo antes posible. + +Para ahorrar en costos de gas, puedes curar tu subgrafo en la misma transacción en la que lo publicas seleccionando este botón al publicar tu subgrafo en la red descentralizada de The Graph: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Consulta tu subgrafo + +Ahora puedes hacer consultas a tu subgrafo enviando consultas GraphQL a la URL de consulta de tu subgrafo, que puedes encontrar haciendo clic en el botón de consulta. + +Puedes consultar desde tu dapp si no tienes tu clave de API a través de la URL de consulta temporal, libre y de tarifa limitada, que puede utilizarse para el desarrollo y la puesta en marcha. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/es/substreams.mdx b/website/pages/es/substreams.mdx index 4c7d37fdf6a2..cc5953268eec 100644 --- a/website/pages/es/substreams.mdx +++ b/website/pages/es/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams es una nueva tecnología desarrollada por los Core Devs del protocolo The Graph, creada para permitir un consumo y procesamiento extremadamente rápidos de datos indexados de la blockchain. Substreams se encuentra actualmente en fase beta abierta, disponible para pruebas y desarrollo en múltiples blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visite la [documentación sobre substreams](https://substreams.streamingfast.io/) para obtener más información y empezar a crear substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. 
Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Empezando + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/es/sunrise.mdx b/website/pages/es/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/es/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network.
The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. 
This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. 
+ +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/es/tokenomics.mdx b/website/pages/es/tokenomics.mdx index a5eba97b634b..196fde9f6115 100644 --- a/website/pages/es/tokenomics.mdx +++ b/website/pages/es/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph es un protocolo descentralizado que permite un acceso sencillo a los d Es similar a un modelo B2B2C, pero está impulsado por una red descentralizada de participantes. Los participantes de la red trabajan juntos para proporcionar datos a los usuarios finales a cambio de recompensas en GRT. GRT es el token de utilidad que coordina a los proveedores y consumidores de datos. GRT actúa como una utilidad para coordinar a los proveedores y consumidores de datos dentro de la red, e incentiva a los participantes del protocolo a organizar los datos de manera efectiva. -Al utilizar The Graph, los usuarios pueden acceder fácilmente a los datos de la blockchain, pagando solo por la información específica que necesitan. The Graph es utilizado por muchas [aplicaciones populares](https://thegraph.com/explorer) en el ecosistema web3 en la actualidad. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexa los datos de la blockchain de forma similar a como Google indexa la web. De hecho, es posible que ya estés utilizando The Graph sin darte cuenta. Si has visto la interfaz de una aplicación que obtiene sus datos de un subgrafo, ¡has consultado datos de un subgrafo! @@ -75,7 +75,7 @@ Los indexadores son la columna vertebral de The Graph. Funcionan con hardware y Los Indexadores pueden obtener recompensas de GRT de dos maneras: -1. Tarifas de consulta: GRT pagada por Desarrolladores o usuarios por consultas de datos de subgrafos. Las tarifas de consulta se depositan en un rebate pool (fondo de reembolso) y se distribuyen a los Indexadores. +1. Query fees: GRT paid by developers or users for subgraph data queries. 
Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Recompensas de indexación: la emisión anual del 3% se distribuye a los Indexadores en función del número de subgrafos que indexan. Estas recompensas incentivan a los Indexadores a indexar subgrafos, ocasionalmente antes de que comiencen las tarifas de consulta, para acumular y enviar Pruebas de Indexación (POIs) que verifiquen que han indexado datos con precisión. diff --git a/website/pages/fr/about.mdx b/website/pages/fr/about.mdx index 0e4e1b0acb3b..c545f3e026ce 100644 --- a/website/pages/fr/about.mdx +++ b/website/pages/fr/about.mdx @@ -1,47 +1,47 @@ --- -title: À propos de The Graph +title: À propos du Graph --- -Cette page explique ce qu'est « The Graph » et comment faire ses premiers pas. +Cette page expliquera ce qu'est The Graph et comment vous pouvez commencer. ## What is The Graph? The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. -Les projets avec des smart contracts complexes comme [Uniswap](https://uniswap.org/) et des projets NFT comme [Bored Ape](https://boredapeyachtclub.com/) Yacht Club stockent des données sur la blockchain Ethereum. La façon dont ces données sont stockées rend leur lecture difficile au delà de quelques informations simples. +Les projets avec des contrats intelligents complexes comme [Uniswap](https://uniswap.org/) et des projets NFT comme [Bored Ape](https://boredapeyachtclub.com/) Yacht Club stockent des données sur la blockchain Ethereum. La façon dont ces données sont stockées rend leur lecture difficile au-delà de quelques informations simples. -Dans le cas de Bored Ape Yacht Club, nous pouvons effectuer des opérations de lecture de base sur [le contrat](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code). Comme par exemple, obtenir l'adresse qui possède un certain «singe». Il est également possible d'obtenir l'identificateur de ressource uniforme (URI) du contenu d'un «singe» en fonction de son ID ou de l'approvisionnement total. En effet, ces opérations de lecture sont programmées directement dans le contrat intelligent. Par contre, les requêtes et les opérations plus complexes du monde réel comme l'agrégation, la recherche, les relations et le filtrage non trivial ne sont pas possibles. Par conséquent, si nous voulions rechercher «les singes» qui appartiennent à une certaine adresse, et par la suite les filtrer par l'une de ses caractéristiques, nous ne pourrions pas obtenir cette information en interagissant directement avec le contrat lui-même. +Dans le cas du Bored Ape Yacht Club, nous pouvons effectuer des opérations de lecture de base sur [le contrat](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code), comme obtenir le propriétaire d'un certain singe, obtenir l'URI du contenu d'un singe en fonction de son identifiant ou de l'offre totale. , car ces opérations de lecture sont programmées directement dans le contrat intelligent, mais des requêtes et des opérations plus avancées du monde réel telles que l'agrégation, la recherche, les relations et le filtrage non trivial ne sont pas possibles. 
Par exemple, si nous souhaitions rechercher les singes appartenant à une certaine adresse et filtrer selon l'une de ses caractéristiques, nous ne serions pas en mesure d'obtenir cette information en interagissant directement avec le contrat lui-même. -Pour obtenir ces données, il faudrait donc traiter chaque événement de [`transfert`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) depuis la création du contrat intelligent. Autrement dit, lire toutes les métadonnées d'IPFS à l'aide de l'ID du jeton et du hachage IPFS. Une fois cette action faite, nous pourrions les agréger. Même pour ce type de questions relativement simples, il faudrait par conséquent **des heures, voire des jours**, à une application décentralisée (dapp) fonctionnant dans un navigateur pour obtenir une réponse. +Pour obtenir ces données, vous devez traiter chaque événement de [`transfert`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) jamais émis, lire les métadonnées d'IPFS à l'aide de l'ID de jeton et du hachage IPFS, puis les agréger. Même pour ce type de questions relativement simples, il faudrait **heures, voire jours** à une application décentralisée (dapp) exécutée dans un navigateur pour obtenir une réponse. You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. -**L'indexation des données de la blockchain est extrêmement complexe** +**L’indexation des données blockchain est vraiment très difficile.** -Les propriétés de la blockchain, telles que la finalité, les réorganisations de la chaîne ou les blocs non verrouillés, compliquent encore ce processus. En effet, ils rendent la récupération des données non seulement longue et inefficace mais également difficile à conceptualiser. +Les propriétés de la blockchain telles que la finalité, les réorganisations de chaîne ou les blocs non verrouillés compliquent davantage ce processus et rendent non seulement la récupération de résultats de requête corrects à partir des données de la blockchain, mais aussi conceptuellement difficile. -The Graph résout ce problème grâce à un protocole décentralisé qui indexe et permet une interrogation performante et efficace des données de la blockchain. Ces API (les subgraphes indexés) peuvent ensuite être interrogées avec une API GraphQL standard. Aujourd'hui, il existe un service hébergé ainsi qu'un protocole décentralisé avec les mêmes capacités. Les deux sont donc soutenus par l'implémentation open source « [Graph Node](https://github.com/graphprotocol/graph-node) ». +The Graph résout ce problème avec un protocole décentralisé qui indexe et permet l'interrogation performante et efficace des données blockchain. Ces API (les « sous-graphes » indexés) peuvent ensuite être interrogées avec une API GraphQL standard. Il existe aujourd'hui un service hébergé ainsi qu'un protocole décentralisé avec les mêmes capacités. Les deux s'appuient sur l'implémentation open source de [Graph Node](https://github.com/graphprotocol/graph-node). -## Comment fonctionne The Graph +## Fonctionnement du Graph -The Graph apprend quoi et de quelle manière indexer les données sur Ethereum en fonction des instructions fournies dans le manifeste de chaque subgraphe. 
La description du subgraphe quant à elle définit les contrats intelligents d'intérêt pour celui-ci et les événements auxquels il doit prêter attention. Il va également définir la façon de faire correspondre les informations des événements aux ressources que The Graph stockera dans sa base de données. +The Graph apprend quoi et comment indexer les données Ethereum en fonction des descriptions de subgraphs, connues sous le nom de manifeste de subgraph. La description du subgraph définit les contrats intelligents d'intérêt pour un subgraph, les événements de ces contrats auxquels il faut prêter attention et comment mapper les données d'événement aux données que The Graph stockera dans sa base de données. -Une fois que vous avez écrit un `manifeste de subgraphe`, vous utilisez le Graph CLI pour stocker la définition dans IPFS et vous indiquez par la même occasion à l'indexeur de commencer à indexer les données pour ce subgraphe. +Une fois que vous avez écrit un `manifeste de subgraph`, vous utilisez le Graph CLI pour stocker la définition dans IPFS et vous indiquez par la même occasion à l'indexeur de commencer à indexer les données pour ce subgraph. -Ce diagramme donne plus de détails sur le flux de données, généré par les transactions Ethereum, une fois qu'un manifeste a été déployé : +Ce diagramme donne plus de détails sur le flux de données une fois qu'un manifeste de subgraph a été déployé, traitant des transactions Ethereum : ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) -Description des étapes du flux : +La description des étapes du flux : 1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. Le contrat intelligent va alors émettre un ou plusieurs événements lors du traitement de la transaction. -3. Parallèlement, Le nœud de The Graph scanne continuellement Ethereum à la recherche de nouveaux blocs et de nouvelles données intéressantes pour votre subgraphe. -4. The Graph Node trouve alors les événements Ethereum d'intérêt pour votre subgraphe dans ces blocs et vient exécuter les corrélations correspondantes que vous avez fournies. Le gestionnaire de corrélation se définit comme un module WASM qui crée ou met à jour les entités de données que le nœud de The Graph stocke en réponse aux événements Ethereum. +2. Le contrat intelligent va alors produire un ou plusieurs événements lors du traitement de la transaction. +3. Parallèlement, Le nœud de The Graph scanne continuellement Ethereum à la recherche de nouveaux blocs et de nouvelles données intéressantes pour votre subgraph. +4. The Graph Node trouve alors les événements Ethereum d'intérêt pour votre subgraph dans ces blocs et vient exécuter les corrélations correspondantes que vous avez fournies. Le gestionnaire de corrélation se définit comme un module WASM qui crée ou met à jour les entités de données que le nœud de The Graph stocke en réponse aux événements Ethereum. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. 
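To make step 5 concrete, a dapp's request to a Graph Node GraphQL endpoint might look like the sketch below; the `gravatars` entity and its fields are a hypothetical example, not a schema defined on this page.

```graphql
{
  gravatars(first: 5, orderBy: displayName) {
    id
    owner
    displayName
    imageUrl
  }
}
```

Graph Node resolves such a query against the entities its mappings have written to the store, so the shape of the response mirrors the subgraph's own GraphQL schema.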
-## Étapes suivantes +## Les Étapes suivantes -Dans les sections suivantes, nous expliquerons plus en détail comment définir les subgraphes, comment les déployer et comment interroger les données à partir des index que Le nœud de The Graph construit. +Dans les sections suivantes, nous expliquerons plus en détail comment définir les subgraphs, comment les déployer et comment interroger les données à partir des index que Le nœud de The Graph construit. -Avant de commencer à écrire votre propre subgraphe, vous voudrez peut-être jeter un coup d'œil à « Graph Explorer »The Graph et explorer certains des subgraphes qui ont déjà été déployés. La page de chaque subgraphe contient un terrain de jeu qui vous permet d'interroger les données de ce subgraphe avec GraphQL. +Avant de commencer à écrire votre propre subgraph, vous voudrez peut-être jeter un coup d'œil à « Graph Explorer »The Graph et explorer certains des subgraphs qui ont déjà été déployés. La page de chaque subgraph contient un terrain de jeu qui vous permet d'interroger les données de ce subgraph avec GraphQL. diff --git a/website/pages/fr/arbitrum/arbitrum-faq.mdx b/website/pages/fr/arbitrum/arbitrum-faq.mdx index 849d08c92b93..2ce099660ee3 100644 --- a/website/pages/fr/arbitrum/arbitrum-faq.mdx +++ b/website/pages/fr/arbitrum/arbitrum-faq.mdx @@ -1,78 +1,78 @@ --- -title: Arbitrum FAQ +title: FAQ d'Arbitrum --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Cliquez [ici](#billing-on-arbitrum-faqs) si vous souhaitez passer à la FAQ sur la facturation Arbitrum. -## Why is The Graph implementing an L2 Solution? +## Pourquoi The Graph met-il en place une solution L2 ? -By scaling The Graph on L2, network participants can expect: +En faisant passer The Graph à l'échelle L2, les participants au réseau peuvent espérer : -- Upwards of 26x savings on gas fees +- Jusqu'à 26 fois plus d'économies sur les frais de gaz -- Faster transaction speed +- Vitesse de transaction plus élevée -- Security inherited from Ethereum +- La sécurité héritée d'Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +La mise à l'échelle des contrats intelligents du protocole sur L2 permet aux participants au réseau d'interagir plus fréquemment pour un coût réduit en termes de frais de gaz. Par exemple, les indexeurs peuvent ouvrir et fermer des allocations pour indexer un plus grand nombre de subgraphs avec une plus grande fréquence, les développeurs peuvent déployer et mettre à jour des subgraphs plus facilement, les délégués peuvent déléguer des GRT avec une fréquence accrue, et les curateurs peuvent ajouter ou supprimer des signaux à un plus grand nombre de subgraphs - des actions auparavant considérées comme trop coûteuses pour être effectuées fréquemment en raison des frais de gaz. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. 
+La communauté Graph a décidé d'avancer avec Arbitrum l'année dernière après le résultat de la discussion [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). -## What do I need to do to use The Graph on L2? +## Que dois-je faire pour utiliser The Graph en L2 ? -Users bridge their GRT and ETH  using one of the following methods: +Les utilisateurs bridgent leurs GRT et ETH en utilisant l'une des méthodes suivantes : -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Le pont The Graph sur Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) -- [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) +- [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +Pour tirer parti de l'utilisation de The Graph sur L2, utilisez ce sélecteur déroulant pour passer d'une chaîne à l'autre. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Sélecteur déroulant pour activer Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## En tant que développeur de subgraphs, consommateur de données, indexeur, curateur ou délégateur, que dois-je faire maintenant ? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Aucune action immédiate n'est requise, cependant, les participants au réseau sont encouragés à commencer à migrer vers Arbitrum pour profiter des avantages de L2. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Les équipes de développeurs de base travaillent à la création d'outils de transfert L2 qui faciliteront considérablement le transfert de la délégation, de la curation et des subgraphes vers Arbitrum. Les participants au réseau peuvent s'attendre à ce que les outils de transfert L2 soient disponibles d'ici l'été 2023. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +À partir du 10 avril 2023, 5 % de toutes les récompenses d'indexation sont frappées sur Arbitrum. Au fur et à mesure que la participation au réseau augmentera et que le Conseil l'approuvera, les récompenses d'indexation passeront progressivement de l'Ethereum à l'Arbitrum, pour finalement passer entièrement à l'Arbitrum. -## If I would like to participate in the network on L2, what should I do? +## Que dois-je faire si je souhaite participer au réseau L2 ? -Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Veuillez aider à [tester le réseau](https://testnet.thegraph.com/explorer) sur L2 et signaler vos commentaires sur votre expérience dans [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Existe-t-il des risques associés à la mise à l’échelle du réseau vers L2 ? 
-All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Tous les contrats intelligents ont été minutieusement [audités](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Tout a été testé minutieusement et un plan d’urgence est en place pour assurer une transition sûre et fluide. Les détails peuvent être trouvés [ici](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Les subgraphs existants sur Ethereum continueront-ils à fonctionner ? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Oui, les contrats The Graph Network fonctionneront en parallèle sur Ethereum et Arbitrum jusqu'à leur passage complet à Arbitrum à une date ultérieure. -## Will GRT have a new smart contract deployed on Arbitrum? +## GRT disposera-t-il d'un nouveau contrat intelligent déployé sur Arbitrum ? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Oui, GRT dispose d'un [contrat intelligent sur Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) supplémentaire. Cependant, le réseau principal Ethereum [contrat GRT](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) restera opérationnel. -## Billing on Arbitrum FAQs +## FAQ sur la facturation sur Arbitrum -## What do I need to do about the GRT in my billing balance? +## Que dois-je faire à propos du GRT dans mon solde de facturation ? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Rien! Votre GRT a été migré de manière sécurisée vers Arbitrum et est utilisé pour payer les requêtes au moment où vous lisez ceci. -## How do I know my funds have migrated securely to Arbitrum? +## Comment savoir si mes fonds ont migré en toute sécurité vers Arbitrum ? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Tous les soldes de facturation GRT ont déjà été migrés avec succès vers Arbitrum. Vous pouvez afficher le contrat de facturation sur Arbitrum [ici](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure? +## Comment savoir si le pont Arbitrum est sécurisé ? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Le pont a été [fortement audité](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) pour garantir la sécurité de tous les utilisateurs. 
-## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Que dois-je faire si j'ajoute du nouveau GRT depuis mon portefeuille du mainnet Ethereum ? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +L'ajout de GRT à votre solde de facturation Arbitrum peut être effectué en un seul clic dans [Subgraph Studio](https://thegraph.com/studio/). Vous pourrez facilement relier votre GRT à Arbitrum et remplir vos clés API en une seule transaction. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Visitez la [page de facturation](https://thegraph.com/docs/en/billing/) pour des instructions plus détaillées sur l'ajout, le retrait ou l'acquisition de GRT. diff --git a/website/pages/fr/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/fr/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..9203f9d967dd 100644 --- a/website/pages/fr/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/fr/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: FAQ sur les outils de transfert L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## Général -## What are L2 Transfer Tools? +### Que sont les outils de transfert L2 ? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph a rendu la participation au réseau 26 fois moins chère pour les contributeurs en déployant le protocole sur Arbitrum One. Les outils de transfert L2 ont été créés par les développeurs du noyau pour faciliter le passage à L2. -## Can I use the same wallet I use on Ethereum mainnet? +Pour chaque participant au réseau, un ensemble d'outils de transfert L2 est disponible pour rendre l'expérience transparente lors du passage à L2, en évitant les périodes de dégel ou d'avoir à retirer manuellement et à ponter les TRG. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Ces outils vous obligeront à suivre un ensemble d'étapes spécifiques en fonction de votre rôle au sein de The Graph et de ce que vous transférez vers L2. -## Subgraph Transfer +### Puis-je utiliser le même portefeuille que celui que j'utilise sur le réseau principal Ethereum ? -## How do I transfer my subgraph? 
+Si vous utilisez un portefeuille [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), vous pouvez utiliser la même adresse. Si votre portefeuille du mainnet Ethereum est un contrat (par exemple un multisig), vous devez alors spécifier une [adresse du portefeuille Arbitrum]\(/arbitrum/arbitrum-faq/#what-do-i-deed-to-do-to-use-the- graph-on-l2) où votre virement sera envoyé. Veuillez vérifier attentivement l'adresse car tout transfert vers une adresse incorrecte peut entraîner une perte permanente. Si vous souhaitez utiliser un multisig sur L2, veillez à déployer un contrat multisig sur Arbitrum One. -To transfer your subgraph, you will need to complete the following steps: +Les portefeuilles sur les blockchains EVM comme Ethereum et Arbitrum sont une paire de clés (publiques et privées) que vous créez sans avoir besoin d'interagir avec la blockchain. Ainsi, tout portefeuille créé pour Ethereum fonctionnera également sur Arbitrum sans action supplémentaire. -1. Initiate the transfer on Ethereum mainnet +Une exception concerne les portefeuilles de smart contracts comme les multisigs : ces smart contrats sont déployés séparément sur chaque chaîne, et obtiennent leur adresse une fois déployés. Si un multisig a été déployé sur Ethereum, il n'existera pas avec la même adresse sur Arbitrum. Un nouveau multisig doit d'abord être créé sur Arbitrum, et aura une adresse différente. -2. Wait 20 minutes for confirmation +### Que se passe-t-il si je ne termine pas mon transfert dans les 7 jours? -3. Confirm subgraph transfer on Arbitrum\* +Les outils de transfert L2 utilisent le mécanisme natif d’Arbitrum pour envoyer des messages de L1 à L2. Ce mécanisme s’appelle un « billet modifiable » et est utilisé par tous les ponts de jetons natifs, y compris le pont GRT Arbitrum. Vous pouvez en savoir plus sur les billets retryables dans le [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +Lorsque vous transférez vos actifs (subgraph, enjeu, délégation ou curation) vers L2, un message est envoyé par le pont GRT Arbitrum qui crée un ticket modifiable en L2. L’outil de transfert inclut une certaine valeur ETH dans la transaction, qui est utilisée pour 1) payer la création du ticket et 2) payer pour le gaz utile à l'exécution du ticket en L2. Cependant, comme le prix du gaz peut varier durant le temps nécessaire à l'exécution du ticket en L2, il est possible que cette tentative d’exécution automatique échoue. Lorsque cela se produit, le pont Arbitrum maintient le billet remboursable en vie pendant 7 jours, et tout le monde peut réessayer de « racheter » le billet (ce qui nécessite un portefeuille avec des ETH liés à Arbitrum). -5. Update Query URL (recommended) +C'est ce que nous appelons l'étape « Confirmer » dans tous les outils de transfert : elle s'exécute automatiquement dans la plupart des cas et l'exécution automatique réussit le plus souvent. Il est tout de même important de vérifier que le transfert se soit bien déroulé. Si cela échoue et qu'aucune autre tentative n'est confirmé dans les 7 jours, le pont Arbitrum rejettera le ticket et vos actifs (subgraph, participation, délégation ou curation) ne pourront pas être récupérés. Les développeurs principaux de Graph ont mis en place un système de surveillance pour détecter ces situations et essayer d'échanger les billets avant qu'il ne soit trop tard, mais il en reste de votre responsabilité de vous assurer que votre transfert est terminé à temps. 
Si vous rencontrez des difficultés pour confirmer votre transaction, veuillez nous contacter en utilisant [ce formulaire](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) et les développeurs seront là pour vous aider. -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### J'ai commencé le transfert de ma délégation/enjeu/curation et je ne suis pas sûr qu'il soit parvenu jusqu'à L2, comment puis-je confirmer qu'il a été transféré correctement ? -## Where should I initiate my transfer from? +Si vous ne voyez pas de bannière sur votre profil vous demandant de terminer le transfert, il est probable que la transaction soit arrivée en toute sécurité en L2 et qu'aucune autre action ne soit nécessaire. En cas de doute, vous pouvez vérifier si Explorer affiche votre délégation, votre participation ou votre curation sur Arbitrum One. -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Si vous disposez du hachage de transaction L1 (que vous pouvez trouver en consultant les transactions récentes dans votre portefeuille), vous pouvez également confirmer si le « ticket réessayable » qui a transporté le message vers L2 a été utilisé ici : https://retryable-dashboard.arbitrum.io/ - si l'échange automatique échoue, vous pouvez également y connecter votre portefeuille et l'échanger. Soyez assuré que les développeurs principaux surveillent également les messages bloqués et tenteront de les récupérer avant leur expiration. -## How long do I need to wait until my subgraph is transferred +## Subgraph transfert -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +### Comment transférer mon subgraph ? -## Will my subgraph still be discoverable after I transfer it to L2? + -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Pour transférer votre subgraph, suivez les étapes qui suivent : -## Does my subgraph need to be published to transfer it? +1. Initier le transfert sur le mainnet Ethereum -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 
1 GRT) from that account; make sure to choose "auto-migrating" signal. +2. Attendre 20 minutes pour une confirmation -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +3. Vérifier le transfert de subgraph sur Arbitrum\* -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +4. Terminer la publication du subgraph sur Arbitrum -## After I transfer, do I also need to re-publish on Arbitrum? +5. Mettre à jour l'URL de requête (recommandé) -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +\*Notez que vous devez confirmer le transfert dans un délai de 7 jours, faute de quoi votre subgraph pourrait être perdu. Dans la plupart des cas, cette étape s'exécutera automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. En cas de problème au cours de ce processus, des ressources seront disponibles pour vous aider : contactez le service d'assistance à l'adresse support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). -## Will there be a down-time to my endpoint while re-publishing? +### D'où dois-je initier mon transfert ? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Vous pouvez effectuer votre transfert à partir du [Subgraph Studio](https://thegraph.com/studio/), d'[Explorer](https://thegraph.com/explorer) ou de n'importe quelle page de détails de subgraph. Cliquez sur le bouton "Transférer le subgraph" dans la page de détails du subgraph pour démarrer le transfert. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Combien de temps dois-je attendre avant que mon subgraph soit transféré ? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Le transfert prend environ 20 minutes. Le pont Arbitrum fonctionne en arrière-plan pour terminer automatiquement le transfert via le pont. Dans certains cas, les coûts du gaz peuvent augmenter et vous devrez confirmer à nouveau la transaction. -## Will my subgraph's curation move with my subgraph? +### Mon subgraph sera-t-il toujours repérable après son transfert vers L2 ? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Votre subgraph ne sera détectable que sur le réseau sur lequel il est publié. Par exemple, si votre subgraph est sur Arbitrum One, vous ne pouvez le trouver que dans Explorer sur Arbitrum One et vous ne pourrez pas le trouver sur Ethereum.
Assurez-vous d'avoir sélectionné Arbitrum One dans le sélecteur de réseau en haut de la page pour être sur le bon réseau. Après le transfert, le subgraph L1 apparaîtra comme obsolète. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### Mon subgraph doit-il être publié afin d'être transféré ? -## Can I move my subgraph back to Ethereum mainnet after I transfer? +Pour profiter de l'outil de transfert de subgraph, votre subgraph doit déjà être publié sur le mainnet Ethereum et doit avoir un signal de curation appartenant au portefeuille qui possède le subgraph. Si votre subgraph n'est pas publié, il est recommandé de publier simplement directement sur Arbitrum One - les frais de gaz associés seront considérablement moins élevés. Si vous souhaitez transférer un subgraph publié mais que le compte propriétaire n'y a signalé aucune curation, vous pouvez signaler un petit montant (par ex. 1 GRT) à partir de ce compte ; assurez-vous de choisir le signal à “migration automatique”. -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### Que se passe-t-il pour la version Ethereum mainnet de mon subgraph après que j'ai transféré sur Arbitrum ? -## Why do I need bridged ETH to complete my transfer? +Après avoir transféré votre subgraph vers Arbitrum, la version du réseau principal Ethereum deviendra obsolète. Nous vous recommandons de mettre à jour votre URL de requête dans les 48 heures. Cependant, il existe une période de grâce qui maintient le fonctionnement de votre URL mainnet afin que tout support dapp tiers puisse être mis à jour. -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### Après le transfert, dois-je également republier sur Arbitrum ? -## Curation Signal +Après la fenêtre de transfert de 20 minutes, vous devrez confirmer le transfert avec une transaction dans l'interface utilisateur pour terminer le transfert, mais l'outil de transfert vous guidera tout au long de cette étape. Votre point de terminaison L1 continuera à être pris en charge pendant la fenêtre de transfert et pendant une période de grâce après. Nous vous encourageons à mettre à jour votre point de terminaison lorsque cela vous convient. -## How do I transfer my curation? +### Mon point de terminaison subira-t-il un temps d'arrêt lors de la republication ? -To transfer your curation, you will need to complete the following steps: +Il est peu probable, mais possible, de subir un bref temps d'arrêt selon les indexeurs qui prennent en charge le subgraph sur L1 et s'ils continuent à l'indexer jusqu'à ce que le subgraph soit entièrement pris en charge sur L2. -1. Initiate signal transfer on Ethereum mainnet +### La publication et la gestion des versions sont-elles les mêmes sur L2 que sur le mainnet Ethereum ? -2. Specify an L2 Curator address\* +Oui. Sélectionnez Arbitrum One comme réseau publié lors de la publication dans le Subgraph Studio. Dans le Studio, le dernier point de terminaison sera disponible et vous dirigera vers la dernière version mise à jour du subgraph. -3. 
Wait 20 minutes for confirmation +### La curation de mon subgraph sera-t-elle déplacée avec mon subgraph? -\*If necessary - i.e. you are using a contract address. +Si vous avez choisi le signal de migration automatique, 100% de votre propre curation se déplacera avec votre subgraph vers Arbitrum One. Tout le signal de curation du subgraph sera converti en GRT au moment du transfert, et le GRT correspondant à votre signal de curation sera utilisé pour monnayer du signal sur le subgraph L2. -## How will I know if the subgraph I curated has moved to L2? +D'autres curateurs peuvent choisir de retirer leur fraction de GRT ou de la transférer vers L2 pour monnayer un nouveau signal sur le même subgraph. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Puis-je ramener mon subgraph sur le mainnet Ethereum après le transfert ? -## What if I do not wish to move my curation to L2? +Une fois transféré, votre version mainnet Ethereum de ce subgraph deviendra obsolète. Si vous souhaitez revenir au mainnet, vous devrez redéployer et publier à nouveau sur le mainnet. Cependant, le transfert vers le mainnet Ethereum est fortement déconseillé car les récompenses d'indexation seront distribuées entièrement sur Arbitrum One. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Pourquoi ai-je besoin d'ETH bridgé pour finaliser mon transfert ? -## How do I know my curation successfully transferred? +Les frais de gaz sur Arbitrum One sont payés en utilisant l'ETH bridgé (c'est-à-dire l'ETH qui a été bridgé sur Arbitrum One). Cependant, les frais de gaz sont nettement inférieurs à ceux du mainnet Ethereum. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +## Délégation -## Can I transfer my curation on more than one subgraph at a time? +### Comment puis-je transférer ma délégation? -There is no bulk transfer option at this time. + -## Indexer Stake +Pour transférer votre délégation, complétez les étapes suivantes : -## How do I transfer my stake to Arbitrum? +1. Lancer un transfert de délégation sur le mainnet Ethereum +2. Attendre 20 minutes pour la confirmation +3. Confirmer le transfert de délégation sur Arbitrum -To transfer your stake, you will need to complete the following steps: +\*\*\*\*Vous devez confirmer la transaction pour compléter le transfert de la délégation sur Arbitrum. Cette étape doit être réalisée dans les 7 jours, sinon la délégation pourrait être perdue. Dans la plupart des cas, cette étape se déroule automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. Si vous rencontrez des problèmes au cours de ce processus, il y aura des ressources pour vous aider : contactez le support à support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). -1. Initiate stake transfer on Ethereum mainnet +### Qu'arrive-t-il à mes récompenses si j'initie un transfert avec une allocation ouverte sur le mainnet Ethereum ? -2. 
Wait 20 minutes for confirmation +Si l'indexeur auquel vous déléguez est encore en activité sur L1, lorsque vous transférez sur Arbitrum, vous perdrez toutes les récompenses de délégation provenant d'allocations ouvertes sur le réseau principal Ethereum. Cela signifie que vous perdrez les récompenses de la dernière période de 28 jours au maximum. Si vous effectuez le transfert juste après que l'indexeur a fermé ses allocations, vous pouvez vous assurer que ce montant est le plus bas possible. Si vous disposez d'un canal de communication avec votre (vos) indexeur(s), envisagez de discuter avec lui (eux) pour trouver le meilleur moment pour effectuer votre transfert. -3. Confirm stake transfer on Arbitrum +### Que se passe-t-il si l'indexeur auquel je délègue actuellement n'est pas sur Arbitrum One ? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +L'outil de transfert L2 ne sera activé que si l'indexeur à qui vous avez délégué a transféré sa propre participation vers Arbitrum. -## Will all of my stake transfer? +### Les délégataires ont-ils la possibilité de déléguer à un autre indexeur ? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Si vous souhaitez déléguer à un autre indexeur, vous pouvez transférer vers le même indexeur sur Arbitrum, puis annuler la délégation et attendre la période de dégel. Après cela, vous pouvez sélectionner un autre indexeur actif auquel déléguer. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Que faire si je ne trouve pas l'indexeur auquel je délègue sur L2 ? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +L'outil de transfert L2 détectera automatiquement l'indexeur auquel vous avez précédemment délégué. -## How much time do I have to confirm my stake transfer to Arbitrum? +### Pourrai-je « répartir » ma délégation sur un ou plusieurs nouveaux indexeurs au lieu de l'indexeur précédent ? -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +L'outil de transfert L2 déplacera toujours votre délégation vers le même indexeur que celui auquel vous l'avez déléguée précédemment. Une fois que vous avez transféré vers L2, vous pouvez annuler la délégation, attendre la période de dégel et décider si vous souhaitez diviser votre délégation. -## What if I have open allocations? +### Suis-je soumis à la période de suspension ou puis-je effectuer un retrait immédiatement après avoir utilisé l'outil de transfert de délégation L2 ?
-If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +L’outil de transfert permet de passer immédiatement en L2. Si vous souhaitez annuler la délégation, vous devrez attendre la période de décongélation. Cependant, si un indexeur a transféré la totalité de sa participation vers L2, vous pouvez immédiatement retirer sur le réseau principal Ethereum. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Mes récompenses peuvent-elles être affectées négativement si je ne transfère pas ma délégation ? -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +Il est prévu que toutes les participations au réseau soient transférées vers Arbitrum One à l'avenir. -## How long will it take to transfer my stake? +### Combien de temps faut-il pour effectuer le transfert de ma délégation au L2? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +Une confirmation de 20 minutes est requise pour le transfert de la délégation. Veuillez noter qu'après la période de 20 minutes, vous devez revenir et terminer l'étape 3 du processus de transfert dans les 7 jours. Si vous ne le faites pas, votre délégation risque d'être perdue. Notez que dans la plupart des cas, l'outil de transfert effectuera cette étape automatiquement pour vous. En cas d'échec d'une tentative automatique, vous devrez la compléter manuellement. Si des problèmes surviennent au cours de ce processus, ne vous inquiétez pas, nous serons là pour vous aider : contactez-nous à support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). -## Do I have to index on Arbitrum before I transfer my stake? +### Puis-je transférer ma délégation si j'utilise un contrat d'acquisition GRT/un portefeuille token lock ? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +Oui! Le processus est un peu différent car les contrats d'acquisition ne peuvent pas transmettre l'ETH nécessaire au paiement du gaz L2, vous devez donc le déposer au préalable. Si votre contrat d'acquisition n'est pas totalement acquis, vous devrez également initialiser au préalable un contrat d'acquisition contrepartie sur L2 et ne pourrez transférer la délégation que sur ce contrat d'acquisition L2. L'interface utilisateur d'Explorer peut vous guider tout au long de ce processus lorsque vous êtes connecté à Explorer à l'aide du portefeuille de verrouillage d'acquisition. -## Can Delegators move their delegation before I move my indexing stake? +### Mon contrat d'acquisition Arbitrum permet-il de libérer du GRT comme sur le réseau principal ? -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +Non, le contrat d'acquisition créé sur Arbitrum ne permettra pas de libérer de GRT avant la fin du délai d'acquisition, c'est-à-dire jusqu'à ce que votre contrat soit entièrement acquis. 
L’objectif est d’éviter une double dépense, sinon il serait possible de débloquer les mêmes montants sur les deux niveaux. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Y a-t-il une taxe de délégation ? -## Delegation +Non. Les jetons reçus sur L2 sont délégués à l'indexeur spécifié au nom du délégateur spécifié sans facturer de taxe de délégation. -## How do I transfer my delegation? +### Mes récompenses non réalisées seront-elles transférées lorsque je transférerai ma délégation ? -To transfer your delegation, you will need to complete the following steps: +Oui! Les seules récompenses qui ne peuvent pas être transférées sont celles des allocations ouvertes, car elles n'existeront pas tant que l'indexeur n'aura pas clôturé les allocations (généralement tous les 28 jours). Si vous déléguez depuis un certain temps, cela ne représente probablement qu'une petite fraction des récompenses. -1. Initiate delegation transfer on Ethereum mainnet +Au niveau du smart contract, les récompenses non réalisées font déjà partie de votre solde de délégation, elles seront donc transférées lorsque vous transférerez votre délégation en L2. ​ -2. Wait 20 minutes for confirmation +### Le déplacement des délégations en L2 est-il obligatoire ? Y a-t-il un délai ? -3. Confirm delegation transfer on Arbitrum +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. 
Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +Finalement, si le Conseil continue d'approuver les augmentations des récompenses d'indexation en L2, toutes les récompenses seront distribuées sur L2 et il n'y aura aucune récompense d'indexation pour les indexeurs et les délégués en L1. ​ -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +### Je ne vois pas de bouton pour transférer ma délégation. Pourquoi donc? -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -## Do Delegators have the option to delegate to another Indexer? +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### Mon Indexeur est également sur Arbitrum, mais je ne vois pas de bouton pour transférer la délégation dans mon profil. Pourquoi donc? -## What if I can't find the Indexer I'm delegating to on L2? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Puis-je transférer ma délégation à L2 si j'ai entamé la procédure de dé-délégation et que je ne l'ai pas encore retirée ? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +Les jetons en cours de dé-délégation sont « verrouillés » et ne peuvent donc pas être transférés vers L2. -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +## Signal de curation -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Comment puis-je transférer ma curation ? -## Can my rewards be negatively impacted if I do not transfer my delegation? 
+Pour transférer votre curation, vous devrez compléter les étapes suivantes : -It is anticipated that all network participation will move to Arbitrum One in the future. +1. Initier le transfert de signal sur le mainnet Ethereum -## How long does it take to complete the transfer of my delegation to L2? +2. Spécifier une adresse de curateur L2\* -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +3. Attendre 20 minutes pour la confirmation -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +\*Si nécessaire, c'est-à-dire si vous utilisez une adresse de contrat. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Comment saurai-je si le subgraph que j'ai curé a été déplacé vers L2 ? -## Is there any delegation tax? +Lors de la visualisation de la page de détails du subgraph, une bannière vous informera que ce subgraph a été transféré. Vous pouvez suivre l'invite pour transférer votre curation. Vous pouvez également trouver ces informations sur la page de détails du subgraph de tout subgraph déplacé. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Que se passe-t-il si je ne souhaite pas déplacer ma curation en L2 ? -## Vesting Contract Transfer +Lorsqu'un subgraph est déprécié, vous avez la possibilité de retirer votre signal. De même, si un subgraph est passé à L2, vous pouvez choisir de retirer votre signal sur le mainnet Ethereum ou d'envoyer le signal vers L2. -## How do I transfer my vesting contract? +### Comment puis-je savoir si ma curation a été transférée avec succès? -To transfer your vesting, you will need to complete the following steps: +Les détails du signal seront accessibles via Explorer environ 20 minutes après le lancement de l'outil de transfert L2. -1. Initiate the vesting transfer on Ethereum mainnet +### Puis-je transférer ma curation sur plus d'un subgraph à la fois? -2. Wait 20 minutes for confirmation +Il n'existe actuellement aucune option de transfert groupé. -3. Confirm vesting transfer on Arbitrum +## Participation de l'indexeur -## How do I transfer my vesting contract if I am only partially vested? +### Comment transférer ma participation vers Arbitrum ? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +> Avis de non-responsabilité : si vous êtes actuellement en train de retirer (unstake) une partie de vos GRT de votre indexeur, vous ne pourrez pas utiliser les outils de transfert L2. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock.
This will also set their L2 beneficiary address. +  -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Pour transférer votre participation, vous devrez suivre les étapes suivantes : -4. Withdraw any remaining ETH from the transfer tool contract +1. Initier un transfert de participation sur le mainnet Ethereum -## How do I transfer my vesting contract if I am fully vested? +2. Attendre 20 minutes pour confirmer -For those that are fully vested, the process is similar: +3. Confirmer le transfert de participation sur Arbitrum -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +\*Notez que vous devez confirmer le transfert dans les 7 jours, sinon votre participation pourrait être perdue. Dans la plupart des cas, cette étape s'exécutera automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. Si vous rencontrez des problèmes au cours de ce processus, des ressources seront disponibles pour vous aider : contactez l'assistance à support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). -2. Set your L2 address with a call to the transfer tool contract +### Est-ce que la totalité de ma participation sera transférée ? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +Vous pouvez choisir la part de votre participation à transférer. Si vous choisissez de transférer la totalité de votre participation en une seule fois, vous devrez d'abord clôturer toutes les allocations ouvertes. -4. Withdraw any remaining ETH from the transfer tool contract +Si vous prévoyez de transférer une partie de votre participation sur plusieurs transactions, vous devez toujours indiquer la même adresse de bénéficiaire. -## Can I transfer my vesting contract to Arbitrum? +Note : vous devez respecter les exigences minimales de participation sur L2 la première fois que vous utilisez l'outil de transfert. Les indexeurs doivent envoyer au minimum 100 000 GRT (lors du premier appel de cette fonction). Si vous laissez une partie de votre participation sur L1, elle doit également être supérieure au minimum de 100 000 GRT et être suffisante (avec vos délégations) pour couvrir vos allocations ouvertes. -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +### Combien de temps ai-je pour confirmer le transfert de ma participation vers Arbitrum ? -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +\*\*\* Vous devez confirmer votre transaction pour finaliser le transfert de participation sur Arbitrum. Cette étape doit être complétée dans les 7 jours, sinon la participation pourrait être perdue. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +### Que se passe-t-il si j'ai des allocations ouvertes ? -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested.
If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Si vous n'envoyez pas la totalité de votre participation, l'outil de transfert L2 validera qu'au moins 100 000 GRT restent sur le mainnet Ethereum et que votre participation et votre délégation restantes sont suffisantes pour couvrir toute allocation ouverte. Vous devrez peut-être fermer des allocations ouvertes si votre solde de GRT ne couvre pas le minimum requis plus vos allocations ouvertes. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### Avec les outils de transfert, faut-il attendre 28 jours pour retirer sa participation (unstake) du mainnet Ethereum avant de transférer ? -I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +Non, vous pouvez transférer votre participation vers L2 immédiatement, vous n'avez pas besoin de vous désengager et d'attendre avant d'utiliser l'outil de transfert. L'attente de 28 jours ne s'applique que si vous souhaitez retirer la participation vers votre portefeuille, sur le réseau principal Ethereum ou sur L2. -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Combien de temps faudra-t-il pour transférer ma participation ? -I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +Il faudra environ 20 minutes à l'outil de transfert L2 pour achever le transfert de votre participation. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Dois-je indexer sur Arbitrum avant de transférer ma participation ? -Can I specify a different beneficiary for my vesting contract on L2? +Vous pouvez transférer votre participation avant de mettre en place l'indexation, mais vous ne pourrez réclamer des récompenses sur L2 que lorsque vous aurez alloué à des subgraphs sur L2, que vous les aurez indexés et que vous aurez présenté des preuves d'indexation (POI). -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### Les délégataires peuvent-ils déplacer leur délégation avant que je ne déplace ma participation d'indexation ? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum.
+Non, pour que les délégataires puissent transférer leur GRT délégué à Arbitrum, l'indexeur auquel ils délèguent doit être actif sur L2. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### Puis-je transférer ma participation si j'utilise un contrat de vesting GRT / un token lock wallet ? -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Oui! Le processus est un peu différent, car les contrats d'acquisition ne peuvent pas transmettre l'ETH nécessaire pour payer le gaz L2, vous devez donc le déposer au préalable. Si votre contrat d'acquisition n'est pas entièrement acquis, vous devrez également initialiser au préalable un contrat d'acquisition contrepartie sur L2 et ne pourrez transférer la participation que sur ce contrat d'acquisition L2. L'interface utilisateur d'Explorer peut vous guider tout au long de ce processus lorsque vous êtes connecté à Explorer à l'aide du portefeuille de verrouillage d'acquisition. -This allows you to transfer your stake or delegation to any L2 address. +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +Non. Si une fraction de votre participation est en train de dégeler, vous devez attendre les 28 jours et la retirer avant de pouvoir transférer votre participation. Les jetons en cours de retrait sont « verrouillés » et ne peuvent pas être transférés vers L2. -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +## Transfert de contrat de vesting -2. Revoke protocol access to the vesting contract (needed for the next step) +### Comment transférer mon contrat de vesting ? -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Pour transférer votre vesting, vous devez suivre les étapes suivantes : -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +1. Initier le transfert de vesting sur le mainnet Ethereum -5. Wait 20 minutes for confirmation +2. Attendre 20 minutes pour confirmer -6. Confirm the balance transfer on L2 +3. Confirmer le transfert de vesting sur Arbitrum -\*If necessary - i.e. you are using a contract address. +### Comment puis-je transférer mon contrat de vesting s'il n'est que partiellement acquis ? -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum.
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + -## Can I move my vesting contract back to L1? +1. Déposez quelques ETH dans le contrat de l'outil de transfert (l'interface utilisateur peut vous aider à estimer un montant raisonnable) -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +2. Envoyez une partie du GRT verrouillé au L2 au moyen du contrat de l'outil de transfert pour initialiser le verrou d'acquisition L2, ce qui établira également l'adresse du bénéficiaire L2. -## Why do I need to move my vesting contract to begin with? +3. Envoyez votre participation/délégation à L2 via les fonctions de l'outil de transfert "verrouillées" du contrat L1Staking. -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +4. Retirez tout ETH restant du contrat de l'outil de transfert -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Comment puis-je transférer mon contrat d'acquisition s'il est entièrement acquis ? -This is not a possibility. You can move funds back to L1 and withdraw them there. + -## What if I don't want to move my vesting contract to L2? +Pour ceux qui ont acquis la totalité de leurs droits, le processus est similaire : -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +1. Déposez quelques ETH dans le contrat de l'outil de transfert (l'interface utilisateur peut vous aider à estimer un montant raisonnable) + +2. Paramétrez votre adresse L2 en appelant le contrat outil de transfert + +3. Envoyez votre participation/délégation à L2 via les fonctions de l'outil de transfert « verrouillées » dans le contrat de Staking L1. + +4. Retirez tout ETH restant du contrat de l'outil de transfert + +### Puis-je transférer mon contrat de vesting à Arbitrum ? + +Vous pouvez transférer le solde GRT de votre contrat de vesting vers un contrat de vesting en L2. C'est une condition préalable au transfert de participation ou de délégation de votre contrat de vesting vers L2. Le contrat de vesting doit contenir un montant non nul de GRT (vous pouvez y transférer un petit montant comme 1 GRT si nécessaire). + +Lorsque vous transférez des GRT de votre contrat de vesting L1 vers L2, vous pouvez choisir le montant à envoyer et vous pouvez le faire autant de fois que vous le souhaitez. Le contrat de vesting L2 sera initialisé la première fois que vous transférez des GRT. + +Les transferts s'effectuent à l'aide d'un outil de transfert qui sera visible sur votre profil Explorer lorsque vous vous connecterez au compte du contrat de vesting.
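Pour illustrer la condition de « montant non nul de GRT », voici une esquisse TypeScript hypothétique (avec ethers v5) qui vérifie le solde GRT libre d'un contrat de vesting avant de lancer l'initialisation du vesting L2. L'adresse du token GRT indiquée et les variables d'environnement sont des hypothèses à vérifier par vous-même ; ce solde n'inclut pas les GRT déjà stakés ou délégués.

```typescript
import { Contract, providers, utils } from "ethers";

// Hypothèses : URL RPC et adresse du contrat de vesting fournies via l'environnement.
const L1_RPC_URL = process.env.L1_RPC_URL!;
const VESTING_CONTRACT = process.env.VESTING_CONTRACT!;
// Adresse du token GRT sur le mainnet Ethereum (à vérifier auprès des sources officielles avant usage).
const GRT_L1 = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7";

const erc20Abi = ["function balanceOf(address) view returns (uint256)"];

async function main() {
  const provider = new providers.JsonRpcProvider(L1_RPC_URL);
  const grt = new Contract(GRT_L1, erc20Abi, provider);

  // Solde « libre » du contrat de vesting : les GRT stakés ou délégués ne sont pas comptés ici.
  const balance = await grt.balanceOf(VESTING_CONTRACT);
  console.log(`Solde GRT du contrat de vesting : ${utils.formatEther(balance)} GRT`);

  if (balance.isZero()) {
    console.log("Solde nul : envoyez d'abord un petit montant (ex. 1 GRT) au contrat avant d'initialiser le vesting L2.");
  }
}

main().catch(console.error);
```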
+ +Veuillez noter que vous ne pourrez pas libérer/retirer de GRT du contrat de vesting L2 avant la fin de votre période de vesting, c'est-à-dire lorsque votre contrat sera entièrement acquis. Si vous devez libérer des GRT avant cette date, vous pouvez les transférer vers le contrat de vesting L1 à l'aide d'un autre outil de transfert disponible à cette fin. + +Si vous n'avez transféré aucun solde de contrat de vesting à L2 et que votre contrat de vesting est entièrement acquis, vous ne devez pas transférer votre contrat de vesting à L2. Au lieu de cela, vous pouvez utiliser les outils de transfert pour définir une adresse de portefeuille L2 et transférer directement votre participation ou délégation à ce portefeuille régulier sur L2. + +### J'utilise mon contrat de vesting pour staker sur le mainnet. Puis-je transférer ma participation vers Arbitrum ? + +Oui, mais si votre contrat est encore en cours d'acquisition, vous ne pouvez transférer la participation que pour qu'elle soit détenue par votre contrat d'acquisition L2. Vous devez d'abord initialiser ce contrat L2 en transférant un solde de GRT à l'aide de l'outil de transfert de contrat d'acquisition dans Explorer. Si votre contrat est entièrement acquis, vous pouvez transférer votre participation à n'importe quelle adresse en L2, mais vous devez la définir au préalable et déposer de l'ETH pour que l'outil de transfert L2 puisse payer le gaz L2. + +### J'utilise mon contrat de vesting pour déléguer sur le mainnet. Puis-je transférer mes délégations vers Arbitrum ? + +Oui, mais si votre contrat est encore en cours d'acquisition, vous ne pouvez transférer la délégation que pour qu'elle soit détenue par votre contrat de vesting L2. Vous devez d'abord initialiser ce contrat L2 en transférant un solde de GRT à l'aide de l'outil de transfert de contrat de vesting dans Explorer. Si votre contrat est entièrement acquis, vous pouvez transférer votre délégation à n'importe quelle adresse en L2, mais vous devez la définir au préalable et déposer de l'ETH pour que l'outil de transfert L2 puisse payer le gaz L2. + +### Puis-je spécifier un bénéficiaire différent pour mon contrat de vesting sur L2? + +Oui, la première fois que vous transférez un solde et mettez en place votre contrat de vesting L2, vous pouvez désigner un bénéficiaire L2. Assurez-vous que ce bénéficiaire est un portefeuille capable d'effectuer des transactions sur Arbitrum One, c'est-à-dire qu'il doit s'agir d'un EOA ou d'un multisig déployé sur Arbitrum One. + +Si votre contrat est entièrement acquis, vous n'établirez pas de contrat de vesting sur L2 ; au lieu de cela, vous établirez une adresse de portefeuille L2 et ce sera le portefeuille destinataire de votre participation ou délégation sur Arbitrum. + +### Mon contrat est entièrement acquis. Puis-je transférer ma participation ou ma délégation à une autre adresse qui n'est pas un contrat de vesting L2? + +Oui. Si vous n'avez transféré aucun solde de contrat de vesting à L2 et que votre contrat de vesting est entièrement acquis, vous ne devez pas transférer votre contrat de vesting à L2. Au lieu de cela, vous pouvez utiliser les outils de transfert pour définir une adresse de portefeuille L2 et transférer directement votre participation ou délégation à ce portefeuille régulier sur L2. + +Ceci vous permet de transférer votre participation ou délégation à n'importe quelle adresse L2. + +### Mon contrat de vesting est encore en cours d'acquisition. Comment puis-je transférer le solde de mon contrat de vesting vers L2 ?
+ +Ces étapes ne s'appliquent que si votre contrat est encore en cours de vesting ou si vous avez déjà utilisé cette procédure lorsque votre contrat était encore en cours de vesting. + +Pour transférer votre contrat de vesting à L2, vous enverrez tout solde de GRT à L2 à l'aide des outils de transfert, ce qui initialisera votre contrat de vesting L2 : + +1. Déposez de l'ETH dans le contrat de l'outil de transfert (cela sera utilisé pour payer le gaz L2) + +2. Révoquez l'accès du protocole au contrat de vesting (nécessaire pour l'étape suivante) + +3. Donnez au protocole l'accès au contrat de vesting (pour permettre à votre contrat d'interagir avec l'outil de transfert) + +4. Spécifiez une adresse de bénéficiaire L2\* et lancez le transfert de solde sur le mainnet Ethereum + +5. Attendez 20 minutes pour la confirmation + +6. Confirmez le transfert de solde sur L2 + +\*Si nécessaire, c'est-à-dire si vous utilisez une adresse de contrat. + +\*\*\*\*Vous devez confirmer votre transaction pour terminer le transfert de solde sur Arbitrum. Cette étape doit être complétée dans un délai de 7 jours, sinon le solde pourrait être perdu. Dans la plupart des cas, cette étape s'exécutera automatiquement, mais une confirmation manuelle peut être nécessaire en cas de hausse du prix du gaz sur Arbitrum. Si vous rencontrez des problèmes au cours de ce processus, des ressources seront disponibles pour vous aider : contactez l'assistance à support@thegraph.com ou sur [Discord](https://discord.gg/graphprotocol). + +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +Si vous avez staké ou délégué tous vos GRT à partir du contrat de vesting, vous pouvez envoyer manuellement un petit montant (1 GRT) à l'adresse du contrat de vesting depuis n'importe quel autre endroit (par exemple : depuis un autre portefeuille ou un échange). + +### J'utilise un contrat de vesting pour staker ou déléguer, mais je ne vois pas de bouton pour transférer ma participation ou ma délégation vers L2, que dois-je faire ? + +If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +Lorsque vous êtes connecté au contrat de vesting sur Explorer, vous devriez voir un bouton pour initialiser votre contrat de vesting L2. Suivez d'abord cette procédure et vous verrez ensuite les boutons pour transférer votre participation ou votre délégation dans votre profil. + +### Si j'initialise mon contrat de vesting L2, est-ce que cela transférera également automatiquement ma délégation en L2 ? + +Non, l'initialisation de votre contrat de vesting L2 est une condition préalable au transfert de la participation ou de la délégation du contrat de vesting, mais vous devez toujours les transférer séparément. + +Vous verrez apparaître sur votre profil une bannière vous invitant à transférer votre participation ou votre délégation après avoir initialisé votre contrat de vesting L2. + +### Puis-je ramener mon contrat de vesting vers L1 ?
+ +Il n'est pas nécessaire de le faire car votre contrat de vesting est toujours en L1. Lorsque vous utilisez les outils de transfert, vous créez simplement un nouveau contrat en L2 qui est lié à votre contrat de vesting L1, et vous pouvez envoyer des GRT dans les deux sens entre les deux. + +### Pourquoi dois-je commencer par déplacer mon contrat de vesting ? + +Vous devez mettre en place un contrat de vesting L2 afin que ce compte puisse être propriétaire de votre participation ou délégation sur L2. Sinon, il n'y aurait aucun moyen pour vous de transférer la participation / délégation à L2 sans "échapper" au contrat de vesting. + +### Que se passe-t-il si j'essaie d'encaisser mon contrat alors qu'il n'est que partiellement acquis ? Est-ce possible ? + +Ce n'est pas possible. Vous pouvez transférer des fonds vers L1 et les y retirer. + +### Que se passe-t-il si je ne souhaite pas transférer mon contrat de vesting vers L2 ? + +Vous pouvez continuer à staker/déléguer sur L1. Au fil du temps, vous souhaiterez peut-être envisager de passer à L2 pour y activer les récompenses à mesure que le protocole évolue sur Arbitrum. Notez que ces outils de transfert sont destinés aux contrats de vesting autorisés à staker et à déléguer dans le protocole. Si votre contrat ne permet pas de staker ou de déléguer, ou est révocable, aucun outil de transfert n'est disponible. Vous pourrez toujours retirer votre GRT de L1 lorsqu'il sera disponible. diff --git a/website/pages/fr/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/fr/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..39532e0f5f59 100644 --- a/website/pages/fr/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/fr/arbitrum/l2-transfer-tools-guide.mdx @@ -1,76 +1,76 @@ --- -title: L2 Transfer Tools Guide +title: Guide des outils de transfert L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +The Graph a facilité le passage à L2 sur Arbitrum One. Pour chaque participant au protocole, il existe un ensemble d'outils de transfert L2 permettant de rendre le transfert vers L2 transparent pour tous les participants du réseau. Ces outils vous demanderont de suivre un ensemble d'étapes spécifiques en fonction de ce que vous transférez. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Certaines questions fréquentes sur ces outils trouvent leur réponse dans la [FAQ sur les outils de transfert L2](/arbitrum/l2-transfer-tools-faq). Les FAQ contiennent des explications détaillées sur la façon d'utiliser les outils, leur fonctionnement et les éléments à garder à l'esprit lors de leur utilisation. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Comment transférer votre subgraph vers Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) +  ## Benefits of transferring your subgraphs -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year.
Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +La communauté et les développeurs principaux de The Graph [se sont préparés](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) à passer à Arbitrum au cours de l'année écoulée. Arbitrum, une blockchain de couche 2 ou "L2", hérite de la sécurité d'Ethereum mais offre des frais de gaz considérablement réduits. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Lorsque vous publiez ou mettez à niveau votre subgraph sur The Graph Network, vous interagissez avec des contrats intelligents sur le protocole, ce qui nécessite de payer le gaz en ETH. En déplaçant vos subgraphs vers Arbitrum, toute mise à jour future de votre subgraph nécessitera des frais de gaz bien inférieurs. Les frais inférieurs et le fait que les courbes de liaison de curation sur L2 soient plates facilitent également la curation pour les autres curateurs sur votre subgraph, augmentant ainsi les récompenses des indexeurs sur votre subgraph. Cet environnement moins coûteux rend également moins cher pour les indexeurs l'indexation et la diffusion de votre subgraph. Les récompenses d'indexation augmenteront sur Arbitrum et diminueront sur le réseau principal Ethereum au cours des prochains mois, de sorte que de plus en plus d'indexeurs transféreront leur participation et établiront leurs opérations sur L2. ## Understanding what happens with signal, your L1 subgraph and query URLs Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Lorsque vous choisissez de transférer le subgraph, cela convertira tous les signaux de curation du subgraph en GRT. Cela équivaut à "déprécier" le subgraph sur le mainnet. Les GRT correspondant à votre curation seront envoyés à L2 avec le subgraph, où ils seront utilisés pour monnayer du signal en votre nom. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation.
+Les autres curateurs peuvent choisir de retirer leur fraction de GRT ou de la transférer également à L2 pour monnayer du signal sur le même subgraph. Si un propriétaire de subgraph ne transfère pas son subgraph à L2 et le déprécie manuellement via un appel de contrat, les curateurs en seront informés et pourront retirer leur curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Dès que le subgraph est transféré, puisque toute la curation est convertie en GRT, les indexeurs ne recevront plus de récompenses pour l'indexation du subgraph. Cependant, certains indexeurs 1) continueront à servir les subgraphs transférés pendant 24 heures et 2) commenceront immédiatement à indexer le subgraph sur L2. Comme ces indexeurs ont déjà indexé le subgraph, il ne devrait pas être nécessaire d'attendre la synchronisation du subgraph, et il sera possible d'interroger le subgraph L2 presque immédiatement. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Les requêtes vers le subgraph L2 devront être effectuées vers une URL différente (sur `arbitrum-gateway.thegraph.com`), mais l'URL L1 continuera à fonctionner pendant au moins 48 heures. Après cela, la passerelle L1 transmettra les requêtes à la passerelle L2 (pendant un certain temps), mais cela augmentera la latence. Il est donc recommandé de basculer toutes vos requêtes vers la nouvelle URL dès que possible. -## Choosing your L2 wallet +## Choisir son portefeuille L2 -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +Lorsque vous avez publié votre subgraph sur le mainnet, vous avez utilisé un portefeuille connecté pour créer le subgraph, et ce portefeuille possède le NFT qui représente ce subgraph et vous permet de publier des mises à jour. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +Lors du transfert du subgraph vers Arbitrum, vous pouvez choisir un autre portefeuille qui possédera ce subgraph NFT sur L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Si vous utilisez un portefeuille "normal" comme MetaMask (un Externally Owned Account ou EOA, c'est-à-dire un portefeuille qui n'est pas un smart contract), cette étape est facultative et il est recommandé de conserver la même adresse de propriétaire que sur L1. If you're using a smart contract wallet, like a multisig (e.g.
a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Il est très important d'utiliser une adresse de portefeuille que vous contrôlez, et qui peut effectuer des transactions sur Arbitrum. Dans le cas contraire, le subgraph sera perdu et ne pourra pas être récupéré.** -## Preparing for the transfer: bridging some ETH +## Préparer le transfert : faire le pont avec quelques ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Le transfert du subgraph implique l'envoi d'une transaction à travers le pont, puis l'exécution d'une autre transaction sur Arbitrum. La première transaction utilise de l'ETH sur le mainnet, et inclut de l'ETH pour payer le gaz lorsque le message est reçu sur L2. Cependant, si ce gaz est insuffisant, vous devrez réessayer la transaction et payer le gaz directement sur L2 (c'est "l'étape 3 : Confirmer le transfert" ci-dessous). Cette étape **doit être exécutée dans les 7 jours suivant le début du transfert**. De plus, la deuxième transaction ("Étape 4 : Terminer le transfert sur L2") se fera directement sur Arbitrum. Pour ces raisons, vous aurez besoin de quelques ETH sur un portefeuille Arbitrum. Si vous utilisez un compte multisig ou smart contract, l'ETH devra être dans le portefeuille régulier (EOA) que vous utilisez pour exécuter les transactions, et non sur le portefeuille multisig lui-même. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved. +Vous pouvez acheter de l'ETH sur certains échanges et le retirer directement sur Arbitrum, ou vous pouvez utiliser le pont Arbitrum pour envoyer de l'ETH d'un portefeuille du mainnet vers L2 : [bridge.arbitrum.io](http://bridge.arbitrum.io). Étant donné que les frais de gaz sur Arbitrum sont moins élevés, vous ne devriez avoir besoin que d'une petite quantité. Il est recommandé de commencer par un seuil bas (par exemple 0,01 ETH) pour que votre transaction soit approuvée.
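One practical way to sanity-check this preparation is to verify, before opening the Transfer Tool, that the wallet you will use for steps 3 and 4 already holds gas on Arbitrum. Below is a minimal sketch using ethers v6 against a public Arbitrum RPC endpoint; the wallet address, the RPC URL and the 0.01 ETH comfort margin are illustrative assumptions, not values required by the protocol.

```ts
import { JsonRpcProvider, formatEther, parseEther } from "ethers";

// Public Arbitrum One RPC endpoint (assumption: any Arbitrum RPC provider works here).
const provider = new JsonRpcProvider("https://arb1.arbitrum.io/rpc");

// The EOA that will retry the transfer (step 3) and publish the subgraph (step 4).
const wallet = "0xYourL2WalletAddress"; // hypothetical placeholder

async function checkArbitrumGas(): Promise<void> {
  const balance = await provider.getBalance(wallet);
  console.log(`Arbitrum ETH balance: ${formatEther(balance)} ETH`);

  // 0.01 ETH is only an illustrative comfort margin, not an official minimum.
  if (balance < parseEther("0.01")) {
    console.warn("Consider bridging a bit more ETH before starting the transfer.");
  }
}

checkArbitrumGas().catch(console.error);
```

If the balance looks too low, bridge a little more ETH first; a step 3 that is left pending past the 7-day window cannot be retried.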
-## Finding the subgraph Transfer Tool +## Trouver l'outil de transfert de subgraph -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Vous pouvez trouver l'outil de transfert L2 lorsque vous consultez la page de votre subgraph dans le Subgraph Studio : ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +Il est également disponible sur Explorer si vous êtes connecté au portefeuille qui possède un subgraph et sur la page de ce subgraph sur Explorer : ![Transferring to L2](/img/transferToL2.png) Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. -## Step 1: Starting the transfer +## Étape 1 : Démarrer le transfert -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Avant de commencer le transfert, vous devez décider quelle adresse sera propriétaire du subgraph sur L2 (voir "Choisir votre portefeuille L2" ci-dessus), et il est fortement recommandé d'avoir quelques ETH pour le gaz déjà bridgés sur Arbitrum (voir "Préparer le transfert : faire le pont avec quelques ETH" ci-dessus). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Veuillez également noter que le transfert du subgraph nécessite d'avoir un montant de signal non nul sur le subgraph avec le même compte qui possède le subgraph ; si vous n'avez pas signalé sur le subgraph, vous devrez ajouter un peu de curation (ajouter un petit montant comme 1 GRT suffirait). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +Après avoir ouvert l'outil de transfert, vous pourrez saisir l'adresse du portefeuille L2 dans le champ "Adresse du portefeuille destinataire" - **assurez-vous que vous avez saisi la bonne adresse ici**. En cliquant sur Transférer le subgraph, vous serez invité à exécuter la transaction sur votre portefeuille (notez qu'une certaine valeur ETH est incluse pour payer le gaz L2) ; cela lancera le transfert et dépréciera votre subgraph L1 (voir "Comprendre ce qui se passe avec le signal, votre subgraph L1 et les URL de requête" ci-dessus pour plus de détails sur ce qui se passe en coulisses). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum.
+Si vous exécutez cette étape, **assurez-vous de continuer jusqu'à terminer l'étape 3 en moins de 7 jours, sinon le subgraph et votre signal GRT seront perdus.** Cela est dû au fonctionnement de la messagerie L1-L2 sur Arbitrum : les messages qui sont envoyés via le pont sont des « tickets réessayables » qui doivent être exécutés dans les 7 jours, et l'exécution initiale peut nécessiter une nouvelle tentative s'il y a des pics dans le prix du gaz sur Arbitrum. -![Start the trnasfer to L2](/img/startTransferL2.png) +![Démarrer le transfert vers L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Étape 2 : Attendre que le subgraph atteigne L2 After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). @@ -84,82 +84,82 @@ In most cases, this step will auto-execute as the L2 gas included in step 1 shou If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![Confirmer le transfert vers L2](/img/confirmTransferToL2.png) ## Step 4: Finishing the transfer on L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +À ce stade, votre subgraph et votre GRT ont été reçus sur Arbitrum, mais le subgraph n'est pas encore publié. Vous devrez vous connecter à l'aide du portefeuille L2 que vous avez choisi comme portefeuille de réception, basculer votre réseau de portefeuille sur Arbitrum et cliquer sur « Publier le subgraph ». -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publier le subgraph](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Attendez que le subgraph soit publié](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Cela permettra de publier le subgraph afin que les indexeurs opérant sur Arbitrum puissent commencer à le servir. Cela émettra également du signal de curation en utilisant les GRT qui ont été transférés de L1. ## Step 5: Updating the query URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Votre subgraph a été transféré avec succès vers Arbitrum ! Pour interroger le subgraph, la nouvelle URL sera : -`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` +`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2.
+Notez que l'ID du subgraph sur Arbitrum sera différent de celui que vous aviez sur le mainnet, mais vous pouvez toujours le trouver sur Explorer ou Studio. Comme mentionné ci-dessus (voir "Comprendre ce qui se passe avec le signal, votre subgraph L1 et les URL de requête"), l'ancienne URL L1 sera prise en charge pendant une courte période, mais vous devez basculer vos requêtes vers la nouvelle adresse dès que le subgraph aura été synchronisé sur L2. ## How to transfer your curation to Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Comprendre ce qui arrive à la curation lors des transferts de subgraphs vers L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +Lorsque le propriétaire d'un subgraph transfère un subgraph vers Arbitrum, tout le signal du subgraph est converti en GRT en même temps. Cela s'applique au signal "auto-migré", c'est-à-dire au signal qui n'est pas spécifique à une version de subgraph ou à un déploiement, mais qui suit la dernière version d'un subgraph. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +Cette conversion du signal en GRT est identique à ce qui se produirait si le propriétaire du subgraph dépréciait le subgraph en L1. Lorsque le subgraph est déprécié ou transféré, tout le signal de curation est "brûlé" simultanément (en utilisant la courbe de liaison de curation) et les GRT résultants sont détenus par le contrat intelligent GNS (c'est-à-dire le contrat qui gère les mises à niveau des subgraphs et le signal auto-migré). Chaque curateur de ce subgraph a donc un droit sur ces GRT proportionnel à la quantité de parts qu'il avait pour le subgraph. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +Une fraction de ces GRT correspondant au propriétaire du subgraph est envoyée à L2 avec le subgraph. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +À ce stade, le GRT issu de la curation n'accumulera plus de frais de requête, les curateurs peuvent donc choisir de retirer leur GRT ou de le transférer vers le même subgraph sur L2, où il pourra être utilisé pour émettre un nouveau signal de curation. Il n'y a pas d'urgence à le faire car le GRT peut être conservé indéfiniment et chacun reçoit un montant proportionnel à ses parts, quel que soit le moment où il le fait. -## Choosing your L2 wallet +## Choisir son portefeuille L2 -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2.
+Si vous décidez de transférer votre GRT issu de la curation vers L2, vous pouvez choisir un autre portefeuille qui détiendra le signal de curation sur L2. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +Si vous utilisez un portefeuille « normal » comme Metamask (un compte externe ou EOA, c'est-à-dire un portefeuille qui n'est pas un contrat intelligent), alors ceci est facultatif et il est recommandé de conserver la même adresse de curateur qu'en L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +Si vous utilisez un portefeuille de contrat intelligent, comme un multisig (par exemple un Safe), alors choisir une autre adresse de portefeuille L2 est obligatoire, car il est fort probable que ce compte n'existe que sur le mainnet et que vous ne pourrez pas effectuer de transactions sur Arbitrum en utilisant ce portefeuille. Si vous souhaitez continuer à utiliser un portefeuille de contrat intelligent ou multisig, créez un nouveau portefeuille sur Arbitrum et utilisez son adresse comme adresse du portefeuille de réception L2. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**Il est très important d'utiliser une adresse de portefeuille que vous contrôlez et qui peut effectuer des transactions sur Arbitrum, sinon la curation sera perdue et ne pourra pas être récupérée.** -## Sending curation to L2: Step 1 +## Envoi de la curation à L2 : Étape 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +Avant de commencer le transfert, vous devez décider quelle adresse détiendra la curation sur L2 (voir "Choisir votre portefeuille L2" ci-dessus), et il est recommandé d'avoir des ETH pour le gaz déjà pontés sur Arbitrum au cas où vous auriez besoin de réessayer l'exécution du message sur L2. Vous pouvez acheter de l'ETH sur certaines bourses et le retirer directement sur Arbitrum, ou vous pouvez utiliser le pont Arbitrum pour envoyer de l'ETH depuis un portefeuille du mainnet vers L2 : [bridge.arbitrum.io](http://bridge.arbitrum.io) - étant donné que les frais de gaz sur Arbitrum sont si bas, vous ne devriez avoir besoin que d'un petit montant, par ex. 0,01 ETH sera probablement plus que suffisant. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph.
+Si un subgraph sur lequel vous avez signalé (curation) a été transféré vers L2, vous verrez un message sur Explorer vous indiquant que votre curation porte sur un subgraph transféré. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +En consultant la page du subgraph, vous pouvez choisir de retirer ou de transférer la curation. En cliquant sur "Transférer le signal vers Arbitrum", vous ouvrirez l'outil de transfert. -![Transfer signal](/img/transferSignalL2TransferTools.png) +![Signal de transfert](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. +Après avoir ouvert l'outil de transfert, vous serez peut-être invité à ajouter de l'ETH à votre portefeuille si vous n'en avez pas. Vous pourrez ensuite saisir l'adresse du portefeuille L2 dans le champ "Adresse du portefeuille de réception" - **assurez-vous d'avoir saisi la bonne adresse ici**. En cliquant sur Transfer Signal, vous serez invité à exécuter la transaction sur votre portefeuille (notez qu'une certaine valeur ETH est incluse pour payer le gaz L2) ; cela lancera le transfert. If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. -## Sending curation to L2: step 2 +## Envoi de la curation vers L2 : étape 2 Starting the transfer: ![Send signal to L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Une fois que vous avez commencé le transfert, le message qui envoie votre curation L1 à L2 doit se propager à travers le pont Arbitrum. Cela prend environ 20 minutes (le pont attend que le bloc du mainnet contenant la transaction soit "à l'abri" d'une éventuelle réorganisation de la chaîne). Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## Envoi de la curation vers L2 : étape 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +Dans la plupart des cas, cette étape s'exécutera automatiquement car le gaz L2 inclus dans l'étape 1 devrait être suffisant pour exécuter la transaction qui reçoit la curation sur les contrats Arbitrum.
Dans certains cas, cependant, il est possible qu'une flambée des prix du gaz sur Arbitrum fasse échouer cette exécution automatique. Dans ce cas, le « ticket » qui envoie votre curation vers L2 sera en attente et nécessitera une nouvelle tentative sous 7 jours. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Si c'est le cas, vous devrez vous connecter en utilisant un portefeuille L2 qui a quelques ETH sur Arbitrum, changer le réseau de votre portefeuille pour Arbitrum, et cliquer sur "Confirmer le transfert" pour réessayer la transaction. ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## Retrait de la curation sur L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +Si vous préférez ne pas envoyer votre GRT vers L2, ou si vous préférez transférer le GRT manuellement via le pont, vous pouvez retirer votre GRT issu de la curation sur L1. Sur la bannière de la page du subgraph, choisissez « Retirer le signal » et confirmez la transaction ; le GRT sera envoyé à votre adresse de curateur. diff --git a/website/pages/fr/billing.mdx b/website/pages/fr/billing.mdx index 3c21e5de1cdc..5c5c04396519 100644 --- a/website/pages/fr/billing.mdx +++ b/website/pages/fr/billing.mdx @@ -29,7 +29,7 @@ It may take up to 10 minutes to complete the transaction. Once the transaction i ## Billing on Arbitrum -While The Graph protocol operates on Ethereum Mainnet, [the billing contract](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) lives on the [Arbitrum](https://arbitrum.io/) network to reduce transaction times and cost. You'll be required to pay the query fees generated from your API keys. Using the billing contract, you'll be able to: +Alors que le protocole The Graph fonctionne sur Ethereum Mainnet, le [contrat de facturation](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) réside sur le réseau [Arbitrum](https://arbitrum.io/) pour réduire les délais et les coûts de transaction. Vous devrez payer les frais de requête générés à partir de vos clés API. Grâce au contrat de facturation, vous pourrez : - Add and withdraw GRT from your account balance. - Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. @@ -37,9 +37,13 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + +1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect".
@@ -57,7 +61,7 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht > This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on the Arbitrum network. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". @@ -71,7 +75,9 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). + + +1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -83,7 +89,7 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht > This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on Ethereum mainnet. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +1. Accédez à la [page de facturation de Subgraph Studio](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". @@ -103,7 +109,7 @@ This section will show you how to get GRT to pay for query fees. This will be a step by step guide for purchasing GRT on Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +1. Accédez à [Coinbase](https://www.coinbase.com/) et créez un compte. 2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. 3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. 4. Select the currency you want to purchase. Select GRT. @@ -135,7 +141,7 @@ This will be a step by step guide for purchasing GRT on Binance. - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your crypto wallet, add your crypto wallet's address to the withdrawel whitelist. - Click on the "wallet" button, click withdraw, and select GRT. - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. - - Click "Continue" and confirm your transaction. + - Cliquez sur "Continuer" et confirmez votre transaction. You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). 
+## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Accédez à [Coinbase](https://www.coinbase.com/) et créez un compte. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Cliquez sur "Continuer" et confirmez votre transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Cliquez sur "Continuer" et confirmez votre transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge -The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Le contrat de facturation est uniquement conçu pour relier le GRT du réseau principal Ethereum au réseau Arbitrum. Si vous souhaitez transférer votre GRT d'Arbitrum vers le réseau principal Ethereum, vous devrez utiliser le [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). 
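As a reminder of where these billed query fees come from: they are generated every time your API key is used to query a subgraph through the network gateway. The following sketch shows what such a query looks like from a script; the API key, subgraph ID and queried fields are placeholders, and the exact query URL for your subgraph is the one shown in Subgraph Studio or Explorer.

```ts
// Illustrative only: substitute your own API key and subgraph ID from Subgraph Studio.
const API_KEY = "your-api-key";
const SUBGRAPH_ID = "your-subgraph-id";
const GATEWAY_URL = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

async function queryThroughGateway(): Promise<void> {
  const response = await fetch(GATEWAY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `exampleEntities` is a placeholder entity; use the entities defined in your schema.
    body: JSON.stringify({ query: "{ exampleEntities(first: 5) { id } }" }),
  });

  const { data, errors } = await response.json();
  if (errors) throw new Error(JSON.stringify(errors));
  console.log(data);
}

queryThroughGateway().catch(console.error);
```

Each successful request of this kind draws on the GRT balance you maintain through the billing contract described above.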
diff --git a/website/pages/fr/chain-integration-overview.mdx b/website/pages/fr/chain-integration-overview.mdx new file mode 100644 index 000000000000..dd9c85eda66f --- /dev/null +++ b/website/pages/fr/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. 
Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. Comment les priorités seront-elles gérées ? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/fr/cookbook/arweave.mdx b/website/pages/fr/cookbook/arweave.mdx index b50aaa1245c8..86998f478c1e 100644 --- a/website/pages/fr/cookbook/arweave.mdx +++ b/website/pages/fr/cookbook/arweave.mdx @@ -1,239 +1,239 @@ --- -title: Construction de subgraphes pour Arweave +title: Construction de subgraphs pour Arweave --- -> Le support d'Arweave est actuellemement en version bêta dans Graph Node et sur le Service Hébergé. N'hésite donc pas nous joindre sur [Discord](https://discord.gg/graphprotocol) pour toutes questions concernant la construction de subgraphes Arweave ! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! -Ce guide vous apprendra à construire et à déployer des subgraphes pour indexer la blockchain Arweave. +Dans ce guide, vous apprendrez comment créer et déployer des subgraphs pour indexer la blockchain Arweave. -## Qu'est-ce que Arweave ? +## Qu’est-ce qu’Arweave ? Arweave est un protocole qui permet aux développeurs de stocker des données de façon permanente. C'est cette caractéristique qui constitue la principale différence entre Arweave et IPFS. En effet, IPFS n'a pas la caractéristique de permanence, et les fichiers stockés sur Arweave ne peuvent pas être modifiés ou supprimés. -Ce protocole a déjà construit de nombreuses bibliothèques pour intégrer un certain nombre de langages de programmation différents. Si vous souhaitez en savoir plus, vous pouvez consulter : +Arweave a déjà construit de nombreuses bibliothèques pour intégrer le protocole dans plusieurs langages de programmation différents. 
Pour plus d'informations, vous pouvez consulter : - [Arwiki](https://arwiki.wiki/#/en/main) -- [Ressources Arweave](https://www.arweave.org/build) +- [Ressources d'Arweave](https://www.arweave.org/build) ## À quoi servent les subgraphes d'Arweave ? The Graph vous permet de créer des API ouvertes personnalisées appelées « subgraphes ». Les subgraphes sont utilisés pour indiquer aux indexeurs (gestionnaires de serveur) les données à indexer sur une blockchain et à enregistrer sur leurs serveurs afin que vous puissiez les interroger à tout moment à l'aide de [GraphQL](https://graphql.org/). -À l'heure d'aujourd'hui, [Graph Node](https://github.com/graphprotocol/graph-node) est capable d'indexer les données sur le protocole Arweave. L'intégration actuelle indexe uniquement protocole en tant que blockchain (blocs et transactions), elle n'indexe donc pas encore les fichiers stockés. +[Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable d'indexer les données sur le protocole Arweave. L'intégration actuelle indexe uniquement Arweave en tant que blockchain (blocs et transactions), elle n'indexe pas encore les fichiers stockés. -## Construction d'un subgraphe pour Arweave +## Construire un subgraph Arweave -Afin de pouvoir construire et déployer des subgraphes pour Arweave, vous avez besoin de deux modules : +Pour pouvoir créer et déployer des Arweave Subgraphs, vous avez besoin de deux packages : 1. Les versions supérieures à 0.30.2 du `@graphprotocol/graph-cli` - Il s'agit d'un outil caractérisé par l'utilisation de lignes de commandes pour construire et déployer des subgraphes. Cliquez [ici](https://www.npmjs.com/package/@graphprotocol/graph-cli) pour le télécharger en utilisant `npm`. -2. Les versions supérieures à 0.27.0 du `@graphprotocol/graph-ts` - Il s'agit d'une bibliothèque de types spécifiques aux subgraphes. Cliquez [ici](https://www.npmjs.com/package/@graphprotocol/graph-ts) pour la télécharger en utilisant `npm`. +2. `@graphprotocol/graph-ts` version supérieure à 0.27.0 - Il s'agit d'une bibliothèque de types spécifiques aux subgraphs. [Cliquez ici](https://www.npmjs.com/package/@graphprotocol/graph-ts) pour télécharger en utilisant `npm`. -## Caractéristique des subgraphes +## Caractéristique des subgraphs -Trois éléments caractérisent un subgraphe : +Il y a trois composants d'un subgraph : ### 1. Manifeste - `subgraph.yaml` -Définit les sources de données d'intérêt et la manière dont elles doivent être traitées. Arweave est aujourd'hui un nouveau type de source de données. +Définit les sources de données intéressantes et la manière dont elles doivent être traitées. Arweave est un nouveau type de source de données. -### 2. Schema - `schema.graphql` +### 2. Schéma - `schema.graphql` -Vous définissez ici les données que vous souhaitez pouvoir interroger après avoir indexé votre subgraphe à l'aide de GraphQL. Ceci est en fait similaire à un modèle pour une API, où le modèle définit la structure d'un corps de requête. +Vous définissez ici les données que vous souhaitez pouvoir interroger après avoir indexé votre subgraph à l'aide de GraphQL. Ceci est en fait similaire à un modèle pour une API, où le modèle définit la structure d'un corps de requête. -Les exigences relatives aux subgraphes Arweave sont couvertes dans la [documentation existante](/developing/creating-a-subgraph/#the-graphql-schema). +Les exigences relatives aux subgraphs Arweave sont couvertes par la [documentation existante](/developing/creating-a-subgraph/#the-graphql-schema). ### 3. 
Mappages AssemblyScript - `mapping.ts` Il s'agit de la logique qui détermine comment les données doivent être récupérées et stockées lorsqu'une personne interagit avec les sources de données que vous interrogez. Les données sont traduites et stockées sur la base du schema que vous avez répertorié. -Lors du développement du subgraphe, il y a deux commandes clés : +Lors du développement du subgraph, il y a deux commandes clés : ``` $ graph codegen # génère des types à partir du fichier de schéma identifié dans le manifeste $ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphes dans un dossier /build ``` -## Définition du manifeste du subgraphe +## Définition du manifeste du subgraph -Le manifeste du subgraphe `subgraph.yaml` identifie les sources de données pour le subgraphe, les déclencheurs d'intérêt, et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Ci-dessous un exemple de manifeste pour un subgraphe visant Arweave : +Le manifeste du subgraph `subgraph.yaml` identifie les sources de données pour le subgraph, les déclencheurs d'intérêt, et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Ci-dessous un exemple de manifeste pour un subgraph visant Arweave : ```yaml -specVersion: 0.0.5 -description: Arweave Blocks Indexing -schema: - file: ./schema.graphql # lien vers le fichier de schéma -dataSources: - - kind: arweave - name: arweave-blocks - network: arweave-mainnet # The Graph ne supporte que le Arweave Mainnet +specVersion: 0.0.5 +description: Indexation des blocs Arweave +schema: + file: ./schema.graphql # lien vers le fichier de schéma +dataSources: + - kind: arweave + name: arweave-blocks + network: arweave-mainnet # The Graph ne prend en charge que le mainnet d'Arweave source: - owner: 'ID-OF-AN-OWNER' # La clé publique d'un porte-monnaie Arweave - startBlock: 0 # mettez cette valeur à 0 pour commencer l'indexation à partir de la genèse de la chaîne. - mapping: - apiVersion: 0.0.5 - language: wasm/assemblyscript - file: ./src/blocks.ts # lien vers le fichier contenant les mappages d'Assemblyscript - entities: - - Block + owner: 'ID-OF-AN-OWNER' # La clé publique d'un portefeuille Arweave + startBlock: 0 # réglez cette valeur sur 0 pour démarrer l'indexation à partir de la genèse de la chaîne + mapping: + apiVersion: 0.0.5 + language: wasm/assemblyscript + file: ./src/blocks.ts # lien vers le fichier avec les mappages AssemblyScript + entities: + - Block - Transaction - blockHandlers: - - handler: handleBlock # le nom de la fonction dans le fichier de mapping - transactionHandlers: - - handler: handleTx # le nom de la fonction dans le fichier de mapping + blockHandlers: + - handler: handleBlock # le nom de la fonction dans le fichier de mappage + transactionHandlers: + - handler: handleTx # le nom de la fonction dans le fichier de mappage ``` -- Les subgraphes Arweave introduisent un nouveau type de source de données (`arweave`) -- Le réseau doit correspondre à un réseau sur le nœud d'hébergement The Graph. Pour le Service Hébergé, le réseau principal d'Arweave est `arweave-mainnet` -- Les sources de données Arweave introduisent un champ optionnel source.owner, qui est la clé publique d'un porte-monnaie Arweave +- Les subgraphs Arweave introduisent un nouveau type de source de données (`arweave`) +- The network should correspond to a network on the hosting Graph Node.
On the hosted service, Arweave's mainnet is `arweave-mainnet` +- Les sources de données Arweave introduisent un champ source.owner facultatif, qui est la clé publique d'un portefeuille Arweave -Les sources de données Arweave supportent deux types de gestionnaires : +Les sources de données Arweave prennent en charge deux types de gestionnaires : -- `blockHandlers` - exécuté sur chaque nouveau bloc Arweave. Aucune source.owner n'est requise. -- `transactionHandlers` - exécuté sur chaque transaction dont la `source.owner` est le propriétaire. Actuellement, un propriétaire est requis pour `transactionHandlers`, si les utilisateurs veulent traiter toutes les transactions, ils doivent fournir la « clé publique » comme `source.owner` +- `blockHandlers` - Exécuté sur chaque nouveau bloc Arweave. Aucun source.owner n'est requis. +- `transactionHandlers` : exécuté sur chaque transaction dont le `source.owner` de la source de données est le propriétaire. Actuellement, un propriétaire est requis pour les `transactionHandlers`. Si les utilisateurs souhaitent traiter toutes les transactions, ils doivent fournir "" comme `source.owner` -> La source.owner peut être l'adresse du propriétaire ou sa clé publique. +> Source.owner peut être l’adresse du propriétaire ou sa clé publique. -> Les transactions sont les éléments constitutifs du permaweb Arweave et sont des objets créés par les utilisateurs finaux. +> Les transactions sont les éléments constitutifs du permaweb Arweave et ce sont des objets créés par les utilisateurs finaux. -> Remarque : les transactions [Bundlr](https://bundlr.network/) ne sont pas encore prises en charge. +> Notez : les transactions [Bundlr](https://bundlr.network/) ne sont pas encore prises en charge. -## Définition du Schema +## Définition d'un schéma -La définition du schema décrit la structure de la base de données de subgraphes en sortie et les relations entre les entités. Elle est indépendante de la source de données d'origine. Il y a plus de détails sur la définition du schema du subgraphe [ici](/developing/creating-a-subgraph/#the-graphql-schema). +La définition du schéma décrit la structure de la base de données de subgraphs résultante et les relations entre les entités. Ceci est indépendant de la source de données d’origine. Vous trouverez plus de détails sur la définition du schéma de subgraph [ici](/developing/creating-a-subgraph/#the-graphql-schema). -## Mappages AssemblyScript +## Cartographies AssemblyScript -Les gestionnaires de traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). +Les gestionnaires pour le traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). -L'indexation Arweave introduit des types de données spécifiques à Arweave dans l'API [AssemblyScript](/developing/assemblyscript-api/). +L'indexation Arweave introduit des types de données spécifiques à Arweave dans l'[API AssemblyScript](/developing/assemblyscript-api/). 
```tsx -class Block { - timestamp: u64 - lastRetarget: u64 - height: u64 - indepHash: Bytes - nonce: Bytes - previousBlock: Bytes - diff: Bytes - hash: Bytes - txRoot: Bytes - txs: Bytes[] - walletList: Bytes - rewardAddr: Bytes - tags: Tag[] - rewardPool: Bytes - weaveSize: Bytes - blockSize: Bytes - cumulativeDiff: Bytes - hashListMerkle: Bytes - poa: ProofOfAccess +class Block { + timestamp: u64 + lastRetarget: u64 + height: u64 + indepHash: Bytes + nonce: Bytes + previousBlock: Bytes + diff: Bytes + hash: Bytes + txRoot: Bytes + txs: Bytes[] + walletList: Bytes + rewardAddr: Bytes + tags: Tag[] + rewardPool: Bytes + weaveSize: Bytes + blockSize: Bytes + cumulativeDiff: Bytes + hashListMerkle: Bytes + poa: ProofOfAccess } -class Transaction { - format: u32 - id: Bytes - lastTx: Bytes - owner: Bytes - tags: Tag[] - target: Bytes - quantity: Bytes - data: Bytes - dataSize: Bytes - dataRoot: Bytes - signature: Bytes - reward: Bytes +class Transaction { + format: u32 + id: Bytes + lastTx: Bytes + owner: Bytes + tags: Tag[] + target: Bytes + quantity: Bytes + data: Bytes + dataSize: Bytes + dataRoot: Bytes + signature: Bytes + reward: Bytes } ``` -Les gestionnaires de bloc reçoivent un `bloc`, tandis que les transactions reçoivent une `transaction`. +Les gestionnaires de blocs reçoivent un `Block`, tandis que les transactions reçoivent un `Transaction`. -L'écriture des mappings d'un subgraphe visant Arweave est très similaire à l'écriture des mappings d'un subgraphe Ethereum. Pour plus d'informations, cliquez [ici](/developing/creating-a-subgraph/#writing-mappings). +L'écriture des mappages d'un subgraph Arweave est très similaire à l'écriture des mappages d'un subgraph Ethereum. Pour plus d'informations, cliquez [ici](/developing/creating-a-subgraph/#writing-mappings). -## Déploiement d'un subgraphe Arweave sur le Service Hébergé +## Deploying an Arweave Subgraph on the hosted service -Une fois que votre subgraphe a été créé sur le tableau de bord du Service hébergé, vous pouvez le déployer en utilisant la commande CLI `graph deploy`. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -## Interroger un subgraphe d'Arweave +## Interroger un subgraph d'Arweave -Le endpoint GraphQL pour les subgraphes d'Arweave est déterminé par la définition du schema, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api/) pour plus d'informations. +Le endpoint GraphQL pour les subgraphs d'Arweave est déterminé par la définition du schema, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api/) pour plus d'informations. -## Exemples de subgraphes +## Exemples de subgraphs -Voici un exemple de modèle subgraphe : +Voici un exemple de modèle subgraph : -- [Exemple de subgraphe pour Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Exemple de subgraph pour Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## FAQ +## Questions fréquemment posées -### Un subgraphe peut-il indexer Arweave et d'autres chaînes ?
+### Un subgraph peut-il indexer Arweave et d'autres chaînes ? -Non, un subgraphe ne peut supporter que les sources de données d'une seule chaîne/réseau. +Non, un subgraph ne peut supporter que les sources de données d'une seule chaîne/réseau. -### Puis-je indexer les fichiers stockés sur Arweave ? +### Puis-je indexer les fichiers enregistrés sur Arweave ? -Actuellement, The Graph indexe uniquement Arweave en tant que blockchain (ses blocs et ses transactions). +Actuellement, The Graph n'indexe Arweave qu'en tant que blockchain (ses blocs et ses transactions). -### Puis-je identifier les bundles de Bundlr dans mon subgraphe ? +### Puis-je identifier les bundles de Bundlr dans mon subgraph ? -Ceci n'est pas pris en charge actuellement. +Cette fonction n'est pas prise en charge actuellement. -### Comment puis-je filtrer les transactions sur un compte spécifique ? +### Comment puis-je filtrer les transactions sur un compte spécifique ? -La source.owner peut être la clé publique de l'utilisateur ou l'adresse du compte. +La source.owner peut être la clé publique de l'utilisateur ou l'adresse de son compte. -### Quel est le format de cryptage actuel ? +### Quel est le format de chiffrement actuel ? -Les données sont généralement transmises dans les mappings sous forme d'octets. Si elles sont stockées directement, elles sont renvoyées dans le subgraphe au format `hexadécimal` (par exemple, les hachages de blocs et de transactions). Vous pouvez vouloir convertir en un format `base64` ou `base64 URL`-safe dans vos mappings, afin de correspondre à ce qui est affiché dans les explorateurs de blocs comme [Arweave Explorer](https://viewblock.io/arweave/). +Les données sont généralement transmises dans les mappages sous forme d'octets, qui, s'ils sont stockés directement, sont renvoyés dans le subgraph au format `hex` (ex. hachages de bloc et de transaction). Vous souhaiterez peut-être effectuer la conversion vers un format `base64` ou `base64 URL` sécurisé dans vos mappages, afin de correspondre à ce qui est affiché dans les explorateurs de blocs comme [Explorateur Arweave](https://viewblock.io/arweave/). -La fonction d'aide suivante `bytesToBase64(bytes : Uint8Array, urlSafe : boolean) : string` peut être utilisée et sera ajoutée à `graph-ts` : +La fonction d'assistance `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` suivante peut être utilisée et sera ajoutée à `graph-ts` : ``` const base64Alphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "+", "/" ]; const base64UrlAlphabet = [ - "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", - "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", - "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", - "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", - "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" + "A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", + "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", + "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", + "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", + "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "-", "_" ]; -function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { - let alphabet = urlSafe? base64UrlAlphabet : base64Alphabet; - - let result = '', i: i32, l = bytes.length; - for (i = 2; i < l; i += 3) { - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; - result += alphabet[bytes[i] & 0x3F]; - } - if (i === l + 1) { // 1 octet yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[(bytes[i - 2] & 0x03) << 4]; - if (!urlSafe) { - result += "=="; - } - } - if (!urlSafe && i === l) { // 2 octets yet to write - result += alphabet[bytes[i - 2] >> 2]; - result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; - result += alphabet[(bytes[i - 1] & 0x0F) << 2]; - if (!urlSafe) { - result += "="; - } - } - return result; +function bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string { + let alphabet = urlSafe ? base64UrlAlphabet : base64Alphabet; + + let result = '', i: i32, l = bytes.length; + for (i = 2; i < l; i += 3) { + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[((bytes[i - 1] & 0x0F) << 2) | (bytes[i] >> 6)]; + result += alphabet[bytes[i] & 0x3F]; + } + if (i === l + 1) { // 1 octet restant à écrire + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[(bytes[i - 2] & 0x03) << 4]; + if (!urlSafe) { + result += "=="; + } + } + if (!urlSafe && i === l) { // 2 octets restants à écrire + result += alphabet[bytes[i - 2] >> 2]; + result += alphabet[((bytes[i - 2] & 0x03) << 4) | (bytes[i - 1] >> 4)]; + result += alphabet[(bytes[i - 1] & 0x0F) << 2]; + if (!urlSafe) { + result += "="; + } + } + return result; } ``` diff --git a/website/pages/fr/cookbook/base-testnet.mdx b/website/pages/fr/cookbook/base-testnet.mdx index 49a2b153fc88..362dbe8c34f8 100644 --- a/website/pages/fr/cookbook/base-testnet.mdx +++ b/website/pages/fr/cookbook/base-testnet.mdx @@ -2,100 +2,100 @@ title: Building Subgraphs on Base --- -This guide will quickly take you through how to initialize, create, and deploy your subgraph on Base testnet. +Ce guide vous montrera rapidement comment initialiser, créer et déployer votre subgraph sur le testnet Base. -What you'll need: +Ce dont vous avez besoin : -- A Base testnet contract address -- A crypto wallet (e.g.
+- Une adresse de contrat sur le testnet de Base
+- Un portefeuille cryptographique (par exemple MetaMask ou Coinbase Wallet)

## Subgraph Studio

-### 1. Installation de « The Graph CLI »
+### 1. Installez la CLI de The Graph

-The Graph CLI (>=v0.41.0) is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it.
+La CLI Graph (>=v0.41.0) est écrite en JavaScript et vous devrez avoir installé « npm » ou « yarn » pour l'utiliser.

```sh
-# NPM
+# NPM
npm install -g @graphprotocol/graph-cli

-# Yarn
-yarn global add @graphprotocol/graph-cli
+# Yarn
+yarn global add @graphprotocol/graph-cli
```

-### 2. Create your subgraph in the Subgraph Studio
+### 2. Créez votre subgraph dans Subgraph Studio

-Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your crypto wallet.
+Accédez au [Subgraph Studio](https://thegraph.com/studio/) et connectez votre portefeuille crypto.

-Once connected, click "Create a Subgraph" and enter a name for your subgraph.
+Une fois connecté, cliquez sur "Create a Subgraph" et saisissez un nom pour votre subgraph.

-Select "Base (testnet)" as the indexed blockchain and click Create Subgraph.
+Sélectionnez "Base (testnet)" comme blockchain indexée et cliquez sur Create Subgraph (Créer un subgraph).

-### 3. Initialize your Subgraph
+### 3. Initialiser votre subgraph

-> You can find specific commands for your subgraph in the Subgraph Studio.
+> Vous trouverez des commandes spécifiques pour votre subgraph dans le Studio des subgraphs.

-Make sure that the graph-cli is updated to latest (above 0.41.0)
+Assurez-vous que le graph-cli est mis à jour vers la dernière version (supérieure à 0.41.0)

```sh
graph --version
```

-Initialiser votre subgraphe à partir d'un contrat existant.
+Initialiser votre subgraph à partir d'un contrat existant.

```sh
graph init --studio
```

-Your subgraph slug is an identifier for your subgraph. The CLI tool will walk you through the steps for creating a subgraph, including:
+Votre nom de subgraph est un identifiant pour votre subgraph. L'outil CLI vous guidera à travers les étapes de la création d'un subgraph, y compris :

-- Protocol: ethereum
+- Protocol: Ethereum
- Subgraph slug: ``
-- Directory to create the subgraph in: ``
-- Ethereum network: base-testnet \_ Contract address: ``
+- Répertoire dans lequel créer le subgraph : ``
+- Réseau Ethereum : base-testnet \_ Adresse du contrat : ``
- Start block (optional)
-- Contract name: ``
-- Yes/no to indexing events (yes means your subgraph will be bootstrapped with entities in the schema and simple mappings for emitted events)
+- Nom du contrat : ``
+- Oui/non à l'indexation des événements (oui signifie que votre subgraph sera amorcé avec des entités dans le schéma et des mappings simples pour les événements émis)

-### Rédiger votre subgraphe
+### Rédigez votre subgraph

-> If emitted events are the only thing you want to index, then no additional work is required, and you can skip to the next step.
+> Si les événements émis sont la seule chose que vous souhaitez indexer, aucun travail supplémentaire n'est nécessaire et vous pouvez passer à l'étape suivante.

-The previous command creates a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files:
+La commande précédente crée un subgraph de base que vous pouvez utiliser comme point de départ pour construire votre subgraph. Lorsque vous apporterez des modifications au subgraph, vous travaillerez principalement avec trois fichiers :

-- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in manifest file to deploy your subgraph on Base testnet.
-- Schéma (schema.graphql) : le schéma GraphQL définit les données que vous souhaitez extraire du subgraphe.
-- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema.
+- Manifeste (subgraph.yaml) - Le manifeste définit les sources de données que vos subgraphs indexeront. Assurez-vous d'ajouter « base-testnet » comme nom de réseau dans le fichier manifeste pour déployer votre subgraph sur Base testnet.
+- Schéma (schema.graphql) : le schéma GraphQL définit les données que vous souhaitez extraire du subgraph.
+- Mappages AssemblyScript (mapping.ts) - Il s'agit du code qui traduit les données de vos sources de données vers les entités définies dans le schéma.

-If you want to index additional data, you will need extend the manifest, schema and mappings.
+Si vous souhaitez indexer des données supplémentaires, vous devrez étendre le manifeste, le schéma et les mappages.

-For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph).
+Pour plus d'informations sur la façon d'écrire votre subgraph, voir [Création d'un subgraph](/developing/creating-a-subgraph).

-### 4. Déployer vers le Subgraph Studio
+### 4. Déployer sur Subgraph Studio

-Before you can deploy your subgraph, you will need to authenticate with the Subgraph Studio. You can do this by running the following command:
+Avant de pouvoir déployer votre subgraph, vous devrez vous authentifier auprès de Subgraph Studio. Vous pouvez le faire en exécutant la commande suivante :

-Authenticate the subgraph on studio
+Authentifier le subgraph sur studio

```
-graph auth --studio
+graph auth --studio
```

-Next, enter your subgraph's directory.
+Ensuite, entrez le répertoire de votre subgraph.

```
cd
```

-Build your subgraph with the following command:
+Construisez votre subgraph avec la commande suivante :

````
```
-graph codegen && graph build
+graph codegen && graph build
```
````

-Finally, you can deploy your subgraph using this command:
+Enfin, vous pouvez déployer votre subgraph à l'aide de cette commande :

````
```
@@ -103,10 +103,10 @@ graph deploy --studio
```
````

-### 5. Query your subgraph
+### 5. Interrogez votre subgraph

-Once your subgraph is deployed, you can query it from your dapp using the `Development Query URL` in the Subgraph Studio.
+Une fois votre subgraph déployé, vous pouvez l'interroger à partir de votre dapp en utilisant l'« URL de requête de développement » dans Subgraph Studio.

-Note - Studio API is rate-limited. Hence should preferably be used for development and testing.
+Remarque : L'API Studio est limitée en débit. Elle doit donc être utilisée de préférence pour le développement et les tests.

-To learn more about querying data from your subgraph, see the [Querying a Subgraph](/querying/querying-the-graph) page.
+Pour en savoir plus sur l'interrogation des données de votre subgraph, consultez la page [Interrogation d'un subgraph](/querying/querying-the-graph).
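À titre purement indicatif, voici une esquisse minimale en TypeScript montrant comment une dapp pourrait envoyer une telle requête : l'URL et le nom d'entité `tokens` ci-dessous sont des exemples hypothétiques, à remplacer par la « Development Query URL » affichée dans Subgraph Studio et par les entités de votre propre schéma.

```typescript
// Esquisse hypothétique : interroger un subgraph déployé depuis une dapp.
// QUERY_URL est un exemple fictif — copiez la « Development Query URL » depuis Subgraph Studio.
const QUERY_URL = 'https://api.studio.thegraph.com/query/<ID>/<SLUG>/<VERSION>'

// `tokens` est un nom d'entité hypothétique ; adaptez la requête à votre schema.graphql.
const query = '{ tokens(first: 5) { id } }'

async function querySubgraph(): Promise<void> {
  const res = await fetch(QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  const { data, errors } = await res.json()
  if (errors) {
    // L'API Studio étant limitée en débit, gérez les erreurs côté dapp.
    console.error('Erreurs GraphQL :', errors)
    return
  }
  console.log(data)
}

querySubgraph()
```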
diff --git a/website/pages/fr/cookbook/cosmos.mdx b/website/pages/fr/cookbook/cosmos.mdx
index 3b784e9fd10c..ad5486175d46 100644
--- a/website/pages/fr/cookbook/cosmos.mdx
+++ b/website/pages/fr/cookbook/cosmos.mdx
@@ -2,95 +2,95 @@ title: Création de subgraphes sur Cosmos
---

-This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains.
+Ce guide est une introduction à la création de subgraphs indexant des blockchains basées sur [Cosmos](https://docs.cosmos.network/).

-## Que sont les subgraphes de Cosmos ?
+## Que sont les subgraphs de Cosmos ?

-The Graph permet aux développeurs de traiter les événements de la blockchain et de rendre les données résultantes facilement disponibles via une API GraphQL publique, connue sous le nom de subgraphe. Par exemple : [Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements Cosmos, ce qui signifie que les développeurs peuvent désormais construire des subgraphes pour indexer facilement les événements sur cette chaîne.
+The Graph permet aux développeurs de traiter les événements de la blockchain et de rendre les données résultantes facilement disponibles via une API GraphQL publique, connue sous le nom de subgraph. Par exemple : [Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements Cosmos, ce qui signifie que les développeurs peuvent désormais construire des subgraphs pour indexer facilement les événements sur cette chaîne.

-Il existe quatre types de gestionnaires pris en charge dans les subgraphes de Cosmos :
+Il existe quatre types de gestionnaires pris en charge dans les subgraphs de Cosmos :

-- **Les gestionnaires de blocs** s'exécutent chaque fois qu'un nouveau bloc est ajouté à la chaîne.
-- **Les gestionnaires d'événements** s'exécutent lorsqu'un événement spécifique est émis.
-- **Les gestionnaires de transaction** s'exécutent lorsqu'une transaction se produit.
-- **Les gestionnaires de messages** s'exécutent lorsqu'un message spécifique se produit.
+- Les **gestionnaires de blocs** s'exécutent chaque fois qu'un nouveau bloc est ajouté à la chaîne.
+- Les **gestionnaires d'événements** s'exécutent lorsqu'un événement spécifique est émis.
+- Les **gestionnaires de transactions** s'exécutent lorsqu'une transaction se produit.
+- Les **gestionnaires de messages** s'exécutent lorsqu'un message spécifique apparaît.

-Basé sur la [documentation officielle de Cosmos](https://docs.cosmos.network/) :
+Basé sur la [documentation officielle de Cosmos](https://docs.cosmos.network/) :

-> [Les événements](https://docs.cosmos.network/main/core/events) sont des objets qui contiennent des informations sur l'exécution de l'application. Ils sont principalement utilisés par les fournisseurs de services tels que les explorateurs de blocs et les portefeuilles pour suivre l'exécution de divers messages et indexer les transactions.
+> Les [Événements](https://docs.cosmos.network/main/core/events) sont des objets qui contiennent des informations sur l'exécution de l'application. Ils sont principalement utilisés par les fournisseurs de services tels que les explorateurs de blocs et les portefeuilles pour suivre l'exécution de divers messages et indexer les transactions.

-> [Les transactions](https://docs.cosmos.network/main/core/transactions) sont des objets créés par les utilisateurs finaux pour déclencher des changements d'état dans l'application.
+> Les [Transactions](https://docs.cosmos.network/main/core/transactions) sont des objets créés par les utilisateurs finaux pour déclencher des changements d'état dans l'application.

-> [Les messages](https://docs.cosmos.network/main/core/transactions#messages) sont des objets spécifiques au module qui déclenchent des transitions d'état dans le cadre du module auquel ils appartiennent.
+> Les [Messages](https://docs.cosmos.network/main/core/transactions#messages) sont des objets spécifiques au module qui déclenchent des transitions d'état dans le cadre du module auquel ils appartiennent.

-Même si toutes les données sont accessibles avec un gestionnaire de blocs, des gestionnaires tiers permettent aux développeurs de subgraphes de traiter les données de manière beaucoup plus précise.
+Même si toutes les données sont accessibles avec un gestionnaire de blocs, des gestionnaires tiers permettent aux développeurs de subgraphs de traiter les données de manière beaucoup plus précise.

-## Création d'un subgraphe ciblant Cosmos
+## Création d'un subgraph ciblant Cosmos

-### Dépendances des subgraphes
+### Dépendances des subgraphs

-[graph-cli](https://github.com/graphprotocol/graph-cli) est un outil CLI pour construire et déployer des subgraphes. La version `>=0.30.0` est nécessaire pour travailler avec les subgraphes Cosmos.
+[graph-cli](https://github.com/graphprotocol/graph-cli) est un outil CLI pour construire et déployer des subgraphs. La version `>=0.30.0` est nécessaire pour travailler avec les subgraphs Cosmos.

-[graph-ts](https://github.com/graphprotocol/graph-ts) est une bibliothèque de types spécifiques aux subgraphes. La version `>=0.27.0` est nécessaire pour travailler avec les subgraphes Cosmos.
+[graph-ts](https://github.com/graphprotocol/graph-ts) est une bibliothèque de types spécifiques aux subgraphs. La version `>=0.27.0` est nécessaire pour travailler avec les subgraphs Cosmos.

-### Subgraph Main Components
+### Composants principaux du subgraph

-La définition d'un subgraphe comporte trois éléments clés :
+La définition d'un subgraph comporte trois éléments clés :

-**subgraph.yaml** : un fichier YAML contenant le manifeste du subgraphe, qui identifie les événements à suivre et la façon de les traiter.
+**subgraph.yaml** : un fichier YAML contenant le manifeste du subgraph, qui identifie les événements à suivre et la façon de les traiter.

-**schema.graphql** : un schéma GraphQL qui définit quelles données sont stockées pour votre subgraphe, et comment les interroger via GraphQL.
+**schema.graphql** : un schéma GraphQL qui définit quelles données sont stockées pour votre subgraph, et comment les interroger via GraphQL.

-**Mappings AssemblyScript** : Code [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) qui traduit les données de la blockchain vers les entités définies dans votre schéma.
+**Mappings AssemblyScript** : Code [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) qui traduit les données de la blockchain vers les entités définies dans votre schéma.

-### Définition du manifeste du subgraphe
+### Définition du manifeste du subgraph

-Le manifeste du subgraphe (`subgraph.yaml`) identifie les sources de données du subgraphe, les déclencheurs d'intérêt et les fonctions (`handlers`) qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraphe pour un subgraphe Cosmos :
+Le manifeste du subgraph (`subgraph.yaml`) identifie les sources de données du subgraph, les déclencheurs d'intérêt et les fonctions (`handlers`) qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraph pour un subgraph Cosmos :

```yaml
-specVersion: 0.0.5
-description: Cosmos Subgraph Example
-schema:
-  file: ./schema.graphql # lien vers le fichier de schema
-dataSources:
-  - kind: cosmos
-    name: CosmosHub
-    network: cosmoshub-4 # Cela changera pour chaque blockchain basée sur Cosmos. Dans ce cas, l'exemple utilise le mainnet du Hub Cosmos.
+specVersion: 0.0.5
+description: Exemple de subgraph Cosmos
+schema:
+  file: ./schema.graphql # lien vers le fichier de schéma
+dataSources:
+  - kind: cosmos
+    name: CosmosHub
+    network: cosmoshub-4 # Cela changera pour chaque blockchain basée sur Cosmos. Dans ce cas, l'exemple utilise le mainnet Cosmos Hub.
     source:
-      startBlock: 0 # Requis pour Cosmos, définissez-le à 0 pour commencer l'indexation à partir de la genèse de la chaîne.
-    mapping:
-      apiVersion: 0.0.7
-      language: wasm/assemblyscript
-      blockHandlers:
-        - handler: handleNewBlock # le nom de la fonction dans le fichier de mapping
+      startBlock: 0 # Requis pour Cosmos, définissez-le sur 0 pour démarrer l'indexation à partir de la genèse de la chaîne
+    mapping:
+      apiVersion: 0.0.7
+      language: wasm/assemblyscript
+      blockHandlers:
+        - handler: handleNewBlock # le nom de la fonction dans le fichier de mappage
      Gestionnaires d'événements:
-        - event: rewards # le type d'événement qui sera traité
-          handler: handleReward # le nom de la fonction dans le fichier de mapping
-      transactionHandlers:
-        - handler: handleTransaction # le nom de la fonction dans le fichier de mapping
-      messageHandlers:
-        - message: /cosmos.staking.v1beta1.MsgDelegate # le type de message
-          handler: handleMsgDelegate # le nom de la fonction dans le fichier de mapping
-      file: ./src/mapping.ts # lien vers le fichier contenant les mappages d'Assemblyscript
+        - event: rewards # le type d'événement qui sera géré
+          handler: handleReward # le nom de la fonction dans le fichier de mappage
+      transactionHandlers:
+        - handler: handleTransaction # le nom de la fonction dans le fichier de mappage
+      messageHandlers:
+        - message: /cosmos.staking.v1beta1.MsgDelegate # le type de message
+          handler: handleMsgDelegate # le nom de la fonction dans le fichier de mappage
+      file: ./src/mapping.ts # lien vers le fichier avec les mappages Assemblyscript
```

-- Les subgraphes cosmos introduisent un nouveau `type` de source de données (`cosmos`).
-Le `réseau` doit correspondre à une chaîne dans l'écosystème Cosmos. Dans l'exemple, le réseau principal du hub Cosmos est utilisé.
+- Les subgraphs Cosmos introduisent un nouveau `type` de source de données (`cosmos`).
+- Le `réseau` doit correspondre à une chaîne de l'écosystème Cosmos. Dans l'exemple, le mainnet Cosmos Hub est utilisé.

-### Schema Definition
+### Définition de schéma

-La définition du schéma décrit la structure de la base de données de subgraphes résultante et les relations entre les entités. Elle est indépendante de la source de données d'origine. Vous trouverez plus de détails sur la définition du schéma des subgraphes [ici](/developing/creating-a-subgraph/#the-graph-ql-schema).
+La définition du schéma décrit la structure de la base de données de subgraphs résultante et les relations entre les entités. Elle est indépendante de la source de données d'origine. Vous trouverez plus de détails sur la définition du schéma des subgraphs [ici](/developing/creating-a-subgraph/#the-graph-ql-schema).

-### Mappages AssemblyScript
+### Mappages AssemblyScript

-Les gestionnaires de traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/).
+Les gestionnaires pour le traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/).

-L'indexation Cosmos introduit des types de données spécifiques à Cosmos dans [l'API AssemblyScript](/developing/assemblyscript-api/).
+L'indexation Cosmos introduit des types de données spécifiques à Cosmos dans l'[API AssemblyScript](/developing/assemblyscript-api/).

```tsx
class Block {
  header: Header
-  evidence: EvidenceList
+  evidence: EvidenceList
  resultBeginBlock: ResponseBeginBlock
  resultEndBlock: ResponseEndBlock
  transactions: Array
@@ -163,97 +163,97 @@ class Any {
}
```

-Chaque type de gestionnaire est accompagné de sa propre structure de données qui est transmise comme argument à une fonction de mappage.
+Chaque type de gestionnaire est accompagné de sa propre structure de données, transmise comme argument à une fonction de mappage.

-- Les gestionnaires de bloc reçoivent le type de `Block`.
-- Les gestionnaires d'événements reçoivent le type `EventData`.
-- Les gestionnaires de transactions reçoivent le type `TransactionData`.
-- Les gestionnaires de messages reçoivent le type `MessageData`.
+- Les gestionnaires de blocs reçoivent le type `Block`.
+- Les gestionnaires d'événements reçoivent le type `EventData`.
+- Les gestionnaires de transactions reçoivent le type `TransactionData`.
+- Les gestionnaires de messages reçoivent le type `MessageData`.

En tant que partie de `MessageData`, le gestionnaire de messages reçoit un contexte de transaction, qui contient les informations les plus importantes sur une transaction qui englobe un message. Le contexte de transaction est également disponible dans le type `EventData`, mais uniquement lorsque l'événement correspondant est associé à une transaction. En outre, tous les gestionnaires reçoivent une référence à un bloc (`HeaderOnlyBlock`).

-Vous pouvez trouver la liste complète des types pour l'intégration Cosmos [ici](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts).
+Vous trouverez la liste complète des types pour l'intégration Cosmos [ici](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts).

-### Décryptage des messages
+### Décodage des messages

-Il est important de noter que les messages Cosmos sont spécifiques à une chaîne et qu'ils sont transmis à un sous-graphe sous la forme d'une charge utile sérialisée [Protocol Buffers](https://developers.google.com/protocol-buffers/). Par conséquent, les données du message doivent être décodées dans une fonction de mappage avant de pouvoir être traitées.
+Il est important de noter que les messages Cosmos sont spécifiques à la chaîne et qu'ils sont transmis à un subgraph sous la forme d'une charge utile sérialisée [Protocol Buffers](https://developers.google.com/protocol-buffers/). Par conséquent, les données du message doivent être décodées dans une fonction de mappage avant de pouvoir être traitées.
-An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts).
+Un exemple de décodage des données d'un message dans un subgraph peut être trouvé [ici](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts).

-## Création et construction d'un subgraphe Cosmos
+## Création et construction d'un subgraph Cosmos

-La première étape avant de commencer à écrire les mappings du subgraphes est de générer les liaisons de type basées sur les entités qui ont été définies dans le fichier schéma du subgraphe (`schema.graphql`). Cela permettra aux fonctions de mappage de créer de nouveaux objets de ces types et de les enregistrer dans le magasin. Ceci est fait en utilisant la commande CLI `codegen` :
+La première étape avant de commencer à écrire les mappings du subgraph est de générer les liaisons de type basées sur les entités qui ont été définies dans le fichier schéma du subgraph (`schema.graphql`). Cela permettra aux fonctions de mappage de créer de nouveaux objets de ces types et de les enregistrer dans le magasin. Ceci est fait en utilisant la commande CLI `codegen` :

```bash
-$ graph codegen
+$ graph codegen
```

-Une fois que le mapping est prêt, le subgraphe peut être construit. Cette étape mettra en évidence toute erreur que le manifeste ou le mapping pourraient avoir. Un subgraphe doit être construit avec succès afin d'être déployé sur Graph Node. Ceci est fait en utilisant la commande CLI `build` :
+Une fois que le mapping est prêt, le subgraph peut être construit. Cette étape mettra en évidence toute erreur que le manifeste ou le mapping pourraient avoir. Un subgraph doit être construit avec succès afin d'être déployé sur Graph Node. Ceci est fait en utilisant la commande CLI `build` :

```bash
-$ graph build
+$ graph build
```

-## Déploiement d'un subgraphe Cosmos
+## Déploiement d'un subgraph Cosmos

-Une fois que votre subgraphe a été créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` après avoir exécuté la commande CLI `graph create` :
+Une fois que votre subgraph a été créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` après avoir exécuté la commande CLI `graph create` :

-**Service hébergé**
+**Service hébergé**

```bash
-graph create account/subgraph-name --product hosted-service
+graph create account/subgraph-name --product hosted-service
```

```bash
-graph deploy account/subgraph-name --product hosted-service
+graph deploy account/subgraph-name --product hosted-service
```

-**Nœud The Graph local (basé sur la configuration par défaut) :**
+**Nœud Graph local (basé sur la configuration par défaut) :**

```bash
-graph create subgraph-name --node http://localhost:8020
+graph create subgraph-name --node http://localhost:8020
```

```bash
-graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001
+graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001
```

-## Interroger un subgraphe de Cosmos
+## Interroger un subgraph de Cosmos

-Le point de terminaison GraphQL pour les subgraphes Cosmos est déterminé par la définition du schéma, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api/) pour plus d'informations.
+Le point de terminaison GraphQL pour les subgraphs Cosmos est déterminé par la définition du schéma, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api/) pour plus d'informations.

-## Blockchains Cosmos supportées
+## Blockchains Cosmos prises en charge

-### Le hub Cosmos
+### Cosmos Hub

-#### Qu'est-ce que Cosmos Hub ?
+#### Qu'est-ce que Cosmos Hub ?

-La [blockchain Cosmos Hub](https://hub.cosmos.network/) est la première blockchain de l'écosystème [Cosmos](https://cosmos.network/). Vous pouvez consulter la [documentation officielle](https://docs.cosmos.network/) pour de plus amples informations.
+La [blockchain Cosmos Hub](https://hub.cosmos.network/) est la première blockchain de l'écosystème [Cosmos](https://cosmos.network/). Vous pouvez visiter la [documentation officielle](https://docs.cosmos.network/) pour plus d'informations.

-#### Réseaux
+#### Réseaux

-Le réseau principal de Cosmos Hub est `cosmoshub-4`. Le réseau de test actuel de Cosmos est `theta-testnet-001`.
Les autres réseaux de Cosmos Hub, à savoir `cosmoshub-3`, sont arrêtés, par conséquent aucune donnée n'est fournie pour eux. +Le mainnet de Cosmos Hub est `cosmoshub-4`. Le réseau de test actuel de Cosmos est `theta-testnet-001`.
Les autres réseaux de Cosmos Hub, à savoir `cosmoshub-3`, sont arrêtés, par conséquent aucune donnée n'est fournie pour eux.

### Osmosis

-> Le support d'Osmosis dans Graph Node et sur le service hébergé est en version bêta : veuillez contacter l'équipe The Graph pour toute question sur la construction de subgraphes Osmosis !
+> Le support d'Osmosis dans Graph Node et sur le service hébergé est en version bêta : veuillez contacter l'équipe The Graph pour toute question sur la construction de subgraphs Osmosis !

-#### Qu'est-ce qu'Osmosis ?
+#### Qu'est-ce qu'Osmosis ?

-[Osmosis](https://osmosis.zone/) est un protocole de teneur de marché automatisé (AMM) décentralisé et inter-chaînes construit sur le SDK de Cosmos. Il permet aux utilisateurs de créer des pools de liquidité personnalisés et de négocier des jetons compatibles avec le CIB. Vous pouvez consulter la [documentation officielle](https://docs.osmosis.zone/) pour de plus amples d'informations.
+[Osmosis](https://osmosis.zone/) est un protocole de teneur de marché automatisé (AMM) décentralisé et inter-chaînes construit sur le SDK Cosmos. Il permet aux utilisateurs de créer des pools de liquidités personnalisés et d'échanger des jetons compatibles IBC. Vous pouvez visiter la [documentation officielle](https://docs.osmosis.zone/) pour plus d'informations.

-#### Réseaux
+#### Réseaux

-Le réseau principal d'Osmosis est `osmosis-1`. Le réseau de test actuel d'Osmosis est `osmo-test-4`.
+Le mainnet d'Osmosis est `osmosis-1`. Le testnet actuel d'Osmosis est `osmo-test-4`.

-## Exemples de subgraphes
+## Exemples de subgraphs

-Voici quelques exemples de subgraphes pour référence :
+Voici quelques exemples de subgraphs pour référence :

-[Exemple de filtrage par blocs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering)
+[Exemple de filtrage de blocs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering)

-[Exemple de récompenses de validateurs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards)
+[Exemple de récompenses de validateur](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards)

-[Exemple de délégation de validateurs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations)
+[Exemple de délégations de validateurs](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations)

-[Exemple d'échange de jetons Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps)
+[Exemple d'échange de jetons Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps)

diff --git a/website/pages/fr/cookbook/grafting.mdx b/website/pages/fr/cookbook/grafting.mdx
index 9657fd75cf4a..d47ee3fe7d2b 100644
--- a/website/pages/fr/cookbook/grafting.mdx
+++ b/website/pages/fr/cookbook/grafting.mdx
@@ -1,40 +1,56 @@
---
-title: Remplacer un contrat et conserver son historique grâce au « greffage »
+title: Remplacer un contrat tout en conservant son historique grâce à la greffe
---

-Dans ce guide, vous apprendrez à construire et à déployer de nouveaux subgraphes en utilisant le graffage sur des subgraphes existants.
+In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs.

-## Qu'est-ce que la méthode du greffage ?
+## Qu'est-ce qu'une greffe ?
-C'est une méthode qui réutilise les données d'un subgraphe existant et commence à les indexer à un bloc ultérieur. Elle est utile lors du développement pour contourner rapidement les erreurs simples dans les mappings ou pour remettre temporairement en service un subgraphe existant qui a échoué. Elle peut également être utilisée pour ajouter une fonctionnalité à un subgraphe dont l'indexation depuis la genèse prend un temps considérable.
+C'est une méthode qui réutilise les données d'un subgraph existant et commence à les indexer à un bloc ultérieur. Elle est utile lors du développement pour contourner rapidement les erreurs simples dans les mappings ou pour remettre temporairement en service un subgraph existant qui a échoué. Elle peut également être utilisée pour ajouter une fonctionnalité à un subgraph dont l'indexation depuis la genèse prend un temps considérable.

-Le subgraphe greffé peut utiliser un schema GraphQL qui n'est pas identique à celui du subgraphe de base, mais simplement compatible avec lui. Il doit s'agir d'un schema de subgraphe valide en tant que tel, mais il peut s'écarter du schema du subgraphe de base de la manière suivante :
+Le subgraph greffé peut utiliser un schema GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec lui. Il doit s'agir d'un schema de subgraph valide en tant que tel, mais il peut s'écarter du schema du subgraph de base de la manière suivante :

-- Il ajoute ou supprime des types d'entités
-- Il supprime les attributs des types d'entités
-- Il ajoute des attributs nuls aux types d'entités
-- It turns non-nullable attributes into nullable attributes
-- Ajout de valeurs au type énuméré
-- Ajout ou suppression d'interfaces
-- Elle modifie les types d'entités pour lesquels une interface est mise en œuvre
+- Il ajoute ou supprime des types d'entités
+- Il supprime les attributs des types d'entités
+- Il ajoute des attributs nullables aux types d'entités
+- Il transforme les attributs non nullables en attributs nullables
+- Il ajoute des valeurs aux énumérations
+- Il ajoute ou supprime des interfaces
+- Il modifie les types d'entités pour lesquels une interface est implémentée

-Pour de plus amples informations, vous pouvez consulter :
+Pour plus d'informations, vous pouvez consulter :

-- [La méthode de greffage](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs)
+- [Greffage](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs)

-Dans ce tutoriel, nous allons aborder un cas d'utilisation de base. Nous allons remplacer un contrat existant par un contrat identique (avec une nouvelle adresse, mais le même code). Ensuite, nous grefferons le subgraphe existant sur le subgraphe "de base" qui suit le nouveau contrat.
+Dans ce tutoriel, nous allons aborder un cas d'utilisation de base. Nous allons remplacer un contrat existant par un contrat identique (avec une nouvelle adresse, mais le même code). Ensuite, nous grefferons le subgraph existant sur le subgraph "de base" qui suit le nouveau contrat.

-## Construction à partir d'un subgraphe existant
+## Remarque importante sur le greffage lors de la mise à niveau vers le réseau

-La construction de subgraphes est une partie essentielle de The Graph, décrite plus en profondeur [ici](http://localhost:3000/en/cookbook/quick-start/). Pour pouvoir construire et déployer le subgraphe existant utilisé dans ce tutoriel, le répertoire suivant est fourni :
+> **Attention** : si vous mettez à niveau votre subgraph depuis Subgraph Studio ou le service hébergé vers le réseau décentralisé, il est fortement recommandé d'éviter d'utiliser le greffage pendant le processus de mise à niveau.

-- [Référentiel d'exemples de subgraphes](https://github.com/t-proctor/grafting-tutorial)
+### Pourquoi est-ce important ?

-> Remarque : le contrat utilisé dans le subgraphe a été tiré de ce [kit de démarrage pour hackathon](https://github.com/schmidsi/hackathon-starterkit).
+La greffe est une fonctionnalité puissante qui permet de "greffer" un subgraph sur un autre, transférant ainsi les données historiques du subgraph existant vers une nouvelle version. Bien qu'il s'agisse d'un moyen efficace de préserver les données et de gagner du temps sur l'indexation, la greffe peut introduire des complexités et des problèmes potentiels lors de la migration d'un environnement hébergé vers le réseau décentralisé. Il n'est pas possible de greffer un subgraph du Graph Network vers le service hébergé ou le Subgraph Studio.

-## Définition du manifeste du subgraphe
+### Les meilleures pratiques

-Le manifeste du subgraphe `subgraph.yaml` identifie les sources de données pour le subgraphe, les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraphe que vous pourrez utiliser :
+**Migration initiale** : lorsque vous déployez pour la première fois votre subgraph sur le réseau décentralisé, faites-le sans greffe. Assurez-vous que le subgraph est stable et fonctionne comme prévu.
+
+**Mises à jour ultérieures** : une fois que votre subgraph est actif et stable sur le réseau décentralisé, vous pouvez utiliser le greffage pour les versions futures afin de rendre la transition plus fluide et de préserver les données historiques.
+
+En respectant ces lignes directrices, vous minimisez les risques et vous vous assurez que le processus de migration se déroule sans heurts.
+
+## Construction à partir d'un subgraph existant
+
+La construction de subgraphs est une partie essentielle de The Graph, décrite plus en profondeur [ici](http://localhost:3000/en/cookbook/quick-start/). Pour pouvoir construire et déployer le subgraph existant utilisé dans ce tutoriel, le répertoire suivant est fourni :
+
+- [Dépôt d'exemple de subgraph](https://github.com/t-proctor/grafting-tutorial)
+
+> Remarque : le contrat utilisé dans le subgraph a été tiré de ce [kit de démarrage pour hackathon](https://github.com/schmidsi/hackathon-starterkit).
+
+## Définition du manifeste du subgraph
+
+Le manifeste du subgraph `subgraph.yaml` identifie les sources de données pour le subgraph, les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Vous trouverez ci-dessous un exemple de manifeste de subgraph que vous pourrez utiliser :

```yaml
specVersion: 0.0.4
@@ -63,33 +79,33 @@ dataSources:
      file: ./src/lock.ts
```

-- La source de données `Lock` est l'abi et l'adresse du contrat que nous obtiendrons lorsque nous compilerons et déploierons le contrat
-- Le réseau doit correspondre à un réseau indexé qui est interrogé. Comme nous fonctionnons sur le réseau teste Goerli, le réseau est `goerli`
-- La section `mapping` définit les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Dans ce cas, nous écoutons l'événement `Withdrawal` et appelons la fonction `handleWithdrawal` lorsqu'il est émis.
+- La source de données `Lock` correspond à l'ABI et à l'adresse du contrat que nous obtiendrons lorsque nous compilerons et déploierons le contrat
+- Le réseau doit correspondre à un réseau indexé interrogé. Puisque nous utilisons le testnet Goerli, le réseau est `goerli`
+- La section `mapping` définit les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Dans ce cas, nous écoutons l'événement `Withdrawal` et appelons la fonction `handleWithdrawal` lorsqu'il est émis.

-## Définition du manifeste de greffage
+## Définition du manifeste de greffage

-Le greffage nécessite l'ajout de deux nouveaux éléments au manifeste du subgraphe original :
+Le greffage nécessite l'ajout de deux nouveaux éléments au manifeste du subgraph original :

```yaml
---
-features:
-  - grafting # nom de la fonctionnalité
-graft:
-  base: Qm... # identifiant du subgraphe de base
-  block: 1502122 # numéro du bloc
+features:
+  - grafting # nom de la fonctionnalité
+graft:
+  base: Qm... # ID du subgraph de base
+  block: 1502122 # numéro du bloc
```

-- `features` : est une liste de tous [les noms de caractéristiques](developing/creating-a-subgraph/#experimental-features) utilisés.
-- `graft` : est une carte du subgraphe de `base` et du bloc à greffer. Le `bloc` est le numéro de bloc à partir duquel l'indexation doit commencer. Le graphique copiera les données du subgraphe de base jusqu'au bloc donné inclus, puis continuera à indexer le nouveau subgraphe à partir de ce bloc.
+- `features` : est une liste de tous les [noms de fonctionnalités](developing/creating-a-subgraph/#experimental-features) utilisés.
+- `graft` : est une carte du subgraph `base` et du bloc sur lequel greffer. Le `block` est le numéro de bloc à partir duquel commencer l'indexation. Le graph copiera les données du subgraph de base jusqu'au bloc donné inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc.

-Les valeurs de `base` et de `bloc` peuvent être trouvées en déployant deux subgraphes : un pour l'indexation de base et un avec la méthode du greffage
+Les valeurs de `base` et de `block` peuvent être trouvées en déployant deux subgraphs : un pour l'indexation de base et un avec la méthode du greffage

-## Déploiement du subgraphe de base
+## Déploiement du subgraph de base

-1. Allez dans l'[interface Graph Studio](https://thegraph.com/studio/) et créez un sous-graphe sur Goerli testnet appelé `graft-example`
-2. Suivez les instructions de la section `AUTH& DEPLOY` sur la page de votre sous-graphe dans le dossier `graft-example` du dépôt
-3. Une fois terminé, vérifiez que le subgraphe s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground
+1. Allez dans l'[interface Graph Studio](https://thegraph.com/studio/) et créez un subgraph sur Goerli testnet appelé `graft-example`
+2. Suivez les instructions de la section `AUTH & DEPLOY` sur la page de votre subgraph dans le dossier `graft-example` du dépôt
+3. Une fois terminé, vérifiez que le subgraph s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground
```graphql
{
  withdrawals(first: 5) {
    id
    amount
    when
  }
}
```

-Il devrait renvoyer le résultat suivant :
+Cela renvoie quelque chose comme ceci :

```
{
-  "data": {
-    "withdrawals": [
-      {
-        "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5",
-        "amount": "0",
-        "when": "1664367528"
-      },
-      {
-        "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3",
-        "amount": "0",
-        "when": "1664367648"
-      }
-    ]
-  }
+  "data": {
+    "withdrawals": [
+      {
+        "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5",
+        "amount": "0",
+        "when": "1664367528"
+      },
+      {
+        "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3",
+        "amount": "0",
+        "when": "1664367648"
+      }
+    ]
+  }
}
```

-Une fois que vous avez vérifié que le subgraphe s'indexe correctement, vous pouvez rapidement le mettre à jour grâce à la méthode du graffage.
+Une fois que vous avez vérifié que le subgraph s'indexe correctement, vous pouvez rapidement le mettre à jour grâce à la méthode du graffage.

-## Déploiement du subgraphe greffé
+## Déploiement du subgraph greffé

-Le greffon de remplacement subgraph.yaml aura une nouvelle adresse de contrat. Cela peut arriver lorsque vous mettez à jour votre application décentralisé, redéployez un contrat, etc.
+Le subgraph.yaml de remplacement du greffon aura une nouvelle adresse de contrat. Cela peut arriver lorsque vous mettez à jour votre dapp, redéployez un contrat, etc.

-1. Allez dans l'[interface Graph Studio](https://thegraph.com/studio/) et créez un subgraphe sur le teste de Goerli appelé `graft-replacement`
-2. Créez un nouveau manifeste. Le `subgraph.yaml` pour `graph-replacement` contient une adresse de contrat différente et de nouvelles informations sur la façon dont il doit se greffer. C'est le `bloc` du [dernier événement émis](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) qui a de l'intérêt par l'ancien contrat et la `base` de l'ancien sous-graphe. L'identification du subgraphe de `base` est la même que l'`identifiant de déploiement` de votre subgraphe d'origine `graph-example`. Vous pouvez la trouver dans l'interface utilisateur de Graph Studio.
-3. Suivez les instructions de la section `AUTH& DEPLOY` sur la page de votre subgraphe dans le dossier `graft-replacement` du répertoire
-4. Une fois terminé, vérifiez que le subgraphe s'indexe correctement. Si vous exécutez la commande suivante dans The Graph Playground
+1. Allez dans l'[interface Graph Studio](https://thegraph.com/studio/) et créez un subgraph sur le testnet de Goerli appelé `graft-replacement`
+2. Créez un nouveau manifeste. Le `subgraph.yaml` pour `graph-replacement` contient une adresse de contrat différente et de nouvelles informations sur la façon dont il doit se greffer. Il s'agit du `block` du [dernier événement émis](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) par l'ancien contrat concerné, et de la `base` de l'ancien subgraph. L'identifiant du subgraph de `base` est le même que l'`identifiant de déploiement` de votre subgraph d'origine `graph-example`. Vous pouvez le trouver dans l'interface utilisateur de Graph Studio.
+3. Suivez les instructions de la section `AUTH & DEPLOY` sur la page de votre subgraph dans le dossier `graft-replacement` du répertoire
+4. Une fois cette opération terminée, vérifiez que le subgraph est correctement indexé. Si vous exécutez la commande suivante dans The Graph Playground
```graphql
{
  withdrawals(first: 5) {
    id
-    amount
-    when
+    amount
+    when
  }
}
```

-Il devrait renvoyer le résultat suivant :
+Le résultat devrait être le suivant :

```
{
  "data": {
    "withdrawals": [
      {
-        "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5",
-        "amount": "0",
-        "when": "1664367528"
+        "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5",
+        "amount": "0",
+        "when": "1664367528"
      },
      {
-        "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3",
-        "amount": "0",
-        "when": "1664367648"
+        "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3",
+        "amount": "0",
+        "when": "1664367648"
      },
      {
-        "id": "0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3-22",
-        "amount": "0",
-        "when": "1664371512"
+        "id": "0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3-22",
+        "amount": "0",
+        "when": "1664371512"
      }
    ]
  }
}
```

-Vous pouvez voir que le `subgraphe de remplacement` est indexé à partir des données plus anciennes du `graphe-exemple` et des données plus récentes de la nouvelle adresse du contrat. Le contrat original a émis deux événements de `retrait`, [événement 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) et [événement 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). Le nouveau contrat a émis un `retrait` après l'[événement 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). Les deux transactions précédemment indexées (événement 1 et événement 2) et la nouvelle transaction (événement 3) ont été combinées ensemble dans le subgraphe de `graft-replacemen`.
+Vous pouvez voir que le subgraph `graft-replacement` est indexé à partir des données plus anciennes de `graft-example` et des données plus récentes de la nouvelle adresse du contrat. Le contrat original a émis deux événements `Withdrawal`, [événement 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) et [événement 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). Le nouveau contrat a émis un `Withdrawal` ensuite, l'[événement 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). Les deux transactions précédemment indexées (événement 1 et événement 2) et la nouvelle transaction (événement 3) ont été combinées ensemble dans le subgraph `graft-replacement`.

-Félicitations ! Vous avez réussi le greffage d'un subgraphe sur un autre.
+Félicitations ! Vous avez réussi le greffage d'un subgraph sur un autre.
-## Ressources supplémentaires
+## Ressources complémentaires

-Si vous voulez plus d'expérience avec le greffage, voici quelques exemples de contrats populaires :
+Si vous souhaitez acquérir plus d'expérience en matière de greffage, voici quelques exemples de contrats populaires :

- [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/templates/curve.template.yaml)
- [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml)
- [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3/protocols/uniswap-v3/config/templates/uniswap.v3.template.yaml),

-Pour améliorer ses connaissances en la matière, envisagez d'apprendre d'autres façons de gérer les changements dans les sources de données sous-jacentes. Des alternatives telles que les [modèles de sources de données](developing/creating-a-subgraph/#data-source-templates) permettent d'obtenir des résultats similaires
+Pour devenir encore plus expert en Graph, envisagez de découvrir d'autres façons de gérer les modifications dans les sources de données sous-jacentes. Des alternatives telles que les [Modèles de source de données](developing/creating-a-subgraph/#data-source-templates) peuvent permettre d'obtenir des résultats similaires

-> Remarque : Une grande partie de cet article a été reprise de l'[article Arweave](/cookbook/arweave/) publié précédemment
+> Remarque : Une grande partie de cet article a été reprise de l'[article Arweave](/cookbook/arweave/) publié précédemment

diff --git a/website/pages/fr/cookbook/near.mdx b/website/pages/fr/cookbook/near.mdx
index d2be20439d4f..a216b21b4fcf 100644
--- a/website/pages/fr/cookbook/near.mdx
+++ b/website/pages/fr/cookbook/near.mdx
@@ -1,284 +1,284 @@
---
-title: Construction de subgraphes sur NEAR
+title: Construction de subgraphs sur NEAR
---

-> Le support de NEAR dans Graph Node et sur le Service Hébergé est en version bêta : veuillez contacter near@thegraph.com pour toute question concernant la construction de subgraphes NEAR !
+> La prise en charge de NEAR dans Graph Node et sur le service hébergé est en version bêta : veuillez contacter near@thegraph.com pour toute question concernant la construction de subgraphs NEAR !

-Ce guide est une introduction à la construction de subgraphes indexant des contrats intelligents sur la [blockchain NEAR](https://docs.near.org/).
+Ce guide est une introduction à la construction de subgraphs indexant des contrats intelligents sur la [blockchain NEAR](https://docs.near.org/).

-## Qu'est-ce que NEAR ?
+## Qu'est-ce que NEAR ?

-[NEAR](https://near.org/) est une plateforme de contrats intelligents permettant de créer des applications décentralisées. Visitez la [documentation officielle](https://docs.near.org/docs/concepts/new-to-near) pour plus d'informations.
+[NEAR](https://near.org/) est une plateforme de contrats intelligents permettant de créer des applications décentralisées. Consultez la [documentation officielle](https://docs.near.org/docs/concepts/new-to-near) pour plus d'informations.

-## Que sont les subgraphes NEAR ?
+## Que sont les subgraphs NEAR ?

-Le Graph donne aux développeurs des outils pour traiter les événements de la blockchain et rendre les données résultantes facilement disponibles via une API GraphQL, connue individuellement comme un subgraphe.
[Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements NEAR, ce qui signifie que les développeurs NEAR peuvent désormais construire des subgraphes pour indexer leurs smart contracts. +Le Graph donne aux développeurs des outils pour traiter les événements de la blockchain et rendre les données résultantes facilement disponibles via une API GraphQL, connue individuellement comme un subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) est désormais capable de traiter les événements NEAR, ce qui signifie que les développeurs NEAR peuvent désormais construire des subgraphs pour indexer leurs smart contracts. -Les subgraphes sont basés sur des événements, ce qui signifie qu'ils écoutent et traitent les événements de la chaîne. Il existe actuellement deux types de gestionnaires pour les subgraphes NEAR : +Les subgraphs sont basés sur des événements, ce qui signifie qu'ils écoutent et traitent les événements de la chaîne. Il existe actuellement deux types de gestionnaires pour les subgraphs NEAR : -- Block handlers : ils sont exécutés à chaque nouveau bloc -- Receipt handlers: ils sont exécutés chaque fois qu'un message est exécuté sur un compte spécifié +- Gestionnaires de blocs : ceux-ci sont exécutés à chaque nouveau bloc +- Gestionnaires de reçus : exécutés à chaque fois qu'un message est exécuté sur un compte spécifié -Extrait de la [documentation NEAR](https://docs.near.org/docs/concepts/transaction#receipt) : +[Dans la documentation NEAR](https://docs.near.org/docs/concepts/transaction#receipt) : -> Un Reçu est le seul objet actionnable dans le système. Lorsque nous parlons de "traitement d'une transaction" sur la plate-forme NEAR, cela signifie éventuellement "application de reçus" à un moment donné. +> Un reçu est le seul objet actionnable dans le système. Lorsque nous parlons de "traitement d'une transaction" sur la plateforme NEAR, cela signifie en fin de compte "appliquer des reçus" à un moment ou à un autre. -## Construction d'un subgraphe NEAR +## Construction d'un subgraph NEAR -`@graphprotocol/graph-cli` est un outil en ligne de commande pour construire et déployer des subgraphes. +`@graphprotocol/graph-cli` est un outil en ligne de commande pour construire et déployer des subgraphs. -`@graphprotocol/graph-ts` est une bibliothèque de types spécifiques aux subgraphes. +`@graphprotocol/graph-ts` est une bibliothèque de types spécifiques aux subgraphs. -Le développement du subgraphe NEAR nécessite `graph-cli` à partir de la version `0.23.0` et `graph-ts` à partir de la version `0.23.0`. +Le développement du subgraph NEAR nécessite `graph-cli` à partir de la version `0.23.0` et `graph-ts` à partir de la version `0.23.0`. -> La construction d'un subgraphe NEAR est très similaire à la construction d'un subgraphe qui indexe Ethereum. +> La construction d'un subgraph NEAR est très similaire à la construction d'un subgraph qui indexe Ethereum. -La définition d'un subgraphe comporte trois aspects : +La définition d'un subgraph comporte trois aspects : -**subgraph.yaml** : le manifeste du subgraphe, définissant les sources de données d'intérêt et la manière dont elles doivent être traitées. NEAR est un nouveau `type` de source de données. +**subgraph.yaml** : le manifeste du subgraph, définissant les sources de données d'intérêt et la manière dont elles doivent être traitées. NEAR est un nouveau `type` de source de données. 
-**schema.graphql** : un fichier de schéma qui définit quelles données sont stockées pour votre subgraphe, et comment les interroger via GraphQL. Les exigences pour les subgraphes NEAR sont couvertes par la [documentation existante](/developing/creating-a-subgraph#the-graphql-schema). +**schema.graphql** : un fichier de schéma qui définit quelles données sont stockées pour votre subgraph, et comment les interroger via GraphQL. Les exigences pour les subgraphs NEAR sont couvertes par la [documentation existante](/developing/creating-a-subgraph#the-graphql-schema). -**Mappings AssemblyScript :**[ Code AssemblyScript](/developing/assemblyscript-api) qui traduit les données de l'événement en entités définies dans votre schéma. Le support de NEAR introduit des types de données spécifiques à NEAR et une nouvelle fonctionnalité d'analyse JSON. +**Mappages AssemblyScript :** [Code AssemblyScript](/developing/assemblyscript-api) qui traduit les données d'événement en entités définies dans votre schéma. La prise en charge de NEAR introduit des types de données spécifiques à NEAR et une nouvelle fonctionnalité d'analyse JSON. -Lors du développement du subgraphe, il y a deux commandes clés : +Lors du développement du subgraph, il y a deux commandes clés : ```bash $ graph codegen # génère des types à partir du fichier de schéma identifié dans le manifeste -$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphes dans un dossier /build +$ graph build # génère le Web Assembly à partir des fichiers AssemblyScript, et prépare tous les fichiers de subgraphs dans un dossier /build ``` -### Définition du manifeste du subgraphe +### Définition du manifeste du subgraph -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +Le manifeste de subgraph (`subgraph.yaml`) identifie les sources de données pour le subgraph, les déclencheurs d'intérêt et les fonctions qui doivent être exécutées en réponse à ces déclencheurs. Voici un exemple de manifeste de subgraph pour un subgraph NEAR: ```yaml specVersion: 0.0.2 schema: - file: ./src/schema.graphql # link to the schema file + file: ./src/schema.graphql # lien vers le fichier de schéma dataSources: - kind: near network: near-mainnet source: - account: app.good-morning.near # This data source will monitor this account - startBlock: 10662188 # Required for NEAR + account: app.good-morning.near # Cette source de données surveillera ce compte + startBlock: 10662188 # Requis pour NEAR mapping: apiVersion: 0.0.5 language: wasm/assemblyscript blockHandlers: - - handler: handleNewBlock # the function name in the mapping file + - handler: handleNewBlock # le nom de la fonction dans le fichier de mapping receiptHandlers: - - handler: handleReceipt # the function name in the mapping file - file: ./src/mapping.ts # link to the file with the Assemblyscript mappings + - handler: handleReceipt # le nom de la fonction dans le fichier de mappage + file: ./src/mapping.ts # lien vers le fichier contenant les mappings Assemblyscript ``` -- Les subgraphes NEAR introduisent un nouveau `type` de source de données (`near`) -- Le `réseau` doit correspondre à un réseau sur le nœud The Graph d'hébergement. 
Sur le Service Hébergé, le réseau principal de NEAR est `near-mainnet`, et le réseau de test de NEAR est `near-testnet`
-- Les sources de données NEAR introduisent un champ facultatif `source.account`, qui est un identifiant lisible par l'homme correspondant à un [compte NEAR](https://docs.near.org/docs/concepts/account). Il peut s'agir d'un compte ou d'un sous-compte.
-- Les sources de données NEAR introduisent un autre champ facultatif `source.accounts`, qui contient des suffixes et des préfixes facultatifs. Au moins le préfixe ou le suffixe doit être spécifié, ils correspondront à tout compte commençant ou finissant par la liste de valeurs respectivement. L'exemple ci-dessous correspondrait à : `[app|good].* [morning.near|morning.testnet]`. Si seule une liste de préfixes ou de suffixes est nécessaire, l'autre champ peut être omis.
+- Les subgraphs NEAR introduisent un nouveau `type` de source de données (`near`)
+- Le `réseau` doit correspondre à un réseau sur le nœud The Graph d'hébergement. Sur le Service Hébergé, le mainnet de NEAR est `near-mainnet`, et le réseau de test de NEAR est `near-testnet`
+- Les sources de données NEAR introduisent un champ facultatif `source.account`, qui est un identifiant lisible par l'homme correspondant à un [compte NEAR](https://docs.near.org/docs/concepts/account). Cela peut être un compte ou un sous-compte.
+- Les sources de données NEAR introduisent un autre champ facultatif, `source.accounts`, qui contient des suffixes et des préfixes facultatifs. Au moins un préfixe ou un suffixe doit être spécifié ; ils correspondront respectivement à n'importe quel compte commençant ou se terminant par la liste de valeurs. L'exemple ci-dessous correspondrait à : `[app|good].*[morning.near|morning.testnet]`. Si seule une liste de préfixes ou de suffixes est nécessaire, l'autre champ peut être omis.

```yaml
-accounts:
-  prefixes:
-    - app
-    - good
+accounts:
+  prefixes:
+    - app
+    - good
  suffixes:
-    - morning.near
-    - morning.testnet
+    - morning.near
+    - morning.testnet
```

-Les sources de données NEAR prennent en charge deux types de gestionnaires :
+Les sources de données NEAR prennent en charge deux types de gestionnaires :

-- `blockHandlers` : exécuté sur chaque nouveau bloc NEAR. Aucun `compte source` n'est requis.
-- `receiptHandlers` : exécuté sur chaque reçu dont le `compte source` de la source de données est le destinataire. Notez que seules les correspondances exactes sont traitées ([les sous-comptes](https://docs.near.org/docs/concepts/account#subaccounts) doivent être ajoutés en tant que sources de données indépendantes).
+- `blockHandlers` : s'exécute sur chaque nouveau bloc NEAR. Aucun `source.account` n'est requis.
+- `receiptHandlers` : exécuté sur chaque reçu dont le `source.account` de la source de données est le destinataire. Notez que seules les correspondances exactes sont traitées (les [sous-comptes](https://docs.near.org/docs/concepts/account#subaccounts) doivent être ajoutés en tant que sources de données indépendantes).

-### Définition du schema
+### Définition du schéma

-La définition du schema décrit la structure de la base de données de subgraphes résultante et les relations entre les entités. Ceci est indépendant de la source de données originale. Vous trouverez plus de détails sur la définition du schema des subgraphe [ici](/developing/creating-a-subgraph#the-graphql-schema).
+La définition du schéma décrit la structure de la base de données de subgraphs résultante et les relations entre les entités.
Ceci est indépendant de la source de données originale. Vous trouverez plus de détails sur la définition du schema des subgraphs [ici](/developing/creating-a-subgraph#the-graphql-schema). -### Mappages AssemblyScript +### Mappages AssemblyScript -Les gestionnaires de traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). +Les gestionnaires pour le traitement des événements sont écrits en [AssemblyScript](https://www.assemblyscript.org/). -L'indexation NEAR introduit des types de données spécifiques à NEAR dans l'API [AssemblyScript](/developing/assemblyscript-api). +L'indexation NEAR introduit des types de données spécifiques à NEAR dans l'[API AssemblyScript](/developing/assemblyscript-api). ```typescript -class ExecutionOutcome { - gasBurnt: u64, - blockHash: Bytes, - id: Bytes, - logs: Array, - receiptIds: Array, - tokensBurnt: BigInt, - executorId: string, - } - -class ActionReceipt { - predecessorId: string, - receiverId: string, - id: CryptoHash, - signerId: string, - gasPrice: BigInt, - outputDataReceivers: Array, - inputDataIds: Array, - actions: Array, - } - -class BlockHeader { - height: u64, - prevHeight: u64,// Always zero when version < V3 - epochId: Bytes, - nextEpochId: Bytes, - chunksIncluded: u64, - hash: Bytes, - prevHash: Bytes, - timestampNanosec: u64, - randomValue: Bytes, - gasPrice: BigInt, - totalSupply: BigInt, - latestProtocolVersion: u32, - } - -class ChunkHeader { - gasUsed: u64, - gasLimit: u64, - shardId: u64, - chunkHash: Bytes, - prevBlockHash: Bytes, - balanceBurnt: BigInt, - } - -class Block { - author: string, - header: BlockHeader, - chunks: Array, - } - -class ReceiptWithOutcome { - outcome: ExecutionOutcome, - receipt: ActionReceipt, - block: Block, - } +class ExecutionOutcome { + gasBurnt: u64, + blockHash: Bytes, + id: Bytes, + logs: Array, + receiptIds: Array, + tokensBurnt: BigInt, + executorId: string, + } + +class ActionReceipt { + predecessorId: string, + receiverId: string, + id: CryptoHash, + signerId: string, + gasPrice: BigInt, + outputDataReceivers: Array, + inputDataIds: Array, + actions: Array, + } + +class BlockHeader { + height: u64, + prevHeight: u64, // Toujours zéro lorsque la version < V3 + epochId: Bytes, + nextEpochId: Bytes, + chunksIncluded: u64, + hash: Bytes, + prevHash: Bytes, + timestampNanosec: u64, + randomValue: Bytes, + gasPrice: BigInt, + totalSupply: BigInt, + latestProtocolVersion: u32, + } + +class ChunkHeader { + gasUsed: u64, + gasLimit: u64, + shardId: u64, + chunkHash: Bytes, + prevBlockHash: Bytes, + balanceBurnt: BigInt, + } + +class Block { + author: string, + header: BlockHeader, + chunks: Array, + } + +class ReceiptWithOutcome { + outcome: ExecutionOutcome, + receipt: ActionReceipt, + block: Block, + } ``` -Ces types sont transmis aux gestionnaires de bloc et de facture : +Ces types sont passés aux gestionnaires de blocs et de reçus : -- Les gestionnaires de bloc recevront un `Block` -- Les gestionnaires de reçus recevront un `ReceiptWithOutcome` +- Les gestionnaires de blocs reçoivent un `Block` +- Les gestionnaires de reçus reçoivent un `ReceiptWithOutcome` -Sinon, le reste de l'[API AssemblyScript](/developing/assemblyscript-api) est disponible pour les développeurs de sous-graphes NEAR pendant l'exécution du mapping.
+Sinon, le reste de l'[API AssemblyScript](/developing/assemblyscript-api) est disponible pour les développeurs de subgraphs NEAR pendant l'exécution du mapping. -Cela inclut une nouvelle fonction d'analyse JSON - les logs sur NEAR sont fréquemment émis sous forme de JSON de type string. Une nouvelle fonction `json.fromString(...)` est disponible dans le cadre de l'[API JSON](/developing/assemblyscript-api#json-api) pour permettre aux développeurs de traiter facilement ces logs. +Cela inclut une nouvelle fonction d'analyse JSON : les journaux sur NEAR sont fréquemment émis sous forme de JSON stringifié. Une nouvelle fonction `json.fromString(...)` est disponible dans le cadre de l'[API JSON](/developing/assemblyscript-api#json-api) pour permettre aux développeurs de traiter facilement ces journaux. -## Déploiement d'un subgraphe NEAR +## Déploiement d'un subgraph NEAR -Une fois que vous avez construit un subgraphe, il est temps de le déployer sur Graph Node pour l'indexation. Les subgraphes NEAR peuvent être déployés sur n'importe quel nœud The Graph `>=v0.26.x` (cette version n'a pas encore été marquée et publiée). +Une fois que vous avez construit un subgraph, il est temps de le déployer sur Graph Node pour l'indexation. Les subgraphs NEAR peuvent être déployés sur n'importe quel nœud The Graph `>=v0.26.x` (cette version n'a pas encore été marquée et publiée). -Le Service Hébergé The Graph supporte actuellement l'indexation du réseau principal NEAR et réseau de test en version bêta, avec les noms de réseaux suivants : +Le Service Hébergé The Graph supporte actuellement l'indexation du mainnet NEAR et du réseau de test en version bêta, avec les noms de réseaux suivants : - `near-mainnet` - `near-testnet` -Vous trouverez de plus amples d'informations sur la création et le déploiement de subgraphes sur le Service Hébergé [ici](/deploying/deploying-a-subgraph-to-hosted). +Vous trouverez de plus amples informations sur la création et le déploiement de subgraphs sur le Service Hébergé [ici](/deploying/deploying-a-subgraph-to-hosted). -En guise d'introduction rapide, la première étape consiste à "créer" votre subgraphe, ce qui ne doit être fait qu'une seule fois. Sur le Service Hébergé, cela peut être fait à partir de votre [tableau de bord](https://thegraph.com/hosted-service/dashboard) : "Ajouter un subgraphe". +En guise d'introduction rapide, la première étape consiste à "créer" votre subgraph, ce qui ne doit être fait qu'une seule fois. Sur le Service Hébergé, cela peut être fait à partir de votre [tableau de bord](https://thegraph.com/hosted-service/dashboard) : "Ajouter un subgraph".
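Avant de déployer, il peut être utile de vérifier vos mappages. À titre d'illustration des gestionnaires de reçus et de l'analyse JSON décrits dans la section « Mappages AssemblyScript » ci-dessus, voici une esquisse minimale et hypothétique : l'entité `Receipt` et ses champs sont des hypothèses de ce guide, pas le schéma d'un projet réel.

```typescript
// Esquisse hypothétique : l'entité « Receipt » (et son schéma) est une hypothèse de ce guide.
import { near, json, JSONValueKind, log } from '@graphprotocol/graph-ts'
import { Receipt } from '../generated/schema'

export function handleReceipt(receiptWithOutcome: near.ReceiptWithOutcome): void {
  const actionReceipt = receiptWithOutcome.receipt

  // L'identifiant du reçu sert d'id d'entité ; le signataire est copié tel quel.
  const entity = new Receipt(actionReceipt.id.toBase58())
  entity.signerId = actionReceipt.signerId

  // Les journaux NEAR sont souvent du JSON « stringifié » : json.fromString(...) permet de les analyser.
  const logs = receiptWithOutcome.outcome.logs
  for (let i = 0; i < logs.length; i++) {
    const value = json.fromString(logs[i])
    if (value.kind == JSONValueKind.OBJECT) {
      log.info('Journal JSON reçu pour le compte {}', [actionReceipt.receiverId])
    }
  }

  entity.save()
}
```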
-Une fois votre subgraphe créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` : +Une fois votre subgraph créé, vous pouvez le déployer en utilisant la commande CLI `graph deploy` : ```sh -$ graph create --node subgraph/name # creates a subgraph on a local Graph Node (on the Hosted Service, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node subgraph/name # crée un subgraph sur un Graph Node local (sur le Service Hébergé, cela se fait via l'interface utilisateur) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # télécharge les fichiers de construction vers un point de terminaison IPFS spécifié, puis déploie le subgraph vers un Graph Node spécifié en fonction du hash IPFS du manifeste ``` -La configuration du nœud dépend de l'endroit où le subgraphe est déployé. +La configuration du nœud dépend de l'endroit où le subgraph est déployé. -### Service hébergé +### Service Hébergé ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -### Local Graph Node (based on default configuration) +### Graph Node local (en fonction de la configuration par défaut) ```sh -graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 +graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Une fois que votre subgraphe a été déployé, il sera indexé par le nœud The Graph. Vous pouvez vérifier sa progression en interrogeant le sous-graphe lui-même : +Une fois que votre subgraph a été déployé, il sera indexé par le nœud The Graph. Vous pouvez vérifier sa progression en interrogeant le subgraph lui-même : ```graphql { _meta { block { number } } } ``` -### Indexing NEAR with a Local Graph Node +### Indexation de NEAR avec un nœud The Graph local -L'exécution d'un nœud The Graph qui indexe NEAR présente les exigences opérationnelles suivantes : +L'exécution d'un nœud The Graph qui indexe NEAR présente les exigences opérationnelles suivantes : -- Framework NEAR Indexer avec instrumentation Firehose -- Composant(s) de NEAR Firehose -- Nœud The Graph avec point de terminaison Firehose configuré +- Framework NEAR Indexer avec instrumentation Firehose +- Composant(s) NEAR Firehose +- Nœud The Graph avec point de terminaison Firehose configuré Nous fournirons bientôt plus d'informations sur l'utilisation des composants ci-dessus. -## Interrogation d'un subgraphe NEAR +## Interrogation d'un subgraph NEAR -Le point de terminaison GraphQL pour les subgraphes NEAR est déterminé par la définition du schéma, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api) pour plus d'informations. +Le point de terminaison GraphQL pour les subgraphs NEAR est déterminé par la définition du schéma, avec l'interface API existante. Veuillez consulter la [documentation de l'API GraphQL](/querying/graphql-api) pour plus d'informations.
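À titre purement indicatif, voici comment une application TypeScript pourrait interroger un subgraph NEAR déployé sur le Service Hébergé ; le nom `organisation/near-receipts` est hypothétique et doit être remplacé par celui de votre propre subgraph.

```typescript
// Exemple indicatif : envoi d'une requête GraphQL à un subgraph NEAR hébergé.
// L'URL suit le schéma habituel du Service Hébergé ; le nom du subgraph est une hypothèse.
const QUERY_URL = 'https://api.thegraph.com/subgraphs/name/organisation/near-receipts'

async function fetchLatestIndexedBlock(): Promise<number> {
  const response = await fetch(QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    // La requête _meta renvoie le dernier bloc indexé par le subgraph.
    body: JSON.stringify({ query: '{ _meta { block { number } } }' }),
  })
  const { data } = await response.json()
  return data._meta.block.number as number
}

fetchLatestIndexedBlock().then((block) => console.log('Dernier bloc indexé :', block))
```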
-## Exemples de subgraphes +## Exemples de subgraphs -Voici quelques exemples de subgraphes pour référence : +Voici quelques exemples de subgraphs pour référence : -[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## FAQ +## FAQ -### How does the beta work? +### Comment fonctionne la bêta ? -Le support de NEAR est en version bêta, ce qui signifie qu'il peut y avoir des changements dans l'API alors que nous continuons à travailler sur l'amélioration de l'intégration. Veuillez envoyer un e-mail à near@thegraph.com pour que nous puissions vous aider à construire des subgraphes NEAR et vous tenir au courant des derniers développements ! +Le support de NEAR est en version bêta, ce qui signifie qu'il peut y avoir des changements dans l'API alors que nous continuons à travailler sur l'amélioration de l'intégration. Veuillez envoyer un e-mail à near@thegraph.com pour que nous puissions vous aider à construire des subgraphs NEAR et vous tenir au courant des derniers développements ! -### Can a subgraph index both NEAR and EVM chains? +### Un subgraph peut-il indexer à la fois les chaînes NEAR et EVM ? -Non, un subgraphe ne peut supporter que les sources de données d'une seule chaîne/réseau. +Non, un subgraph ne peut supporter que les sources de données d'une seule chaîne/réseau. -### Can subgraphs react to more specific triggers? +### Les subgraphs peuvent-ils réagir à des déclencheurs plus spécifiques ? -Actuellement, seuls les déclencheurs de type "Block" et "Receipt" sont pris en charge. Nous étudions les déclencheurs pour les appels de fonctions vers un compte spécifique. Nous sommes également intéressés par la prise en charge des déclencheurs d'événements, dès que NEAR disposera d'un support natif pour les événements. +Actuellement, seuls les déclencheurs de bloc et de reçu sont pris en charge. Nous étudions les déclencheurs pour les appels de fonction à un compte spécifique. Nous souhaitons également prendre en charge les déclencheurs d'événements, une fois que NEAR disposera d'un support natif pour les événements. -### Will receipt handlers trigger for accounts and their sub-accounts? +### Les gestionnaires de reçus se déclencheront-ils pour les comptes et leurs sous-comptes ? -Si un `compte` est spécifié, il ne correspondra qu'au nom exact du compte. Il est possible de faire correspondre des sous-comptes en spécifiant un champ `comptes`, avec des `suffixes` et des `préfixes` spécifiés pour faire correspondre les comptes et les sous-comptes. Par exemple, la phrase suivante correspondrait à tous les sous-comptes de `mintbase1.near` : +Si un `account` est spécifié, il correspondra uniquement au nom exact du compte. Il est possible de faire correspondre des sous-comptes en spécifiant un champ `accounts`, avec des `suffixes` et des `prefixes` spécifiés pour faire correspondre les comptes et sous-comptes ; par exemple, ce qui suit correspondrait à tous les sous-comptes de `mintbase1.near` : ```yaml -accounts: +accounts: suffixes: - mintbase1.near ``` ### Can NEAR subgraphs make view calls to NEAR accounts during mappings? -Cette fonctionnalité n'est pas supportée. Nous évaluons actuellement si cette fonctionnalité est nécessaire pour l'indexation.
+Cette fonction n'est pas prise en charge. Nous sommes en train d'évaluer si cette fonctionnalité est nécessaire pour l'indexation. -### Can I use data source templates in my NEAR subgraph? +### Puis-je utiliser des modèles de sources de données dans mon subgraph NEAR ? -Cette fonctionnalité n'est actuellement pas supportée et nous évaluons si celle-ci est nécessaire à l'indexation. +Ceci n’est actuellement pas pris en charge. Nous évaluons si cette fonctionnalité est requise pour l'indexation. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Les subgraphs Ethereum supportent les versions "pending" et "current", comment puis-je déployer une version "pending" d'un subgraph NEAR ? -La fonctionnalité "pending" n'est pas encore prise en charge pour les subgraphes NEAR. Dans l'intervalle, vous pouvez déployer une nouvelle version dans un autre subgraphe "named", puis, lorsque celui-ci est synchronisé avec la tête de chaîne, vous pouvez redéployer dans votre sous-graphe principal "named", qui utilisera le même ID de déploiement sous-jacent, de sorte que le subgraphe principal sera instantanément synchronisé. +La fonctionnalité "pending" n'est pas encore prise en charge pour les subgraphs NEAR. Dans l'intervalle, vous pouvez déployer une nouvelle version dans un autre subgraph "named", puis, lorsque celui-ci est synchronisé avec la tête de chaîne, vous pouvez redéployer dans votre subgraph principal "named", qui utilisera le même ID de déploiement sous-jacent, de sorte que le subgraph principal sera instantanément synchronisé. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### Ma question n'a pas reçu de réponse, où puis-je obtenir plus d'aide concernant la création de subgraphs NEAR ? -S'il s'agit d'une question générale sur le développement de subgraphes, vous trouverez de nombreuses informations dans le reste de la [documentation](/cookbook/quick-start) destinée aux développeurs. Sinon, rejoignez le [discord The Graph](https://discord.gg/graphprotocol) et posez votre question dans le canal #near ou envoyez un courriel à near@thegraph.com. +S'il s'agit d'une question générale sur le développement de subgraphs, il y a beaucoup plus d'informations dans le reste de la [Documentation du développeur](/quick-start). Sinon, veuillez rejoindre [The Graph Protocol Discord](https://discord.gg/graphprotocol) et poser votre question sur le canal #near ou par e-mail à near@thegraph.com. -## Références +## Les Références -- [Développement sur NEAR](https://docs.near.org/docs/develop/basics/getting-started) +- [Documentation du développeur NEAR](https://docs.near.org/docs/develop/basics/getting-started) diff --git a/website/pages/fr/cookbook/subgraph-debug-forking.mdx b/website/pages/fr/cookbook/subgraph-debug-forking.mdx index 50e1f8478ace..51212f065b61 100644 --- a/website/pages/fr/cookbook/subgraph-debug-forking.mdx +++ b/website/pages/fr/cookbook/subgraph-debug-forking.mdx @@ -1,102 +1,102 @@ --- -title: Quick and Easy Subgraph Debugging Using Forks +title: Débogage rapide et facile des subgraph à l'aide de Forks --- -Comme c'est le cas pour de nombreux systèmes traitant de grandes quantités de données, les indexeurs The Graph (nœuds du The Graph) peuvent prendre un certain temps pour synchroniser votre subgraphe avec la blockchain cible. 
Le décalage entre les modifications rapides dans le but de déboguer et les longs temps d'attente nécessaires à l'indexation est extrêmement contre-productif et nous en sommes bien conscients. C'est pourquoi nous introduisons le **subgraph forking**, développé par [LimeChain](https://limechain.tech/), et dans cet article, je vais vous montrer comment cette fonctionnalité peut être utilisée pour accélérer considérablement le débogage des subgraphes ! +Comme c'est le cas pour de nombreux systèmes traitant de grandes quantités de données, les indexeurs The Graph (nœuds du The Graph) peuvent prendre un certain temps pour synchroniser votre subgraph avec la blockchain cible. Le décalage entre les modifications rapides dans le but de déboguer et les longs temps d'attente nécessaires à l'indexation est extrêmement contre-productif et nous en sommes bien conscients. C'est pourquoi nous introduisons le **subgraph forking**, développé par [LimeChain](https://limechain.tech/), et dans cet article, je vais vous montrer comment cette fonctionnalité peut être utilisée pour accélérer considérablement le débogage des subgraphs ! -## Ok, qu'est-ce que c'est ? +## D'accord, qu'est-ce que c'est ? -**Subgraph forking** est le processus qui consiste à récupérer paresseusement des entités du magasin d'un _autre_ subgraphe (généralement un magasin distant). +**Subgraph forking** est le processus qui consiste à récupérer paresseusement des entités du magasin d'un _autre_ subgraph (généralement un magasin distant). -Dans le contexte du débogage, un **subgraphe fork** vous permet de déboguer votre subgraphe défaillant au bloc _X_ sans avoir à attendre la synchronisation avec le bloc _X_. +Dans le contexte du débogage, un **subgraph fork** vous permet de déboguer votre subgraph défaillant au bloc _X_ sans avoir à attendre la synchronisation avec le bloc _X_. -## Ah bon ?! De quelle manière ? +## Quoi ? Comment ? -When you deploy a subgraph to a remote Graph node for indexing and it fails at block _X_, the good news is that the Graph node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +Lorsque vous déployez un subgraphe vers un nœud The Graph distant pour l'indexer et qu'il échoue au bloc _X_, la bonne nouvelle est que le nœud graphique continuera à servir les requêtes GraphQL en utilisant son magasin, qui est synchronisé avec le bloc _X_. C'est génial ! Cela signifie que nous pouvons tirer parti de ce magasin "à jour" pour corriger les bogues survenant lors de l'indexation du bloc _X_. -En bref, nous allons _fork le subgraphe défaillant_ à partir d'un nœud The Graph distant qui est garanti d'avoir le subgraphe indexé jusqu'au bloc _X_ afin de fournir au subgraphe déployé localement et débogué au bloc _X_ une vue à jour de l'état de l'indexation. +En bref, nous allons _fork le subgraph défaillant_ à partir d'un nœud The Graph distant qui est garanti d'avoir le subgraph indexé jusqu'au bloc _X_ afin de fournir au subgraph déployé localement et débogué au bloc _X_ une vue à jour de l'état de l'indexation. -## Je vous en prie, montrez-moi du code ! +## S'il vous plaît, montrez-moi du code ! To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. 
-Voici les gestionnaires définis pour l'indexation des `Gravatars`, sans le moindre bug : +Voici les gestionnaires définis pour l'indexation des `Gravatars`, exempts de tout bogue : ```tsx -export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id.toHex().toString()) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() +export function handleNewGravatar(event: NewGravatar): void { + let gravatar = new Gravatar(event.params.id.toHex().toString()) + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() } -export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let gravatar = Gravatar.load(event.params.id.toI32().toString()) - if (gravatar == null) { - log.critical('Gravatar not found!', []) - return - } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() +export function handleUpdatedGravatar(event: UpdatedGravatar): void { + let gravatar = Gravatar.load(event.params.id.toI32().toString()) + if (gravatar == null) { + log.critical('Gravatar not found!', []) + return + } + gravatar.owner = event.params.owner + gravatar.displayName = event.params.displayName + gravatar.imageUrl = event.params.imageUrl + gravatar.save() } ``` Oops, how unfortunate, when I deploy my perfect looking subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) it fails with the _"Gravatar not found!"_ error. -La méthode habituelle pour tenter de résoudre ce problème est la suivante : +La méthode habituelle pour tenter de résoudre le problème est la suivante : -1. Faire un changement dans la source des mappings, qui, selon vous, résoudra le problème (alors que je sais que ce ne sera pas le cas). -2. Redéployez le sous-graphe vers le [Service Hébergé](https://thegraph.com/hosted-service/) (ou un autre nœud The Graph distant). -3. Attendez qu'il se synchronise. -4. S'il s'interrompt à nouveau, revenez au point 1, sinon... félicitation ! +1. Apportez une modification à la source des mappages, ce qui, selon vous, résoudra le problème (même si je sais que ce ne sera pas le cas). +2. Redéployez le subgraph vers le [Service Hébergé](https://thegraph.com/hosted-service/) (ou un autre nœud The Graph distant). +3. Attendez qu'il soit synchronisé. +4. S'il se casse à nouveau, revenez au point 1, sinon : Hourra ! -Ce processus est en effet assez proche d'un processus de débogage ordinaire, mais il y a une étape qui ralentit terriblement le processus : _3. Attendez qu'il se synchronise._ +C'est en effet assez proche d'un processus de débogage ordinaire, mais il y a une étape qui ralentit horriblement le processus : _3. Attendez qu'il se synchronise._ -En utilisant le **fork pour un subgraphe**, nous pouvons essentiellement éliminer cette étape. Voici à quoi cela ressemble : +En utilisant le **subgraph forking**, nous pouvons essentiellement éliminer cette étape. Voici à quoi cela ressemble : -0. Créez un nœud The Graph local avec le kit de base **_approprié pour effectuer un fork_**. -1. Effectuez un changement dans la source des mappings, qui, selon vous, résoudra le problème. -2. Déployez sur le nœud Graph local, en utilisant le **_fork pour le subgraphe défaillant_** et en **_commençant par le bloc problématique_**. -3.
If it breaks again, go back to 1, otherwise: Hooray! +0. Lancez un nœud The Graph local avec l'option **_fork-base_** appropriée. +1. Apportez une modification à la source des mappings qui, selon vous, résoudra le problème. +2. Déployez sur le nœud Graph local, en utilisant le **_fork pour le subgraph défaillant_** et en **_commençant par le bloc problématique_**. +3. S'il casse à nouveau, revenez à 1, sinon : Hourra ! -Maintenant, vous avez peut-être 2 questions : +Maintenant, vous pouvez avoir 2 questions : -1. fork-base quoi ??? -2. Effectuer un fork pour qui ?! +1. `fork-base` quoi ??? +2. Forker qui ?! -Et je réponds : +Je réponds : -1. `fork-base` est l'URL "de base", de sorte que lorsque l'_id du subgraphe_ est ajouté, l'URL résultante (`/`) est un point de terminaison GraphQL valide pour le magasin du subgraphe. -2. Pas besoin de s'inquiéter, effectuer un fork est facile : +1. `fork-base` est l'URL "de base", de sorte que lorsque l'_id du subgraph_ est ajouté, l'URL résultante (`/`) est un point de terminaison GraphQL valide pour le magasin du subgraph. +2. Forker est facile, pas besoin de s'inquiéter : ```bash -$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 +$ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -N'oubliez pas non plus de définir le champ `dataSources.source.startBlock` dans le manifeste du subgraphe au numéro du bloc problématique, afin de ne pas indexer les blocs inutiles et de profiter du fork ! +N'oubliez pas non plus de définir le champ `dataSources.source.startBlock` dans le manifeste du subgraph au numéro du bloc problématique, afin de ne pas indexer les blocs inutiles et de profiter du fork ! -Donc, voici ce qu'il faut faire : +Voici donc ce que je fais : -0. Je crée un nœud de The Graph local ([voici comment faire](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) avec l'option `fork-base` définie sur : `https://api.thegraph.com/subgraphs/id/`, puisque je vais forker un subgraphe, le bogué que j'ai déployé plus tôt, à partir du [Service Hébergé](https://thegraph.com/hosted-service/). +0. Je crée un nœud de The Graph local ([voici comment faire](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) avec l'option `fork-base` définie sur : `https://api.thegraph.com/subgraphs/id/`, puisque je vais forker un subgraph, le bogué que j'ai déployé plus tôt, à partir du [Service Hébergé](https://thegraph.com/hosted-service/). ``` $ cargo run -p graph-node --release -- \ - --postgres-url postgresql://USERNAME[:PASSWORD]@localhost:5432/graph-node \ - --ethereum-rpc NETWORK_NAME:[CAPABILITIES]:URL \ - --ipfs 127.0.0.1:5001 - --fork-base https://api.thegraph.com/subgraphs/id/ + --postgres-url postgresql://USERNAME[:PASSWORD]@localhost:5432/graph-node \ + --ethereum-rpc NETWORK_NAME:[CAPABILITIES]:URL \ + --ipfs 127.0.0.1:5001 + --fork-base https://api.thegraph.com/subgraphs/id/ ``` -1. Après une inspection minutieuse, je remarque qu'il y a un décalage dans les représentations des `identifiants` utilisés lors de l'indexation des `Gravatars` dans mes deux gestionnaires. Alors que `handleNewGravatar` le convertit en hexadécimal (`event.params.id.toHex()`), `handleUpdatedGravatar` utilise un int32 (`event.params.id.toI32()`), ce qui provoque la panique de `handleUpdatedGravatar` avec "Gravatar non trouvé !". J'ai fait en sorte que les deux convertissent l'`identifiant` en hexadécimal. -2.
After I made the changes I deploy my subgraph to the local Graph node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +1. Après une inspection minutieuse, je remarque qu'il y a un décalage dans les représentations des `identifiants` utilisés lors de l'indexation des `Gravatars` dans mes deux gestionnaires. Alors que `handleNewGravatar` le convertit en hexadécimal (`event.params.id.toHex()`), `handleUpdatedGravatar` utilise un int32 (`event.params.id.toI32()`), ce qui provoque la panique de `handleUpdatedGravatar` avec "Gravatar not found!". J'ai fait en sorte que les deux convertissent l'`identifiant` en hexadécimal. +2. Après avoir effectué les changements, je déploie mon subgraph sur le nœud Graph local, **_en effectuant un fork du subgraphe défaillant_** et en définissant `dataSources.source.startBlock` à `6190343` dans `subgraph.yaml` : ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -3. I inspect the logs produced by the local Graph node and, Hooray!, everything seems to be working. -4. Je déploie mon subgraphe maintenant sans bogue sur un nœud The Graph distant et je vis heureux pour toujours ! (pas de pommes de terre cependant) -5. La fin... +3. Je vérifie les journaux produits par le nœud local The Graph et, hourra, tout semble fonctionner. +4. Je déploie mon subgraph maintenant sans bogue sur un nœud The Graph distant et je vis heureux pour toujours ! (pas de pommes de terre cependant) +5. The end... diff --git a/website/pages/fr/cookbook/subgraph-uncrashable.mdx b/website/pages/fr/cookbook/subgraph-uncrashable.mdx index 16dbab9bf46a..2dfddd8d2cc2 100644 --- a/website/pages/fr/cookbook/subgraph-uncrashable.mdx +++ b/website/pages/fr/cookbook/subgraph-uncrashable.mdx @@ -1,24 +1,24 @@ --- -title: Générateur de code de subgraphe sécurisé +title: Safe Subgraph Code Generator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) est un outil de génération de code qui génère un ensemble de fonctions d'assistance à partir du schéma graphql d'un projet.Il garantit que toutes les interactions avec les entités de votre subgraph sont totalement sûres et cohérentes. -## Why integrate with Subgraph Uncrashable? +## Pourquoi intégrer Subgraph Uncrashable ? -- **Disponibilité continue**. Les entités mal gérées peuvent provoquer le crash des subgraphes, ce qui peut perturber les projets qui dépendent de The Graph. Configurez des fonctions d'assistance pour rendre vos subgraphes "incassables" et assurer la continuité des activités. +- **Disponibilité continue**. Les entités mal gérées peuvent provoquer le crash des subgraphs, ce qui peut perturber les projets qui dépendent de The Graph. Configurez des fonctions d'assistance pour rendre vos subgraphs "incassables" et assurer la continuité des activités. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Complètement sûr**. 
Les problèmes courants rencontrés dans le développement de subgraphs sont les problèmes de chargement d'entités non définies, ne pas définir ou initialiser toutes les valeurs des entités, et les conditions de concurrence lors du chargement et de la sauvegarde des entités. Assurez-vous que toutes les interactions avec les entités sont complètement atomiques. -- **Configurable par l'utilisateur** Définissez les valeurs par défaut et configurez le niveau de contrôles de sécurité qui convient aux besoins de votre projet individuel. Des journaux d'avertissement sont enregistrés indiquant où il y a une violation de la logique de subgraphe pour aider à corriger le problème afin d'assurer l'exactitude des données. +- **Configurable par l'utilisateur** Définissez les valeurs par défaut et configurez le niveau de contrôles de sécurité qui convient aux besoins de votre projet individuel. Des journaux d'avertissement sont enregistrés indiquant où il y a une violation de la logique de subgraph pour aider à corriger le problème afin d'assurer l'exactitude des données. -**Fonctionnalités principales** +**Key Features** - The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. - The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. -- Les journaux d'avertissement sont enregistrés sous forme de journaux indiquant où il y a une violation de la logique de subgraphe pour aider à corriger le problème afin de garantir l'exactitude des données. Ces journaux peuvent être consultés dans le service hébergé de The Graph dans la section "Journaux". +- Les journaux d'avertissement sont enregistrés sous forme de journaux indiquant où il y a une violation de la logique de subgraph pour aider à corriger le problème afin de garantir l'exactitude des données. Ces journaux peuvent être consultés dans le service hébergé de The Graph dans la section "Journaux". Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. @@ -26,4 +26,4 @@ Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen graphe codegen -u [options] [] ``` -Visitez la [subgraphe de documentation incassable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou regardez ceci [tutoriel vidéo](https://float- capital.github.io/float-subgraph-uncrashable/docs/tutorial) pour en savoir plus et commencer à développer des subgraphes plus sûrs. +Visitez la [subgraph de documentation incassable](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ou regardez ceci [tutoriel vidéo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) pour en savoir plus et commencer à développer des subgraphs plus sûrs. 
diff --git a/website/pages/fr/cookbook/substreams-powered-subgraphs.mdx b/website/pages/fr/cookbook/substreams-powered-subgraphs.mdx index 6b84c84358c8..ceb3b82b7c0c 100644 --- a/website/pages/fr/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/fr/cookbook/substreams-powered-subgraphs.mdx @@ -2,17 +2,17 @@ title: Substreams-powered subgraphs --- -[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. +[Substreams](/substreams) est un nouveau framework de traitement des données blockchain, développé par StreamingFast pour The Graph Network. Un module de sous-flux peut générer des modifications d'entité, qui sont compatibles avec les entités Subgraph. Un subgraph peut utiliser un tel module Substreams comme source de données, apportant la vitesse d'indexation et les données supplémentaires des Substreams aux développeurs de subgraphs. -## Requirements +## Exigences -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): +Ce livre de recettes nécessite [yarn](https://yarnpkg.com/), [les dépendances nécessaires au développement local de Substreams](https://substreams.streamingfast.io/developers-guide/installation-requirements) et la dernière version du graph CLI (>=0.52.0) : ``` npm install -g @graphprotocol/graph-cli ``` -## Get the cookbook +## Obtenir le livre de cuisine > This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). @@ -20,7 +20,7 @@ npm install -g @graphprotocol/graph-cli graph init --from-example substreams-powered-subgraph ``` -## Defining a Substreams package +## Définir un package Substreams A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. 
@@ -47,44 +47,44 @@ The core logic of the Substreams package is a `map_contract` module in `lib.rs`, ``` #[substreams::handlers::map] -fn map_contract(block: eth::v2::Block) -> Result { - let contracts = block - .transactions() - .flat_map(|tx| { - tx.calls - .iter() - .filter(|call| !call.state_reverted) - .filter(|call| call.call_type == eth::v2::CallType::Create as i32) - .map(|call| Contract { - address: format!("0x{}", Hex(&call.address)), - block_number: block.number, - timestamp: block.timestamp_seconds().to_string(), - ordinal: tx.begin_ordinal, - }) - }) - .collect(); - Ok(Contracts { contracts }) +fn map_contract(block: eth::v2::Block) -> Result { + let contracts = block + .transactions() + .flat_map(|tx| { + tx.calls + .iter() + .filter(|call| !call.state_reverted) + .filter(|call| call.call_type == eth::v2::CallType::Create as i32) + .map(|call| Contract { + address: format!("0x{}", Hex(&call.address)), + block_number: block.number, + timestamp: block.timestamp_seconds().to_string(), + ordinal: tx.begin_ordinal, + }) + }) + .collect(); + Ok(Contracts { contracts }) } ``` -A Substreams package can be used by a subgraph as long as it has a module which outputs compatible entity changes. The example Substreams package has an additional `graph_out` module in `lib.rs` which returns a `substreams_entity_change::pb::entity::EntityChanges` output, which can be processed by Graph Node. +Un package Substreams peut être utilisé par un subgraph tant qu'il possède un module qui produit des changements d'entité compatibles. Le paquet Substreams de l'exemple a un module supplémentaire `graph_out` dans `lib.rs` qui renvoie une sortie `substreams_entity_change::pb::entity::EntityChanges`, qui peut être traitée par Graph Node. > The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph.
``` #[substreams::handlers::map] -pub fn graph_out(contracts: Contracts) -> Result { - // hash map of name to a table - let mut tables = Tables::new(); - - for contract in contracts.contracts.into_iter() { - tables - .create_row("Contract", contract.address) - .set("timestamp", contract.timestamp) - .set("blockNumber", contract.block_number); - } - - Ok(tables.to_entity_changes()) +pub fn graph_out(contracts: Contracts) -> Result { + // table de hachage des noms vers une table + let mut tables = Tables::new(); + + for contract in contracts.contracts.into_iter() { + tables + .create_row("Contract", contract.address) + .set("timestamp", contract.timestamp) + .set("blockNumber", contract.block_number); + } + + Ok(tables.to_entity_changes()) } ``` @@ -140,44 +140,44 @@ graph TD; To prepare this Substreams package for consumption by a subgraph, you must run the following commands: ```bash -yarn substreams:protogen # generates types in /src/pb -yarn substreams:build # builds the substreams -yarn substreams:package # packages the substreams in a .spkg file +yarn substreams:protogen # génère des types dans /src/pb +yarn substreams:build # construit les substreams +yarn substreams:package # empaquette les substreams dans un fichier .spkg -# alternatively, yarn substreams:prepare calls all of the above commands +# alternativement, yarn substreams:prepare appelle toutes les commandes ci-dessus ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> Ces scripts sont définis dans le fichier `package.json` si vous voulez comprendre les commandes substreams sous-jacentes -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +Cela génère un fichier `spkg` basé sur le nom et la version du paquet provenant de `substreams.yaml`. Le fichier `spkg` contient toutes les informations dont Graph Node a besoin pour ingérer ce paquet Substreams. > If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. -## Defining a Substreams-powered subgraph +## Définition d'un subgraph alimenté par Substreams Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. -This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. +Cette source de données doit spécifier le réseau indexé, le paquet Substreams (`spkg`) en tant qu'emplacement de fichier relatif, et le module au sein de ce paquet Substreams qui produit des changements d'entités compatibles avec le subgraph (dans ce cas, `map_entity_changes`, du paquet Substreams ci-dessus). Le mapping est spécifié, mais identifie simplement le type de mapping ("substreams/graph-entities") et l'apiVersion. > Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum).
```yaml -specVersion: 0.0.4 -description: Ethereum Contract Tracking Subgraph (powered by Substreams) -repository: https://github.com/graphprotocol/graph-tooling -schema: - file: schema.graphql -dataSources: - - kind: substreams - name: substream_test - network: mainnet - source: - package: - moduleName: graph_out - file: substreams-test-v1.0.1.spkg - mapping: - kind: substreams/graph-entities - apiVersion: 0.0.5 +specVersion: 0.0.4 +description: Subgraph de suivi des contrats Ethereum (propulsé par Substreams) +repository: https://github.com/graphprotocol/graph-tooling +schema: + file: schema.graphql +dataSources: + - kind: substreams + name: substream_test + network: mainnet + source: + package: + moduleName: graph_out + file: substreams-test-v1.0.1.spkg + mapping: + kind: substreams/graph-entities + apiVersion: 0.0.5 ``` The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. diff --git a/website/pages/fr/cookbook/upgrading-a-subgraph.mdx b/website/pages/fr/cookbook/upgrading-a-subgraph.mdx index 2882b67d5bab..4803b220242a 100644 --- a/website/pages/fr/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/fr/cookbook/upgrading-a-subgraph.mdx @@ -1,51 +1,51 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: Mise à niveau d'un subgraph existant vers The Graph Network --- -## Introduction +## Présentation -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +Ceci est un guide sur la façon de mettre à niveau votre subgraph du service hébergé vers le réseau décentralisé de The Graph. Plus de 1 000 subgraphs ont été mis à niveau avec succès vers The Graph Network, y compris des projets tels que Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido et bien d'autres ! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. +Le processus de mise à niveau est rapide et vos subgraphs bénéficieront à jamais de la fiabilité et de la performance que vous ne pouvez obtenir que sur The Graph Network. -### Prerequisites +### Conditions préalables -- Vous avez déjà déployé un subgraphe sur le service hébergé. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. -- You have a wallet with ETH to publish your subgraph on-chain. +- Vous avez déjà déployé un subgraph sur le service hébergé. +- The subgraph is indexing a chain available on The Graph Network. +- Vous disposez d'un portefeuille avec ETH pour publier votre subgraph en chaîne. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. -## Upgrading an Existing Subgraph to The Graph Network +## Mise à niveau d'un subgraph existant vers The Graph Network -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> Vous trouverez des commandes spécifiques pour votre subgraph dans le [Subgraph Studio](https://thegraph.com/studio/). +1.
Obtenir la dernière version de graph-cli installée : ```sh npm install -g @graphprotocol/graph-cli ``` ```sh -yarn global add @graphprotocol/graph-cli +yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +Assurez-vous que votre `apiVersion` dans subgraph.yaml est `0.0.5` ou supérieur. -2. À l'intérieur du répertoire principal du projet du subgraphe, authentifiez le pour déployer et construire sur le studio : +2. À l'intérieur du répertoire principal du projet du subgraph, authentifiez-le pour déployer et construire sur le studio : ```sh -graph auth --studio +graph auth --studio ``` -3. Générez les fichiers et construisez le subgraphe : +3. Générez les fichiers et construisez le subgraph : ```sh graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +Si votre subgraph présente des erreurs de construction, reportez-vous au [Guide de migration AssemblyScript](/release-notes/assemblyscript-migration-guide/). -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. Connectez-vous à [Subgraph Studio](https://thegraph.com/studio/) avec votre portefeuille et déployez le subgraph. Vous pouvez trouver votre `` dans l'interface utilisateur de Studio, qui est basée sur le nom de votre subgraph. ```sh graph deploy --studio @@ -55,80 +55,80 @@ graph deploy --studio ```sh { - users(first: 5) { + users(first: 5) { id liquidityPositions { id } } - bundles(first: 5) { + bundles(first: 5) { id ethPrice } } ``` -6. À ce stade, votre subgraphe est maintenant déployé sur Subgraph Studio, mais pas encore publié sur le réseau décentralisé. Vous pouvez désormais tester le sous-graphe pour vous assurer qu'il fonctionne comme prévu en utilisant l'URL de requête temporaire, comme indiqué en haut de la colonne de droite ci-dessus. Comme son nom l'indique déjà, il s'agit d'une URL temporaire qui ne doit pas être utilisée en production. +6. À ce stade, votre subgraph est maintenant déployé sur Subgraph Studio, mais pas encore publié sur le réseau décentralisé. Vous pouvez désormais tester le subgraph pour vous assurer qu'il fonctionne comme prévu en utilisant l'URL de requête temporaire, comme indiqué en haut de la colonne de droite ci-dessus. Comme son nom l'indique déjà, il s'agit d'une URL temporaire qui ne doit pas être utilisée en production. - Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. -- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split.
+- Étant donné que cela entraîne un coût, il est fortement recommandé de déployer et de tester votre subgraph dans Subgraph Studio, en utilisant l'« URL de requête de développement » avant de le publier. Voir un exemple de transaction [ici](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Les prix sont d'environ 0,0425 ETH à 100 gwei. +- Chaque fois que vous devrez mettre à jour votre subgraph, des frais de mise à jour vous seront facturés. Comme cela engendre un coût, il est fortement recommandé de déployer et de tester votre subgraph sur Goerli avant de le déployer sur le réseau principal. Dans certains cas, cela peut également nécessiter du GRT s'il n'y a pas de signal sur ce subgraph. Dans le cas où il y a un signal/curation sur cette version du subgraph (en utilisant l'auto-migration), les taxes seront divisées. -7. Publiez le sous-graphe sur le réseau décentralisé de The Graph en cliquant sur le bouton "Publier". +7. Publiez le subgraph sur le réseau décentralisé de The Graph en cliquant sur le bouton "Publier". You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. -And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +Et c'est tout ! Une fois la publication terminée, vous pourrez visualiser vos subgraphs en direct sur le réseau décentralisé via [The Graph Explorer](https://thegraph.com/explorer). -Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. +N'hésitez pas à utiliser le [canal #Curators](https://discord.gg/s5HfGMXmbW) sur Discord pour informer les curateurs que votre subgraph est prêt à être signalé. Il serait également utile que vous leur fassiez part de votre volume de requêtes prévu. Ainsi, ils pourront estimer la quantité de GRT qu'ils doivent signaler sur votre subgraph. -### Créer une clé API +### Créer une clé API You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). ![API key creation page](/img/api-image.png) -At the end of each week, an invoice will be generated based on the query fees that have been incurred during this period. This invoice will be paid automatically using the GRT available in your balance. Your balance will be updated after the cost of your query fees are withdrawn. Query fees are paid in GRT via the Arbitrum network. You will need to add GRT to the Arbitrum billing contract to enable your API key via the following steps: +À la fin de chaque semaine, une facture sera générée sur la base des frais de requête encourus pendant cette période. Cette facture sera payée automatiquement grâce au GRT disponible dans votre solde. Votre solde sera mis à jour une fois le coût de vos frais de requête retiré. Les frais de requête sont payés en GRT via le réseau Arbitrum. Vous devrez ajouter du GRT au contrat de facturation Arbitrum pour activer votre clé API en suivant les étapes suivantes : -- Achetez des GRT sur un échange de votre choix.
-- Envoyez le GRT à votre portefeuille. -- Sur la page de facturation de Studio, cliquez sur ajouter des GRT. +- Achetez du GRT sur la plateforme d'échange de votre choix. +- Envoyez le GRT à votre portefeuille. +- Sur la page Facturation de Studio, cliquez sur Ajouter des GRT. ![Add GRT in billing](/img/Add-GRT-New-Page.png) -- Follow the steps to add your GRT to your billing balance. -- Your GRT will be automatically bridged to the Arbitrum network and added to your billing balance. +- Suivez les étapes pour ajouter votre GRT à votre solde de facturation. +- Votre GRT sera automatiquement relié au réseau Arbitrum et ajouté à votre solde de facturation. ![Billing pane](/img/New-Billing-Pane.png) > Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. -### Securing your API key +### Sécurisation de votre clé API -Il est recommandé de sécuriser l'API en limitant son utilisation de deux manières : +Il est recommandé de sécuriser l'API en limitant son utilisation de deux manières : -1. Les subgraphes autorisés -2. Domaine autorisé +1. Les subgraphs autorisés +2. Domaine autorisé -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +Vous pouvez sécuriser votre clé API [ici](https://thegraph.com/studio/apikeys/test/). ![Subgraph lockdown page](/img/subgraph-lockdown.png) -### Interroger votre subgraphe sur le réseau décentralisé +### Interroger votre subgraph sur le réseau décentralisé -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. +Vous pouvez maintenant vérifier l'état d'indexation des indexeurs sur le réseau dans Graph Explorer (exemple [ici](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). La ligne verte en haut indique qu'au moment de la publication, 8 indexeurs avaient indexé ce subgraph avec succès. Dans l'onglet Indexeur, vous pouvez également voir quels indexeurs ont récupéré votre subgraph. ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) -Dès que le premier indexeur a complètement indexé votre subgraphe, vous pouvez commencer à interroger le subgraphe sur le réseau décentralisé. Afin de récupérer l'URL de requête pour votre subgraphe, vous pouvez le copier/coller en cliquant sur le symbole à côté de l'URL de requête. Vous verrez quelque chose comme ceci : +Dès que le premier indexeur a complètement indexé votre subgraph, vous pouvez commencer à interroger le subgraph sur le réseau décentralisé. Afin de récupérer l'URL de requête pour votre subgraph, vous pouvez la copier/coller en cliquant sur le symbole à côté de l'URL de requête. Vous verrez quelque chose comme ceci : `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. -Vous pouvez maintenant utiliser cette URL de requête dans votre dapp pour y envoyer vos requêtes GraphQL. +Vous pouvez maintenant utiliser cette URL de requête dans votre dapp pour envoyer vos requêtes GraphQL. -Félicitations ! Vous êtes maintenant un pionnier de la décentralisation ! +Toutes nos félicitations ! Vous êtes désormais un pionnier de la décentralisation !
> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: @@ -146,80 +146,80 @@ More information about the nature of the network and how to handle re-orgs are d If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. -1. Apportez des modifications à votre subgraphe actuel. Une bonne idée est de tester les petites corrections sur le Subgraph Studio en les publiant sur Goerli. -2. Déployez ce qui suit et spécifiez la nouvelle version dans la commande (par exemple v0.0.1, v0.0.2, etc) : +1. Apportez des modifications à votre subgraph actuel. Une bonne idée est de tester les petites corrections sur le Subgraph Studio en les publiant sur Goerli. +2. Déployez les éléments suivants et spécifiez la nouvelle version dans la commande (par exemple v0.0.1, v0.0.2, etc.) : ```sh graph deploy --studio ``` -3. Testez la nouvelle version dans le Subgraph Studio en effectuant des requêtes dans le terrain de jeu -4. Publiez la nouvelle version sur The Graph Network. N'oubliez pas que cela nécessite du gaz (comme décrit dans la section ci-dessus). +3. Testez la nouvelle version dans le Subgraph Studio en effectuant des requêtes dans le terrain de jeu (playground) +4. Publiez la nouvelle version sur The Graph Network. N'oubliez pas que cela nécessite du gaz (comme décrit dans la section ci-dessus). ### Owner Update Fee: Deep Dive -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> Remarque : La curation sur Arbitrum n'utilise pas de courbes de liaison (bonding curves). Apprenez-en plus sur Arbitrum [ici](/arbitrum/arbitrum-faq/). An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. -Prenons un exemple, ce n'est le cas que si votre subgraphe fait l'objet d'une curation active : +Prenons un exemple, ce n'est le cas que si votre subgraph fait l'objet d'une curation active : -- 100,000 GRT sont signalés en utilisant la migration automatique sur la v1 d'un subgraphe +- 100,000 GRT sont signalés en utilisant la migration automatique sur la v1 d'un subgraph - Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned - The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update.
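Pour fixer les idées, voici un petit calcul purement indicatif qui reprend les chiffres de l'exemple ci-dessus ; les montants et les taux sont ceux de l'exemple, pas des constantes du protocole.

```typescript
// Calcul indicatif de la répartition de la taxe de curation lors d'une mise à jour,
// en reprenant les chiffres de l'exemple ci-dessus (hypothèses, pas des valeurs du protocole).
const signaledGrt = 100_000 // GRT signalé sur la v1 avec l'auto-migration

const burnedOnMigration = signaledGrt * 0.025 // 2 500 GRT brûlés lors de la migration vers la nouvelle courbe
const movedToNewCurve = signaledGrt - burnedOnMigration // 97 500 GRT placés sur la nouvelle courbe
const ownerFee = signaledGrt * 0.0125 // 1 250 GRT brûlés par le propriétaire (la moitié de la taxe), à prévoir dans son portefeuille

console.log(`Nouvelle courbe : ${movedToNewCurve} GRT, brûlés : ${burnedOnMigration} GRT, frais du propriétaire : ${ownerFee} GRT`)
```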
-_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._
+_Bien que ce mécanisme soit actuellement actif sur le réseau, la communauté discute des moyens de réduire le coût des mises à jour pour les développeurs de subgraphs._
-### Maintenir une version stable d'un subgraphe
+### Maintenir une version stable d'un subgraph
If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs.
Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs.
-### Mise à jour des métadonnées d'un subgraphe
+### Mise à jour des métadonnées d'un subgraph
-You can update the metadata of your subgraphs without having to publish a new version. The metadata includes the subgraph name, image, description, website URL, source code URL, and categories. Developers can do this by updating their subgraph details in the Subgraph Studio where you can edit all applicable fields.
+Vous pouvez mettre à jour les métadonnées de vos subgraphs sans avoir à publier une nouvelle version. Les métadonnées comprennent le nom du subgraph, l'image, la description, l'URL du site Web, l'URL du code source et les catégories. Les développeurs peuvent le faire en mettant à jour les détails de leur subgraph dans le Subgraph Studio, où vous pouvez modifier tous les champs applicables.
Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment.
-## Meilleures pratiques pour le déploiement d'un subgraphe sur le réseau The Graph
+## Meilleures pratiques pour le déploiement d'un subgraph sur le réseau The Graph
-1. Utilisation d'un nom d'ENS pour le développement de subgraphe :
+1. Utilisation d'un nom d'ENS pour le développement de subgraph :
- Set up your ENS [here](https://app.ens.domains/)
- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name).
-2. The more filled out your profiles are, the better the chances for your subgraphs to be indexed and curated.
+2. Plus vos profils seront complets, plus vos subgraphs auront de chances d'être indexés et de faire l'objet d'une curation.
-## Désactiver un subgraph sur le réseau The Graph
+## Dépréciation d'un subgraph sur le réseau The Graph
-Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network.
+Suivez les étapes [ici](/managing/deprecating-a-subgraph) pour déprécier votre subgraph et le retirer de The Graph Network.
-## Interrogation d'un subgraphe + facturation sur le reseau The Graph
+## Interrogation d'un subgraph + facturation sur le réseau The Graph
The hosted service was set up to allow developers to deploy their subgraphs without any restrictions.
-In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/).
+Pour que The Graph Network soit réellement décentralisé, les frais de requête doivent être payés en tant qu'élément central des incitations du protocole. Pour plus d'informations sur l'abonnement aux API et le paiement des frais de requête, consultez la documentation sur la facturation [ici](/billing/).
-### Estimer les frais d'interrogation sur le réseau
+### Estimation des frais d'interrogation sur le réseau
-Bien que cette fonctionnalité ne soit pas encore disponible dans l'interface utilisateur du produit, vous pouvez définir votre budget maximum par requête en divisant le montant que vous êtes prêt à payer par mois par le volume de requêtes prévu.
+Bien que cette fonction ne soit pas encore disponible dans l'interface utilisateur du produit, vous pouvez définir votre budget maximum par requête en divisant le montant que vous êtes prêt à payer par mois par le volume de requêtes attendu. Par exemple, si vous êtes prêt à payer 30 $ par mois pour un volume attendu de 3 000 000 de requêtes, votre budget maximum est d'environ 0,00001 $ par requête.
While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph.
-Remember that it's a dynamic and growing market, but how you interact with it is in your control. There is no maximum or minimum price specified in the protocol or the Gateways. For example, you can look at the price paid by a few of the dapps on the network (on a per-week basis), below. See the last column, which shows query fees in GRT.
+N'oubliez pas qu'il s'agit d'un marché dynamique et en pleine expansion, mais la manière dont vous interagissez avec lui est sous votre contrôle. Il n'y a pas de prix maximum ou minimum spécifié dans le protocole ou les passerelles. Par exemple, vous pouvez regarder le prix payé par quelques-unes des dapps sur le réseau (sur une base hebdomadaire), ci-dessous. Regardez la dernière colonne, qui montre les frais de requête en GRT.
![QueryFee](/img/QueryFee.png)
-## Ressources supplémentaires
+## Ressources additionnelles
-If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below:
+Si vous êtes encore confus, n'ayez crainte !
Consultez les ressources suivantes ou regardez notre guide vidéo sur la mise à niveau des subgraphs vers le réseau décentralisé ci-dessous :
- [The Graph Network Contracts](https://github.com/graphprotocol/contracts)
-- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around
+- [Contrat de curation](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - le contrat sous-jacent autour duquel le GNS s'articule
- Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538`
- [Subgraph Studio documentation](/deploying/subgraph-studio)
diff --git a/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx
index 621f6321a0d4..185e3e522d3d 100644
--- a/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx
+++ b/website/pages/fr/deploying/deploying-a-subgraph-to-hosted.mdx
@@ -1,76 +1,76 @@
---
-title: Deploying a Subgraph to the Hosted Service
+title: Déploiement d'un subgraph dans le service hébergé
---
-> If a network is not supported on the Hosted Service, you can run your own [graph-node](https://github.com/graphprotocol/graph-node) to index it.
+> Si un réseau n'est pas pris en charge sur le service hébergé, vous pouvez exécuter votre propre [graph-node](https://github.com/graphprotocol/graph-node) pour l'indexer.
-This page explains how to deploy a subgraph to the Hosted Service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph).
+Cette page explique comment déployer un subgraph dans le service hébergé. Pour déployer un subgraph, vous devez d'abord installer le [Graph CLI](https://github.com/graphprotocol/graph-cli). Si vous n'avez pas encore créé de subgraph, consultez [créer un subgraph](/developing/creating-a-subgraph).
-## Create a Hosted Service account
+## Créer un compte de service hébergé
-Before using the Hosted Service, create an account in our Hosted Service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [Hosted Service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow.
+Avant d'utiliser le service hébergé, créez un compte dans notre service hébergé. Vous aurez besoin d'un compte [Github](https://github.com/) pour cela ; si vous n’en avez pas, vous devez d’abord le créer. Ensuite, accédez au [Service hébergé](https://thegraph.com/hosted-service/), cliquez sur le bouton _'Inscrivez-vous avec Github'_ et terminez le flux d'autorisation de Github.
-## Store the Access Token
+## Stocker le jeton d'accès
-After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token.
+Après avoir créé un compte, accédez à votre [tableau de bord](https://thegraph.com/hosted-service/dashboard). Copiez le jeton d'accès affiché sur le tableau de bord et exécutez `graph auth --product hosted-service `. Le jeton d'accès sera ainsi stocké sur votre ordinateur.
Vous ne devez effectuer cette opération qu'une seule fois, ou si vous régénérez le jeton d'accès. -## Create a Subgraph on the Hosted Service +## Créer un subgraph sur le service hébergé -Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate: +Avant de déployer le subgraph, vous devez le créer dans The Graph Explorer. Accédez au [tableau de bord](https://thegraph.com/hosted-service/dashboard), cliquez sur le bouton _'Ajouter un subgraph'_ et remplissez les informations ci-dessous, le cas échéant : -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. +**Image** - Sélectionnez une image à utiliser comme image de prévisualisation et comme vignette pour le subgraph. -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ +**Nom du sous-graphe** - Avec le nom du compte sous lequel le sous-graphe est créé, ce champ définit également le nom de style `nom du compte/nom du sous-graphe` utilisé pour les déploiements et les points de terminaison GraphQL. _Ce champ ne peut pas être modifié ultérieurement._ -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ +**Compte** : le compte sous lequel le subgraph est créé. Il peut s'agir du compte d'un individu ou d'une organisation. _Les subgraphs ne pourront pas être déplacés ultérieurement entre les comptes._ -**Subtitle** - Text that will appear in subgraph cards. +**Sous-titre** : texte qui apparaîtra dans les cartes subgraphs. -**Description** - Description of the subgraph, visible on the subgraph details page. +**Description** - Description du Subgraph, visible sur la page de détails du subgraph. -**GitHub URL** - Link to the subgraph repository on GitHub. +**GitHub URL** - Lien vers le dépôt du subgraph sur GitHub. -**Hide** - Switching this on hides the subgraph in the Graph Explorer. +**Cacher** - Cette option permet de cacher le subgraph dans the Graph Explorer. -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Defining a Subgraph section](/developing/defining-a-subgraph). +Après avoir enregistré le nouveau subgraph, un écran s'affiche avec de l'aide sur la façon d'installer la CLI Graph, de générer l'échafaudage pour un nouveau subgraph et de déployer votre subgraph. Les deux premières étapes ont été couvertes dans la section [Définition d'un subgraph](/developing/defining-a-subgraph). -## Deploy a Subgraph on the Hosted Service +## Déployer un subgraph sur le service hébergé -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell the Graph Explorer to start indexing your subgraph using these files. +Le déploiement de votre subgraph téléchargera les fichiers du subgraph que vous avez construit avec `yarn build` vers IPFS et indiquera à the Graph Explorer de commencer à indexer votre subgraph à l'aide de ces fichiers. 
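À titre indicatif, voici une esquisse du flux complet côté terminal. Hypothèse : les scripts `codegen`, `build` et `deploy` sont présents dans le `package.json` de votre subgraph (c'est généralement le cas pour les projets générés par `graph init`) :

```sh
# Esquisse du flux habituel (hypothèse : scripts générés par `graph init`)
yarn codegen   # génère les types AssemblyScript à partir du schéma GraphQL et des ABIs
yarn build     # compile les mappings et produit les fichiers du subgraph
yarn deploy    # téléverse ces fichiers sur IPFS et demande au service hébergé de les indexer
```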
-You deploy the subgraph by running `yarn deploy`
+Vous déployez le subgraph en exécutant `yarn deploy`
-After deploying the subgraph, the Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours.
+Après avoir déployé le subgraph, The Graph Explorer affichera l'état de synchronisation de votre subgraph. Selon la quantité de données et le nombre d'événements qui doivent être extraits des blocs historiques, en commençant par le bloc de genèse, la synchronisation peut prendre de quelques minutes à plusieurs heures.
-The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined.
+L'état du subgraph passe à `Synced` une fois que Graph Node a extrait toutes les données des blocs historiques. Graph Node continuera à inspecter les blocs de votre subgraph au fur et à mesure que ces blocs seront minés.
-## Redeploying a Subgraph
+## Redéployer un Subgraph
-When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block.
+Lorsque vous apportez des modifications à la définition de votre subgraph, par exemple pour corriger un problème dans les mappages d'entités, exécutez à nouveau la commande `yarn deploy` ci-dessus pour déployer la version mise à jour de votre subgraph. Toute mise à jour d'un subgraph nécessite que Graph Node réindexe l'intégralité de votre subgraph, en recommençant à partir du bloc de genèse.
-If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing.
+Si votre subgraph précédemment déployé est toujours en statut `Syncing`, il sera immédiatement remplacé par la version nouvellement déployée. Si le subgraph précédemment déployé est déjà entièrement synchronisé, Graph Node marquera la nouvelle version déployée comme `Pending Version` (version en attente), la synchronisera en arrière-plan et ne remplacera la version actuellement déployée par la nouvelle qu'une fois la synchronisation de la nouvelle version terminée. Cela permet de s'assurer que vous disposez d'un subgraph avec lequel travailler pendant la synchronisation de la nouvelle version.
-## Deploying the subgraph to multiple networks
+## Déploiement du subgraph sur plusieurs réseaux
-In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different.
+Dans certains cas, vous souhaiterez déployer le même subgraph sur plusieurs réseaux sans dupliquer tout son code. Le principal défi qui en découle est que les adresses contractuelles sur ces réseaux sont différentes.
-### Using graph-cli
+### Utiliser graph-cli
-Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options:
+Les commandes `graph build` (depuis `v0.29.0`) et `graph deploy` (depuis `v0.32.0`) acceptent deux nouvelles options :
```sh
Options:
      ...
-      --network Network configuration to use from the networks config file
-      --network-file Networks config file path (default: "./networks.json")
+      --network Configuration du réseau à utiliser à partir du fichier de configuration des réseaux
+      --network-file Chemin du fichier de configuration des réseaux (par défaut : "./networks.json")
```
-You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development.
+Vous pouvez utiliser l'option `--network` pour spécifier une configuration de réseau à partir d'un fichier standard `json` (par défaut `networks.json`) afin de mettre à jour facilement votre subgraph pendant le développement.
-**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks.
+**Remarque :** La commande `init` générera désormais automatiquement un `networks.json` basé sur les informations fournies. Vous pourrez alors mettre à jour des réseaux existants ou ajouter des réseaux supplémentaires.
-If you don't have a `networks.json` file, you'll need to manually create one with the following structure:
+Si vous n'avez pas de fichier `networks.json`, vous devrez en créer un manuellement avec la structure suivante :
```json
{
@@ -100,7 +100,7 @@ If you don't have a `networks.json` file, you'll need to manually create one wit
**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option.
-Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `goerli` networks, and this is your `subgraph.yaml`:
+Supposons maintenant que vous souhaitiez pouvoir déployer votre subgraph sur les réseaux `mainnet` et `goerli`, et voici votre `subgraph.yaml` :
```yaml
# ...
@@ -115,7 +115,7 @@ dataSources:
      kind: ethereum/events
```
-This is what your networks config file should look like:
+Voici à quoi devrait ressembler votre fichier de configuration réseau :
```json
{
@@ -132,44 +132,44 @@ This is what your networks config file should look like:
}
```
-Now we can run one of the following commands:
+Nous pouvons maintenant exécuter l'une des commandes suivantes :
```sh
-# Using default networks.json file
+# Utilisation du fichier networks.json par défaut
yarn build --network goerli
-# Using custom named file
+# Utilisation d'un fichier personnalisé
yarn build --network goerli --network-file path/to/config
```
-The `build` command will update your `subgraph.yaml` with the `goerli` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this:
+La commande `build` mettra à jour votre `subgraph.yaml` avec la configuration `goerli` puis recompilera le subgraph. Votre fichier `subgraph.yaml` devrait maintenant ressembler à ceci :
```yaml
# ...
dataSources:
-  - kind: ethereum/contract
-    name: Gravity
-    network: goerli
+  - kind: ethereum/contract
+    name: Gravity
+    network: goerli
    source:
-      address: '0xabc...'
-      abi: Gravity
+      address: '0xabc...'
+      abi: Gravity
    mapping:
      kind: ethereum/events
```
-Now you are ready to `yarn deploy`.
+Vous êtes maintenant prêt à `yarn deploy`.
**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option:
```sh
-# Using default networks.json file
+# Utilisation du fichier networks.json par défaut
yarn deploy --network goerli
-# Using custom named file
+# Utilisation d'un fichier personnalisé
yarn deploy --network goerli --network-file path/to/config
```
-### Using subgraph.yaml template
+### Utilisation du modèle subgraph.yaml
One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/).
@@ -177,8 +177,8 @@ To illustrate this approach, let's assume a subgraph should be deployed to mainn
```json
{
-  "network": "mainnet",
-  "address": "0x123..."
+  "network": "mainnet",
+  "address": "0x123..."
}
```
@@ -186,8 +186,8 @@ and
```json
{
-  "network": "goerli",
-  "address": "0xabc..."
+  "network": "goerli",
+  "address": "0xabc..."
}
```
@@ -205,23 +205,23 @@ dataSources:
      address: '{{address}}'
      abi: Gravity
    mapping:
-      kind: ethereum/events
+      kind: ethereum/events
```
-In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`:
+Afin de générer un manifeste sur l'un ou l'autre des réseaux, vous pouvez ajouter deux commandes supplémentaires à `package.json` ainsi qu'une dépendance sur `mustache` :
```json
{
-  ...
-  "scripts": {
-    ...
-    "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml",
-    "prepare:goerli": "mustache config/goerli.json subgraph.template.yaml > subgraph.yaml"
-  },
-  "devDependencies": {
-    ...
-    "mustache": "^3.1.0"
-  }
+  ...
+  "scripts": {
+    ...
+    "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml",
+    "prepare:goerli": "mustache config/goerli.json subgraph.template.yaml > subgraph.yaml"
+  },
+  "devDependencies": {
+    ...
+    "mustache": "^3.1.0"
+  }
}
```
@@ -229,17 +229,17 @@ To deploy this subgraph for mainnet or Goerli you would now simply run one of th
```sh
# Mainnet:
-yarn prepare:mainnet && yarn deploy
+yarn prepare:mainnet && yarn deploy
-# Goerli:
-yarn prepare:goerli && yarn deploy
+# Goerli :
+yarn prepare:goerli && yarn deploy
```
-A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759).
+Un exemple concret de ce type d'action peut être trouvé [ici](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759).
**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well.
-## Checking subgraph health
+## Vérification de l'état des subgraphs
If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators.
@@ -247,45 +247,45 @@ Graph Node exposes a graphql endpoint which you can query to check the status of
```graphql
{
-  indexingStatusForCurrentVersion(subgraphName: "org/subgraph") {
-    synced
-    health
-    fatalError {
-      message
-      block {
-        number
-        hash
-      }
-      handler
-    }
-    chains {
-      chainHeadBlock {
-        number
-      }
-      latestBlock {
-        number
-      }
-    }
-  }
+  indexingStatusForCurrentVersion(subgraphName: "org/subgraph") {
+    synced
+    health
+    fatalError {
+      message
+      block {
+        number
+        hash
+      }
+      handler
+    }
+    chains {
+      chainHeadBlock {
+        number
+      }
+      latestBlock {
+        number
+      }
+    }
+  }
}
```
This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error.
-## Hosted service subgraph archive policy
+## Politique d'archivage des subgraphs des services hébergés
The Hosted Service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL.
-To improve the performance of the service for active subgraphs, the Hosted Service will archive subgraphs that are inactive.
+Afin d'améliorer les performances du service pour les subgraphs actifs, le service hébergé archivera les subgraphs inactifs.
-**A subgraph is defined as "inactive" if it was deployed to the Hosted Service more than 45 days ago, and if it has received 0 queries in the last 45 days.**
+**Un subgraph est défini comme "inactif" s'il a été déployé dans le service hébergé il y a plus de 45 jours et s'il n'a reçu aucune requête au cours des 45 derniers jours.**
-Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's Hosted Service graphQL playground. Developers can always redeploy an archived subgraph if it is required again.
+Les développeurs seront avertis par courriel si l'un de leurs subgraphs a été marqué comme inactif 7 jours avant qu'il ne soit supprimé. S'ils souhaitent "activer" leur subgraph, ils peuvent le faire en effectuant une requête dans le terrain de jeu graphQL du service hébergé de leur subgraph. Les développeurs peuvent toujours redéployer un subgraph archivé s'il est à nouveau nécessaire.
-## Subgraph Studio subgraph archive policy
+## Politique d'archivage des subgraphs de Subgraph Studio
-When a new version of a subgraph is deployed, the previous version is archived (deleted from the graph-node DB). This only happens if the previous version is not published to The Graph's decentralized network.
+Lorsqu'une nouvelle version d'un subgraph est déployée, la version précédente est archivée (supprimée de la base de données du graph-node). Cela ne se produit que si la version précédente n'est pas publiée sur le réseau décentralisé du Graph.
When a subgraph version isn’t queried for over 45 days, that version is archived.
-Every subgraph affected with this policy has an option to bring the version in question back.
+Chaque subgraph concerné par cette politique dispose d'une option de restauration de la version en question.
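Pour compléter la section « Vérification de l'état des subgraphs » ci-dessus, voici une esquisse d'appel à ce point de terminaison de statut. Hypothèses : l'URL ci-dessous est celle généralement utilisée pour le service hébergé ; pour un graph-node auto-hébergé, le même service de statut est habituellement exposé sur le port 8030 (`http://localhost:8030/graphql`). Remplacez `org/subgraph` par le nom de votre propre subgraph :

```sh
# Esquisse : interroge le point de terminaison de statut d'indexation (URL supposée pour le service hébergé).
curl -X POST https://api.thegraph.com/index-node/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatusForCurrentVersion(subgraphName: \"org/subgraph\") { synced health chains { chainHeadBlock { number } latestBlock { number } } } }"}'
```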
diff --git a/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx index 1e22151f4f45..0766cc93bb8b 100644 --- a/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/fr/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Déploiement d'un subgraphe dans le Studio Subgraph --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/fr/deploying/hosted-service.mdx b/website/pages/fr/deploying/hosted-service.mdx index ba28a2d3448d..40d0a6cd52e7 100644 --- a/website/pages/fr/deploying/hosted-service.mdx +++ b/website/pages/fr/deploying/hosted-service.mdx @@ -1,51 +1,62 @@ --- -title: What is the Hosted Service? +title: Qu'est-ce que le Service Héberge ? --- -> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> Veuillez noter que le service hébergé prendra fin en 2023, mais il restera disponible pour les réseaux qui ne sont pas pris en charge sur le réseau décentralisé. Les développeurs sont encouragés à [mettre à niveau leurs subgraphs vers The Graph Network](/cookbook/upgrading-a-subgraph) à mesure que davantage de réseaux sont pris en charge. Chaque réseau verra ses équivalents de service hébergé disparaître progressivement pour garantir que les développeurs disposent de suffisamment de temps pour mettre à niveau les subgraphs vers le réseau décentralisé. En savoir plus sur la cessation du service hébergé [ici](https://thegraph.com/blog/sunsetting-hosted-service). -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). +Cette section vous guidera dans le déploiement d'un subgraph sur le [service hébergé](https://thegraph.com/hosted-service/). -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +Si vous n'avez pas de compte sur le service hébergé, vous pouvez vous inscrire avec votre compte GitHub. Une fois authentifié, vous pouvez commencer à créer des subgraphs via l'interface utilisateur et les déployer depuis votre terminal. Le service hébergé supporte un certain nombre de réseaux, tels que Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, et plus encore. -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). 
+Pour une liste complète, voir [Réseaux pris en charge](/developing/supported-networks/#hosted-service).
-## Créer un subgraphe
+## Créer un subgraph
-Suivez d'abord les instructions [ici](/developing/defining-a-subgraph) pour installer la CLI Graphe. Créez un subraphe en passant `graph init --product hosted-service`
+Suivez d'abord les instructions [ici](/developing/defining-a-subgraph) pour installer la CLI Graph. Créez un subgraph en passant `graph init --product hosted-service`
-### À partir d'un contrat existant
+### À partir d'un contrat existant
-If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service.
+Si vous disposez déjà d'un contrat intelligent déployé sur le réseau de votre choix, l'amorçage d'un nouveau subgraph à partir de ce contrat peut être un bon moyen de commencer à utiliser le service hébergé.
-Utilisez cette commande pour créer un subgraphe qui indexe tous les événements d'un contrat existant.Cela tentera de récupérer le contrat ABI depuis [Etherscan](https://etherscan.io/).
+Utilisez cette commande pour créer un subgraph qui indexe tous les événements d'un contrat existant. Cela tentera de récupérer l'ABI du contrat depuis [Etherscan](https://etherscan.io/).
```sh
graph init \
-  --product hosted-service
-  --from-contract \
-  / []
+  --product hosted-service
+  --from-contract \
+  / []
```
-En outre, vous pouvez utiliser les arguments optionnels suivants. . Si l'ABI ne peut pas être extrait d'Etherscan, il revient à demander un chemin de fichier local. S'il manque des arguments facultatifs dans la commande, celle-ci vous guide dans un formulaire interactif.
+En outre, vous pouvez utiliser les arguments optionnels suivants. Si l'ABI ne peut pas être extrait d'Etherscan, il revient à demander un chemin de fichier local. S'il manque des arguments facultatifs dans la commande, celle-ci vous guide dans un formulaire interactif.
```sh
---network \
---abi \
+--network \
+--abi \
```
-The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.**
+Le `` dans ce cas est le nom de votre utilisateur ou de votre organisation GitHub, `` est le nom de votre subgraph et `<DIRECTORY>` est le nom facultatif du répertoire dans lequel `graph init` placera l'exemple de manifeste de subgraph. Le `` est l'adresse de votre contrat existant. `` est le nom du réseau sur lequel le contrat réside. `` est un chemin local vers un fichier ABI de contrat. **`--network` et `--abi` sont facultatifs.**
-### À partir d'un exemple de subgraphe
+### À partir d'un exemple de subgraph
-Le second mode `graph init` prend en charge est la création d'un nouveau projet à partir d'un exemple de subgraphe. La commande suivante le fait :
+Le second mode pris en charge par `graph init` est la création d'un nouveau projet à partir d'un exemple de subgraph.
La commande suivante le fait :
```
-graph init --from-example --product hosted-service / []
+graph init --from-example --product hosted-service / []
```
-Le subgraphe d'exemple est basé sur le contrat Gravity de Dani Grant qui gère les avatars d'utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. Le subgraphe gère ces événements en créant des entités `Gravatar` dans le stockage des nœuds de The Graph et en veillant à ce qu'elles soient mises à jour en fonction des événements. Continuez sur le [manifeste de subgraphes](/developing/creating-a-subgraph#the-subgraph-manifest) pour mieux comprendre les événements de vos contrats intelligents auxquels prêter attention, les mappages, etc.
+Le subgraph d'exemple est basé sur le contrat Gravity de Dani Grant qui gère les avatars d'utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. Le subgraph gère ces événements en créant des entités `Gravatar` dans le stockage des nœuds de The Graph et en veillant à ce qu'elles soient mises à jour en fonction des événements. Continuez sur le [manifeste de subgraphs](/developing/creating-a-subgraph#the-subgraph-manifest) pour mieux comprendre les événements de vos contrats intelligents auxquels prêter attention, les mappages, etc.
-## Supported Networks on the hosted service
+### From a Proxy Contract
+
+To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below.
+
+```sh
+graph init \
+  --product hosted-service
+  --from-contract \
+  / []
+```
+
+## Réseaux pris en charge par le service hébergé
Vous pouvez trouver la liste des réseaux supportés [ici](/developing/supported-networks).
diff --git a/website/pages/fr/deploying/subgraph-studio-faqs.mdx b/website/pages/fr/deploying/subgraph-studio-faqs.mdx
index 7e98deb7f396..aafc961eef75 100644
--- a/website/pages/fr/deploying/subgraph-studio-faqs.mdx
+++ b/website/pages/fr/deploying/subgraph-studio-faqs.mdx
@@ -1,5 +1,5 @@
---
-title: FAQ Subgraph Studio
+title: Subgraph Studio FAQ
---
## 1. What is Subgraph Studio?
@@ -22,10 +22,10 @@ Après avoir créé une clé API, dans la section Sécurité, vous pouvez défin
Oui, les subgraphes qui ont été publiés sur le réseau principal peuvent être transférés vers un nouveau portefeuille ou un Multisig. Vous pouvez le faire en cliquant sur les trois points à côté du bouton "Publier" sur la page de détails du subgraphe et en sélectionnant "Transférer la propriété".
-Remarquez que vous ne pourrez plus voir ou modifier le subgraphe dans Studio une fois qu'il aura été transféré.
+Notez que vous ne pourrez plus voir ou modifier le subgraph dans Studio une fois qu'il aura été transféré.
## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use?
You can find the query URL of each subgraph in the Subgraph Details section of The Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in the Subgraph Studio.
-N'oubliez pas que vous pouvez créer une clé API et interroger tout subgraphe déployé sur le réseau, même si vous en étés le propriétaire. Les requêtes via la nouvelle clé API sont des requêtes payantes, comme toutes les autres sur le réseau. +N'oubliez pas que vous pouvez créer une clé API et interroger n'importe quel subgraph publié sur le réseau, même si vous créez vous-même un subgraph. Ces requêtes via la nouvelle clé API, sont des requêtes payantes comme n'importe quelle autre sur le réseau. diff --git a/website/pages/fr/deploying/subgraph-studio.mdx b/website/pages/fr/deploying/subgraph-studio.mdx index 37370271e294..f02382247e5f 100644 --- a/website/pages/fr/deploying/subgraph-studio.mdx +++ b/website/pages/fr/deploying/subgraph-studio.mdx @@ -1,95 +1,89 @@ --- -title: How to Use the Subgraph Studio +title: Déployer dans Subgraph Studio --- -Welcome to your new launchpad 👩🏽‍🚀 +Bienvenue dans votre nouveau site de Lancement 👩🏽‍🚀 -The Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). +Le Subgraph Studio est votre endroit pour construire et créer des subgraphs, ajouter des métadonnées et les publier dans le nouvel explorateur décentralisé (en savoir plus à ce sujet [ici](/network/explorer)). -What you can do in the Subgraph Studio: +Ce que vous pouvez faire dans Subgraph Studio : - Créer un subgraphe via l'interface utilisateur de Studio - Déployer un subgraphe à l'aide de la CLI -- Publier un subgraphe avec l'interface utilisateur de Studio -- Test it in the playground -- Integrate it in staging using the query URL -- Créez et gérez vos clés API pour des subgraphes spécifiques +- Publier un subgraph avec l'interface utilisateur de Studio +- Testez-le dans le playground +- Intégrez-le dans l'environnement de préproduction à l'aide de l'URL de requête +- Créez et gérez vos clés API pour des subgraphs spécifiques -Here in the Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. +Ici, dans le Studio Subgraph, vous avez un contrôle total sur vos subgraphs. Non seulement vous pouvez tester vos subgraphs avant de les publier, mais vous pouvez également restreindre vos clés API à des domaines spécifiques et autoriser uniquement certains indexeurs à effectuer des requêtes à partir de leurs clés API. -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: +Les requêtes des subgraphs génère des frais d'interrogation, utilisés pour récompenser les indexeurs [Indexeurs](/network/indexing) sur le réseau Graph. Si vous êtes un développeur dapp ou un développeur de subgraphs, le Studio vous permettra de créer de meilleurs subgraphs pour alimenter vos subgraphs ou celles de votre communauté. 
Le Studio est composé de 5 parties principales : - Contrôles de votre compte utilisateur -- Une liste de subgraphes que vous avez créés -- Une section pour gérer, afficher les détails et visualiser l'état d'un subgraphe spécifique -- Une section pour gérer vos clés API dont vous aurez besoin pour interroger un subgraphe +- Une liste de subgraphs que vous avez créés +- Une section pour gérer, afficher les détails et visualiser l'état d'un subgraph spécifique +- Une section pour gérer vos clés API dont vous aurez besoin pour interroger un subgraph - Une section pour gérer votre facturation ## Comment créer votre compte 1. Connectez-vous avec votre portefeuille - vous pouvez le faire via MetaMask ou WalletConnect -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. +1. Dès que vous êtes connecté, vous verrez votre clé de déploiement unique sur la page d'accueil de votre compte. Cela vous permettra soit de publier vos subgraphs, soit de gérer vos clés API + facturation. Vous disposerez d'une clé de déploiement unique qui pourra être regénérée si vous pensez qu'elle a été compromise. -## How to Create your Subgraph in Subgraph Studio +## Comment créer un subgraph dans Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: + -- Nom de votre subgraphe -- Image -- Description -- Catégories (par exemple `DeFi`, `NFTs`, `Gouvernance`) -- Site web +## Compatibilité des subgraphs avec le réseau de The Graph -## Subgraph Compatibility with The Graph Network +Le réseau The Graph n'est pas encore en mesure de prendre en charge toutes les sources de données & fonctionnalités disponibles sur le service hébergé. Pour être pris en charge par les indexeurs sur le réseau, les subgraphs doivent : -The Graph Network is not yet able to support all of the data-sources & features available on the Hosted Service. In order to be supported by Indexers on the network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: +- Indexer un [réseau supporté](/developing/supported-networks) +- Ne doit utiliser aucune des fonctionnalités suivantes : - ipfs.cat & ipfs.map - Erreurs non fatales - - Greffage + - La greffe -More features & networks will be added to The Graph Network incrementally. +Plus de fonctions & de réseaux seront ajoutés progressivement au réseau Graph. -### Flux du cycle de vie des subgraphes +### Flux du cycle de vie des subgraphs ![Flux du cycle de vie des subgraphes](/img/subgraph-lifecycle.png) -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (pst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. 
+Après avoir créé votre subgraph, vous pourrez le déployer à l'aide de la [CLI](https://github.com/graphprotocol/graph-cli) ou de l'interface de ligne de commande. Le déploiement d'un subgraph avec la CLI poussera le subgraph vers le Studio où vous pourrez tester les subgraphs à l'aide du playground (aire de jeu). Cela vous permettra éventuellement de publier sur le réseau The Graph. Pour plus d'informations sur la configuration de la CLI, [consultez ceci](/developing/defining-a-subgraph#install-the-graph-cli) (P.S., assurez-vous d'avoir votre clé de déploiement à portée de main ). N'oubliez pas que déployer n'est **pas la même chose que** publier. Lorsque vous déployez un subgraph, vous le transférez simplement vers le Studio pour pouvoir le tester. Par contre, lorsque vous publiez un subgraph, vous le publiez sur la chaîne. -## Testing your Subgraph in Subgraph Studio +## Tester votre subgraph dans Subgraph Studio -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. +Si vous voulez tester votre subgraph avant de le publier sur le réseau, vous pouvez le faire dans le subgraph **Playground (aire de jeu)** ou consulter vos journaux de bord. Les journaux de subgraph vous indiqueront **où** votre subgraph échoue. -## Publish your Subgraph in Subgraph Studio +## Publiez votre subgraph dans Subgraph Studio -You’ve made it this far - congrats! +Vous êtes arrivé jusqu'ici - félicitations ! -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [blog](https://thegraph.com/blog/building-with-subgraph-studio). +Pour publier votre subgraph avec succès, vous devrez suivre les étapes décrites dans ce [blog](https://thegraph.com/blog/building-with-subgraph-studio). -Check out the video overview below as well: +Jetez un œil à l'aperçu vidéo ci-dessous également : -Remember, while you’re going through your publishing flow, you’ll be able to push to either mainnet or Goerli. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Goerli, which is free to do. This will allow you to see how the subgraph will work in The Graph Explorer and will allow you to test curation elements. +Rappelez-vous, pendant que vous suivez le processus de publication, vous pourrez publier soit sur mainnet, soit sur Goerli. Si vous êtes un développeur de subgraphs débutant, nous vous recommandons vivement de commencer par publier sur Goerli, qui est gratuit. Cela vous permettra de voir comment le subgraph fonctionnera dans The Graph Explorer et vous permettra de tester les éléments de curation. -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! +Les indexeurs doivent soumettre des enregistrements obligatoires de preuve d'indexation à partir d'un hash de bloc spécifique. Parce que la publication d'un subgraph est une action effectuée sur la blockchain, n'oubliez pas que la transaction peut prendre jusqu'à quelques minutes pour être complétée. Toute adresse que vous utilisez pour publier le contrat sera la seule à pouvoir publier les futures versions. Choisissez judicieusement ! 
-Les subgraphes avec des signaux de curation sont présentés aux indexeurs afin qu'ils puissent être indexés sur le réseau décentralisé. Vous pouvez publier des subgraphes et des signaux en une seule transaction, ce qui vous permet de générer le premier signal de curation sur le subraphe et d'économiser sur les coûts de gaz. En ajoutant votre signal au signal fourni ultérieurement par les curateurs, votre subgraphe aura également plus de chances de répondre aux requêtes. +Les subgraphes avec des signaux de curation sont présentés aux indexeurs afin qu'ils puissent être indexés sur le réseau décentralisé. Vous pouvez publier des subgraphs et des signaux en une seule transaction, ce qui vous permet de générer le premier signal de curation sur le subraphe et d'économiser sur les coûts de gaz. En ajoutant votre signal au signal fourni ultérieurement par les curateurs, votre subgraph aura également plus de chances de répondre aux requêtes. -**Maintenant que vous avez publié votre subgraphe, voyons comment vous allez le gérer régulièrement.** Notez que vous ne pouvez pas publier votre subgraphe sur le réseau si la synchronisation a échoué. Cela est généralement dû au fait que le subgraphe contient des bogues - les journaux vous indiqueront où se trouvent ces problèmes! +**Maintenant que vous avez publié votre subgraph, voyons comment vous allez le gérer régulièrement.** Notez que vous ne pouvez pas publier votre subgraph sur le réseau si la synchronisation a échoué. Cela est généralement dû au fait que le subgraphe contient des bogues - les journaux vous indiqueront où se trouvent ces problèmes! -## Versionner votre subgraphe avec la CLI +## Versionner votre subgraph avec la CLI -Les développeurs peuvent souhaiter mettre à jour leur subgraphe, pour diverses raisons. Lorsque c'est le cas, vous pouvez déployer une nouvelle version de votre subgraphe dans le Studio à l'aide de la CLI (il ne sera que privé à ce point) et si vous en êtes satisfait, vous pouvez publier ce nouveau déploiement sur The Graph Explorer. Cela créera une nouvelle version de votre subgraphe sur laquelle les curateurs pourront commencer à signaler et les indexeurs pourront indexer cette nouvelle version. +Les développeurs peuvent souhaiter mettre à jour leur subgraph, pour diverses raisons. Lorsque c'est le cas, vous pouvez déployer une nouvelle version de votre subgraph dans le Studio à l'aide de la CLI (" il ne sera que privé à ce point) et si vous en êtes satisfait, vous pouvez publier ce nouveau déploiement sur The Graph Explorer. Cela créera une nouvelle version de votre subgraph sur laquelle les curateurs pourront commencer à signaler et les indexeurs pourront indexer cette nouvelle version. -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in The Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +Jusqu'à récemment, les développeurs étaient obligés de déployer et de publier une nouvelle version de leur subgraph dans l'explorateur pour mettre à jour les métadonnées de leurs subgraphs. 
Désormais, les développeurs peuvent mettre à jour les métadonnées de leurs subgraphs **sans publier de nouvelle version**. Les développeurs peuvent mettre à jour les détails de leurs subgraphs dans le Studio (sous la photo de profil, le nom, la description, etc.) en vérifiant une option appelée **. Mettre à jour les détails** dans l'explorateur de subgraphs. Si cette case est cochée, une transaction on-chain sera générée pour mettre à jour les détails du subgraph dans l'explorateur sans avoir à publier une nouvelle version avec un nouveau déploiement. -Veuillez noter qu'il y a des coûts associés à la publication d'une nouvelle version d'un subgraphe sur le réseau. En plus des frais de transaction, les développeurs doivent également financer une partie de la taxe de curation sur le signal automigrant. Vous ne pouvez pas publier une nouvelle version de votre subgraphe si les curateurs ne l'ont pas signalé. Pour plus d'informations sur les risques de curation, veuillez en savoir plus [ici](/network/curating). +Veuillez noter qu'il y a des coûts associés à la publication d'une nouvelle version d'un subgraph sur le réseau. En plus des frais de transaction, les développeurs doivent également financer une partie de la taxe de curation sur le signal d'automigration. Vous ne pouvez pas publier une nouvelle version de votre subgraph si les curateurs ne l'ont pas signalé. Pour plus d'informations sur les risques de curation, veuillez en savoir plus [ici](/network/curating). -### Archivage automatique des versions de subgraphes +### Archivage automatique des versions de subgraphs -Chaque fois que vous déployez une nouvelle version de subgraphe dans Subgraph Studio, la version précédente sera archivée. Les versions archivées ne seront pas indexées/synchronisées et ne pourront donc pas être interrogées. Vous pouvez désarchiver une version archivée de votre sous-graphe dans l'interface utilisateur de Studio. Veuillez noter que les versions précédentes des subgraphes non publiés déployés dans le Studio seront automatiquement archivées. +Chaque fois que vous déployez une nouvelle version de subgraph dans Subgraph Studio, la version précédente sera archivée. Les versions archivées ne seront pas indexées/synchronisées et ne pourront donc pas être interrogées. Vous pouvez désarchiver une version archivée de votre subgraph dans l'interface utilisateur de Studio. Veuillez noter que les versions précédentes des subgraphs non publiés déployés dans le Studio seront automatiquement archivées. -![Subgraph Studio - Unarchive](/img/Unarchive.png) +![Subgraph Studio - Désarchiver](/img/Unarchive.png) diff --git a/website/pages/fr/developing/creating-a-subgraph.mdx b/website/pages/fr/developing/creating-a-subgraph.mdx index 05756b8dc7be..6dd9f640df22 100644 --- a/website/pages/fr/developing/creating-a-subgraph.mdx +++ b/website/pages/fr/developing/creating-a-subgraph.mdx @@ -1,71 +1,71 @@ --- -title: Creating a Subgraph +title: Comment créer un subgraph --- -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. +Un subgraph récupère des données depuis une blockchain, les manipule puis les enregistre afin que ces données soient aisément accessibles via GraphQL. 
![Defining a Subgraph](/img/defining-a-subgraph.png)
-The subgraph definition consists of a few files:
+Un subgraph se constitue des fichiers suivants :
-- `subgraph.yaml`: a YAML file containing the subgraph manifest
+- `subgraph.yaml` : un fichier YAML qui contient le manifeste du subgraph
-- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL
+- `schema.graphql`: un schéma GraphQL qui définit les données stockées pour votre subgraph et comment les interroger via GraphQL
-- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial)
+- `Mappages AssemblyScript` : du code [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) qui traduit les données d'événement en entités définies dans votre schéma (par exemple `mapping.ts` dans ce tutoriel)
> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network).
-Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph.
+Avant d'aller en détail à propos du contenu du manifeste, installons l'[interface de ligne de commande de The Graph](https://github.com/graphprotocol/graph-cli). Nous en aurons besoin pour la création et le déploiement du subgraph.
-## Install the Graph CLI
+## Installation du Graph CLI
-The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows.
+La CLI Graph est écrite en JavaScript et vous devrez installer soit `yarn` soit `npm` pour l'utiliser ; on suppose que vous disposez de `yarn` dans ce qui suit.
-Once you have `yarn`, install the Graph CLI by running
+Une fois que vous avez `yarn`, installez la CLI Graph en exécutant
-**Installer avec yarn :**
+**Installation avec yarn :**
```bash
-yarn global add @graphprotocol/graph-cli
+yarn global add @graphprotocol/graph-cli
```
-**Installer avec npm :**
+**Installation avec npm :**
```bash
npm install -g @graphprotocol/graph-cli
```
-Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started.
+Une fois installée, la commande `graph init` est utilisée pour créer un nouveau projet soit depuis un contrat existant, soit depuis un exemple de subgraph. Il vous est possible de créer un subgraph sur Subgraph Studio grâce à `graph init --product subgraph-studio`. Si vous avez déjà déployé un contrat sur le réseau cible, il peut s'avérer judicieux d'utiliser ce contrat comme base pour votre subgraph.
-## From An Existing Contract
+## D'un contrat existant
-The following command creates a subgraph that indexes all events of an existing contract.
It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form.
+La commande suivante crée un subgraph qui indexe tous les événements d'un contrat existant. Elle tente de récupérer l'ABI du contrat depuis Etherscan et, en cas d'échec, demande un chemin de fichier local. Si l'un des arguments facultatifs manque, elle vous guide à travers un formulaire interactif.

```sh
graph init \
  --product subgraph-studio
  --from-contract \
  [--network ] \
  [--abi ] \
  []
```

-The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page.
+Le `` est l'ID de votre subgraph dans Subgraph Studio ; il se trouve sur la page de détails de votre subgraph.

-## From An Example Subgraph
+## À partir d'un exemple de subgraph

-Le second mode `graph init` prend en charge est la création d'un nouveau projet à partir d'un exemple de subgraphe. La commande suivante le fait :
+Le second mode pris en charge par `graph init` est la création d'un nouveau projet à partir d'un subgraph d'exemple. La commande suivante le fait :

```sh
graph init --studio
```

-The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example.
+Le subgraph d'exemple est basé sur le contrat Gravity de Dani Grant, qui gère les avatars d'utilisateurs et émet des événements `NewGravatar` ou `UpdateGravatar` chaque fois que des avatars sont créés ou mis à jour. Le subgraph gère ces événements en écrivant des entités `Gravatar` dans le magasin de Graph Node et en veillant à ce qu'elles soient mises à jour en fonction des événements. Les sections suivantes décrivent les fichiers qui composent le manifeste de subgraph pour cet exemple.

-## Add New dataSources To An Existing Subgraph
+## Ajouter de nouvelles sources de données à un subgraph existant

-Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command.
+Depuis `v0.31.0`, le `graph-cli` prend en charge l'ajout de nouvelles sources de données à un subgraph existant via la commande `graph add`.

```sh
graph add
[] @@ -78,22 +78,22 @@ Options: --network-file Networks config file path (default: "./networks.json") ``` -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. +La commande `add` récupérera l'ABI depuis Etherscan (sauf si un chemin ABI est spécifié avec l'option `--abi`) et créera une nouvelle `dataSource` de la même manière que la commande `graph init` crée un `dataSource` `--from-contract`, mettant à jour le schéma et les mappages en conséquence. -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: +L'option `--merge-entities` identifie la façon dont le développeur souhaite gérer les conflits de noms d'`entité` et d'`événement` : -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. +- Si `true` : le nouveau `dataSource` doit utiliser les `eventHandlers` & `entités`. +- Si `false` : une nouvelle entité & le gestionnaire d'événements doit être créé avec `${dataSourceName}{EventName}`. -The contract `address` will be written to the `networks.json` for the relevant network. +L'`adresse` du contrat sera écrite dans le `networks.json` du réseau concerné. -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. +> **Remarque :** Lorsque vous utilisez la Cli interactive, après avoir exécuté avec succès `graph init`, vous serez invité à ajouter une nouvelle `dataSource`. -## The Subgraph Manifest +## Le manifeste du subgraph -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +Le manifeste du subgraph `subgraph.yaml` définit les contrats intelligents que votre subgraph indexe, les événements de ces contrats auxquels prêter attention et comment mapper les données d'événements aux entités que Graph Node stocke et permet d'interroger. La spécification complète des manifestes de subgraphs peut être trouvée [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph, `subgraph.yaml` is: +Pour l'exemple de subgraph, `subgraph.yaml` est : ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -134,93 +142,97 @@ dataSources: file: ./src/mapping.ts ``` -The important entries to update for the manifest are: +Les entrées importantes à mettre à jour pour le manifeste sont : -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. 
This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. -- `features`: a list of all used [feature](#experimental-features) names. +- `fonctionnalités` : une liste de tous les noms de [fonctionnalités](#experimental-features) utilisés. - `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source` : l'adresse du contrat intelligent, les sources du subgraph, et l'Abi du contrat intelligent à utiliser. L'adresse est facultative ; son omission permet d'indexer les événements correspondants de tous les contrats. + +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. +- `dataSources.context` : paires clé-valeur qui peuvent être utilisées dans les mappages de subgraphs. Prend en charge différents types de données comme `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Octets`, `Liste` et `BigInt`. Chaque variable doit spécifier son `type` et ses `données`. Ces variables de contexte sont ensuite accessibles dans les fichiers de mappage, offrant des options plus configurables pour le développement de subgraphs. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.mapping.entities` : les entités que la source de données écrit dans le magasin. Le schéma de chaque entité est défini dans le fichier schema.graphql. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.abis` : un ou plusieurs fichiers ABI nommés pour le contrat source ainsi que tout autre contrat intelligent avec lequel vous interagissez à partir des mappages. + +- `dataSources.mapping.eventHandlers` : répertorie les événements de contrat intelligent auxquels ce subgraph réagit et les gestionnaires du mappage —./src/mapping.ts dans l'exemple qui transforment ces événements en entités dans le magasin. - `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. - `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. 
Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +Un seul subgraph peut indexer les données de plusieurs contrats intelligents. Ajoutez une entrée pour chaque contrat à partir duquel les données doivent être indexées dans le tableau `dataSources`. -The triggers for a data source within a block are ordered using the following process: +Les déclencheurs d'une source de données au sein d'un bloc sont classés à l'aide du processus suivant : -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. +1. Les déclencheurs d'événements et d'appels sont d'abord classés par index de transaction au sein du bloc. +2. Les déclencheurs d'événements et d'appels au sein d'une même transaction sont classés selon une convention : les déclencheurs d'événements d'abord, puis les déclencheurs d'appel, chaque type respectant l'ordre dans lequel ils sont définis dans le manifeste. 3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. -These ordering rules are subject to change. +Ces règles de commande sont susceptibles de changer. ### Getting The ABIs -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: +Le(s) fichier(s) ABI doivent correspondre à votre(vos) contrat(s). Il existe plusieurs façons d'obtenir des fichiers ABI : -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. +- Si vous construisez votre propre projet, vous aurez probablement accès à vos ABI les plus récents. +- Si vous créez un subgraph pour un projet public, vous pouvez télécharger ce projet sur votre ordinateur et obtenir l'ABI en utilisant la [`compilation truffle `](https://truffleframework.com/docs/truffle/overview) ou en utilisant solc pour compiler. +- Vous pouvez également trouver l'ABI sur [Etherscan](https://etherscan.io/), mais ce n'est pas toujours fiable, car l'ABI qui y est téléchargé peut être obsolète. Assurez-vous d'avoir le bon ABI, sinon l'exécution de votre subgraph échouera. ## The GraphQL Schema -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. +Le schéma de votre subgraph se trouve dans le fichier `schema.graphql`. Les schémas GraphQL sont définis à l'aide du langage de définition d'interface GraphQL. Si vous n'avez jamais écrit de schéma GraphQL, il est recommandé de consulter cette introduction sur le système de types GraphQL. La documentation de référence pour les schémas GraphQL est disponible dans la section [API GraphQL](/querying/graphql-api). 
-## Defining Entities
+## Définir des entités

-Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions.
+Avant de définir des entités, il est important de prendre du recul et de réfléchir à la manière dont vos données sont structurées et liées. Toutes les requêtes seront effectuées sur le modèle de données défini dans le schéma du subgraph et les entités indexées par le subgraph. Pour cette raison, il est bon de définir le schéma du subgraph d'une manière qui correspond aux besoins de votre dapp. Il peut être utile d'imaginer les entités comme des « objets contenant des données », plutôt que comme des événements ou des fonctions.

-With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible.
+Avec The Graph, vous définissez simplement les types d'entités dans `schema.graphql`, et Graph Node générera des champs de niveau supérieur pour interroger des instances uniques et des collections de ce type d'entité. Chaque type qui doit être une entité doit être annoté avec une directive `@entity`. Par défaut, les entités sont mutables, ce qui signifie que les mappages peuvent charger des entités existantes, les modifier et stocker une nouvelle version de cette entité. La mutabilité a un prix et, pour les types d'entités dont on sait qu'ils ne seront jamais modifiés, par exemple parce qu'ils contiennent simplement des données extraites telles quelles de la chaîne, il est recommandé de les marquer comme immuables avec `@entity(immutable: true)`. Les mappages peuvent apporter des modifications aux entités immuables tant que ces modifications se produisent dans le même bloc que celui où l'entité a été créée. Les entités immuables sont beaucoup plus rapides à écrire et à interroger et doivent donc être utilisées autant que possible.

-### Good Example
+### Bon exemple

-The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined.
+L'entité `Gravatar` ci-dessous est structurée autour d'un objet Gravatar et constitue un bon exemple de la façon dont une entité pourrait être définie.

```graphql
type Gravatar @entity(immutable: true) {
  id: Bytes!
  owner: Bytes
  displayName: String
  imageUrl: String
  accepted: Boolean
}
```

### Bad Example

-The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1.
+Les exemples d'entités `GravatarAccepted` et `GravatarDeclined` ci-dessous sont basés sur des événements. Il n'est pas recommandé de mapper des événements ou des appels de fonction à des entités 1:1.

```graphql
type GravatarAccepted @entity {
  id: Bytes!
  owner: Bytes
  displayName: String
  imageUrl: String
}

type GravatarDeclined @entity {
  id: Bytes!
  owner: Bytes
  displayName: String
  imageUrl: String
}
```

-### Optional and Required Fields
+### Champs facultatifs et obligatoires

-Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field:
+Les champs d'entité peuvent être définis comme obligatoires ou facultatifs. Les champs obligatoires sont indiqués par le `!` dans le schéma. Si un champ obligatoire n'est pas défini dans le mappage, vous recevrez cette erreur lors de l'interrogation du champ :

```
Null value resolved for non-null field 'name'
```

@@ -230,20 +242,21 @@ Each entity must have an `id` field, which must be of type `Bytes!` or `String!`

For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`.

-### Built-In Scalar Types
+### Types scalaires intégrés

-#### GraphQL Supported Scalars
+#### Scalaires pris en charge par GraphQL

-We support the following scalars in our GraphQL API:
+Nous prenons en charge les scalaires suivants dans notre API GraphQL :

| Type | Description |
| --- | --- |
-| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. |
-| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. |
-| `Boolean` | Scalar for `boolean` values. |
-| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. |
-| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. |
-| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. |
+| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. |
+| `String` | Scalaire pour les valeurs `string`. Les caractères nuls ne sont pas pris en charge et sont automatiquement supprimés. |
+| `Boolean` | Scalaire pour les valeurs `boolean`. |
+| `Int` | La spécification GraphQL définit `Int` pour avoir une taille de 32 octets. |
+| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. |
+| `BigInt` | Grands entiers. Utilisé pour les types `uint32`, `int64`, `uint64`, ..., `uint256` d'Ethereum. Remarque : tout ce qui se trouve en dessous de `uint32`, tel que `int32`, `uint24` ou `int8`, est représenté par `i32`. |

Accédez à [API GraphQL - Requêtes](/querying/graphql-api#queries) pour une description de l'API de recherche en texte intégral et d'autres exemples d'utilisation.

```graphql
query {
  bandSearch(text: "breaks & electro & detroit") {
    id
    name
    description
    wallet
  }
}
```

-> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest.
+> **[Gestion des fonctionnalités](#experimental-features) :** À partir de `specVersion` `0.0.4`, `fullTextSearch` doit être déclaré sous la section `features` du manifeste du subgraph.

### Languages supported

Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token".

-Supported language dictionaries:
-
-| Code | Dictionary |
-| ------ | ---------- |
-| simple | General |
-| da | Danish |
-| nl | Dutch |
-| en | English |
-| fi | Finnish |
-| fr | French |
-| de | German |
-| hu | Hungarian |
-| it | Italian |
-| no | Norwegian |
-| pt | Portuguese |
-| ro | Romanian |
-| ru | Russian |
-| es | Spanish |
-| sv | Swedish |
-| tr | Turkish |
+Dictionnaires de langues pris en charge :
+
+| Code | Dictionnaire |
+| ------ | ------------ |
+| simple | Général |
+| da | Danois |
+| nl | Néerlandais |
+| en | Anglais |
+| fi | Finlandais |
+| fr | Français |
+| de | Allemand |
+| hu | Hongrois |
+| it | Italien |
+| no | Norvégien |
+| pt | Portugais |
+| ro | Roumain |
+| ru | Russe |
+| es | Espagnol |
+| sv | Suédois |
+| tr | Turc |

-### Ranking Algorithms
-
-Supported algorithms for ordering results:
-
-| Algorithm | Description |
-| ------------- | ----------------------------------------------------------------------- |
-| rank | Use the match quality (0-1) of the fulltext query to order the results. |
-| proximityRank | Similar to rank but also includes the proximity of the matches. |
+### Algorithmes de classement
+
+Algorithmes pris en charge pour classer les résultats :
+
+| Algorithme | Description |
+| --- | --- |
+| rank | Utilise la qualité de correspondance (0-1) de la requête en texte intégral pour classer les résultats. |
+| proximityRank | Similaire à rank, mais prend aussi en compte la proximité des correspondances. |

-## Writing Mappings
+## Écriture des mappages

-The mappings take data from a particular source and transform it into entities that are defined within your schema.
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. +Les mappages prennent les données d'une source particulière et les transforment en entités définies dans votre schéma. Les mappages sont écrits dans un sous-ensemble de [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) appelé [AssemblyScript](https : //github.com/AssemblyScript/assemblyscript/wiki) qui peut être compilé en WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript est plus strict que TypeScript normal, mais fournit une syntaxe familière. -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. +Pour chaque gestionnaire d'événements défini dans `subgraph.yaml` sous `mapping.eventHandlers`, créez une fonction exportée du même nom. Chaque gestionnaire doit accepter un seul paramètre appelé `event` avec un type correspondant au nom de l'événement qui est géré. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +Dans le subgraph d'exemple, `src/mapping.ts` contient des gestionnaires pour les événements `NewGravatar` et `UpdatedGravatar` : ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -506,11 +519,11 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. +Le premier gestionnaire prend un événement `NewGravatar` et crée une nouvelle entité `Gravatar` avec `new Gravatar(event.params.id.toHex())`, remplissant les champs d'entité en utilisant les paramètres d'événement correspondants. Cette instance d'entité est représentée par la variable `gravatar`, avec une valeur d'identifiant de `event.params.id.toHex()`. -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. +Le deuxième gestionnaire essaie de charger le `Gravatar` existant à partir du magasin Graph Node. S'il n'existe pas encore, il est créé à la demande. L'entité est ensuite mise à jour pour correspondre aux nouveaux paramètres d'événement avant d'être réenregistrée dans le magasin à l'aide de `gravatar.save()`. -### Recommended IDs for Creating New Entities +### ID recommandés pour la création de nouvelles entités Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. 
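À titre d'illustration, voici une esquisse minimale (qui ne provient pas du guide d'origine) montrant la construction d'un tel `id` dans un gestionnaire AssemblyScript, à partir du hachage de la transaction et de l'index du log. L'événement `NewPost`, l'entité `Post` (supposée déclarée avec `id: ID!`) et les chemins `../generated/...` sont hypothétiques ; seul le schéma de construction de l'`id` importe ici.

```typescript
// Esquisse hypothétique : `NewPost` et `Post` ne font pas partie du subgraph d'exemple.
import { NewPost } from '../generated/Posts/Posts'
import { Post } from '../generated/schema'

export function handleNewPost(event: NewPost): void {
  // `id` unique et déterministe : hachage de la transaction + index du log dans le bloc
  let id = event.transaction.hash.toHex() + '-' + event.logIndex.toString()
  let post = new Post(id)
  post.save()
}
```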
@@ -552,25 +565,25 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +De plus, une classe est générée pour chaque type d'entité dans le schéma GraphQL du subgraph. Ces classes fournissent un chargement d'entités de type sécurisé, un accès en lecture et en écriture aux champs d'entité ainsi qu'une méthode `save()` pour écrire les entités à stocker. Toutes les classes d'entités sont écrites dans `/schema.ts`, permettant aux mappages de les importer avec ```javascript -import { Gravatar } from '../generated/schema' +importer { Gravatar } du '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Remarque :** La génération de code doit être effectuée à nouveau après chaque modification du schéma GraphQL ou des ABI inclus dans le manifeste. Elle doit également être effectuée au moins une fois avant de construire ou de déployer le subgraph. Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. -## Data Source Templates +## Modèles de sources de données -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. +Un modèle courant dans les contrats intelligents compatibles EVM est l'utilisation de contrats de registre ou d'usine, dans lesquels un contrat crée, gère ou référence un nombre arbitraire d'autres contrats qui ont chacun leur propre état et leurs propres événements. -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. +Les adresses de ces sous-traitants peuvent ou non être connues à l'avance et bon nombre de ces contrats peuvent être créés et/ou ajoutés au fil du temps. C'est pourquoi, dans de tels cas, définir une seule source de données ou un nombre fixe de sources de données est impossible et une approche plus dynamique est nécessaire : des _modèles de sources de données_. -### Data Source for the Main Contract +### Source de données pour le contrat principal -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +Tout d’abord, vous définissez une source de données régulière pour le contrat principal. L'extrait ci-dessous montre un exemple simplifié de source de données pour le contrat d'usine d'échange [Uniswap](https://uniswap.org). 
Notez le gestionnaire d'événements `NewExchange(address,address)`. Celui-ci est émis lorsqu'un nouveau contrat d'échange est créé en chaîne par le contrat d'usine.

```yaml
dataSources:
@@ -600,58 +613,58 @@ dataSources:
Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract.

```yaml
dataSources:
  - kind: ethereum/contract
    name: Factory
    # ... autres champs de la source pour le contrat principal ...
templates:
  - name: Exchange
    kind: ethereum/contract
    network: mainnet
    source:
      abi: Exchange
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      file: ./src/mappings/exchange.ts
      entities:
        - Exchange
      abis:
        - name: Exchange
          file: ./abis/exchange.json
      eventHandlers:
        - event: TokenPurchase(address,uint256,uint256)
          handler: handleTokenPurchase
        - event: EthPurchase(address,uint256,uint256)
          handler: handleEthPurchase
        - event: AddLiquidity(address,uint256,uint256)
          handler: handleAddLiquidity
        - event: RemoveLiquidity(address,uint256,uint256)
          handler: handleRemoveLiquidity
```

-### Instantiating a Data Source Template
+### Instanciation d'un modèle de source de données

-In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract.
+Dans la dernière étape, vous mettez à jour le mappage de votre contrat principal pour créer une instance de source de données dynamique à partir de l'un des modèles. Dans cet exemple, vous modifieriez le mappage du contrat principal pour importer le modèle `Exchange` et appeler sur celui-ci la méthode `Exchange.create(address)` afin de commencer à indexer le nouveau contrat d'échange.
```typescript
import { Exchange } from '../generated/templates'

export function handleNewExchange(event: NewExchange): void {
  // Commence à indexer l'échange ; `event.params.exchange` est
  // l'adresse du nouveau contrat d'échange
  Exchange.create(event.params.exchange)
}
```

-> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks.
+> **Remarque :** Une nouvelle source de données traitera uniquement les appels et les événements du bloc dans lequel elle a été créée et de tous les blocs suivants ; elle ne traitera pas les données historiques, c'est-à-dire les données contenues dans les blocs précédents.
>
-> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created.
+> Si les blocs précédents contiennent des données pertinentes pour la nouvelle source de données, il est préférable d'indexer ces données en lisant l'état actuel du contrat et en créant des entités représentant cet état au moment de la création de la nouvelle source de données.

### Data Source Context

-Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so:
+Les contextes de source de données permettent de transmettre une configuration supplémentaire lors de l'instanciation d'un modèle. Dans notre exemple, disons que chaque échange est associé à une paire de trading particulière, incluse dans l'événement `NewExchange`. Ces informations peuvent être transmises à la source de données instanciée, comme suit :

```typescript
import { Exchange } from '../generated/templates'
@@ -666,163 +679,202 @@ export function handleNewExchange(event: NewExchange): void {
Inside a mapping of the `Exchange` template, the context can then be accessed:

```typescript
import { dataSource } from '@graphprotocol/graph-ts'

let context = dataSource.context()
let tradingPair = context.getString('tradingPair')
```

-There are setters and getters like `setString` and `getString` for all value types.
+Il existe des setters et des getters comme `setString` et `getString` pour tous les types de valeur.

-## Start Blocks
+## Blocs de démarrage

-The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created.
+Le `startBlock` est un paramètre facultatif qui vous permet de définir à partir de quel bloc de la chaîne la source de données commencera l'indexation.
La définition du bloc de départ permet à la source de données d'ignorer potentiellement des millions de blocs non pertinents. En règle générale, un développeur de subgraphs définira `startBlock` sur le bloc dans lequel le contrat intelligent de la source de données a été créé.

```yaml
dataSources:
  - kind: ethereum/contract
    name: ExampleSource
    network: mainnet
    source:
      address: '0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95'
      abi: ExampleContract
      startBlock: 6627917
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      file: ./src/mappings/factory.ts
      entities:
        - User
      abis:
        - name: ExampleContract
          file: ./abis/ExampleContract.json
      eventHandlers:
        - event: NewEvent(address,address)
          handler: handleNewEvent
```

-> **Note:** The contract creation block can be quickly looked up on Etherscan:
+> **Remarque :** Le bloc de création du contrat peut être rapidement retrouvé sur Etherscan :
>
-> 1. Search for the contract by entering its address in the search bar.
-> 2. Click on the creation transaction hash in the `Contract Creator` section.
+> 1. Recherchez le contrat en saisissant son adresse dans la barre de recherche.
+> 2. Cliquez sur le hachage de la transaction de création dans la section `Contract Creator`.
> 3. Load the transaction details page where you'll find the start block for that contract.

## Call Handlers

While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured.

-Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract.
+Les gestionnaires d'appels ne se déclencheront que dans l'un des deux cas suivants : lorsque la fonction spécifiée est appelée par un compte autre que le contrat lui-même, ou lorsqu'elle est marquée comme externe dans Solidity et appelée dans le cadre d'une autre fonction du même contrat.

-> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers.
These are far more performant than call handlers, and are supported on every evm network.
+> **Remarque :** Les gestionnaires d'appels dépendent actuellement de l'API de traçage de Parity. Certains réseaux, tels que BNB Chain et Arbitrum, ne prennent pas en charge cette API. Si un subgraph indexant l'un de ces réseaux contient un ou plusieurs gestionnaires d'appels, il ne démarrera pas la synchronisation. Les développeurs de subgraphs devraient plutôt utiliser des gestionnaires d'événements. Ceux-ci sont bien plus performants que les gestionnaires d'appels et sont pris en charge sur tous les réseaux EVM.

-### Defining a Call Handler
+### Définir un gestionnaire d'appels

-To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to.
+Pour définir un gestionnaire d'appels dans votre manifeste, ajoutez simplement un tableau `callHandlers` sous la source de données à laquelle vous souhaitez vous abonner.

```yaml
dataSources:
  - kind: ethereum/contract
    name: Gravity
    network: mainnet
    source:
      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
      abi: Gravity
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Gravatar
        - Transaction
      abis:
        - name: Gravity
          file: ./abis/Gravity.json
      callHandlers:
        - function: createGravatar(string,string)
          handler: handleCreateGravatar
```

-The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract.
+La `function` est la signature de fonction normalisée utilisée pour filtrer les appels. La propriété `handler` est le nom de la fonction de votre mappage que vous souhaitez exécuter lorsque la fonction cible est appelée dans le contrat de la source de données.

-### Mapping Function
+### Fonction de mappage

-Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument:
+Chaque gestionnaire d'appel prend un seul paramètre dont le type correspond au nom de la fonction appelée.
Dans l'exemple de subgraph ci-dessus, le mappage contient un gestionnaire invoqué lorsque la fonction `createGravatar` est appelée ; il reçoit un paramètre `CreateGravatarCall` comme argument :

```typescript
import { CreateGravatarCall } from '../generated/Gravity/Gravity'
import { Transaction } from '../generated/schema'

export function handleCreateGravatar(call: CreateGravatarCall): void {
  let id = call.transaction.hash
  let transaction = new Transaction(id)
  transaction.displayName = call.inputs._displayName
  transaction.imageUrl = call.inputs._imageUrl
  transaction.save()
}
```

-The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`.
+La fonction `handleCreateGravatar` prend un nouveau `CreateGravatarCall`, une sous-classe de `ethereum.Call` fournie par `@graphprotocol/graph-ts`, qui inclut les entrées et sorties typées de l'appel. Le type `CreateGravatarCall` est généré pour vous lorsque vous exécutez `graph codegen`.

## Block Handlers

-In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter.
+En plus de s'abonner à des événements de contrat ou à des appels de fonction, un subgraph peut vouloir mettre à jour ses données à mesure que de nouveaux blocs sont ajoutés à la chaîne. Pour y parvenir, un subgraph peut exécuter une fonction après chaque bloc ou après les blocs correspondant à un filtre prédéfini.

### Supported Filters

+#### Filtre d'appel
+
```yaml
filter:
  kind: call
```

-_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._
+_Le gestionnaire défini sera appelé une fois pour chaque bloc contenant un appel au contrat (source de données) sous lequel le gestionnaire est défini._

-> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing.
+> **Remarque :** Le filtre `call` dépend actuellement de l'API de traçage de Parity. Certains réseaux, tels que BNB Chain et Arbitrum, ne prennent pas en charge cette API. Si un subgraph indexant l'un de ces réseaux contient un ou plusieurs gestionnaires de blocs avec un filtre `call`, il ne démarrera pas la synchronisation.

-The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type.
+L'absence de filtre pour un gestionnaire de bloc garantira que le gestionnaire est appelé à chaque bloc.
Une source de données ne peut contenir qu'un seul gestionnaire de bloc pour chaque type de filtre.

```yaml
dataSources:
  - kind: ethereum/contract
    name: Gravity
    network: dev
    source:
      address: '0x731a10897d267e19b34503ad902d0a29173ba4b1'
      abi: Gravity
    mapping:
      kind: ethereum/events
      apiVersion: 0.0.6
      language: wasm/assemblyscript
      entities:
        - Gravatar
        - Transaction
      abis:
        - name: Gravity
          file: ./abis/Gravity.json
      blockHandlers:
        - handler: handleBlock
        - handler: handleBlockWithCallToContract
          filter:
            kind: call
```

-### Mapping Function
+#### Filtre d'interrogation
+
+> **Nécessite `specVersion` >= 0.0.8**
+
+> **Remarque :** Les filtres d'interrogation ne sont disponibles que sur les sources de données de `kind: ethereum`.
+
+```yaml
+blockHandlers:
+  - handler: handleBlock
+    filter:
+      kind: polling
+      every: 10
+```

Le gestionnaire défini sera appelé une fois tous les `n` blocs, où `n` est la valeur fournie dans le champ `every`. Cette configuration permet au subgraph d'effectuer des opérations spécifiques à intervalles réguliers.

+#### Filtre Once
+
+> **Nécessite `specVersion` >= 0.0.8**
+
+> **Remarque :** Les filtres once ne sont disponibles que sur les sources de données de `kind: ethereum`.
+
+```yaml
+blockHandlers:
+  - handler: handleOnce
+    filter:
+      kind: once
+```

The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing.

```ts
export function handleOnce(block: ethereum.Block): void {
  let data = new InitialData(Bytes.fromUTF8('initial'))
  data.data = 'Configurer les données ici'
  data.save()
}
```

### Fonction de mappage

La fonction de mappage recevra un `ethereum.Block` comme seul argument. Comme les fonctions de mappage pour les événements, cette fonction peut accéder aux entités de subgraph existantes dans le magasin, appeler des contrats intelligents et créer ou mettre à jour des entités.
```typescript
import { ethereum } from '@graphprotocol/graph-ts'

export function handleBlock(block: ethereum.Block): void {
  let id = block.hash
  let entity = new Block(id)
  entity.save()
}
```

-## Anonymous Events
+## Événements anonymes

-If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example:
+Si vous devez traiter des événements anonymes dans Solidity, cela peut être réalisé en fournissant le topic 0 de l'événement, comme dans l'exemple :

```yaml
eventHandlers:
@@ -831,35 +883,35 @@ eventHandlers:
    handler: handleGive
```

-An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature.
+Un événement ne sera déclenché que lorsque la signature et le topic 0 correspondent. Par défaut, `topic0` est égal au hachage de la signature de l'événement.

-## Transaction Receipts in Event Handlers
+## Reçus de transaction dans les gestionnaires d'événements

-Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them.
+À partir de `specVersion` `0.0.5` et `apiVersion` `0.0.7`, les gestionnaires d'événements peuvent avoir accès au reçu de la transaction qui les a émis.

-To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false.
+Pour ce faire, les gestionnaires d'événements doivent être déclarés dans le manifeste du subgraph avec la nouvelle clé `receipt: true`, qui est facultative et vaut `false` par défaut.

```yaml
eventHandlers:
  - event: NewGravatar(uint256,address,string,string)
    handler: handleNewGravatar
    receipt: true
```

-Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead.
+Dans la fonction de gestionnaire, le reçu est accessible dans le champ `Event.receipt`. Lorsque la clé `receipt` est définie sur `false` ou omise dans le manifeste, une valeur `null` sera renvoyée à la place.
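À titre d'illustration, voici une esquisse minimale (qui ne provient pas du guide d'origine) d'un gestionnaire qui vérifie la présence du reçu avant de le lire. Les champs `gasUsed` et `logs` sont supposés exister sur `ethereum.TransactionReceipt` dans `@graphprotocol/graph-ts` ; vérifiez-les pour votre version de la bibliothèque.

```typescript
import { log } from '@graphprotocol/graph-ts'
import { NewGravatar } from '../generated/Gravity/Gravity'

export function handleNewGravatar(event: NewGravatar): void {
  // `event.receipt` vaut null si `receipt: true` n'est pas déclaré pour ce gestionnaire
  let receipt = event.receipt
  if (receipt !== null) {
    // Exemple : journaliser le gaz consommé et le nombre de logs de la transaction
    log.info('gasUsed : {}, nombre de logs : {}', [
      receipt.gasUsed.toString(),
      receipt.logs.length.toString(),
    ])
  }
}
```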
-## Experimental features
+## Fonctionnalités expérimentales

-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below:
+À partir de `specVersion` `0.0.4`, les fonctionnalités de subgraph doivent être explicitement déclarées dans la section `features` au niveau supérieur du fichier manifeste, en utilisant leur nom en `camelCase`, comme indiqué dans le tableau ci-dessous :

-| Feature | Name |
-| --------------------------------------------------------- | --------------------------------------------------- |
-| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` |
-| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` |
-| [Greffage](#grafting-onto-existing-subgraphs) | `grafting` |
-| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` |
+| Fonctionnalité | Nom |
+| --- | --- |
+| [Erreurs non fatales](#non-fatal-errors) | `nonFatalErrors` |
+| [Recherche en texte intégral](#defining-fulltext-search-fields) | `fullTextSearch` |
+| [Greffage](#grafting-onto-existing-subgraphs) | `grafting` |
+| [IPFS sur les contrats Ethereum](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` ou `nonDeterministicIpfs` |

-For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be:
+Par exemple, si un subgraph utilise les fonctionnalités **Recherche en texte intégral** et **Erreurs non fatales**, le champ `features` du manifeste doit être :

```yaml
specVersion: 0.0.4
@@ -870,37 +922,33 @@ features:
dataSources: ...
```

-Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used.
+Notez que l'utilisation d'une fonctionnalité sans la déclarer entraînera une **erreur de validation** lors du déploiement du subgraph, mais aucune erreur ne se produira si une fonctionnalité est déclarée mais n'est pas utilisée.

-### IPFS on Ethereum Contracts
+### IPFS sur les contrats Ethereum

-A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts.
+Un cas d'utilisation courant de la combinaison d'IPFS et d'Ethereum consiste à stocker sur IPFS des données qui seraient trop coûteuses à maintenir en chaîne, et à référencer le hachage IPFS dans les contrats Ethereum.

-Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing.
+Compte tenu de ces hachages IPFS, les subgraphs peuvent lire les fichiers correspondants depuis IPFS en utilisant `ipfs.cat` et `ipfs.map`. Pour que cela fonctionne de manière fiable, ces fichiers doivent être épinglés sur un nœud IPFS à haute disponibilité, afin que le nœud IPFS du [service hébergé](https://thegraph.com/hosted-service) puisse les retrouver lors de l'indexation.

-> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio.
+> **Remarque :** Le réseau The Graph ne prend pas encore en charge `ipfs.cat` et `ipfs.map`, et les développeurs ne doivent pas déployer de subgraphs utilisant cette fonctionnalité sur le réseau via le Studio.

-> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose.
+> **[Gestion des fonctionnalités](#experimental-features) :** `ipfsOnEthereumContracts` doit être déclaré sous `features` dans le manifeste du subgraph. Pour les chaînes non EVM, l'alias `nonDeterministicIpfs` peut également être utilisé dans le même but.

> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio.

-Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest:
+L'activation des erreurs non fatales nécessite la définition de l'indicateur de fonctionnalité suivant dans le manifeste du subgraph :

```yaml
specVersion: 0.0.4
description: Gravatar for Ethereum
features:
  - nonFatalErrors
  ...
```

-The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example:
+La requête doit également accepter explicitement, via l'argument `subgraphError`, d'interroger des données présentant des incohérences potentielles. Il est également recommandé d'interroger `_meta` pour vérifier si le subgraph a ignoré des erreurs, comme dans l'exemple :

```graphql
foos(first: 100, subgraphError: allow) {
@@ -912,151 +960,153 @@ _meta {
}
```

-If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response:
+Si le subgraph rencontre une erreur, cette requête renverra à la fois les données et une erreur GraphQL avec le message `"indexing_error"`, comme dans cet exemple de réponse :

```graphql
"data": {
  "foos": [
    {
      "id": "0xdead"
    }
  ],
  "_meta": {
    "hasIndexingErrors": true
  }
},
"errors": [
  {
    "message": "indexing_error"
  }
]
```

-### Grafting onto Existing Subgraphs
+### Greffe sur des subgraphs existants

-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed.
+> **Remarque :** il n'est pas recommandé d'utiliser le greffage lors de la mise à niveau initiale vers The Graph Network. Apprenez-en plus [ici](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network).
-A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level:
+Lorsqu'un subgraph est déployé pour la première fois, il commence à indexer les événements au niveau du bloc Genesis de la chaîne correspondante (ou au `startBlock` défini avec chaque source de données). Dans certaines circonstances, il est avantageux de réutiliser les données d'un subgraph existant et de commencer l'indexation à un bloc beaucoup plus tardif. Ce mode d'indexation est appelé _Grafting_. Le greffage est, par exemple, utile pendant le développement pour surmonter rapidement de simples erreurs dans les mappages ou pour faire fonctionner à nouveau temporairement un subgraph existant après son échec.
+
+Un subgraph est greffé sur un subgraph de base lorsque le manifeste du subgraph dans `subgraph.yaml` contient un bloc `graft` au niveau supérieur :

```yaml
description: ...
-graft:
-  base: Qm... # Subgraph ID of base subgraph
-  block: 7345624 # Block number
+graft:
+  base: Qm... # ID de subgraph du subgraph de base
+  block: 7345624 # Numéro de bloc
```

-When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph.
+Lorsqu'un subgraph dont le manifeste contient un bloc `graft` est déployé, Graph Node copiera les données du subgraph `base` jusqu'au `block` donné inclus, puis continuera à indexer le nouveau subgraph à partir de ce bloc. Le subgraph de base doit exister sur l'instance Graph Node cible et doit avoir été indexé jusqu'au moins au bloc donné. En raison de cette restriction, le greffage ne doit être utilisé que pendant le développement ou en cas d'urgence pour accélérer la production d'un subgraph équivalent non greffé.

-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied.
+Étant donné que le greffage copie les données de base plutôt que de les indexer, il est beaucoup plus rapide d'amener le subgraph au bloc souhaité que de l'indexer à partir de zéro, bien que la copie initiale des données puisse encore prendre plusieurs heures pour de très gros subgraphs. Pendant l'initialisation du subgraph greffé, Graph Node enregistrera des informations sur les types d'entités qui ont déjà été copiés.

-The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways:
+Le subgraph greffé peut utiliser un schéma GraphQL qui n'est pas identique à celui du subgraph de base, mais simplement compatible avec celui-ci.
Il doit s'agir d'un schéma de subgraph valide à part entière, mais il peut s'écarter du schéma du subgraph de base des manières suivantes :

-- It adds or removes entity types
+- Il ajoute ou supprime des types d'entités
- It removes attributes from entity types
-- It adds nullable attributes to entity types
-- It turns non-nullable attributes into nullable attributes
-- It adds values to enums
+- Il ajoute des attributs nullables aux types d'entités
+- Il transforme les attributs non nullables en attributs nullables
+- Il ajoute des valeurs aux énumérations
- It adds or removes interfaces
-- It changes for which entity types an interface is implemented
+- Il change les types d'entités pour lesquels une interface est implémentée

-> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest.
+> **[Gestion des fonctionnalités](#experimental-features) :** `grafting` doit être déclaré sous `features` dans le manifeste du subgraph.

-## File Data Sources
+## Sources de données de fichiers

-File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS.
+File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave.

> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data.

-### Vue d'ensemble
+### Aperçu

-Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found.
+Plutôt que de récupérer les fichiers "en ligne" pendant l'exécution du gestionnaire, cela introduit des modèles qui peuvent être générés en tant que nouvelles sources de données pour un identifiant de fichier donné. Ces nouvelles sources de données récupèrent les fichiers, réessayent en cas d'échec et exécutent un gestionnaire dédié lorsque le fichier est trouvé.

This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources.

-> This replaces the existing `ipfs.cat` API
+> Cela remplace l'API `ipfs.cat` existante

-### Upgrade guide
+### Guide de mise à niveau

-#### Update `graph-ts` and `graph-cli`
+#### Mettre à jour `graph-ts` et `graph-cli`

-File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1
+Les sources de données de fichiers nécessitent graph-ts >=0.29.0 et graph-cli >=0.33.1

-#### Add a new entity type which will be updated when files are found
+#### Ajouter un nouveau type d'entité qui sera mis à jour lorsque des fichiers seront trouvés

File data sources cannot access or update chain-based entities, but must update file specific entities.

-This may mean splitting out fields from existing entities into separate entities, linked together.
+Cela peut impliquer de diviser les champs des entités existantes en entités distinctes, liées entre elles.

-Original combined entity:
+Entité combinée d'origine :

```graphql
-type Token @entity {
-  id: ID!
-  tokenID: BigInt!
-  tokenURI: String!
-  externalURL: String!
-  ipfsURI: String!
-  image: String!
-  name: String!
-  description: String!
-  type: String!
-  updatedAtTimestamp: BigInt
-  owner: User!
+type Token @entity {
+  id: ID!
+  tokenID: BigInt!
+  tokenURI: String!
+  externalURL: String!
+  ipfsURI: String!
+  image: String!
+  name: String!
+  description: String!
+  type: String!
+  updatedAtTimestamp: BigInt
+  owner: User!
}
```

-New, split entity:
+Nouvelle entité scindée :

```graphql
-type Token @entity {
-  id: ID!
-  tokenID: BigInt!
-  tokenURI: String!
-  ipfsURI: TokenMetadata
-  updatedAtTimestamp: BigInt
-  owner: String!
+type Token @entity {
+  id: ID!
+  tokenID: BigInt!
+  tokenURI: String!
+  ipfsURI: TokenMetadata
+  updatedAtTimestamp: BigInt
+  owner: String!
}

-type TokenMetadata @entity {
-  id: ID!
-  image: String!
-  externalURL: String!
-  name: String!
-  description: String!
+type TokenMetadata @entity {
+  id: ID!
+  image: String!
+  externalURL: String!
+  name: String!
+  description: String!
}
```

-If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities!
+Si la relation est 1:1 entre l'entité parent et l'entité de source de données de fichier résultante, le modèle le plus simple consiste à lier l'entité parent à une entité de fichier résultante en utilisant le CID IPFS comme clé de recherche. Contactez-nous sur Discord si vous rencontrez des difficultés pour modéliser vos nouvelles entités basées sur des fichiers !

-> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities.
+> Vous pouvez utiliser des [filtres imbriqués](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) pour filtrer les entités parent sur la base de ces entités imbriquées.

-#### Add a new templated data source with `kind: file/ipfs`
+#### Ajoutez une nouvelle source de données modélisée avec `kind: file/ipfs` ou `kind: file/arweave`

-This is the data source which will be spawned when a file of interest is identified.
+Il s'agit de la source de données qui sera générée lorsqu'un fichier d'intérêt est identifié.

```yaml
-templates:
-  - name: TokenMetadata
-    kind: file/ipfs
-    mapping:
-      apiVersion: 0.0.7
-      language: wasm/assemblyscript
-      file: ./src/mapping.ts
-      handler: handleMetadata
-      entities:
-        - TokenMetadata
+templates:
+  - name: TokenMetadata
+    kind: file/ipfs
+    mapping:
+      apiVersion: 0.0.7
+      language: wasm/assemblyscript
+      file: ./src/mapping.ts
+      handler: handleMetadata
+      entities:
+        - TokenMetadata
      abis:
-        - name: Token
-          file: ./abis/Token.json
+        - name: Token
+          file: ./abis/Token.json
```

-> Currently `abis` are required, though it is not possible to call contracts from within file data sources
+> Actuellement, les `abis` sont requis, bien qu'il ne soit pas possible d'appeler des contrats à partir de sources de données de fichiers

The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details.

-#### Create a new handler to process files
+#### Créer un nouveau gestionnaire pour traiter les fichiers

-This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed.
This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). +Ce gestionnaire doit accepter un paramètre `Bytes`, qui sera le contenu du fichier, lorsqu'il sera trouvé, qui pourra ensuite être traité. Il s'agira souvent d'un fichier JSON, qui peut être traité avec les assistants `graph-ts` ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json -api)). The CID of the file as a readable string can be accessed via the `dataSource` as follows: @@ -1091,14 +1141,16 @@ export function handleMetadata(content: Bytes): void { } ``` -#### Spawn file data sources when required +#### Générer des sources de données de fichiers si nécessaire + +Vous pouvez désormais créer des sources de données de fichiers lors de l'exécution de gestionnaires basés sur une chaîne : -You can now create file data sources during execution of chain-based handlers: +- Importez le modèle à partir des `modèles` générés automatiquement +- appeler `TemplateName.create(cid : string)` à partir d'un mappage, où le cid est un identifiant de contenu valide pour IPFS ou Arweave -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +Pour Arweave, à partir de la version 0.33.0, Graph Node peut récupérer des fichiers stockés sur Arweave en fonction de leur [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) à partir d'une passerelle Arweave ([fichier exemple](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave prend en charge les transactions téléchargées via Bundlr, et Graph Node peut également récupérer des fichiers sur la base des [manifestes Bundlr](https://docs.bundlr.network/learn/gateways#indexing). Exemple: @@ -1106,7 +1158,7 @@ Exemple: import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//Cet exemple de code concerne un sous-graphe de Crypto coven. Le hachage ipfs ci-dessus est un répertoire contenant les métadonnées des jetons pour toutes les NFT de l'alliance cryptographique. export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -1116,7 +1168,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + //Ceci crée un chemin vers les métadonnées pour un seul Crypto coven NFT. 
Il concatène le répertoire avec "/" + nom de fichier + ".json" token.ipfsURI = tokenIpfsHash @@ -1129,50 +1181,50 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +Cela créera une nouvelle source de données de fichier, qui interrogera le point d'extrémité IPFS ou Arweave configuré du nœud de graphique, en réessayant si elle n'est pas trouvée. Lorsque le fichier est trouvé, le gestionnaire de la source de données de fichier est exécuté. -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. +Cet exemple utilise le CID comme recherche entre l'entité `Token` parent et l'entité `TokenMetadata` résultante. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Auparavant, c'est à ce stade qu'un développeur de subgraphs aurait appelé `ipfs.cat(CID)` pour récupérer le fichier -Congratulations, you are using file data sources! +Félicitations, vous utilisez des sources de données de fichiers ! #### Deploying your subgraphs -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +Vous pouvez maintenant `construire` et `déployer` votre subgraph sur n'importe quel nœud de graph >=v0.30.0-rc.0. #### Limitations -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +Les entités et les gestionnaires de sources de données de fichiers sont isolés des autres entités du subgraph, ce qui garantit que leur exécution est déterministe et qu'il n'y a pas de contamination des sources de données basées sur des chaînes. Pour être plus précis : -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers +- Les entités créées par les sources de données de fichiers sont immuables et ne peuvent pas être mises à jour +- Les gestionnaires de sources de données de fichiers ne peuvent pas accéder à des entités provenant d'autres sources de données de fichiers +- Les entités associées aux sources de données de fichiers ne sont pas accessibles aux gestionnaires basés sur des chaînes -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> Cette contrainte ne devrait pas poser de problème pour la plupart des cas d'utilisation, mais elle peut en compliquer certains. N'hésitez pas à nous contacter via Discord si vous rencontrez des problèmes pour modéliser vos données basées sur des fichiers dans un subgraph ! -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. +En outre, il n'est pas possible de créer des sources de données à partir d'une source de données de fichier, qu'il s'agisse d'une source de données onchain ou d'une autre source de données de fichier. Cette restriction pourrait être levée à l'avenir. 
#### Best practices

-If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID.
+Si vous liez des métadonnées NFT aux jetons correspondants, utilisez le hachage IPFS des métadonnées pour référencer une entité Metadata à partir de l'entité Token. Enregistrez l'entité Metadata en utilisant le hachage IPFS comme identifiant.

-You can use [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler.
+Vous pouvez utiliser [Contexte de la source de données](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) lors de la création de sources de données de fichiers pour transmettre des informations supplémentaires qui seront mises à la disposition du gestionnaire de la source de données de fichiers.

-If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity.
+Si vous avez des entités qui sont actualisées plusieurs fois, créez des entités uniques basées sur des fichiers en utilisant le hachage IPFS et l'ID de l'entité, et référencez-les en utilisant un champ dérivé dans l'entité basée sur la chaîne.

-> We are working to improve the above recommendation, so queries only return the "most recent" version
+> Nous travaillons à l'amélioration de la recommandation ci-dessus, afin que les requêtes ne renvoient que la version "la plus récente"

-#### Known issues
+#### Problèmes connus

-File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI.
+Les sources de données de fichiers nécessitent actuellement des ABI, même si les ABI ne sont pas utilisées ([problème](https://github.com/graphprotocol/graph-cli/issues/961)). La solution consiste à ajouter n'importe quel ABI.

-Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file.
+Les gestionnaires des sources de données de fichiers ne peuvent pas se trouver dans des fichiers qui importent des liaisons de contrat `eth_call`, échouant avec "importation inconnue : `ethereum::ethereum.call` n'a pas été défini" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). La solution de contournement consiste à créer des gestionnaires de sources de données de fichiers dans un fichier dédié.
-#### Examples +#### Exemples [Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) -#### Références +#### Les Références -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) +[Sources de données du fichier GIP](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/fr/developing/developer-faqs.mdx b/website/pages/fr/developing/developer-faqs.mdx index 6abf37316a59..5abbd3867365 100644 --- a/website/pages/fr/developing/developer-faqs.mdx +++ b/website/pages/fr/developing/developer-faqs.mdx @@ -2,39 +2,39 @@ title: FAQs pour les développeurs --- -## 1. What is a subgraph? +## 1. Qu'est-ce qu'un subgraph ? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using the Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available to be queried by subgraph consumers. +Un subgraph est une API personnalisée construite sur des données de blockchain. Les subgraphs sont interrogés à l'aide du langage de requête GraphQL et sont déployés sur un nœud de graph à l'aide de Graphe CLI . Dès qu'ils sont déployés et publiés sur le réseau décentralisé de The Graph, Les indexeurs traitent les subgraphs et les rendent disponibles pour être interrogés par les consommateurs de subgraphs. -## 2. Can I delete my subgraph? +## 2. Puis-je supprimer mon subgraph ? -Il n'est pas possible de supprimer des subgraphes une fois qu'ils sont créés. +Il n'est pas possible de supprimer des subgraphs une fois qu'ils sont créés. -## 3. Can I change my subgraph name? +## 3. Puis-je changer le nom de mon subgraph ? -Non. Une fois qu'un subgraphe est créé, son nom ne peut plus être modifié. Assurez-vous d'y réfléchir attentivement avant de créer votre subgraphe afin qu'il soit facilement consultable et identifiable par d'autres dapps. +Non. Une fois qu'un subgraph est créé, son nom ne peut plus être modifié. Assurez-vous d'y réfléchir attentivement avant de créer votre subgraph afin qu'il soit facilement consultable et identifiable par d'autres dapps. -## 4. Can I change the GitHub account associated with my subgraph? +## 4. Puis-je modifier le compte GitHub associé à mon subgraph ? -Non. Dès qu'un subgraphe est créé, le compte GitHub associé ne peut pas être modifié. Assurez-vous d'y réfléchir attentivement avant de créer votre subgraphe. +Non. Dès qu'un subgraph est créé, le compte GitHub associé ne peut pas être modifié. Assurez-vous d'y réfléchir attentivement avant de créer votre subgraph. -## 5. Am I still able to create a subgraph if my smart contracts don't have events? +## 5. Suis-je toujours en mesure de créer un subgraph si mes smart contracts n'ont pas d'événements ? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are by far the fastest way to retrieve useful data. +Il est fortement recommandé de structurer vos smart contracts pour avoir des événements associés aux données que vous souhaitez interroger. Les gestionnaires d'événements du subgraph sont déclenchés par des événements de contrat et constituent le moyen le plus rapide de récupérer des données utiles. 
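+
+À titre d'illustration (esquisse hypothétique : les noms `Transfer`, `TransferEvent` et `handleTransfer` ne sont que des exemples), un gestionnaire d'événements de mappage reçoit directement les paramètres de l'événement émis par le contrat, sans avoir à scruter la chaîne :
+
+```typescript
+import { Transfer as TransferEvent } from '../generated/Contract/Contract'
+import { Transfer } from '../generated/schema'
+
+// Déclenché à chaque fois que le contrat émet l'événement Transfer
+export function handleTransfer(event: TransferEvent): void {
+  let transfer = new Transfer(event.transaction.hash.toHex() + '-' + event.logIndex.toString())
+  transfer.from = event.params.from
+  transfer.to = event.params.to
+  transfer.save()
+}
+```
+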
-Si les contrats avec lesquels vous travaillez ne contiennent pas d'événements, votre subgraphe peut utiliser des gestionnaires d'appels et de blocs pour déclencher l'indexation. Bien que cela ne soit pas recommandé, les performances seront considérablement plus lentes. +Si les contrats avec lesquels vous travaillez ne contiennent pas d'événements, votre subgraph peut utiliser des gestionnaires d'appels et de blocs pour déclencher l'indexation. Bien que cela ne soit pas recommandé, les performances seront considérablement plus lentes. -## 6. Is it possible to deploy one subgraph with the same name for multiple networks? +## 6. Est-il possible de déployer un subgraph portant le même nom pour plusieurs réseaux ? -Vous aurez besoin de noms distincts pour plusieurs réseaux. Bien que vous ne puissiez pas avoir différents subgraphes sous le même nom, il existe des moyens pratiques d'avoir une seule base de code pour plusieurs réseaux. Retrouvez plus d'informations à ce sujet dans notre documentation : [Déploiement d'un subgraphe](/deploying/deploying-a-subgraph-to-hosted#redeploying-an-subgraph) +Vous aurez besoin de noms distincts pour plusieurs réseaux. Bien que vous ne puissiez pas avoir différents subgraphs sous le même nom, il existe des moyens pratiques d'avoir une seule base de code pour plusieurs réseaux. Retrouvez plus d'informations à ce sujet dans notre documentation : [Déploiement d'un subgraph](/deploying/deploying-a-subgraph-to-hosted#redeploying-an-subgraph) -## 7. How are templates different from data sources? +## 7. En quoi les modèles sont-ils différents des sources de données ? -Les modèles vous permettent de créer des sources de données à la volée, pendant l'indexation de votre subgraphe. Peut-être que votre contrat générera de nouveaux contrats à mesure que les gens interagissent avec lui, et puisque vous connaissez la forme de ces contrats (ABI, événements, etc.) à l'avance, vous pouvez définir comment vous souhaitez les indexer dans un modèle et lorsqu'ils sont générés, votre subgraphe créera une source de données dynamique en fournissant l'adresse du contrat. +Les modèles vous permettent de créer des sources de données à la volée, pendant l'indexation de votre subgraph. Il se peut que votre contrat engendre de nouveaux contrats au fur et à mesure que les gens interagissent avec lui, et puisque vous connaissez la forme de ces contrats (ABI, événements, etc.) à l'avance, vous pouvez définir comment vous souhaitez les indexer dans un modèle et lorsqu'ils sont générés, votre subgraph créera une source de données dynamique en fournissant l'adresse du contrat. Consultez la section "Instanciation d'un modèle de source de données" sur : [Modèles de source de données](/developing/creating-a-subgraph#data-source-templates). -## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? +## 8. Comment m'assurer que j'utilise la dernière version de graph-node pour mes déploiements locaux ? Vous pouvez exécuter la commande suivante : @@ -42,58 +42,58 @@ Vous pouvez exécuter la commande suivante : docker pull graphprotocol/graph-node:dernier ``` -**REMARQUE :** docker / docker-compose utilisera toujours la version de nœud The Graph extraite la première fois que vous l'avez exécuté, il est donc important de le faire pour vous assurer que vous êtes à jour avec la dernière version de nœud The Graph. 
+**REMARQUE :** docker / docker-compose utilisera toujours la version de graph-node extraite la première fois que vous l'avez exécuté, il est donc important de le faire pour vous assurer que vous êtes à jour avec la dernière version de graph-node. -## 9. How do I call a contract function or access a public state variable from my subgraph mappings? +## 9. Comment appeler une fonction de contrat ou accéder à une variable d'état publique à partir de mes mappages de subgraphs ? -Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/developing/assemblyscript-api). +Jetez un œil à l'état `Accès au contrat intelligent` dans la section [API AssemblyScript](/developing/assemblyscript-api). -## 10. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another datasource in `subgraph.yaml` after running `graph init`? +## 10. Est-il possible de configurer un subgraph en utilisant `graph init` à partir de `graph-cli` avec deux contrats ? Ou dois-je ajouter manuellement une autre source de données dans `subgraph.yaml` après avoir exécuté `graph init` ? Malheureusement, ce n'est actuellement pas possible. `graph init` est conçu comme un point de départ de base, à partir duquel vous pouvez ajouter manuellement d'autres sources de données. -## 11. I want to contribute or add a GitHub issue. Where can I find the open source repositories? +## 11. Je souhaite contribuer ou ajouter un problème GitHub. Où puis-je trouver les référentiels open source ? -- [nœud The Graph](https://github.com/graphprotocol/graph-node) +- [graph-node](https://github.com/graphprotocol/graph-node) - [graph-cli](https://github.com/graphprotocol/graph-cli) -- [graph test](https://github.com/graphprotocol/graph-ts) +- [graph-ts](https://github.com/graphprotocol/graph-ts) -## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? +## 12. Quelle est la méthode recommandée pour créer des identifiants « générés automatiquement » pour une entité lors du traitement des événements ? Si une seule entité est créée lors de l'événement et s'il n'y a rien de mieux disponible,alors le hachage de transaction + index de journal serait unique. Vous pouvez les masquer en les convertissant en octets, puis en les redirigeant vers `crypto.keccak256`, mais cela ne le rendra pas plus unique. -## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? +## 13. Lorsqu'on écoute plusieurs contrats, est-il possible de sélectionner l'ordre des contrats pour écouter les événements ? -Dans un subgraphe, les événements sont toujours traités dans l'ordre dans lequel ils apparaissent dans les blocs, que ce soit sur plusieurs contrats ou non. +Dans un subgraph, les événements sont toujours traités dans l'ordre dans lequel ils apparaissent dans les blocs, que ce soit sur plusieurs contrats ou non. -## 14. Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? +## 14. Est-il possible de différencier les réseaux (réseau principal, Goerli, local) à partir des gestionnaires d'événements ? Oui. Vous pouvez le faire en important `graph-ts` comme dans l'exemple ci-dessous : ```javascript -import { dataSource } from '@graphprotocol/graph-ts' +importez { dataSource } de '@graphprotocol/graph-ts' dataSource.network() dataSource.address() ``` -## 15. Do you support block and call handlers on Goerli? +## 15. 
Prenez-vous en charge les gestionnaires de blocs et d'appels sur Goerli ? -Yes. Goerli supports block handlers, call handlers and event handlers. It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. +Oui. Goerli prend en charge les gestionnaires de blocs, les gestionnaires d'appels et les gestionnaires d'événements. Il convient de noter que les gestionnaires d'événements sont beaucoup plus performants que les deux autres et qu'ils sont pris en charge par tous les réseaux compatibles avec l'EVM. -## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? +## 16. Puis-je importer ethers.js ou d'autres bibliothèques JS dans mes mappages de subgraphs ? -Pas pour le moment, car les mappages sont écrits en AssemblyScript. Une autre solution possible consiste à stocker les données brutes dans des entités et à exécuter une logique qui nécessite des bibliothèques JS sur le client. +Pas pour le moment, car les mappages sont écrits en AssemblyScript. Une autre solution possible consiste à stocker les données brutes dans des entités et à exécuter une logique qui nécessite des bibliothèques JS du client. -## 17. Is it possible to specify what block to start indexing on? +## 17. Est-il possible de spécifier sur quel bloc démarrer l'indexation ? -Oui. `dataSources.source.startBlock` dans le fichier `subgraphe.yaml` spécifie le nombre de blocs à partir desquels la source de données commence l'indexation +Oui. `dataSources.source.startBlock` dans le fichier `subgraph.yaml` spécifie le numéro du bloc à partir duquel la source de données commence l'indexation. Dans la plupart des cas, nous proposons d'utiliser le bloc dans lequel le contrat a été créé : Blocs de départ -## 18. Are there some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +## 18. Existe-t-il des astuces pour améliorer les performances de l'indexation ? La synchronisation de mon subgraph prend beaucoup de temps -Oui, vous devriez jeter un œil à la fonctionnalité optionnelle de bloc de démarrage pour commencer l'indexation à partir du bloc sur lequel le contrat a été déployé : [Blocs de démarrage](/developing/creating-a-subgraph#start-blocks) +Yes, you should take a look at the optional start block feature to start indexing from the block that the contract was deployed: [Start blocks](/developing/creating-a-subgraph#start-blocks) -## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +## 19. Existe-t-il un moyen d'interroger directement le subgraph pour déterminer le dernier numéro de bloc qu'il a indexé ? Oui ! Essayez la commande suivante, en remplaçant "organization/subgraphName" par l'organisation sous laquelle elle est publiée et le nom de votre subgraphe : @@ -101,42 +101,38 @@ Oui ! Essayez la commande suivante, en remplaçant "organization/subgraphName" curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/ index-node/graphql ``` -## 20. What networks are supported by The Graph? +## 20. Quels réseaux sont pris en charge par The Graph ? Vous pouvez trouver la liste des réseaux supportés [ici](/developing/supported-networks). -## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +## 21. 
Est-il possible de dupliquer un subgraph sur un autre compte ou point de terminaison sans redéployer ? -Vous devez redéployer le subgraphe, mais si l'ID de subgraphe (hachage IPFS) ne change pas, il n'aura pas à se synchroniser depuis le début. +Vous devez redéployer le subgraph, mais si l'ID de subgraph (hachage IPFS) ne change pas, il n'aura pas à se synchroniser depuis le début. -## 22. Is this possible to use Apollo Federation on top of graph-node? +## 22. Est-il possible d'utiliser Apollo Federation au-dessus du graph-node ? La fédération n'est pas encore supportée, bien que nous souhaitions la prendre en charge à l'avenir. Pour le moment, vous pouvez utiliser l'assemblage de schémas, soit sur le client, soit via un service proxy. -## 23. Is there a limit to how many objects The Graph can return per query? +## 23. Y a-t-il une limite au nombre d'objets que The Graph peut renvoyer par requête ? Par défaut, les réponses aux requêtes sont limitées à 100 éléments par collection. Si vous souhaitez en recevoir plus, vous pouvez aller jusqu'à 1000 articles par collection et au-delà, vous pouvez paginer avec : ```graphql -someCollection(first: 1000, skip: ) { ... } +quelquesCollection(first: 1000, skip: ) { ... } ``` -## 24. If my dapp frontend uses The Graph for querying, do I need to write my query key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? +## 24. Si mon interface dapp utilise The Graph pour les requêtes, dois-je écrire ma clé de requête directement dans l'interface ? Et si nous payons des frais de requête pour les utilisateurs : les utilisateurs malveillants rendront-ils nos frais de requête très élevés ? -Actuellement, l'approche recommandée pour une dapp consiste à ajouter la clé à l'interface et à l'exposer aux utilisateurs finaux. Cela dit, vous pouvez limiter cette clé à un nom d'hôte, comme _yourdapp.io_ et subgraph. La passerelle est actuellement gérée par Edge & Nœud. Une partie de la responsabilité d'une passerelle est de surveiller les comportements abusifs et de bloquer le trafic des clients malveillants. +Actuellement, l'approche recommandée pour une dapp consiste à ajouter la clé à l'interface et à l'exposer aux utilisateurs finaux. Cela dit, vous pouvez limiter cette clé à un nom d'hôte, comme _yourdapp.io_ et subgraph. La passerelle est actuellement gérée par Edge & Node. Une partie de la responsabilité d'une passerelle est de surveiller les comportements abusifs et de bloquer le trafic des clients malveillants. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? -Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). +Rendez-vous sur le service hébergé afin de trouver les subgraphs que vous ou d'autres personnes avez déployés sur le service hébergé. Vous pouvez le trouver [ici](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? -The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. 
Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. +The Graph ne facturera jamais le service hébergé. The Graph est un protocole décentralisé, et faire payer un service centralisé n'est pas conforme aux valeurs du Graphe. Le service hébergé a toujours été une étape temporaire pour aider à passer au réseau décentralisé. Les développeurs disposeront d'un délai suffisant pour passer au réseau décentralisé lorsqu'ils le souhaiteront. -## 27. When will the Hosted Service be shut down? +## 27. How do I update a subgraph on mainnet? -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? - -If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +Si vous êtes un développeur de subgraphs, vous pouvez déployer une nouvelle version de votre subgraph sur Subgraph Studio à l'aide de la CLI. Ce sera privé à ce stade, mais si vous en êtes satisfait, vous pouvez le publier sur Graph Explorer décentralisé. Cela créera une nouvelle version de votre subgraph sur laquelle les conservateurs pourront commencer à signaler. diff --git a/website/pages/fr/developing/graph-ts/api.mdx b/website/pages/fr/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..a45e92e0cc8e --- /dev/null +++ b/website/pages/fr/developing/graph-ts/api.mdx @@ -0,0 +1,853 @@ +--- +title: API AssemblyScript +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +Cette page documente les API intégrées qui peuvent être utilisées lors de l'écriture de mappages de subgraphs. Deux types d'API sont disponibles prêtes à l'emploi : + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## Référence API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. 
+- Primitives de bas niveau pour traduire entre différents systèmes de types tels que Ethereum, JSON, GraphQL et AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Notes de version | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Types intégrés + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +importer { BigDecimal } depuis '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +importer { BigInt } depuis '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } du '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Octets + +```typescript +importez { Bytes } du '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+
+The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods:
+
+_Construction_
+
+- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x`
+- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes
+
+_Type conversions_
+
+- `b.toHex()` – returns a hexadecimal string representing the bytes in the array
+- `b.toString()` – converts the bytes in the array to a string of unicode characters
+- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes)
+
+_Operators_
+
+- `b.concat(other: Bytes) : Bytes` - return a new `Bytes` consisting of `this` directly followed by `other`
+- `b.concatI32(other: i32) : ByteArray` - return a new `Bytes` consisting of `this` directly followed by the byte representation of `other`
+
+#### Adresse
+
+```typescript
+import { Address } from '@graphprotocol/graph-ts'
+```
+
+`Address` extends `Bytes` to represent Ethereum `address` values.
+
+It adds the following method on top of the `Bytes` API:
+
+- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string
+- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error
+
+### Store API
+
+```typescript
+import { store } from '@graphprotocol/graph-ts'
+```
+
+The `store` API allows to load, save and remove entities from and to the Graph Node store.
+
+Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities.
+
+#### Création d'entités
+
+Ce qui suit est un modèle courant pour créer des entités à partir d’événements Ethereum.
+
+```typescript
+// Importer la classe d'événement Transfer générée à partir de l'ABI ERC20
+import { Transfer as TransferEvent } from '../generated/ERC20/ERC20'
+
+// Importer le type d'entité Transfer généré à partir du schéma GraphQL
+import { Transfer } from '../generated/schema'
+
+// Gestionnaire de l'événement Transfer
+export function handleTransfer(event: TransferEvent): void {
+  // Créer une entité Transfer, en utilisant le hachage de la transaction comme ID d'entité
+  let id = event.transaction.hash
+  let transfer = new Transfer(id)
+
+  // Définir les propriétés de l'entité, en utilisant les paramètres de l'événement
+  transfer.from = event.params.from
+  transfer.to = event.params.to
+  transfer.amount = event.params.amount
+
+  // Enregistrer l'entité dans le magasin
+  transfer.save()
+}
+```
+
+When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters.
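+
+À titre d'esquisse (hypothétique, en supposant que le champ `id` de l'entité est de type `String`/`ID`), un identifiant combinant le hachage de la transaction et l'index du journal reste unique même lorsque plusieurs événements du même type sont émis dans une seule transaction :
+
+```typescript
+export function handleTransfer(event: TransferEvent): void {
+  // Hachage de la transaction + index du journal : unique pour chaque événement
+  let id = event.transaction.hash.toHex() + '-' + event.logIndex.toString()
+  let transfer = new Transfer(id)
+  transfer.save()
+}
+```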
+
+Chaque entité doit avoir un identifiant unique pour éviter les collisions avec d'autres entités. Il est assez courant que les paramètres d'événement incluent un identifiant unique pouvant être utilisé. Remarque : l'utilisation du hachage de transaction comme ID suppose qu'aucun autre événement dans la même transaction ne crée d'entités avec ce hachage comme ID.
+
+#### Chargement d'entités depuis le magasin
+
+Si une entité existe déjà, elle peut être chargée depuis le magasin avec les éléments suivants :
+
+```typescript
+let id = event.transaction.hash // ou quelle que soit la manière dont l'ID est construit
+let transfer = Transfer.load(id)
+if (transfer == null) {
+  transfer = new Transfer(id)
+}
+
+// Utiliser l'entité Transfer comme avant
+```
+
+As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value.
+
+> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities.
+
+#### Recherche d'entités créées dans un bloc
+
+As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types.
+
+L'API du magasin facilite la récupération des entités créées ou mises à jour dans le bloc actuel. Une situation typique est qu'un gestionnaire crée une transaction à partir d'un événement en chaîne et qu'un gestionnaire ultérieur souhaite accéder à cette transaction si elle existe. Dans le cas où la transaction n'existe pas, le subgraph devra interroger la base de données juste pour découvrir que l'entité n'existe pas ; si l'auteur du subgraph sait déjà que l'entité doit avoir été créée dans le même bloc, l'utilisation de `loadInBlock` évite cet aller-retour dans la base de données. Pour certains subgraphs, ces recherches infructueuses peuvent contribuer de manière significative au temps d'indexation.
+
+```typescript
+let id = event.transaction.hash // ou quelle que soit la manière dont l'ID est construit
+let transfer = Transfer.loadInBlock(id)
+if (transfer == null) {
+  transfer = new Transfer(id)
+}
+
+// Utiliser l'entité Transfer comme avant
+```
+
+> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store.
+
+#### Recherche d'entités dérivées
+
+As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available.
+
+Cela permet de charger des champs d'entités dérivés à partir d'un gestionnaire d'événements. Par exemple, étant donné le schéma suivant :
+
+```graphql
+type Token @entity {
+  id: ID!
+  holder: Holder!
+  color: String
+}
+
+type Holder @entity {
+  id: ID!
+  tokens: [Token!]! @derivedFrom(field: "holder")
+}
+```
+
+The following code will load the `Token` entity that the `Holder` entity was derived from:
+
+```typescript
+let holder = Holder.load('test-id')
+// Charger les entités Token associées à un détenteur donné
+let tokens = holder.tokens.load()
+```
+
+#### Mise à jour des entités existantes
+
+Il existe deux manières de mettre à jour une entité existante :
+
+1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store.
+2.
Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it.
+
+La modification des propriétés est simple dans la plupart des cas, grâce aux setters de propriétés générés :
+
+```typescript
+let transfer = new Transfer(id)
+transfer.from = ...
+transfer.to = ...
+transfer.amount = ...
+```
+
+Il est également possible de supprimer des propriétés avec l'une des deux instructions suivantes :
+
+```typescript
+transfer.from.unset()
+transfer.from = null
+```
+
+This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`.
+
+Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field.
+
+```typescript
+// Cela ne fonctionnera pas
+entity.numbers.push(BigInt.fromI32(1))
+entity.save()
+
+// Cela fonctionnera
+let numbers = entity.numbers
+numbers.push(BigInt.fromI32(1))
+entity.numbers = numbers
+entity.save()
+```
+
+#### Supprimer des entités du magasin
+
+There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`:
+
+```typescript
+import { store } from '@graphprotocol/graph-ts'
+...
+let id = event.transaction.hash
+store.remove('Transfer', id)
+```
+
+### Ethereum API
+
+L'API Ethereum donne accès aux contrats intelligents, aux variables d'état publiques, aux fonctions de contrat, aux événements, aux transactions, aux blocs, ainsi qu'à l'encodage et au décodage des données Ethereum.
+
+#### Prise en charge des types Ethereum
+
+As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder.
+
+With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them.
+
+L’exemple suivant illustre cela. Étant donné un schéma de subgraph comme
+
+```graphql
+type Transfer @entity {
+  id: Bytes!
+  from: Bytes!
+  to: Bytes!
+  amount: BigInt!
+}
+```
+
+and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity:
+
+```typescript
+let id = event.transaction.hash
+let transfer = new Transfer(id)
+transfer.from = event.params.from
+transfer.to = event.params.to
+transfer.amount = event.params.amount
+transfer.save()
+```
+
+#### Événements et données de bloc/transaction
+
+Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of.
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`):
+
+```typescript
+class Event {
+  address: Address
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string | null
+  block: Block
+  transaction: Transaction
+  parameters: Array
+  receipt: TransactionReceipt | null
+}
+
+class Block {
+  hash: Bytes
+  parentHash: Bytes
+  unclesHash: Bytes
+  author: Address
+  stateRoot: Bytes
+  transactionsRoot: Bytes
+  receiptsRoot: Bytes
+  number: BigInt
+  gasUsed: BigInt
+  gasLimit: BigInt
+  timestamp: BigInt
+  difficulty: BigInt
+  totalDifficulty: BigInt
+  size: BigInt | null
+  baseFeePerGas: BigInt | null
+}
+
+class Transaction {
+  hash: Bytes
+  index: BigInt
+  from: Address
+  to: Address | null
+  value: BigInt
+  gasLimit: BigInt
+  gasPrice: BigInt
+  input: Bytes
+  nonce: BigInt
+}
+
+class TransactionReceipt {
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  blockHash: Bytes
+  blockNumber: BigInt
+  cumulativeGasUsed: BigInt
+  gasUsed: BigInt
+  contractAddress: Address
+  logs: Array
+  status: BigInt
+  root: Bytes
+  logsBloom: Bytes
+}
+
+class Log {
+  address: Address
+  topics: Array
+  data: Bytes
+  blockHash: Bytes
+  blockNumber: Bytes
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string
+  removed: bool | null
+}
+```
+
+#### Accès à l'état du contrat intelligent
+
+The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block.
+
+Un modèle courant consiste à accéder au contrat dont provient un événement. Ceci est réalisé avec le code suivant :
+
+```typescript
+// Importer la classe de contrat générée et la classe d'événement Transfer générée
+import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
+// Importer la classe d'entité générée
+import { Transfer } from '../generated/schema'
+
+export function handleTransfer(event: TransferEvent): void {
+  // Lier le contrat à l'adresse qui a émis l'événement
+  let contract = ERC20Contract.bind(event.address)
+
+  // Accéder aux variables d'état et aux fonctions du contrat en les appelant
+  let erc20Symbol = contract.symbol()
+}
+```
+
+`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type.
+
+As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically.
+
+Tout autre contrat faisant partie du subgraph peut être importé à partir du code généré et lié à une adresse valide.
+
+#### Gestion des appels annulés (reverts)
+
+If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. 
This code would be able to handle a revert in that method:
+
+```typescript
+let gravity = Gravity.bind(event.address)
+let callResult = gravity.try_gravatarToOwner(gravatar)
+if (callResult.reverted) {
+  log.info('getGravatar annulé', [])
+} else {
+  let owner = callResult.value
+}
+```
+
+Notez qu'un nœud Graph connecté à un client Geth ou Infura peut ne pas détecter tous les reverts ; si vous dépendez de cette détection, nous vous recommandons d'utiliser un nœud Graph connecté à un client Parity.
+
+#### Encodage/décodage ABI
+
+Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module.
+
+```typescript
+import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts'
+
+let tupleArray: Array = [
+  ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')),
+  ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)),
+]
+
+let tuple = tupleArray as ethereum.Tuple
+
+let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))!
+
+let decoded = ethereum.decode('(address,uint256)', encoded)
+```
+
+Pour plus d'informations :
+
+- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types)
+- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi)
+- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72).
+
+### Logging API
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+```
+
+The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments.
+
+The `log` API includes the following functions:
+
+- `log.debug(fmt: string, args: Array): void` - logs a debug message.
+- `log.info(fmt: string, args: Array): void` - logs an informational message.
+- `log.warning(fmt: string, args: Array): void` - logs a warning.
+- `log.error(fmt: string, args: Array): void` - logs an error message.
+- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph.
+
+The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on.
+
+```typescript
+log.info('Message à afficher : {}, {}, {}', [value.toString(), anotherValue.toString(), 'déjà une chaîne'])
+```
+
+#### Enregistrer une ou plusieurs valeurs
+
+##### Enregistrer une seule valeur
+
+In the example below, the string value "A" is passed into an array to become `['A']` before being logged:
+
+```typescript
+let myValue = 'A'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Affiche : "Ma valeur est : A"
+  log.info('Ma valeur est : {}', [myValue])
+}
+```
+
+##### Journalisation d'une seule entrée à partir d'un tableau existant
+
+Dans l'exemple ci-dessous, seule la première valeur du tableau d'arguments est journalisée, bien que le tableau contienne trois valeurs. 
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Affiche : "Ma valeur est : A" (même si trois valeurs sont passées à `log.info`)
+  log.info('Ma valeur est : {}', myArray)
+}
+```
+
+##### Journalisation de plusieurs entrées d'un tableau existant
+
+Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Affiche : "Ma première valeur est : A, la deuxième valeur est : B, la troisième valeur est : C"
+  log.info('Ma première valeur est : {}, la deuxième valeur est : {}, la troisième valeur est : {}', myArray)
+}
+```
+
+##### Enregistrer une entrée spécifique à partir d'un tableau existant
+
+Pour afficher une valeur spécifique du tableau, il faut fournir son index.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Affiche : "Ma troisième valeur est : C"
+  log.info('Ma troisième valeur est : {}', [myArray[2]])
+}
+```
+
+##### Journalisation des informations sur les événements
+
+L'exemple ci-dessous journalise le numéro de bloc, le hachage du bloc et le hachage de la transaction d'un événement :
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+Étant donné un hachage ou un chemin IPFS, la lecture d'un fichier depuis IPFS se fait comme suit :
+
+```typescript
+// Placez ceci dans un gestionnaire d'événements du mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Les chemins comme `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// qui incluent des fichiers dans des répertoires sont également pris en charge
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+let data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // Voir la documentation de JSONValue pour plus de détails sur la manipulation
+  // des valeurs JSON
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Les callbacks peuvent également créer des entités
+  let newItem = new Item(id)
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Définit le parent sur "parentId"
+  newItem.save()
+}
+
+// Placez ceci dans un gestionnaire d'événements du mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Vous pouvez également utiliser `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.
+
+On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed.
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:
+
+- `crypto.keccak256(input: ByteArray): ByteArray`
+
+### JSON API
+
+```typescript
+import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+```
+
+JSON data can be parsed using the `json` API:
+
+- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence
+- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed
+- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String`
+- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed
+
+The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value:
+
+```typescript
+let value = json.fromBytes(...)
+if (value.kind == JSONValueKind.BOOL) {
+  ...
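+  // (illustrative comment) once the kind has been checked, it is safe to call value.toBool() here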
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Référence des conversions de types + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Métadonnées de la source de données + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entité et DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
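+
+As a brief illustration only (a minimal sketch: the handler name and the `*_example` keys are placeholders that mirror the manifest sample below), such values can then be read in a mapping through `dataSource.context()` and the getters listed above:
+
+```typescript
+import { dataSource } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Read values declared in the data source's `context` section of the manifest
+  let context = dataSource.context()
+  let greeting = context.getString('string_example') // 'hello'
+  let answer = context.getI32('int_example') // 42
+  let enabled = context.getBoolean('bool_example') // true
+}
+```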
+ +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +Ce contexte est ensuite accessible dans vos fichiers de mappage de subgraphs, permettant des subgraphs plus dynamiques et configurables. diff --git a/website/pages/fr/developing/graph-ts/common-issues.mdx b/website/pages/fr/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/fr/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/fr/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/fr/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..4322a5964f4f 100644 --- a/website/pages/fr/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/fr/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: FAQ sur les subgraphs alimentés par les sous-flux --- -## What are Substreams? +## Que sont les sous-flux ? -Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. 
Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. +Développé par [StreamingFast] \(https://www.streamingfast.io/), Substreams est un moteur de traitement exceptionnellement puissant capable de consommer de riches flux de données blockchain. Substreams vous permet d'affiner et de façonner les données de la blockchain pour une digestion rapide et transparente par les applications des utilisateurs finaux. Plus précisément, Substreams est un moteur agnostique à la blockchain, parallélisé et à flux continu, qui sert de couche de transformation des données de la blockchain. Alimenté par le [Firehose](https://firehose.streamingfast.io/), il permet aux développeurs d'écrire des modules Rust, de s'appuyer sur des modules communautaires, de fournir une indexation extrêmement performante et de [sink] \(https://substreams.streamingfast.io/developers-guide/sink-targets) leurs données n'importe où. -Go to the [Substreams Documentation](/substreams) to learn more about Substreams. +Rendez-vous sur le site [Substreams Documentation](/substreams) pour en savoir plus sur Substreams. -## What are Substreams-powered subgraphs? +## Qu'est-ce qu'un subgraph alimenté par des courants de fond ? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +Les [subgraphs alimentés par Substreams](/cookbook/substreams-powered-subgraphs/) combinent la puissance de Substreams avec la capacité d'interrogation des subgraphs. Lors de la publication d'un subgraph alimenté par Substreams, les données produites par les transformations Substreams peuvent [output entity changes] \(https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), qui sont compatibles avec les entités du subgraph. -If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +Si vous êtes déjà familiarisé avec le développement de subgraphs, notez que les subgraphs alimentés par Substreams peuvent ensuite être interrogés, comme s'ils avaient été produits par la couche de transformation AssemblyScript, avec tous les avantages de Subgraph, comme la fourniture d'une API GraphQL dynamique et flexible. -## How are Substreams-powered subgraphs different from subgraphs? +## En quoi les subgraphs alimentés par les courants secondaires sont-ils différents des subgraphs ? -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. 
+Les subgraphs sont constitués de sources de données qui spécifient les événements de la chaîne et la manière dont ces événements doivent être traités par des gestionnaires écrits en Assemblyscript. Ces événements sont traités de manière séquentielle, en fonction de l'ordre dans lequel ils se produisent dans la chaîne. -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. +En revanche, les subgraphs alimentés par des flux secondaires disposent d'une source de données unique référençant un paquet de flux secondaires, qui est traité par le nœud de graphe. Les subgraphs ont accès à des données granulaires supplémentaires sur la chaîne par rapport aux subgraphs conventionnels et peuvent également bénéficier d'un traitement massivement parallélisé, ce qui peut se traduire par des temps de traitement beaucoup plus rapides. -## What are the benefits of using Substreams-powered subgraphs? +## Quels sont les avantages de l'utilisation de subgraphs alimentés par des courants descendants ? -Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. +Les subgraphs alimentés par Substreams combinent tous les avantages de Substreams avec la capacité d'interrogation des subgraphs. Ils apportent au graphe une plus grande composabilité et une indexation très performante. Ils permettent également de nouveaux cas d'utilisation des données ; par exemple, une fois que vous avez construit votre subgraph alimenté par Substreams, vous pouvez réutiliser vos [modules Substreams](https://substreams.streamingfast.io/developers-guide/modules) pour sortir vers différents [sinks] \(https://substreams.streamingfast.io/developers-guide/sink-targets) tels que PostgreSQL, MongoDB et Kafka. -## What are the benefits of Substreams? +## Quels sont les avantages de Substreams ? -There are many benefits to using Substreams, including: +L'utilisation de Substreams présente de nombreux avantages, notamment: -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. +- Composable : Vous pouvez empiler les modules Substreams comme des blocs LEGO et construire des modules communautaires pour affiner les données publiques. -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- Indexation haute performance : Indexation plus rapide d'un ordre de grandeur grâce à des grappes d'opérations parallèles à grande échelle (comme BigQuery). -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- Sortez vos données n'importe où : Transférez vos données où vous le souhaitez : PostgreSQL, MongoDB, Kafka, subgraphs, fichiers plats, Google Sheets. 
-- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- Programmable : Utilisez du code pour personnaliser l'extraction, effectuer des agrégations au moment de la transformation et modéliser vos résultats pour plusieurs puits. -- Access to additional data which is not available as part of the JSON RPC +- Accès à des données supplémentaires qui ne sont pas disponibles dans le cadre de la RPC JSON - All the benefits of the Firehose. ## What is the Firehose? -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +Développé par [StreamingFast] \(https://www.streamingfast.io/), le Firehose est une couche d'extraction de données de blockchain conçue à partir de zéro pour traiter l'historique complet des blockchains à des vitesses jusqu'alors inconnues . Obtenez une approche basée sur les fichiers et le streaming, il s'agit d'un composant essentiel de la suite de technologies open-source de StreamingFast et de la base de Substreams. -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Consultez la [documentation] \(https://firehose.streamingfast.io/) pour en savoir plus sur le Firehose. -## What are the benefits of the Firehose? +## Quels sont les avantages du Firehose ? There are many benefits to using Firehose, including: -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. +- Temps de latence le plus faible et pas d'interrogation : Les nœuds Firehose sont conçus pour faire la course afin de diffuser les données en bloc en premier, selon le principe "streaming-first". - Prevents downtimes: Designed from the ground up for High Availability. -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. +- Ne manquez jamais le rythme : Le curseur du flux Firehose est conçu pour gérer les bifurcations et pour reprendre là où vous vous êtes arrêté dans n'importe quelle condition. -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- Modèle de données le plus riche :   Meilleur modèle de données qui inclut les changements de solde, l'arbre d'appel complet, les transactions internes, les journaux, les changements de stockage, les coûts du gaz, etc. -- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- Exploite les fichiers plats : Les données de la blockchain sont extraites dans des fichiers plats, la ressource informatique la moins chère et la plus optimisée disponible. -## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## Où les développeurs peuvent-ils trouver plus d'informations sur les subgraphs alimentés par Substreams et sur Substreams ? -The [Substreams documentation](/substreams) will teach you how to build Substreams modules. +La [documentation Substreams](/substreams) vous apprendra à construire des modules Substreams. 
-The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +La [documentation sur les subgraphs alimentés par des flux partiels] \(/cookbook/substreams-powered-subgraphs/) vous montrera comment les emballer pour les déployer sur The Graph. -## What is the role of Rust modules in Substreams? +## Quel est le rôle des modules Rust dans Substreams ? -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +Les modules Rust sont l'équivalent des mappeurs AssemblyScript dans les subgraphs. Ils sont compilés dans WASM de la même manière, mais le modèle de programmation permet une exécution parallèle. Ils définissent le type de transformations et d'agrégations que vous souhaitez appliquer aux données brutes de la blockchain. -See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. +Voir [documentation des modules] \(https://substreams.streamingfast.io/developers-guide/modules) pour plus de détails. -## What makes Substreams composable? +## Qu'est-ce qui rend Substreams composable ? -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +Lors de l'utilisation de Substreams, la composition a lieu au niveau de la couche de transformation, ce qui permet de réutiliser les modules mis en cache. -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. +Par exemple, Alice peut créer un module de prix DEX, Bob peut l'utiliser pour créer un agrégateur de volume pour certains jetons qui l'intéressent, et Lisa peut combiner quatre modules de prix DEX individuels pour créer un oracle de prix. Une seule requête Substreams regroupera tous ces modules individuels, les reliera entre eux, pour offrir un flux de données beaucoup plus raffiné. Ce flux peut ensuite être utilisé pour alimenter un subgraph et être interrogé par les consommateurs. -## How can you build and deploy a Substreams-powered Subgraph? +## Comment pouvez-vous créer et déployer un Subgraph basé sur Substreams ? -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). +Après avoir [defining](/cookbook/substreams-powered-subgraphs/) un Subgraph alimenté par Substreams, vous pouvez utiliser la CLI Graph pour le déployer dans [Subgraph Studio](https://thegraph.com/studio/). -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## Où puis-je trouver des exemples de subgraphs et de subgraphs alimentés par des substreams ? -You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. 
+Vous pouvez visiter [ce repo Github] \(https://github.com/pinax-network/awesome-substreams) pour trouver des exemples de Substreams et de subgraphs alimentés par Substreams. -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? +## Que signifient les subgraphs et les subgraphs alimentés par des substreams pour le réseau graph ? -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +L'intégration promet de nombreux avantages, notamment une indexation extrêmement performante et une plus grande composabilité grâce à l'exploitation des modules de la communauté et à leur développement. diff --git a/website/pages/fr/developing/supported-networks.json b/website/pages/fr/developing/supported-networks.json index 5e12392b8c7d..cdfccb6de5ee 100644 --- a/website/pages/fr/developing/supported-networks.json +++ b/website/pages/fr/developing/supported-networks.json @@ -1,9 +1,9 @@ { "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "cliName": "Nom du CLI", + "chainId": "ID de la chaîne", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "Réseau décentralisé", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/fr/developing/supported-networks.mdx b/website/pages/fr/developing/supported-networks.mdx index fff8e7f23cfb..81cdce0b418b 100644 --- a/website/pages/fr/developing/supported-networks.mdx +++ b/website/pages/fr/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Réseaux pris en charge +title: Réseaux supportés --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,16 +9,16 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). 
+Pour une liste complète des fonctionnalités prises en charge par le réseau décentralisé, voir [cette page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). -Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. +Les subgraphs alimentés par Substreams indexant `mainnet` Ethereum sont pris en charge sur le Subgraph Studio et le réseau décentralisé. -## Nœud The Graph +## Nœud de The Graph -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +Si votre réseau préféré n'est pas pris en charge par le réseau décentralisé du Graph, vous pouvez lancer votre propre [Nœud Graph](https://github.com/graphprotocol/graph-node) pour indexer tout réseau compatible avec l'EVM. Assurez-vous que la [version](https://github.com/graphprotocol/graph-node/releases) que vous utilisez prend en charge le réseau et que vous disposez de la configuration nécessaire. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/fr/developing/unit-testing-framework.mdx b/website/pages/fr/developing/unit-testing-framework.mdx index 0dc0037845ad..4cdbb7245322 100644 --- a/website/pages/fr/developing/unit-testing-framework.mdx +++ b/website/pages/fr/developing/unit-testing-framework.mdx @@ -1,14 +1,14 @@ --- -title: Cadre de test unitaire +title: Cadre pour les tests unitaires --- -Matchstick est un cadre de test unitaire, développé par [LimeChain](https://limechain.tech/), qui permet aux développeurs de subgraphes de tester leur logique de cartographie dans un environnement sandbox et de déployer leurs subgraphes en toute confiance ! +Matchstick est un cadre de test unitaire, développé par [LimeChain](https://limechain.tech/), qui permet aux développeurs de subgraphs de tester leur logique de cartographie dans un environnement de type bac à sable et de déployer leurs subgraphs en toute confiance ! -## Introduction +## Démarrage -### Configuration requise +### Installer les dépendances -Afin d'utiliser les méthodes d'aide aux tests et d'exécuter les tests, vous devrez installer les composants suivants : +Pour utiliser les méthodes d'assistance aux tests et exécuter les tests, vous devrez installer les dépendances suivantes : ```sh yarn add --dev matchstick-as @@ -16,15 +16,15 @@ yarn add --dev matchstick-as ❗ `graph-node` dépend de PostgreSQL, donc si vous ne l'avez pas déjà, vous devrez l'installer. Nous vous conseillons vivement d'utiliser les commandes ci-dessous, car l'ajouter d'une autre manière peut provoquer des erreurs inattendues ! 

-#### MacOS
+#### MacOS

-Commande d'installation de Postgres :
+Commande d'installation de Postgres :

```sh
-brew install postgresql
+brew install postgresql
```

-Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/`
+Créez un lien symbolique vers la dernière version de libpq.5.lib _Vous devrez peut-être d'abord créer ce répertoire_ `/usr/local/opt/postgresql/lib/`

```sh
ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib
@@ -32,101 +32,101 @@ ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/o

#### Linux

-Commande d'installation de Postgres (dépend de votre fournisseur) :
+Commande d'installation de Postgres (dépend de votre distribution) :

```sh
-sudo apt install postgresql
+sudo apt install postgresql
```

-### WSL (sous-système Windows pour Linux)
+### WSL (Sous-système Windows pour Linux)

-Vous pouvez utiliser Matchstick sur WSL aussi bien en utilisant l'approche Docker que l'approche binaire. Comme WSL peut être un peu délicat, voici quelques conseils au cas où vous rencontreriez des problèmes tels que :
+Vous pouvez utiliser Matchstick sur WSL aussi bien avec l'approche Docker qu'avec l'approche binaire. Comme WSL peut être un peu délicat, voici quelques conseils au cas où vous rencontreriez des problèmes tels que :

```
-static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =
+static BYTES = Symbol("Bytes") SyntaxError: Unexpected token =
```

-ou
+ou bien

```
-/node_modules/gluegun/build/index.js:13 throw up;
+/node_modules/gluegun/build/index.js:13 throw up;
```

-Veuillez vous assurer que vous êtes sur une version plus récente de Node.js, car graph-cli ne supporte plus la **v10.19.0**. En effet, c'est toujours la version par défaut pour les nouvelles images Ubuntu sur WSL. Par exemple, il est confirmé que Matchstick fonctionne sur WSL avec la **v18.1.0**, vous pouvez par conséquent passer à cette version soit via **nvm**, soit en mettant à jour votre Node.js global. N'oubliez pas de supprimer `node_modules` et de relancer `npm install` après avoir mis à jour votre nodejs ! Ensuite, assurez-vous que vous avez installé **libpq**, vous pouvez le faire en exécutant :
+Veuillez vous assurer que vous utilisez une version plus récente de Node.js : graph-cli ne prend plus en charge la **v10.19.0**, qui est pourtant toujours la version par défaut des nouvelles images Ubuntu sur WSL. Il est par exemple confirmé que Matchstick fonctionne sur WSL avec la **v18.1.0** ; vous pouvez y passer soit via **nvm**, soit en mettant à jour votre Node.js global. N'oubliez pas de supprimer `node_modules` et d'exécuter à nouveau `npm install` après avoir mis à jour votre Node.js ! Ensuite, assurez-vous que **libpq** est installé ; vous pouvez le faire en exécutant :

```
sudo apt-get install libpq-dev
```

-And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). 
For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as
+Et enfin, n'utilisez pas `graph test` (qui utilise votre installation globale de graph-cli et qui, pour une raison inconnue, semble actuellement cassée sur WSL) ; utilisez plutôt `yarn test` ou `npm run test` (qui utilisera l'instance locale de graph-cli au niveau du projet, et qui fonctionne à merveille). Pour cela, vous devez bien sûr avoir un script `"test"` dans votre fichier `package.json`, qui peut être quelque chose d'aussi simple que

```json
{
-  "name": "demo-subgraph",
-  "version": "0.1.0",
-  "scripts": {
-    "test": "graph test",
-    ...
-  },
-  "dependencies": {
-    "@graphprotocol/graph-cli": "^0.30.0",
-    "@graphprotocol/graph-ts": "^0.27.0",
-    "matchstick-as": "^0.5.0"
-  }
+  "name": "demo-subgraph",
+  "version": "0.1.0",
+  "scripts": {
+    "test": "graph test",
+    ...
+  },
+  "dependencies": {
+    "@graphprotocol/graph-cli": "^0.30.0",
+    "@graphprotocol/graph-ts": "^0.27.0",
+    "matchstick-as": "^0.5.0"
+  }
}
```

-### Utilisation
+### Utilisation

-To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified).
+Pour utiliser **Matchstick** dans votre projet de subgraph, il suffit d'ouvrir un terminal, de naviguer vers le dossier racine de votre projet et d'exécuter simplement `graph test [options] ` - la commande télécharge le dernier binaire **Matchstick** et exécute le test spécifié ou tous les tests d'un dossier de test (ou tous les tests existants si aucun flag de datasource n'est spécifié).

-### Options CLI
+### Options de la CLI

-Ceci lancera tous les tests dans le dossier de test :
+Ceci lancera tous les tests dans le dossier de test :

```sh
graph test
```

-Cela lancera un test nommé gravity.test.ts et/ou tous les tests à l'intérieur d'un dossier nommé gravity :
+Ceci lancera un test nommé gravity.test.ts et/ou tous les tests à l'intérieur d'un dossier nommé gravity :

```sh
-graph test gravity
+graph test gravity
```

-Cela n'exécutera que ce fichier de test spécifique :
+Ceci n'exécutera que ce fichier de test spécifique :

```sh
-graph test path/to/file.test.ts
+graph test path/to/file.test.ts
```

-**Options :**
+**Options :**

```sh
--c, --coverage Run the tests in coverage mode
--d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph)
--f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image.
--h, --help Show usage information
--l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes)
--r, --recompile Forces tests to be recompiled
--v, --version Choose the version of the rust binary that you want to be downloaded/used
+-c, --coverage Exécute les tests en mode couverture
+-d, --docker Exécute les tests dans un conteneur Docker (Remarque : veuillez exécuter la commande depuis le dossier racine du subgraph)
+-f, --force Binaire : retélécharge le binaire. Docker : retélécharge le Dockerfile et reconstruit l'image Docker. 
+-h, --help Afficher les informations d'utilisation
+-l, --logs Enregistre dans la console des informations sur le système d'exploitation, le modèle de processeur et l'URL de téléchargement (à des fins de débogage)
+-r, --recompile Force les tests à être recompilés
+-v, --version Choisissez la version du binaire Rust que vous souhaitez télécharger/utiliser
```

### Docker

-Depuis la version `0.25.2 de graph-cli`, la commande `graph test` supporte l'exécution de `matchstick` dans un conteneur docker avec le drapeau `-d`. L'implémentation de docker utilise [bind mount](https://docs.docker.com/storage/bind-mounts/) pour ne pas avoir à reconstruire l'image docker à chaque fois que la commande `graph test -d` est exécutée. Alternativement, vous pouvez suivre les instructions du dépôt [matchstick](https://github.com/LimeChain/matchstick#docker-) pour exécuter docker manuellement.
+À partir de `graph-cli 0.25.2`, la commande `graph test` prend en charge l'exécution de `matchstick` dans un conteneur Docker avec le drapeau `-d`. L'implémentation Docker utilise le [bind mount](https://docs.docker.com/storage/bind-mounts/) afin de ne pas avoir à reconstruire l'image Docker à chaque fois que la commande `graph test -d` est exécutée. Vous pouvez également suivre les instructions du dépôt [matchstick](https://github.com/LimeChain/matchstick#docker-) pour exécuter Docker manuellement.

-❗ Si vous avez précédemment exécuté `graph test`, vous pouvez rencontrer l'erreur suivante pendant la construction de docker :
+❗ Si vous avez précédemment exécuté `graph test`, vous risquez de rencontrer l'erreur suivante lors de la construction de l'image Docker :

```sh
-  error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied
+  error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied
```

-In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin`
+Dans ce cas, créez un fichier `.dockerignore` dans le dossier racine et ajoutez-y `node_modules/binary-install-raw/bin`

-### Configuration
+### Configuration

-Matchstick peut être configuré pour utiliser un chemin personnalisé pour les tests, les libs et le manifeste via le fichier de configuration `matchstick.yaml` :
+Matchstick peut être configuré pour utiliser un chemin personnalisé pour les tests, les bibliothèques et le manifeste via le fichier de configuration `matchstick.yaml` :

```yaml
testsFolder: path/to/tests
@@ -134,40 +134,40 @@ libsFolder: path/to/libs
manifestPath: path/to/subgraph.yaml
```

-### Subgraphe de démonstration
+### Subgraph de démonstration

-Vous pouvez essayer et jouer avec les exemples de ce guide en clonant le [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph)
+Vous pouvez tester et expérimenter les exemples de ce guide en clonant le repo [Demo Subgraph](https://github.com/LimeChain/demo-subgraph)

-### Tutoriels vidéo
+### Tutoriels vidéo

-Vous pouvez également consulter la série de vidéos sur [« comment utiliser Matchstick pour écrire des tests unitaires pour vos subgraphes »](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)
+Vous pouvez également consulter la série de vidéos sur [« comment utiliser Matchstick pour écrire des tests unitaires pour vos subgraphs »](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h)

-## Structure des tests (> 0.5.0)
+## Structure des tests (>=0.5.0)

-_**IMPORTANT : Nécessite matchstick-as >=0.5.0**_
+_**IMPORTANT : Requiert matchstick-as >=0.5.0**_

-### describe()
+### describe()
-`describe(name: String , () => {})` - Définit un groupe de test.
+`describe(name: String, () => {})` - Définit un groupe de test.

-**_Remarques :_**
+**_Remarques :_**

-- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_
+- _Les blocs describe() ne sont pas obligatoires. Vous pouvez toujours utiliser test() à l'ancienne, en dehors des blocs describe()_

-Exemple:
+Exemple :

```typescript
-import { describe, test } from "matchstick-as/assembly/index"
-import { handleNewGravatar } from "../../src/gravity"
+import { describe, test } from "matchstick-as/assembly/index"
+import { handleNewGravatar } from "../../src/gravity"

-describe("handleNewGravatar()", () => {
-  test("Should create a new Gravatar entity", () => {
+describe("handleNewGravatar()", () => {
+  test("Devrait créer une nouvelle entité Gravatar", () => {
    ...
  })
})
```

-Nested `describe()` example:
+Exemple de `describe()` imbriqué :

```typescript
import { describe, test } from "matchstick-as/assembly/index"
@@ -190,27 +190,27 @@ describe("handleUpdatedGravatar()", () => {

---

-### test()
+### test()

-`test(name : String, () =>, should_fail : bool)` - Définit un scénario de test. Vous pouvez utiliser test() à l'intérieur de blocs describe() ou indépendamment.
+`test(name: String, () =>, should_fail: bool)` - Définit un scénario de test. Vous pouvez utiliser test() à l'intérieur de blocs describe() ou indépendamment.

-Exemple:
+Exemple :

```typescript
-import { describe, test } from "matchstick-as/assembly/index"
-import { handleNewGravatar } from "../../src/gravity"
+import { describe, test } from "matchstick-as/assembly/index"
+import { handleNewGravatar } from "../../src/gravity"

-describe("handleNewGravatar()", () => {
-  test("Should create a new Entity", () => {
+describe("handleNewGravatar()", () => {
+  test("Devrait créer une nouvelle entité", () => {
    ...
  })
})
```

-ou
+ou bien

```typescript
-test("handleNewGravatar() should create a new entity", () => {
+test("handleNewGravatar() doit créer une nouvelle entité", () => {
  ...

})
@@ -219,18 +219,18 @@ test("handleNewGravatar() should create a new entity", () => {

---

-### beforeAll()
+### beforeAll()

-Exécute un bloc de code avant tous les tests du fichier. Si `beforeAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute au début de ce bloc `describe`.
+Exécute un bloc de code avant tous les tests du fichier. Si `beforeAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute au début de ce bloc `describe`.

-Exemples:
+Exemples :

-Le code contenu dans `beforeAll` sera exécuté une fois avant _tous_ les tests du fichier.
+Le code contenu dans `beforeAll` sera exécuté une fois avant _tous_ les tests du fichier.

```typescript
-import { describe, test, beforeAll } from "matchstick-as/assembly/index"
-import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
-import { Gravatar } from "../../generated/schema"
+import { describe, test, beforeAll } from "matchstick-as/assembly/index"
+import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity"
+import { Gravatar } from "../../generated/schema"

beforeAll(() => {
  let gravatar = new Gravatar("0x0")
@@ -239,20 +239,20 @@ beforeAll(() => {
  ...
})

-describe("When the entity does not exist", () => {
-  test("it should create a new Gravatar with id 0x1", () => {
+describe("Lorsque l'entité n'existe pas", () => {
+  test("il devrait créer un nouveau Gravatar avec l'id 0x1", () => {
    ...
}) }) -describe("When entity already exists", () => { - test("it should update the Gravatar with id 0x0", () => { +describe("Lorsque l'entité existe déjà", () => { + test("il devrait mettre à jour le Gravatar avec l'id 0x0", () => { ... }) }) ``` -Le code contenu dans `beforeAll` sera exécuté une fois avant tous les tests du fichier +Le code contenu dans `beforeAll` sera exécuté une fois avant tous les tests du premier bloc de description ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -262,16 +262,16 @@ import { Gravatar } from "../../generated/schema" describe("handleUpdatedGravatar()", () => { beforeAll(() => { let gravatar = new Gravatar("0x0") - gravatar.displayName = “First Gravatar” + gravatar.displayName = "Premier Gravatar" gravatar.save() ... }) - test("updates Gravatar with id 0x0", () => { + test("met à jour le Gravatar avec l'identifiant 0x0", () => { ... }) - test("creates new Gravatar with id 0x1", () => { + test("crée un nouveau Gravatar avec l'identifiant 0x1", () => ; { ... }) }) @@ -281,40 +281,41 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Exécute un bloc de code après tous les tests du fichier. Si `afterAll` est déclaré à l'intérieur d'un bloc `describe`, il est exécuté à la fin de ce bloc `describe`. +Lance un bloc de code après tous les tests du fichier. Si `afterAll` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute à la fin de ce bloc `describe`. -Exemple: +L'exemple: -Code inside `afterAll` will execute once after _all_ tests in the file. +Le code situé dans `afterAll` sera exécuté une fois après _all_ tests dans le fichier. ```typescript -import { describe, test, afterAll } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" -import { store } from "@graphprotocol/graph-ts" +importer { describe, test, afterAll } depuis "matchstick-as/assembly/index" +importer { handleUpdatedGravatar, handleNewGravatar } depuis "../../src/gravity" +importer { store } depuis "@graphprotocol/graph-ts" -afterAll(() => { - store.remove("Gravatar", "0x0") - ... +aprèsTout(() => { + store.remove("Gravatar", "0x0") + ... }) -describe("handleNewGravatar, () => { - test("creates Gravatar with id 0x0", () => { +décrire("handleNewGravatar, () => { + test("crée un Gravatar avec l'identifiant 0x0", () => { ... }) }) -describe("handleUpdatedGravatar", () => { - test("updates Gravatar with id 0x0", () => { +décrire("handleUpdatedGravatar", () => { + test("met à jour Gravatar avec l'identifiant 0x0", () => { ... - }) + } ... + }) }) ``` -Code inside `afterAll` will execute once after all tests in the first describe block +Le code à l'intérieur de `afterAll` s'exécute une fois après tous les tests du premier bloc de description ```typescript -import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" -import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" +importez { describe, test, afterAll, clearStore } du "matchstick-as/assembly/index" +importez { handleUpdatedGravatar, handleNewGravatar } du "../../src/gravity" describe("handleNewGravatar", () => { afterAll(() => { @@ -342,12 +343,12 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. +Lance un bloc de code avant chaque test. 
Si `beforeEach` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute avant chaque test de ce bloc `describe`. -Examples: Code inside `beforeEach` will execute before each tests. +Exemples : Le code contenu dans `beforeEach` s'exécute avant chaque test. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" import { handleNewGravatars } from "./utils" beforeEach(() => { @@ -367,11 +368,11 @@ describe("handleNewGravatars, () => { ... ``` -Code inside `beforeEach` will execute only before each test in the that describe +Le code contenu dans `beforeEach` ne s'exécutera qu'avant chaque test du bloc `describe` correspondant ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' describe('handleUpdatedGravatars', () => { beforeEach(() => { @@ -384,7 +385,7 @@ describe('handleUpdatedGravatars', () => { test('Upates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // code that should update the displayName to 1st Gravatar + // code qui devrait mettre à jour le displayName en '1st Gravatar' assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -393,7 +394,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -405,14 +406,14 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. +Lance un bloc de code après chaque test. Si `afterEach` est déclaré à l'intérieur d'un bloc `describe`, il s'exécute après chaque test de ce bloc `describe`. -Exemples: +Exemples : Code inside `afterEach` will execute after every test. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" beforeEach(() => { @@ -433,7 +434,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // code qui devrait mettre à jour le displayName en '1st Gravatar' assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -441,18 +442,18 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Code inside `afterEach` will execute after each test in that describe +Le code contenu dans `afterEach` s'exécutera après chaque test de ce bloc `describe` ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" import { handleUpdatedGravatar, handleNewGravatar } from "../../src/gravity" describe("handleNewGravatar", () => { ... @@ -473,7 +474,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // code qui devrait mettre à jour le displayName en '1st Gravatar' assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -481,7 +482,7 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // code qui devrait changer l'imageUrl en https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -491,162 +492,162 @@ describe("handleUpdatedGravatar", () => { ## Asserts ```typescript fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) equals(expected: ethereum.Value, actual: ethereum.Value) notInStore(entityType: string, id: string) addressEquals(address1: Address, address2: Address) bytesEquals(bytes1: Bytes, bytes2: Bytes) i32Equals(number1: i32, number2: i32) bigIntEquals(bigInt1: BigInt, bigInt2: BigInt) booleanEquals(bool1: boolean, bool2: boolean) stringEquals(string1: string, string2: string) arrayEquals(array1: Array, array2: Array) tupleEquals(tuple1: ethereum.Tuple, tuple2: ethereum.Tuple) assertTrue(value: boolean) assertNull(value: T) assertNotNull(value: T) entityCount(entityType: string, expectedCount: i32) ``` -## Write a Unit Test +## Écrire un test unitaire -Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Voyons à quoi ressemblerait un test unitaire simple en utilisant les exemples Gravatar dans le [subgraph de démonstration](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). -Assuming we have the following handler function (along with two helper functions to make our life easier): +En supposant que nous disposions de la fonction de traitement suivante (ainsi que de deux fonctions d'aide pour nous faciliter la vie) : ```typescript export function handleNewGravatar(event: NewGravatar): void { let gravatar = new Gravatar(event.params.id.toHex()) gravatar.owner = event.params.owner gravatar.displayName = event.params.displayName gravatar.imageUrl = event.params.imageUrl gravatar.save() } export function handleNewGravatars(events: NewGravatar[]): void { events.forEach((event) => { handleNewGravatar(event) }) } export function createNewGravatarEvent( id: i32, ownerAddress: string, displayName: string, imageUrl: string, ): NewGravatar { let mockEvent = newMockEvent() let newGravatarEvent = new NewGravatar( mockEvent.address, mockEvent.logIndex, mockEvent.transactionLogIndex, mockEvent.logType, mockEvent.block, mockEvent.transaction, mockEvent.parameters, ) newGravatarEvent.parameters = new Array() let idParam = new ethereum.EventParam('id', ethereum.Value.fromI32(id)) let addressParam = new ethereum.EventParam( 'ownderAddress', ethereum.Value.fromAddress(Address.fromString(ownerAddress)), ) let displayNameParam = new ethereum.EventParam('displayName', ethereum.Value.fromString(displayName)) let imageUrlParam = new ethereum.EventParam('imageUrl', ethereum.Value.fromString(imageUrl)) newGravatarEvent.parameters.push(idParam) newGravatarEvent.parameters.push(addressParam) newGravatarEvent.parameters.push(displayNameParam) newGravatarEvent.parameters.push(imageUrlParam) return newGravatarEvent } ``` -We first have to create a test file in our project. This is an example of how that might look like: +Nous devons tout d'abord créer un fichier de test dans notre projet. Voici un exemple de ce à quoi cela pourrait ressembler : ```typescript -import { clearStore, test, assert } from 'matchstick-as/assembly/index' -import { Gravatar } from '../../generated/schema' -import { NewGravatar } from '../../generated/Gravity/Gravity' -import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' - -test('Can call mappings with custom events', () => { - // Create a test entity and save it in the store as initial state (optional) - let gravatar = new Gravatar('gravatarId0') - gravatar.save() - - // Create mock events - let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - - // Call mapping functions passing the events we just created - handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - - // Assert the state of the store - assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') - assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') - assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - - // Clear the store in order to start the next test off on a clean slate - clearStore() +import { clearStore, test, assert } from 'matchstick-as/assembly/index' +import { Gravatar } from '../../generated/schema' +import { NewGravatar } from '../../generated/Gravity/Gravity' +import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' + +test('Peut appeler des mappages avec des événements personnalisés', () => { + // Créez une entité de test et enregistrez-la dans le magasin comme état initial (facultatif) + let gravatar = new Gravatar('gravatarId0') + gravatar.save() + + // Créer des événements fictifs + let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') + + // Appelez les fonctions de mappage en passant les événements que nous venons de créer + handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) + + // Affirmer l'état du magasin + assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') + assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') + assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') + + // Vider le magasin afin de démarrer le prochain test sur une table rase + clearStore() }) -test('Next test', () => { - //... +test('Test suivant', () => { + //... }) ``` -That's a lot to unpack!
First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: +Cela fait beaucoup à décortiquer ! Tout d'abord, une chose importante à noter est que nous importons des choses de `matchstick-as`, notre bibliothèque d'aide AssemblyScript (distribuée en tant que module npm). Vous pouvez trouver le dépôt [ici](https://github.com/LimeChain/matchstick-as). `matchstick-as` nous fournit des méthodes de test utiles et définit également la fonction `test()` que nous utiliserons pour construire nos blocs de test. Le reste est assez simple - voici ce qui se passe : -- We're setting up our initial state and adding one custom Gravatar entity; -- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; -- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; -- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; -- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. +- Nous mettons en place notre état initial et ajoutons une entité Gravatar personnalisée ; +- Nous définissons deux objets événement `NewGravatar` avec leurs données, en utilisant la fonction `createNewGravatarEvent()` ; +- Nous appelons les méthodes de gestion pour ces événements - `handleNewGravatars()` - en passant la liste de nos événements personnalisés ; +- Nous affirmons l'état du magasin. Comment cela fonctionne-t-il ? - Nous passons une combinaison unique de type d'entité et d'identifiant. Ensuite, nous vérifions un champ spécifique de cette entité et affirmons qu'il a la valeur que nous attendons. Nous faisons cela à la fois pour l'entité Gravatar initiale que nous avons ajoutée au magasin, ainsi que pour les deux entités Gravatar qui sont ajoutées lorsque la fonction de gestion est appelée ; +- Et enfin, nous vidons le magasin à l'aide de `clearStore()` afin que notre prochain test puisse commencer avec un objet magasin frais et vide. Nous pouvons définir autant de blocs de test que nous le souhaitons. -There we go - we've created our first test! 👏 +Et voilà, nous avons créé notre premier test ! 👏 Now in order to run our tests you simply need to run the following in your subgraph root folder: `graph test Gravity` -And if all goes well you should be greeted with the following: +Et si tout se passe bien, vous devriez être accueilli par ce qui suit : ![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) ## Scénarios de test courants -### Hydrating the store with a certain state +### Hydratation du magasin avec un certain état -Les utilisateurs ont la possibilité d'initialiser le magasin avec un ensemble connu d'entités. Voici un exemple pour initialiser le magasin avec une entité Gravatar : +Les utilisateurs peuvent hydrater le magasin avec un ensemble connu d'entités. Voici un exemple pour initialiser le magasin avec une entité Gravatar : ```typescript let gravatar = new Gravatar('entryId') gravatar.save() ``` ### Appel d'une fonction de mapping avec un événement -Un utilisateur peut créer un événement personnalisé et le transmettre à une fonction de mappage liée au magasin : +Un utilisateur peut créer un événement personnalisé et le transmettre à une fonction de mapping liée au magasin : ```typescript import { store } from 'matchstick-as/assembly/store' import { NewGravatar } from '../../generated/Gravity/Gravity' import { handleNewGravatars, createNewGravatarEvent } from './mapping' let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') @@ -654,14 +655,14 @@ let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01 handleNewGravatar(newGravatarEvent) ``` -### Appeler toutes les mappings avec des fixtures d'événement +### Appeler tous les mappings avec des fixtures d'événement -Les utilisateurs peuvent appeler les mappings avec des dispositifs de test. +Les utilisateurs peuvent appeler les mappings avec des fixtures de test. ```typescript import { NewGravatar } from '../../generated/Gravity/Gravity' import { store } from 'matchstick-as/assembly/store' import { handleNewGravatars, createNewGravatarEvent } from './mapping' let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01 handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) ``` ``` export function handleNewGravatars(events: NewGravatar[]): void { events.forEach(event => { handleNewGravatar(event); }); } ``` ### Simulation d'appels de contrats -Les utilisateurs peuvent simuler des appels de contrat : +Les utilisateurs peuvent simuler des appels de contrat : ```typescript import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' import { Gravity } from '../../generated/Gravity/Gravity' import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') let expectedResult = Address.fromString('0x90cBa2Bbb19ecc291A12066Fd8329D65FA1f1947') @@ -700,115 +701,116 @@ let result = gravity.gravatarToOwner(bigIntParam) assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) ``` -Comme nous l'avons démontré, pour simuler un appel de contrat et souligner une valeur de retour,
l'utilisateur doit fournir une adresse de contrat, un nom de fonction, une signature de fonction, un tableau d'arguments et, bien sûr, la valeur de retour. +Comme démontré, pour simuler un appel de contrat et lui associer une valeur de retour, l'utilisateur doit fournir une adresse de contrat, un nom de fonction, une signature de fonction, un tableau d'arguments et, bien sûr, la valeur de retour. -Users can also mock function reverts: +Les utilisateurs peuvent également simuler des annulations (reverts) de fonctions : ```typescript let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(string,string)') - .withArgs([ethereum.Value.fromAddress(contractAddress)]) - .reverts() + .withArgs([ethereum.Value.fromAddress(contractAddress)]) + .reverts() ``` -### Simulation de fichiers IPFS (à partir de matchstick 0.4.1) +### Simulation de fichiers IPFS (à partir de Matchstick 0.4.1) -Les utilisateurs peuvent simuler des fichiers IPFS en utilisant la fonction `mockIpfsFile(hash, filePath)`. La fonction accepte deux arguments, le premier est le hash/chemin du fichier IPFS et le second est le chemin vers un fichier local. +Les utilisateurs peuvent simuler les fichiers IPFS en utilisant la fonction `mockIpfsFile(hash, filePath)`. La fonction accepte deux arguments, le premier est le hachage/chemin du fichier IPFS et le second est le chemin d'accès à un fichier local. -REMARQUE : Lorsque vous testez `ipfs.map/ipfs.mapJSON`, la fonction de rappel doit être exportée du fichier de test afin que matchstck puisse la détecter, comme la fonction `processGravatar()` dans l'exemple de test ci-dessous : +REMARQUE : Lorsque vous testez `ipfs.map/ipfs.mapJSON`, la fonction de rappel doit être exportée du fichier de test afin que Matchstick puisse la détecter, comme la fonction `processGravatar()` dans l'exemple de test ci-dessous : -Fichier `.test.ts` : +Fichier `.test.ts` : ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Exportation du callback ipfs.map() pour que matchstck le détecte. -export { processGravatar } from './utils' +// Exportation du callback ipfs.map() pour que Matchstick le détecte. +export { processGravatar } from './utils' test('ipfs.cat', () => { - mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json') + mockIpfsFile('ipfsCatfileHash', 'tests/ipfs/cat.json') - assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) + assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) - gravatarFromIpfs() + gravatarFromIpfs() - assert.entityCount(GRAVATAR_ENTITY_TYPE, 1) - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg') + assert.entityCount(GRAVATAR_ENTITY_TYPE, 1) + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'imageUrl', 'https://i.ytimg.com/vi/MELP46s8Cic/maxresdefault.jpg') - clearStore() + clearStore() }) test('ipfs.map', () => { - mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json') + mockIpfsFile('ipfsMapfileHash', 'tests/ipfs/map.json') - assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) + assert.entityCount(GRAVATAR_ENTITY_TYPE, 0) - ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json']) + ipfs.map('ipfsMapfileHash', 'processGravatar', Value.fromString('Gravatar'), ['json']) - assert.entityCount(GRAVATAR_ENTITY_TYPE, 3) - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1') - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2') - assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3') + assert.entityCount(GRAVATAR_ENTITY_TYPE, 3) + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '1', 'displayName', 'Gravatar1') + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '2', 'displayName', 'Gravatar2') + assert.fieldEquals(GRAVATAR_ENTITY_TYPE, '3', 'displayName', 'Gravatar3') }) ``` -Fichier `utils.ts` : +Fichier `utils.ts` : ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" import { Gravatar } from "../../generated/schema" ... // rappel ipfs.map export function processGravatar(value: JSONValue, userData: Value): void { // Consultez la documentation de JSONValue pour plus de détails sur la façon de traiter les données - // avec JSON values + // avec les valeurs JSON let obj = value.toObject() let id = obj.get('id') if (!id) { return } // Des entités de rappel peuvent également être créées let gravatar = new Gravatar(id.toString()) gravatar.displayName = userData.toString() + id.toString() gravatar.save() } // fonction qui appelle ipfs.cat export function gravatarFromIpfs(): void { let rawData = ipfs.cat("ipfsCatfileHash") if (!rawData) { return } let jsonData = json.fromBytes(rawData as Bytes).toObject() let id = jsonData.get('id') let url = jsonData.get("imageUrl") if (!id || !url) { return } let gravatar = new Gravatar(id.toString()) gravatar.imageUrl = url.toString() gravatar.save() } ``` -### Affirmer l'état du magasin +### Affirmation de l'état du magasin -Les utilisateurs peuvent affirmer l'état final (ou intermédiaire) du magasin en affirmant des entités. Pour ce faire, l'utilisateur doit fournir un type d'entité, l'ID spécifique d'une entité, le nom d'un champ de cette entité et la valeur attendue du champ. Voici un exemple rapide : +Les utilisateurs peuvent affirmer l'état final (ou intermédiaire) du magasin en effectuant des assertions sur les entités. Pour ce faire, l'utilisateur doit fournir un type d'entité, l'ID spécifique d'une entité, le nom d'un champ sur cette entité et la valeur attendue du champ. Voici un exemple rapide : ```typescript import { assert } from 'matchstick-as/assembly/index' import { Gravatar } from '../generated/schema' let gravatar = new Gravatar('gravatarId0') gravatar.save() @@ -816,30 +818,30 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -L'exécution de la fonction assert.fieldEquals() vérifiera l'égalité du champ donné par rapport à la valeur attendue donnée. Le test échouera et un message d'erreur sera affiché si les valeurs **ne sont pas** égales. Sinon, le test passe avec succès. +L'exécution de la fonction assert.fieldEquals() vérifiera l'égalité du champ donné par rapport à la valeur attendue indiquée. Le test échouera et un message d'erreur sera généré si les valeurs ne sont **pas** égales. Sinon, le test réussira. -### Interaction avec les métadonnées d'événement +### Interagir avec les métadonnées d'événement -Les utilisateurs peuvent utiliser les métadonnées de transaction par défaut, qui peuvent être renvoyées comme un ethereum.Event en utilisant la fonction `newMockEvent()`. L'exemple suivant montre comment vous pouvez lire/écrire dans ces champs sur l'objet Event : +Les utilisateurs peuvent utiliser les métadonnées de transaction par défaut, qui peuvent être renvoyées comme un ethereum.Event en utilisant la fonction `newMockEvent()`.
L'exemple suivant montre comment vous pouvez lire/écrire dans ces champs sur l'objet Event : ```typescript -// Lire +// Lisez let logType = newGravatarEvent.logType -// Écriture +// Écrivez let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) ``` -### Affirmer l'égalité des variables +### Affirmation de l'égalité des variables ```typescript -assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); +assert.equals(ethereum.Value.fromString("bonjour"), ethereum.Value.fromString("bonjour")) ``` ### Affirmer qu'une entité **n'existe pas** dans le magasin -Les utilisateurs peuvent affirmer qu'une entité n'existe pas dans le magasin. La fonction prend un type d'entité et un identifiant. Si l'entité se trouve en fait dans le magasin, le test échouera avec un message d'erreur pertinent. Voici un exemple rapide de l'utilisation de cette fonctionnalité : +Les utilisateurs peuvent affirmer qu'une entité n'existe pas dans le magasin. La fonction prend un type d'entité et un identifiant. Si l'entité se trouve effectivement dans le magasin, le test échouera avec un message d'erreur pertinent. Voici un exemple rapide de la façon d'utiliser cette fonctionnalité : ```typescript assert.notInStore('Gravatar', '23') @@ -847,37 +849,37 @@ assert.notInStore('Gravatar', '23') ### Impression de l'ensemble du magasin (à des fins de débogage) -Vous pouvez imprimer l'ensemble du magasin sur la console à l'aide de cette fonction d'aide : +Vous pouvez imprimer l'intégralité du magasin sur la console à l'aide de cette fonction d'assistance : ```typescript import { logStore } from 'matchstick-as/assembly/store' logStore() ``` ### Échec attendu -Les utilisateurs peuvent prévoir des échecs de test, en utilisant l'indicateur shouldFail sur les fonctions test() : +Les utilisateurs peuvent s'attendre à des échecs de test, en utilisant l'indicateur shouldFail sur les fonctions test() : ```typescript test( - 'Should throw an error', - () => { - throw new Error() - }, - true, + 'Devrait générer une erreur', + () => { + throw new Error() + }, + true, ) ``` -Si le test est marqué avec shouldFail = true mais n'échoue PAS, cela apparaîtra comme une erreur dans les journaux et le bloc de test échouera. De même, s'il est marqué avec shouldFail = false (l'état par défaut), l'exécuteur de test se plantera. +Si le test est marqué avec shouldFail = true mais n'échoue PAS, cela apparaîtra comme une erreur dans les journaux et le bloc de test échouera. De plus, s'il est marqué avec shouldFail = false (l'état par défaut), l'exécuteur de test plantera. -### Journal de bord +### Journalisation -Avoir des logs personnalisés dans les tests unitaires est exactement la même chose que d'avoir des journaux dans les mappings. La différence est que l'objet log doit être importé de matchstick-as plutôt que de graph-ts. Voici un exemple simple avec tous les types de logs non critiques : +Avoir des journaux personnalisés dans les tests unitaires équivaut exactement à la journalisation dans les mappings. La différence est que l'objet `log` doit être importé depuis matchstick-as plutôt que depuis graph-ts.
Voici un exemple simple avec tous les types de journaux non critiques : ```typescript import { test } from "matchstick-as/assembly/index"; import { log } from "matchstick-as/assembly/log"; test("Success", () => { log.success("Success!". []); @@ -896,64 +898,64 @@ test("Warning", () => { }); -Les utilisateurs peuvent également simuler une défaillance critique, comme suit : +Les utilisateurs peuvent également simuler une panne critique, comme ceci : ```typescript -test('Blow everything up', () => { +test('Tout faire exploser', () => { log.critical('Boom!') }) ``` -La journalisation des erreurs critiques arrêtera l'exécution des tests et fera tout sauter. Après tout - nous voulons être sûrs que votre code n'a pas de logs critiques en déploiement, et vous devriez le remarquer tout de suite si cela devait arriver. +La journalisation des erreurs critiques arrêtera l'exécution des tests et fera tout exploser. Après tout, nous voulons nous assurer que votre code ne contient pas de journaux critiques lors du déploiement, et vous devriez le remarquer immédiatement si cela devait se produire. ### Test des champs dérivés -Testing derived fields is a feature which (as the example below shows) allows the user to set a field in a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. Important thing to note is that the first entity needs to be reloaded as the automatic update happens in the store in rust of which the AS code is agnostic. +Le test des champs dérivés est une fonctionnalité qui (comme le montre l'exemple ci-dessous) permet à l'utilisateur de définir un champ dans une certaine entité et de mettre à jour automatiquement une autre entité si elle dérive l'un de ses champs de la première entité. La chose importante à noter est que la première entité doit être rechargée car la mise à jour automatique se produit dans le magasin Rust, dont le code AS est indépendant. ```typescript -test('Derived fields example test', () => { +test('Exemple de test de champs dérivés', () => { let mainAccount = new GraphAccount('12') mainAccount.save() let operatedAccount = new GraphAccount('1') operatedAccount.operators = ['12'] operatedAccount.save() let nst = new NameSignalTransaction('1234') nst.signer = '12' nst.save() assert.assertNull(mainAccount.get('nameSignalTransactions')) assert.assertNull(mainAccount.get('operatorOf')) mainAccount = GraphAccount.load('12')! assert.i32Equals(1, mainAccount.nameSignalTransactions.length) assert.stringEquals('1', mainAccount.operatorOf[0]) }) ``` -### Testing dynamic data sources +### Tester les sources de données dynamiques -Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace.
These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). +Le test des sources de données dynamiques peut être effectué en simulant la valeur de retour des fonctions `context()`, `address()` et `network()` de l'espace de noms dataSource. Ces fonctions renvoient actuellement les éléments suivants : `context()` - renvoie une entité vide (DataSourceContext), `address()` - renvoie `0x0000000000000000000000000000000000000000`, `network()` - renvoie `mainnet`. Les fonctions `create(...)` et `createWithContext(...)` sont simulées pour ne rien faire, elles n'ont donc pas du tout besoin d'être appelées dans les tests. Les modifications des valeurs de retour peuvent être effectuées via les fonctions de l'espace de noms `dataSourceMock` dans `matchstick-as` (version 0.3.0+). Example below: First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): ```typescript export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { let tokenLockWallet = TokenLockWallet.load(dataSource.address().toHexString())! if (dataSource.network() == 'rinkeby') { tokenLockWallet.tokenDestinationsApproved = true } let context = dataSource.context() if (context.get('contextVal')!.toI32() > 0) { tokenLockWallet.setBigInt('tokensReleased', BigInt.fromI32(context.get('contextVal')!.toI32())) } tokenLockWallet.save() } ``` -And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: +Et puis nous avons le test utilisant l'une des méthodes de l'espace de noms dataSourceMock pour définir une nouvelle valeur de retour pour toutes les fonctions dataSource : ```typescript import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' @@ -986,50 +988,50 @@ test('Data source simple mocking example', () => { }) ``` -Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. +Notez que dataSourceMock.resetValues() est appelé à la fin. C'est parce que les valeurs sont mémorisées lorsqu'elles sont modifiées et doivent être réinitialisées si vous voulez revenir aux valeurs par défaut. -## Test Coverage +## Couverture de test Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.
-The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +L'outil de couverture de test prend les binaires de test `wasm` compilés et les convertit en fichiers `wat`, qui peuvent ensuite être facilement inspectés pour voir si les gestionnaires définis dans `subgraph.yaml` ont été appelés. Étant donné que la couverture du code (et les tests dans leur ensemble) en sont à leurs tout premiers stades dans AssemblyScript et WebAssembly, **Matchstick** ne peut pas vérifier la couverture des branches. Au lieu de cela, nous nous appuyons sur l'affirmation selon laquelle si un gestionnaire donné a été appelé, l'événement/la fonction correspondant a été correctement simulé. -### Prerequisites +### Conditions préalables -To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: +Pour exécuter la fonctionnalité de couverture de test fournie dans **Matchstick**, vous devez préparer quelques éléments au préalable : -#### Export your handlers +#### Exportez vos gestionnaires -In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: +Pour que **Matchstick** vérifie quels gestionnaires sont exécutés, ces gestionnaires doivent être exportés à partir du **fichier de test**. Ainsi, par exemple, dans notre fichier gravity.test.ts, nous importons le gestionnaire suivant : ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: +Pour que cette fonction soit visible (pour qu'elle soit incluse dans le fichier `wat` **par son nom**), nous devons également l'exporter, comme ceci : ```typescript export { handleNewGravatar } ``` ### Utilisation -Once that's all set up, to run the test coverage tool, simply run: +Une fois tout configuré, pour exécuter l'outil de couverture de test, exécutez simplement : ```sh graph test -- -c ``` -You could also add a custom `coverage` command to your `package.json` file, like so: +Vous pouvez également ajouter une commande `coverage` personnalisée à votre fichier `package.json`, comme ceci : ```typescript "scripts": { /.../ "coverage": "graph test -- -c" }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +Cela exécutera l'outil de couverture et vous devriez voir quelque chose comme ceci dans le terminal : ```sh $ graph test -c @@ -1068,32 +1070,32 @@ Test coverage: 0.0% (0/6 handlers). Global test coverage: 22.2% (2/9 handlers). ``` -### Test run time duration in the log output +### Durée d'exécution du test dans la sortie du journal -The log output includes the test run duration.
Here's an example: +La sortie du journal inclut la durée de l’exécution du test. Voici un exemple : -`[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` +`[Jeudi 31 mars 2022 13:54:54 +0300] Programme exécuté en : 42,270 ms.` ## Common compiler errors -> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined +> Critique : impossible de créer WasmInstance à partir d'un module valide avec un contexte : importation inconnue : wasi_snapshot_preview1::fd_write n'a pas été défini -This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/assemblyscript-api/#logging-api) +Cela signifie que vous avez utilisé `console.log` dans votre code, ce qui n'est pas pris en charge par AssemblyScript. Veuillez envisager d'utiliser l'[API Logging](/developing/assemblyscript-api/#logging-api) -> ERROR TS2554: Expected ? arguments, but got ?. +> ERREUR TS2554 : attendu ? arguments, mais j'ai eu ?. > -> return new ethereum.Block(defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt); +> renvoyer le nouveau ethereum.Block (defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultAddress, defaultAddressBytes, defaultAddressBytes, defaultAddressBytes, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt, defaultBigInt) ; > > in ~lib/matchstick-as/assembly/defaults.ts(18,12) > > ERROR TS2554: Expected ? arguments, but got ?. > -> return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); +> renvoyer un nouveau ethereum.Transaction (defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt) ; > -> in ~lib/matchstick-as/assembly/defaults.ts(24,12) +> dans ~lib/matchstick-as/assembly/defaults.ts(24,12) -The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. +L'inadéquation des arguments est causée par une inadéquation entre `graph-ts` et `matchstick-as`. La meilleure façon de résoudre des problèmes comme celui-ci est de tout mettre à jour vers la dernière version publiée. ## Feedback -If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. +Si vous avez des questions, des commentaires, des demandes de fonctionnalités ou si vous souhaitez simplement nous contacter, le meilleur endroit serait The Graph Discord où nous avons une chaîne dédiée à Matchstick, appelée 🔥| tests unitaires. 
diff --git a/website/pages/fr/docsearch.json b/website/pages/fr/docsearch.json index 52fa4a446f7f..7dde567954f0 100644 --- a/website/pages/fr/docsearch.json +++ b/website/pages/fr/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Recherche", + "buttonText": "La Recherche", "buttonAriaLabel": "Recherche" }, "modal": { "searchBox": { "resetButtonTitle": "Effacer la requête", - "resetButtonAriaLabel": "Effacer la requête", + "resetButtonAriaLabel": "Supprimer la requête", "cancelButtonText": "Annuler", "cancelButtonAriaLabel": "Annuler" }, "startScreen": { - "recentSearchesTitle": "Récent", - "noRecentSearchesText": "Aucune recherche récente", - "saveRecentSearchButtonTitle": "Sauvegarder cette recherche", - "removeRecentSearchButtonTitle": "Supprimer cette recherche de l'historique", - "favoriteSearchesTitle": "Favoris", - "removeFavoriteSearchButtonTitle": "Supprimer cette recherche des favoris" + "recentSearchesTitle": "Récente", + "noRecentSearchesText": "Pas de recherche récente", + "saveRecentSearchButtonTitle": "Enregistrer la recherche", + "removeRecentSearchButtonTitle": "Retirer cette recherche de l'historique", + "favoriteSearchesTitle": "Préféré", + "removeFavoriteSearchButtonTitle": "Supprimer cette recherche des préférés" }, "errorScreen": { - "titleText": "Impossible de récupérer les résultats", - "helpText": "Il est conseillé de vérifier votre connexion réseau." + "titleText": "Impossible d'obtenir les résultats", + "helpText": "Vous pouvez vérifier votre connexion réseau." }, "footer": { - "selectText": "pour sélectionner", - "selectKeyAriaLabel": "Touche « entrée »", - "navigateText": "pour naviguer", - "navigateUpKeyAriaLabel": "Flèche vers le haut", - "navigateDownKeyAriaLabel": "Flèche vers le bas", - "closeText": "pour fermer", - "closeKeyAriaLabel": "Touche Echap", - "searchByText": "Recherche par" + "selectText": "sélectionner", + "selectKeyAriaLabel": "Touche d'entrée", + "navigateText": "naviguer", + "navigateUpKeyAriaLabel": "Flèche en haut", + "navigateDownKeyAriaLabel": "Flèche en bas", + "closeText": "fermer", + "closeKeyAriaLabel": "Touche Échap", + "searchByText": "Cherche par" }, "noResultsScreen": { - "noResultsText": "Aucun résultat pour", - "suggestedQueryText": "Essayez de chercher", - "reportMissingResultsText": "Croyez-vous que cette requête devrait retourner des résultats ?", - "reportMissingResultsLinkText": "Faites-nous en part." + "noResultsText": "Pas de résultats pour", + "suggestedQueryText": "Essayez de rechercher", + "reportMissingResultsText": "Pensez-vous que cette requête devrait donner des résultats ?", + "reportMissingResultsLinkText": "Faites-nous savoir." } } } diff --git a/website/pages/fr/firehose.mdx b/website/pages/fr/firehose.mdx index ecf9b7e9da9f..f4669f63f370 100644 --- a/website/pages/fr/firehose.mdx +++ b/website/pages/fr/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Les intégrations de Firehose ont été construites pour Ethereum (et de nombreuses chaînes EVM), NEAR, Solana, Cosmos et Arweave, et d'autres sont en préparation. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. 
-Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visitez la [documentation du Firehose](https://firehose.streamingfast.io/) pour en savoir plus. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Démarrage + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/fr/global.json b/website/pages/fr/global.json index 0ff5316bcd33..7041afc44a5a 100644 --- a/website/pages/fr/global.json +++ b/website/pages/fr/global.json @@ -1,14 +1,14 @@ { - "collapse": "Effondrement", - "expand": "Étendre", - "previous": "Précédent", - "next": "Suivant", - "editPage": "Modifier la page", - "pageSections": "Section des pages", - "linkToThisSection": "Lien avec cette section", - "technicalLevelRequired": "Niveau technique requis", - "notFoundTitle": "Oups ! Cette page a été perdue dans l'espace...", - "notFoundSubtitle": "Vérifiez si vous utilisez la bonne adresse ou explorez notre site web en cliquant sur le lien ci-dessous.", - "goHome": "Revenir à la page d'accueil", - "video": "Vidéo" + "collapse": "Abattement", + "expand": "Développer", + "previous": "Précédente", + "next": "Suivante", + "editPage": "Modifier une page", + "pageSections": "Sections de la page", + "linkToThisSection": "Lien vers cette section", + "technicalLevelRequired": "Niveau technique exigé", + "notFoundTitle": "Oups ! Cette page a été perdue au cours de l'espace...", + "notFoundSubtitle": "Vérifiez si vous utilisez la bonne adresse ou explorez notre site web par cliquant sur le lien ci-dessous.", + "goHome": "Rentrer à la page d'accueil", + "video": "La vidéo" } diff --git a/website/pages/fr/glossary.mdx b/website/pages/fr/glossary.mdx index 2e840513f1ea..ac4a6d3f3a63 100644 --- a/website/pages/fr/glossary.mdx +++ b/website/pages/fr/glossary.mdx @@ -2,17 +2,17 @@ title: Glossary --- -- **The Graph**: A decentralized protocol for indexing and querying data. +- **The Graph**: un protocole décentralisé pour l'indexation et la requête de données. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Query**: une requête de données. Dans le cas de The Graph, une requête est une demande de données provenant d'un subgraph qui sera traitée par un indexeur. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. 
+- **GraphQL** : Un langage de requête pour les API et un moteur d'exécution pour répondre à ces requêtes avec vos données existantes. Le graph utilise GraphQL pour interroger les subgraphs. - **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. 
Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 
50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/fr/graphcast.mdx b/website/pages/fr/graphcast.mdx index e397aad36e43..ad65e428d144 100644 --- a/website/pages/fr/graphcast.mdx +++ b/website/pages/fr/graphcast.mdx @@ -2,7 +2,7 @@ title: Graphcast --- -## Introduction +## Présentation Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? 
@@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/fr/index.json b/website/pages/fr/index.json index ea7acdc6819c..930e53ae45e9 100644 --- a/website/pages/fr/index.json +++ b/website/pages/fr/index.json @@ -1,77 +1,76 @@ { - "title": "Débuter", - "intro": "Découvrez The Graph, un protocole décentralisé d'indexation et d'interrogation des données provenant des blockchains.", + "title": "Commencer", + "intro": "Découvrez The Graph, un protocole décentralisé pour indexer et interroger les données des blockchains.", "shortcuts": { "aboutTheGraph": { - "title": "À propos de The Graph", - "description": "En savoir plus sur The Graph" + "title": "À propos du Graph", + "description": "Plus d'infos sur The Graph" }, "quickStart": { - "title": "Démarrage rapide", - "description": "Lancez-vous et commencez votre aventure avec The Graphe" + "title": "Début rapide", + "description": "Lancez-vous et commencez avec The Graph" }, "developerFaqs": { - "title": "FAQs pour les développeurs", - "description": "Questions fréquentes" + "title": "Questions fréquentes des développeurs", + "description": "Questions fréquemment posées" }, "queryFromAnApplication": { - "title": "Requête depuis une application", - "description": "Apprenez à exécuter vos requêtes à partir d'une application" + "title": "Requête d'une application", + "description": "Apprenez à exécuter vos requêtes d'une application" }, "createASubgraph": { - "title": "Créer un subgraphe", - "description": "Utiliser le « Studio » pour créer des subgraphes" + "title": "Créer un subgraph", + "description": "Utiliser le « Studio » pour créer des subgraphs" }, "migrateFromHostedService": { - "title": "Migrer à partir du Service Hébergé", - "description": "Migrer des subgraphes vers le réseau The Graph" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Les divers rôles dans le réseau", - "description": "Découvrez les divers rôles du réseau The Graph.", + "title": "Les divers rôles du réseau", + "description": "Découvrez les rôles réseau de The Graph.", "roles": { "developer": { - "title": "Développeur", - "description": "Créer un subgraphes ou utiliser des subgraphes existants dans une application décentralisée" + "title": "Developer", + "description": "Créer un subgraphe ou utiliser des subgraphs existants dans une application décentralisée" }, "indexer": { - "title": "Indexeur", - "description": "Exploiter un nœud pour indexer les données et traiter les requêtes" + "title": "Indexer", + "description": 
"Exploitez un nœud pour indexer les données et traitez les requêtes" }, "curator": { - "title": "Curateur", - "description": "Organiser les données en signalant les subgraphes" + "title": "Curator", + "description": "Organiser les données en signalant les subgraphs" }, "delegator": { - "title": "Délégateur", - "description": "Sécuriser le réseau en déléguant des GRT aux indexeurs" + "title": "Delegator", + "description": "Sécuriser le réseau en déléguant les TRG aux indexeurs" } } }, - "readMore": "En savoir plus", + "readMore": "Lire la suite", "products": { - "title": "Produits", + "title": "Products", "products": { "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Créer, gérer, déployer des subgraphes et des clés API" + "title": "Studio Subgraph", + "description": "Créer, gérer, déployer des subgraphs et des clés API" }, "graphExplorer": { - "title": "Graph Explorer", - "description": "Explorer les subgraphes et interagir avec le protocole" + "title": "Explorateur Graph", + "description": "Explorer les subgraphs et interagir avec le protocole" }, "hostedService": { - "title": "Service hébergé", - "description": "Créer et explorer des subgraphes sur le service hébergé" + "title": "Hosted Service", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Réseaux pris en charge", - "description": "The Graph prend en charge les réseaux suivants sur le réseau The Graph et le Service Hébergé.", - "graphNetworkAndHostedService": "Le réseau The Graph et le Service Hébergé", - "hostedService": "Service hébergé", - "betaWarning": "En version bêta." + "title": "Réseaux supportés", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/fr/managing/deprecating-a-subgraph.mdx b/website/pages/fr/managing/deprecating-a-subgraph.mdx index 49a82f28df9d..3507c618d81a 100644 --- a/website/pages/fr/managing/deprecating-a-subgraph.mdx +++ b/website/pages/fr/managing/deprecating-a-subgraph.mdx @@ -1,18 +1,18 @@ --- -title: Suppression d'un subgraphe +title: Suppression d'un subgraph --- -Vous souhaitez donc supprimer votre subgraphe sur The Graph Explorer. Vous êtes au bon endroit ! Suivez les étapes ci-dessous : +Vous souhaitez donc supprimer votre subgraph sur The Graph Explorer. Vous êtes au bon endroit ! Suivez les étapes ci-dessous : -1. Visitez l'adresse du contrat [ici](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. +1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) +2. Call `deprecateSubgraph` with your `SubgraphID` comme argument. +3. Voilà ! Votre subgraph n'apparaîtra plus dans les recherches sur The Graph Explorer. -Please note the following: +Veuillez noter ce qui suit : -- The `deprecateSubgraph` function should be called by the owner's wallet. -- Les curateurs ne seront plus en mesure de signaler le subgraphe. -- Les curateurs qui ont déjà signalé sur le subgraphe seront en mesure de retirer leur signal à un prix d'action moyen. -- Les subgraphes dépréciés seront signalés par un message d'erreur. 
+- La fonction `deprecateSubgraph` doit être appelée par le portefeuille du propriétaire. +- Les curateurs ne seront plus en mesure de signaler le subgraph. +- Les curateurs qui ont déjà signalé sur le subgraph seront en mesure de retirer leur signal à un prix d'action moyen. +- Les subgraphs dépréciés seront signalés par un message d'erreur. -Si vous avez interagi avec le subgraphe inactif, vous pourrez le retrouver dans votre profil d'utilisateur sous l'onglet "Subgraphes", "Indexation" ou "Conservation", respectivement. +Si vous avez interagi avec le subgraph inactif, vous pourrez le retrouver dans votre profil d'utilisateur sous l'onglet "Subgraphs", "Indexation" ou "Conservation", respectivement. diff --git a/website/pages/fr/managing/transferring-subgraph-ownership.mdx b/website/pages/fr/managing/transferring-subgraph-ownership.mdx index ce74e42075fe..d83e6c7f8966 100644 --- a/website/pages/fr/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/fr/managing/transferring-subgraph-ownership.mdx @@ -1,24 +1,24 @@ --- -title: Transfert de la propriété d'un subgraphe +title: Transfert de la propriété d'un subgraph --- -The Graph supporte le transfert de propriété d'un subgraphe. +The Graph supporte le transfert de propriété d'un subgraph. -Lorsque vous déployez un subgraphe sur le réseau principal, un NFT sera généré pour l'adresse qui a déployé le subgraphe. Le NFT est basé sur un standard ERC721, il peut donc être facilement transféré vers différents comptes. +Lorsque vous déployez un subgraph sur le mainnet, un NFT sera généré pour l'adresse qui a déployé le subgraph. Le NFT est basé sur un standard ERC721, il peut donc être facilement transféré vers différents comptes. -La personne qui possède le NFT contrôle le subgraphe. Si le propriétaire décide de vendre le NFT ou de le transférer, il ne sera plus en mesure d'effectuer des modifications ou des mises à jour de ce subgraphe sur le réseau. +La personne qui possède le NFT contrôle le subgraph. Si le propriétaire décide de vendre le NFT ou de le transférer, il ne sera plus en mesure d'effectuer des modifications ou des mises à jour de ce subgraph sur le réseau. -En plus d'ajouter plus de flexibilité au cycle de vie du développement, cette fonctionnalité rend certains cas d'utilisation plus pratiques, comme le transfert de votre contrôle à un multisig ou un membre de la communauté le créant au nom d'une DAO. +En plus d'ajouter plus de flexibilité au cycle de vie de développement, cette fonctionnalité rend certains cas d'utilisation plus pratiques, tels que le transfert de votre contrôle vers un multisig ou la création du subgraph par un membre de la communauté au nom d'une DAO.
## Viewing your subgraph as an NFT -Pour visualiser votre subgraphe en tant que NFT, vous pouvez visiter une place de marché NFT comme OpenSea : +Pour visualiser votre subgraph en tant que NFT, vous pouvez visiter une place de marché NFT comme OpenSea : ``` -https://opensea.io/adresse-de-votre-portefeuille +https://opensea.io/your-wallet-address ``` -Ou un explorateur de portefeuilles comme **Rainbow.me** : +Ou un explorateur de portefeuille tel que **Rainbow.me** : ``` https://rainbow.me/adresse-de-votre-portefeuille @@ -26,14 +26,14 @@ https://rainbow.me/adresse-de-votre-portefeuille ## Transferring ownership of a subgraph -Pour transférer la propriété d'un sous-graphe, vous pouvez utiliser l'interface utilisateur intégrée à Subgraph Studio : +Pour transférer la propriété d'un subgraph, vous pouvez utiliser l'interface utilisateur intégrée à Subgraph Studio : -![Transfert de propriété de subgraphe](/img/subgraph-ownership-transfer-1.png) +![Transfert de propriété de subgraph](/img/subgraph-ownership-transfer-1.png) -Et ensuite choisir l'adresse à laquelle vous souhaitez transférer le subgraphe : +Et ensuite choisir l'adresse à laquelle vous souhaitez transférer le subgraph : -![Transfert de propriété de subgraphe](/img/subgraph-ownership-transfer-2.png) +![Transfert de propriété de subgraph](/img/subgraph-ownership-transfer-2.png) -Vous pouvez également utiliser l'interface utilisateur intégrée des places de marché NFT comme OpenSea : +Vous pouvez également utiliser l'interface utilisateur intégrée des marchés NFT comme OpenSea : -![Transfert de propriété de subgraphe depuis la place de marché NFT](/img/subgraph-ownership-transfer-nft-marketplace.png) +![Transfert de propriété de subgraph depuis la place de marché NFT](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/fr/mips-faqs.mdx b/website/pages/fr/mips-faqs.mdx index 73efe82662cb..ad63ae49bdc8 100644 --- a/website/pages/fr/mips-faqs.mdx +++ b/website/pages/fr/mips-faqs.mdx @@ -2,7 +2,9 @@ title: MIPs FAQs --- -## Introduction +## Présentation + +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. @@ -30,7 +32,7 @@ A community member, [SunTzu](https://github.com/suntzu93), has created a script ### 2. Which chain will the MIPs program incentivise first? -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. +La première chaîne qui sera prise en charge sur le réseau décentralisé est Gnosis Chain ! Anciennement connue sous le nom de xDAI, Gnosis Chain est une chaîne basée sur EVM. Gnosis Chain a été sélectionnée comme la première en raison de sa convivialité d'exécution des nœuds, de sa préparation à l'indexeur, de son alignement avec The Graph et de son adoption dans web3. ### 3. How will new chains be added to the MIPs program? @@ -94,11 +96,11 @@ The percentage to be distributed at the end of the program will be subject to ve ### 13. 
For teams with more than one member, will all the team members be given a MIPs Discord role? -Yes +Oui ### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -Yes +Oui ### 15. During the MIPs program, will there be a period to dispute invalid POI? @@ -114,12 +116,12 @@ Please email info@thegraph.foundation ### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -Yes +Oui ### 20. Are there recommended regions to run the servers? -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. +Nous ne donnons pas de recommandations sur les régions. Lorsque vous choisissez des emplacements, vous voudrez peut-être réfléchir aux principaux marchés pour les crypto-monnaies. ### 21. What is “handler gas cost”? -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. +C'est la mesure déterministe du coût d'exécution d'un gestionnaire. Contrairement à ce que son nom pourrait laisser penser, il n’est pas lié au coût du gaz sur les blockchains. diff --git a/website/pages/fr/network/benefits.mdx b/website/pages/fr/network/benefits.mdx index bd279a31e050..4a932e5b788b 100644 --- a/website/pages/fr/network/benefits.mdx +++ b/website/pages/fr/network/benefits.mdx @@ -1,5 +1,5 @@ --- -title: Le réseau The Graph par rapport l'hébergement locale +title: Le réseau Graph vs l'auto-hébergement socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- @@ -9,72 +9,72 @@ The benefits of this decentralized protocol cannot be replicated by running a `g Here is an analysis: -## Pourquoi adopter le réseau The Graph ? +## Pourquoi devriez-vous utiliser le réseau Graph ? -- Coût mensuel réduit de 60 à 98 % -- Pas de frais d'installation d'infrastructure -- Temps de fonctionnement amélioré -- Access to 438 Indexers (and counting) -- Assistance technique 24/7 par la communauté +- Coût mensuel inférieur de 60 à 98 % +- 0 $ de frais de configuration de l'infrastructure +- Disponibilité supérieure +- Access to hundreds of independent Indexers around the world +- Assistance technique 24h/24 et 7j/7 par la communauté mondiale ## The Benefits Explained -### Une structure de coûts plus faible et plus flexible +### Une structure de coûts plus faible & plus flexible No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. Query costs may vary; the quoted cost is the average at time of publication (December 2022).
-## Utilisateur de petit volume (moins de 30 000 requêtes par mois) +## Utilisateur à faible volume (moins de 30 000 requêtes par mois) -| Comparaison des coûts | Hébergement local | Réseau The Graph | +| Cost Comparison | Auto-hébergé | Réseau de The Graph | | :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ par mois | 0 $ | -| Coût des requêtes | + 0 $ | ~15 $ par mois | -| Coût du développement | 400 $ par mois | None, built into the network with globally distributed Indexers | -| Requêtes par mois | Limité aux capacités d'infrastructure | 30 000 (mise à l'échelle automatique) | -| Coût par requête | 0 $ | 0,0005 $ | -| Infrastructure | Centralisé | Décentralisé | -| Redondance géographique | + 750 $ par nœud supplémentaire | Inclus | -| Garantie de fonctionnement | Varie | + 99.9% | -| Coût mensuel total | + 750 $ | ~15 $ | - -## Utilisateur de volume moyen (3 000 000+ requêtes par mois) - -| Comparaison des coûts | Hébergement local | Réseau The Graph | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | + 0 $ | ~15 $ au mois | +| Temps d'ingénierie | 400 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | 30 000 (automatique mise à l'échelle) | +| Tarif par requête | 0 $ | 0,0005 $ | +| Les infrastructures | Centralisée | Décentralisée | +| La redondance géographique | 750$+ par nœud complémentaire | Compris | +| Temps de disponibilité | Variable | + 99.9% | +| Total des coûts mensuels | + 750 $ | ~15 $ | + +## Utilisateur moyen (3 000 000+ demandes par mois) + +| Comparaison de coût | Auto-hébergé | Réseau de The Graph | | :-: | :-: | :-: | -| Coût mensuel du serveur\* | 350 $ par mois | 0 $ | -| Coût des requêtes | 500 $ par mois | 750 $ par mois | -| Coût du développement | 800 $ par mois | None, built into the network with globally distributed Indexers | -| Requêtes par mois | Limité aux capacités d'infrastructure | + 3 000 000 | -| Coût par requête | 0 $ | 0,00025 $ | -| Infrastructure | Centralisé | Décentralisé | -| Frais de développement | 200 $ par mois | Inclus | -| Redondance géographique | 1 200 $ de coûts totaux par nœud supplémentaire | Inclus | -| Garantie de fonctionnement | Varie | + 99.9% | -| Coût mensuel total | $1,650+ | $750 | - -## Utilisateur de volume conséquent (30 000 000+ requêtes par mois) - -| Comparaison des coûts | Hébergement local | Réseau The Graph | +| Coût mensuel du serveur\* | 350 $ au mois | 0 $ | +| Frais de requête | 500 $ au mois | 750 $ au mois | +| Temps d'ingénierie | 800 $ au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | + 3 000 000 | +| Tarif par requête | 0 $ | 0,00025 $ | +| L'infrastructure | Centralisée | Décentralisée | +| Frais d'ingénierie | 200 $ au mois | Compris | +| La redondance géographique | 1 200 $ coût total par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | + 99.9% | +| Total des coûts mensuels | $1,650+ | $750 | + +## L'utilisateur de volume conséquent (30 000 000+ requêtes au mois) + +| Cost Comparison | Auto-hébergé | Réseau de The Graph | | :-: | :-: | :-: | -| Coût mensuel du serveur\* | 1100 $ par mois, par nœud | 0 $ | -| Coût des requêtes | 4000 $ | 4500 $ par mois | -| Nombre de nœuds nécessaires | 10 | Ne s'applique pas ici | -| Coût du développement | 6000 $ ou plus par mois | None, built into the network with globally distributed Indexers | -| Requêtes par mois | Limité aux 
capacités d'infrastructure | + 30 000 000 | -| Coût par requête | 0 $ | 0,00015 $ | -| Infrastructure | Centralisé | Décentralisé | -| Redondance géographique | 1 200 $ de coûts totaux par nœud supplémentaire | Inclus | -| Garantie de fonctionnement | Varie | + 99.9% | -| Coût mensuel total | + 11,000 $ | $4,500 | +| Coût mensuel du serveur\* | 1100 $ au mois, par nœud | 0 $ | +| Frais de requête | 4000 $ | 4500 $ au mois | +| Nombre de nœuds obligatoires | 10 | Sans objet | +| Temps d'ingénierie | 6000 $ ou plus au mois | Aucun, intégré au réseau avec des indexeurs distribués à l'échelle mondiale | +| Requêtes au mois | Limité aux capacités infra | + 30 000 000 | +| Tarif par requête | 0 $ | 0,00015 $ | +| L'infrastructure | Centralisée | Décentralisée | +| La redondance géographique | 1 200 $ de coûts totaux par nœud supplémentaire | Compris | +| Temps de disponibilité | Variable | + 99.9% | +| Total des coûts mensuels | + 11 000 $ | 4 500 $ | -\*y compris les coûts de sauvegarde : 50 à 100 dollars par mois +\*y compris les coûts de sauvegarde : 50 $ à 100 $ par mois -la durée de développement estimée à 200 $ par heure +Temps d'ingénierie basé sur une hypothèse de 200 $ de l'heure -en exploitant la fonction de plafond budgétaire pour les requêtes dans l'onglet de facturation du budget, -tout en maintenant une qualité de service élevée + en utilisant la fonction "max query budget" dans l'onglet "budget billing", tout en maintenant une qualité de +service élevée Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. @@ -84,13 +84,13 @@ Some users may need to update their subgraph to a new version. Due to Ethereum g Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. -## Pas de coûts d'installation et une plus grande efficacité opérationnelle +## Pas de frais d'installation & plus grande efficacité opérationnelle -Commencez immédiatement, sans frais d'installation ni frais généraux et aucun matériel. De plus, il n'y pas à se soucier de pannes dues à une infrastructure centralisée, par conséquent, vous avez plus de temps pour vous concentrer sur votre produit principal. Il n'est également pas nécessaire d'avoir des serveurs de secours, de dépannage ou d'autres ressources techniques coûteuses. +Commencez tout de suite, sans installation, sans frais généraux et sans matériel. De plus, vous n'avez pas à vous soucier des temps d'arrêt dus à une infrastructure centralisée, ce qui vous laisse plus de temps pour vous concentrer sur votre produit principal. Vous n'avez pas non plus besoin de serveurs de secours, de dépannage ou d'autres ressources techniques coûteuses. -## Fiabilité et résilience +## Fiabilité & Résilience -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally.
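The per-query arithmetic behind the cost tables above is easy to sanity-check. The sketch below is illustrative only — it assumes the average prices quoted on this page ($0.0005, $0.00025 and $0.00015 per query at the three volume tiers) and simply multiplies them by the monthly query volumes; the function name is made up for the example.

```typescript
// Hypothetical back-of-the-envelope check of the cost tables above.
// The average prices per query and the monthly volumes are the ones quoted on this page;
// the function name is illustrative only.
function monthlyQueryCost(queriesPerMonth: number, pricePerQueryUsd: number): number {
  return queriesPerMonth * pricePerQueryUsd;
}

console.log(monthlyQueryCost(30_000, 0.0005));      // ≈ $15/month  (low-volume tier)
console.log(monthlyQueryCost(3_000_000, 0.00025));  // ≈ $750/month (medium-volume tier)
console.log(monthlyQueryCost(30_000_000, 0.00015)); // ≈ $4,500/month (high-volume tier)
```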
diff --git a/website/pages/fr/network/curating.mdx b/website/pages/fr/network/curating.mdx index ef2e5354d6a7..b9e5181537d4 100644 --- a/website/pages/fr/network/curating.mdx +++ b/website/pages/fr/network/curating.mdx @@ -1,83 +1,83 @@ --- -title: Le rôle de curateur +title: Curation --- -Les curateurs sont essentiels à l'économie décentralisée de The Graph. Ils utilisent leurs connaissances de l'écosystème web3 pour évaluer et signaler les subgraphes d'intérêt pour le réseau The Graph. Grâce à l'explorateur, les curateurs sont en mesure de visualiser les données du réseau afin de signaler un subgraphe d'intérêt. Le réseau The Graph récompense les curateurs qui signalent des subgraphes de bonne qualité en leur versant une partie des frais de recherche que ces subgraphes génèrent. Les curateurs sont économiquement incités à signaler le plus tôt possible. Les signaux des curateurs sont importants pour les indexeurs, qui peuvent ensuite traiter ou indexer les données de ces subgraphes signalés. +Les curateurs sont essentiels à l'économie décentralisée de The Graph. Ils utilisent leurs connaissances de l'écosystème web3 pour évaluer et signaler les subgraphs d'intérêt pour le réseau The Graph. Grâce à l'explorateur, les curateurs sont en mesure de visualiser les données du réseau afin de signaler un subgraph d'intérêt. Le réseau The Graph récompense les curateurs qui signalent des subgraphs de bonne qualité en leur versant une partie des frais de recherche que ces subgraphs génèrent. Les curateurs sont économiquement incités à signaler le plus tôt possible. Les signaux des curateurs sont importants pour les indexeurs, qui peuvent ensuite traiter ou indexer les données de ces subgraphs signalés. When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. -N'oubliez pas que ce rôle est risqué. Assurez-vous de signaler des subgraphes auxquels vous faites confiance. La création d'un subgraphe n'est pas soumise à autorisation. Les gens peuvent donc créer des subgraphes et leur donner le nom qu'ils veulent. Pour avoir de plus amples informations sur les risques des curateurs, consultez [le guide des curateurs de l'académie Graph.](https://thegraph.academy/curators/) +N'oubliez pas que ce rôle est risqué. Assurez-vous de signaler des subgraphs auxquels vous faites confiance. La création d'un subgraph n'est pas soumise à autorisation. Les gens peuvent donc créer des subgraphs et leur donner le nom qu'ils veulent. Pour avoir de plus amples informations sur les risques des curateurs, consultez [le guide des curateurs de l'académie Graph.](https://thegraph.academy/curators/) -## Courbe de liaison 101 +## Courbe d'adhérence 101 -Tout d'abord, nous prenons du recul. Chaque subgraphe a une courbe de liaison sur laquelle des actions de curation sont générées lorsqu'un utilisateur ajoute un signal **dans** la courbe. La courbe de liaison de chaque subgraphe est unique. Les courbes de liaison sont conçues de manière à ce que le prix de la production d'une action de curation sur un subgraphe augmente linéairement, en fonction du nombre d'actions frappées. +Tout d'abord, nous prenons du recul. 
Chaque subgraph a une courbe de liaison sur laquelle des actions de curation sont générées lorsqu'un utilisateur ajoute un signal **dans** la courbe. La courbe de liaison de chaque subgraph est unique. Les courbes de liaison sont conçues de manière à ce que le prix de la production d'une action de curation sur un subgraph augmente linéairement, en fonction du nombre d'actions frappées. -![Prix par action](/img/price-per-share.png) +![Prix des actions](/img/price-per-share.png) -Par conséquent, le prix augmente linéairement, ce qui signifie qu'il sera de plus en plus cher d'acheter une action au fil du temps. Voici un exemple de courbe de liaison ci-dessous : +Par conséquent, le prix augmente de façon linéaire, ce qui signifie qu'il est de plus en plus cher d'acheter une action au fil du temps. Voici un exemple de ce que nous entendons par là, voir la courbe de liaison ci-dessous : -![Courbe de liaison](/img/bonding-curve.png) +![Courbe d'adhésion](/img/bonding-curve.png) -Considérons que nous avons deux curateurs qui monnayent des actions pour un subgraphe : +Considérons que nous avons deux curateurs qui monnayent des actions pour un subgraph : -- Le curateur A est le premier à se signaler sur le subgraphe. En ajoutant 120 000 GRT dans la courbe, il est capable de monnayer 2000 actions. -- Le signal du curateur B se trouve sur le subgraphe à un moment ultérieur. Pour recevoir le même nombre d'actions que le curateur A, il devra ajouter 360 000 GRT à la courbe. -- Comme les deux curateurs détiennent la moitié du total des parts de curation, ils recevraient un montant égal de redevances de curateur. -- Si l'un des curateurs brûlait maintenant ses 2000 parts de curation, il recevrait 360 000 GRT. -- Le curateur restant recevrait alors toutes les redevances de curateur pour ce subgraphe. S'il brûlait ses parts pour retirer la GRT, il recevrait 120 000 GRT. -- **TLDR** : La valeur en GRT des parts de curation est déterminée par la courbe de liaison et peut-être volatile. Il est possible de subir de grosses pertes. Signer tôt signifie que vous investissez moins de GRT pour chaque action. Par extension, cela signifie que vous gagnez plus de redevances de curation par GRT que les curateurs ultérieurs pour le même subgraphe. +- Le curateur A est le premier à se signaler sur le subgraph. En ajoutant 120 000 GRT à la courbe, il est en mesure de monnayer 2000 actions. +- Le signal du curateur B se trouve sur le subgraph à un moment ultérieur. Pour recevoir le même nombre d'actions que le curateur A, il devra ajouter 360 000 GRT à la courbe. +- Étant donné que les deux conservateurs détiennent la moitié du total des parts de conservation, ils recevraient un montant égal de redevances de conservation. +- Si l’un des conservateurs brûlait maintenant ses 2 000 actions de conservation, il recevrait 360 000 GRT. +- Le curateur restant recevrait alors toutes les redevances de curateur pour ce subgraph. S'il brûlait ses parts pour retirer la GRT, il recevrait 120 000 GRT. +- **TLDR** : La valeur en GRT des parts de curation est déterminée par la courbe de liaison et peut être volatile. Il est possible de subir de grosses pertes. Signer tôt signifie que vous investissez moins de GRT pour chaque action. Par extension, cela signifie que vous gagnez plus de redevances de curation par GRT que les curateurs ultérieurs pour le même subgraph. -En général, une courbe de liaison est une courbe mathématique qui définit la relation entre l'offre de jetons et le prix de l'actif.
Dans le cas spécifique du signalememt de subgraphes, le **prix de chaque action de subgraphe augmente avec chaque jeton investi** et le **prix de chaque action diminue avec chaque jeton vendu** +En général, une courbe de liaison est une courbe mathématique qui définit la relation entre l'offre de jetons et le prix de l'actif. Dans le cas spécifique du signalement de subgraphs, le **prix de chaque action de subgraph augmente avec chaque jeton investi** et le **prix de chaque action diminue avec chaque jeton vendu** -Dans le cas de The Graph, [la mise en œuvre par Bancor d'une formule de courbe de liaison](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) est exploitée. +Dans le cas de The Graph, la [mise en œuvre par Bancor d'une formule de courbe de liaison](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) est exploitée. -## Comment signaler un subraphe ? +## Comment signaler -Maintenant que nous avons couvert les bases du fonctionnement de la courbe de liaison, voici comment vous allez procéder pour signaler un subgraphe. Dans l'onglet Curateur de l'Explorateur The Graph, les curateurs pourront signaler ou non certains subgraphes en fonction des statistiques du réseau. Pour un aperçu étape par étape de la façon de procéder dans l'explorateur, cliquez [ici.](/network/explorer) +Maintenant que nous avons couvert les bases du fonctionnement de la courbe de liaison, voici comment vous allez procéder pour signaler un subgraph. Dans l'onglet Curateur de l'Explorateur The Graph, les curateurs pourront signaler ou non certains subgraphs en fonction des statistiques du réseau. Pour un aperçu étape par étape de la façon de procéder dans l'explorateur, cliquez [ici.](/network/explorer) -Un curateur peut choisir de signaler une version spécifique d'un sugraphe ou de faire migrer automatiquement son signal vers la version de production la plus récente de ce subgraphe. Ces deux stratégies sont valables et comportent leurs propres avantages et inconvénients. +Un curateur peut choisir de signaler une version spécifique d'un subgraph ou de faire migrer automatiquement son signal vers la version de production la plus récente de ce subgraph. Ces deux stratégies sont valables et comportent leurs propres avantages et inconvénients. -La signalisation sur une version spécifique est particulièrement utile lorsqu'un subgraphe est utilisé par plusieurs dApps. Un dApp peut avoir besoin de mettre à jour régulièrement le subgraphe avec de nouvelles fonctionnalités. Une autre dApp pourrait préférer utiliser une version plus ancienne et bien testée du subgraphe. Lors de la curation initiale, une taxe standard de 1% est encourue. +La signalisation sur une version spécifique est particulièrement utile lorsqu'un subgraph est utilisé par plusieurs dApps. Une dApp peut avoir besoin de mettre à jour régulièrement le subgraph avec de nouvelles fonctionnalités. Une autre dApp pourrait préférer utiliser une version plus ancienne et bien testée du subgraph. Lors de la curation initiale, une taxe standard de 1% est encourue. -La migration automatique de votre signal vers la version de production la plus récente peut s'avérer utile pour vous assurer que vous continuez à accumuler des frais de requête. Chaque fois que vous effectuez une curation, une taxe de curation de 1 % est appliquée. Vous paierez également une taxe de curation de 0,5 % à chaque migration.
Les développeurs de subgraphes sont découragés de publier fréquemment de nouvelles versions - ils doivent payer une taxe de curation de 0,5 % sur toutes les parts de curation migrées automatiquement. +La migration automatique de votre signal vers la version de production la plus récente peut s'avérer utile pour vous assurer que vous continuez à accumuler des frais de requête. Chaque fois que vous effectuez une curation, une taxe de curation de 1 % est appliquée. Vous paierez également une taxe de curation de 0,5 % à chaque migration. Les développeurs de subgraphs sont découragés de publier fréquemment de nouvelles versions - ils doivent payer une taxe de curation de 0,5 % sur toutes les parts de curation migrées automatiquement. -> **Remarque** : La première adresse à signaler un subgraphe particulier est considérée comme le premier curateur et devra effectuer un travail beaucoup plus gourmand en gaz que les curateurs suivants, car le premier curateur initialise les jetons de curation, initialise la courbe de liaison et transfère également les jetons dans le proxy The Graph. +> **Remarque** : La première adresse à signaler un subgraph particulier est considérée comme le premier curateur et devra effectuer un travail beaucoup plus gourmand en gaz que les curateurs suivants, car le premier curateur initialise les jetons de curation, initialise la courbe de liaison et transfère également les jetons dans le proxy The Graph. -## Que signifie « signaler un subgraphe » pour le réseau The Graph ? +## Que signifie « signaler un subgraph » pour le réseau The Graph ? -Pour que les consommateurs finaux puissent interroger un subgraphe, celui-ci doit d'abord être indexé. L'indexation est un processus au cours duquel les fichiers, les données et les métadonnées sont examinés, catalogués, puis indexés afin que les résultats puissent être trouvés plus rapidement. Pour que les données d'un subgraphe puissent être recherchées, elles doivent être organisées. +Pour que les consommateurs finaux puissent interroger un subgraph, celui-ci doit d'abord être indexé. L'indexation est un processus au cours duquel les fichiers, les données et les métadonnées sont examinés, catalogués, puis indexés afin que les résultats puissent être trouvés plus rapidement. Pour que les données d'un subgraph puissent être recherchées, elles doivent être organisées. -Ainsi, si les indexeurs devaient deviner quels subgraphes ils devraient indexer, il y aurait peu de chances qu'ils gagnent des frais de requête robustes, car ils n'auraient aucun moyen de valider quels subgraphes sont de bonne qualité. C'est là qu'intervient la curation. +Ainsi, si les indexeurs devaient deviner quels subgraphs ils devraient indexer, il y aurait peu de chances qu'ils gagnent des frais de requête robustes, car ils n'auraient aucun moyen de valider quels subgraphs sont de bonne qualité. C'est là qu'intervient la curation. Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. 
Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! -![Diagramme de signalisation](/img/curator-signaling.png) +![Diagramme de la signalisation](/img/curator-signaling.png) -Les indexeurs peuvent trouver des subgraphes à indexer en fonction des signaux de curation qu'ils voient dans The Graph Explorer (capture d'écran ci-dessous). +Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). -![Les subgraphes d'exploration](/img/explorer-subgraphs.png) +![Les subgraphs d'exploration](/img/explorer-subgraphs.png) -## Risques +## Des risques -1. Le marché des requêtes est par nature récente chez The Graph et il existe un risque que votre %APY soit inférieur à vos attentes en raison de la dynamique naissante du marché. +1. Le marché des requêtes est intrinsèquement jeune chez The Graph et il y a un risque que votre %APY soit inférieur à vos attentes en raison de la dynamique naissante du marché. 2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. 3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. Un subgraphe peut échouer à cause d'un bug. Un subgraphe qui échoue n'accumule pas de frais de requête. Par conséquent, vous devrez attendre que le développeur corrige le bogue et déploie une nouvelle version. - - Si vous êtes abonné à la version la plus récente d'un subgraphe, vos parts migreront automatiquement vers cette nouvelle version. Cela entraînera une taxe de curation de 0,5 %. - - Si vous avez signalé sur une version spécifique du subgraphe et que cela échoue, vous devrez brûler manuellement vos parts de curation. Notez que vous pouvez recevoir plus ou moins de GRT que ce que vous avez initialement déposé dans la courbe de curation, ce qui est un risque associé au statut de curateur. Vous pouvez alors signaler la nouvelle version du subgraphe, ce qui entraîne une taxe de curation de 1%. +4. Un subgraph peut échouer à cause d'un bug. Un subgraph qui échoue n'accumule pas de frais de requête. Par conséquent, vous devrez attendre que le développeur corrige le bogue et déploie une nouvelle version. + - Si vous êtes abonné à la version la plus récente d'un subgraph, vos parts migreront automatiquement vers cette nouvelle version. Cela entraînera une taxe de curation de 0,5 %. + - Si vous avez signalé sur une version spécifique du subgraph et que cela échoue, vous devrez brûler manuellement vos parts de curation. Notez que vous pouvez recevoir plus ou moins de GRT que ce que vous avez initialement déposé dans la courbe de curation, ce qui est un risque associé au statut de curateur. 
Vous pouvez alors signaler la nouvelle version du subgraph, ce qui entraîne une taxe de curation de 1%. -## FAQ +## FAQ sur la conservation -### 1. Quel est le pourcentage accordé au curateur sur les frais totaux ? +### 1. Quel pourcentage des frais de requête les conservateurs perçoivent-ils ? -En vous signalant sur un subgraphe, vous gagnerez une part de tous les frais de requête que ce subgraphe génère. 10% de tous les frais d'interrogation vont aux curateurs au prorata de leurs parts de curation. Ces 10% sont soumis à la gouvernance. +En vous signalant sur un subgraph, vous gagnerez une part de tous les frais de requête que ce subgraph génère. 10% de tous les frais d'interrogation vont aux curateurs au prorata de leurs parts de curation. Ces 10% sont soumis à la gouvernance. -### 2. Comment choisir un subgraphe sur lequel signaler ? +### 2. Comment décider quels sont les subgraphs de haute qualité sur lesquels on peut émettre un signal ? Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: -- Les curateurs peuvent utiliser leur compréhension d'un réseau pour essayer de prédire comment un subgraphe individuel peut générer un volume de requêtes plus ou moins élevé à l'avenir -- Les curateurs doivent également comprendre les mesures disponibles dans l'Explorateur The Graph. Des mesures telles que le volume de requêtes passées et l'identité du développeur du subgraphe peuvent aider à déterminer si un subgraphe mérite ou non d'être signalé. +- Les curateurs peuvent utiliser leur compréhension d'un réseau pour essayer de prédire comment un subgraph individuel peut générer un volume de requêtes plus ou moins élevé à l'avenir +- Les curateurs doivent également comprendre les mesures disponibles dans l'Explorateur The Graph. Des mesures telles que le volume de requêtes passées et l'identité du développeur du subgraph peuvent aider à déterminer si un subgraph mérite ou non d'être signalé. ### 3. What’s the cost of updating a subgraph? @@ -91,6 +91,6 @@ It’s suggested that you don’t update your subgraphs too frequently. See the Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. -Vous ne savez toujours pas quoi faire ? N'hésitez pas à consulter notre guide vidéo sur la curation ci-dessous : +Vous ne savez toujours pas où vous en êtes ? 
Regardez notre guide vidéo sur la curation ci-dessous : diff --git a/website/pages/fr/network/delegating.mdx b/website/pages/fr/network/delegating.mdx index 4a6d6e00b73e..1f6648e16226 100644 --- a/website/pages/fr/network/delegating.mdx +++ b/website/pages/fr/network/delegating.mdx @@ -1,98 +1,98 @@ --- -title: Delegating +title: Délégation --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +Les délégués sont des participants au réseau qui délèguent (c'est-à-dire qui "mettent en jeu") le GRT à un ou plusieurs indexeurs. Les délégués contribuent à la sécurisation du réseau sans exploiter eux-mêmes un Graph Node. -By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +En déléguant à un indexeur, les délégants gagnent une partie des frais de requête et des récompenses de l'indexeur. Le nombre de requêtes qu'un indexeur peut traiter dépend de la participation propre (et déléguée) de l'indexeur et du prix facturé par l'indexeur pour chaque requête. Ainsi, plus la participation allouée à un indexeur est importante, plus il peut traiter de requêtes potentielles. -## Delegator Guide +## Guide du délégué -This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: +Ce guide explique comment être un délégué efficace dans The Graph Network. Les délégués partagent les revenus du protocole avec tous les indexeurs, en fonction de leur participation déléguée. Un délégué doit faire preuve de discernement pour choisir les indexeurs en fonction de plusieurs facteurs. Veuillez noter que ce guide n'aborde pas les étapes telles que la configuration correcte de Metamask, car ces informations sont largement disponibles sur Internet. Ce guide comporte trois sections : -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator -- A video guide showing the steps to delegate in the Graph Network UI +- Les risques de la délégation de jetons dans The Graph Network +- Comment calculer les rendements escomptés en tant que délégué +- Un guide vidéo montrant les étapes pour déléguer dans l'interface utilisateur du réseau The Graph -## Delegation Risks +## Risques de délégation -Listed below are the main risks of being a Delegator in the protocol. +Les principaux risques liés à la fonction de délégué dans le protocole sont énumérés ci-dessous. -### The delegation tax +### La taxe de délégation -Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. +Les délégués ne peuvent pas être pénalisés (slashed) en cas de mauvais comportement, mais ils sont soumis à une taxe visant à décourager les mauvaises décisions susceptibles de nuire à l'intégrité du réseau. -It is important to understand that every time you delegate, you will be charged 0.5%.
This means if you are delegating 1000 GRT, you will automatically burn 5 GRT. +Il est important de comprendre que chaque fois que vous déléguez, vous êtes facturé 0,5 %. Cela signifie que si vous déléguez 1000 GRT, vous brûlerez automatiquement 5 GRT. -This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation. +Cela signifie que pour être sûr, un délégant doit calculer quel sera son rendement en déléguant à un indexeur. Par exemple, un délégant peut calculer combien de jours il lui faudra avant de récupérer la taxe de 0,5 % sur sa délégation. -### The delegation unbonding period +### La période de découplage de la délégation -Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. This means they cannot transfer their tokens, or earn any rewards for 28 days. +Lorsqu'un délégué souhaite se dé-déléguer, ses jetons sont soumis à une période de déliaison de 28 jours. Cela signifie qu'il ne peut pas transférer ses jetons ou gagner des récompenses pendant 28 jours. -One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT. +Il convient également de choisir judicieusement un indexeur. Si vous choisissez un indexeur qui n'est pas digne de confiance ou qui ne fait pas du bon travail, vous voudrez annuler la délégation, ce qui signifie que vous perdrez beaucoup d'occasions de gagner des récompenses, ce qui peut être tout aussi grave que de brûler des GRT.
- ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Délégation débondage](/img/Delegation-Unbonding.png) _Notez la commission de 0,5% dans l'interface utilisateur de la + délégation, ainsi que la période de débondage de 28 jours._
-### Choosing a trustworthy Indexer with a fair reward payout for Delegators +### Choisir un indexeur digne de confiance avec une rémunération équitable pour les délégués -This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. +Il s'agit d'une partie importante à comprendre. Discutons d'abord de trois valeurs très importantes, qui sont les paramètres de délégation. -Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards. +Réduction de la récompense d'indexation - La réduction de la récompense d'indexation est la part des récompenses que l'indexeur gardera pour lui. Cela signifie que si elle est fixée à 100 %, en tant que délégué, vous obtiendrez 0 récompense d'indexation. Si vous voyez 80 % dans l'interface utilisateur, cela signifie qu'en tant que délégué, vous recevrez 20 %. Remarque importante : au début du réseau, les récompenses d'indexation représenteront la majorité des récompenses.
- ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *Le meilleur indexeur donne aux délégués 90 % des récompenses. + Celui du milieu donne 20 % aux délégués. Celui du bas donne aux délégués environ 83 %.*
-- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Réduction des frais de requête – Cela fonctionne exactement comme la réduction des récompenses d’indexation. Cependant, cela concerne spécifiquement les retours sur les frais de requête collectés par l’indexeur. Il convient de noter qu’au démarrage du réseau, les retours sur les frais de requête seront très faibles par rapport à la récompense d’indexation. Il est recommandé de prêter attention au réseau pour déterminer quand les frais de requête sur le réseau commenceront à être plus importants. -As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. +Comme vous pouvez le constater, le choix du bon indexeur demande beaucoup de réflexion. C'est pourquoi nous vous recommandons fortement d'explorer The Graph Discord pour déterminer qui sont les indexeurs ayant la meilleure réputation sociale et technique, afin de récompenser les délégués de manière cohérente. De nombreux indexeurs sont très actifs sur Discord et se feront un plaisir de répondre à vos questions. Beaucoup d'entre eux indexent depuis des mois sur le testnet et font de leur mieux pour aider les délégués à obtenir un bon retour, car cela améliore la santé et le succès du réseau. -### Calculating Delegators expected return +### Calcul du rendement attendu des délégués -A Delegator has to consider a lot of factors when determining the return. These include: +Le délégué doit prendre en compte un grand nombre de facteurs pour déterminer le rendement. Ces facteurs sont les suivants: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- Un délégué technique peut également examiner la capacité de l'indexeur à utiliser les jetons délégués dont il dispose. Si un indexeur n'alloue pas tous les jetons disponibles, il ne réalise pas le profit maximum qu'il pourrait réaliser pour lui-même ou pour ses délégués. +- À l'heure actuelle, sur le réseau, un indexeur peut choisir de clôturer une allocation et de collecter des récompenses à tout moment entre 1 et 28 jours. Il est donc possible qu’un indexeur ait beaucoup de récompenses qu’il n’a pas encore collectées et que ses récompenses totales soient donc faibles. 
Ceci doit être pris en considération dès les premiers jours. -### Considering the query fee cut and indexing fee cut +### Considérant la réduction des frais d'interrogation et la réduction des frais d'indexation -As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: +Comme décrit dans les sections précédentes, vous devez choisir un indexeur qui est transparent et honnête dans la fixation de sa réduction des frais de requête et d'indexation. Un délégué doit également examiner le temps de refroidissement des paramètres pour voir de combien de temps il dispose. Après cela, il est assez simple de calculer le montant des récompenses que les délégués reçoivent. La formule est la suivante : -![Delegation Image 3](/img/Delegation-Reward-Formula.png) +![Délégation Image 3](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### Compte tenu du pool de délégation de l'indexeur -Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: +Une autre chose qu'un délégant doit prendre en compte est la proportion du pool de délégation qu'il possède. Toutes les récompenses de délégation sont partagées équitablement, avec un simple rééquilibrage du pool déterminé par le montant que le délégant a déposé dans le pool. Cela donne au délégant une part du pool : -![Share formula](/img/Share-Forumla.png) +![Formule de partage](/img/Share-Forumla.png) -Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. +En utilisant cette formule, nous pouvons voir qu'il est en fait possible pour un indexeur qui n'offre que 20 % aux délégants d'offrir aux délégants une récompense encore meilleure qu'un indexeur qui donne 90 % aux délégants. -A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. +Un délégué peut donc faire le calcul pour déterminer que l'indexeur qui offre 20 % aux délégués propose un meilleur rendement. -### Considering the delegation capacity +### Compte tenu de la capacité de délégation -Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. +Une autre chose à considérer est la capacité de délégation. Actuellement, le ratio de délégation est fixé à 16. Cela signifie que si un indexeur a mis en jeu 1 000 000 GRT, sa capacité de délégation est de 16 000 000 GRT de jetons délégués qu'il peut utiliser dans le protocole. Tout jeton délégué dépassant ce montant diluera toutes les récompenses du délégué. -Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. 
This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. +Imaginons qu'un indexeur se voit déléguer 100 000 000 GRT et que sa capacité ne soit que de 16 000 000 GRT. Cela signifie que 84 000 000 GRT ne sont pas utilisés pour gagner des tokens. Et tous les délégués, ainsi que l'indexeur, gagnent beaucoup moins de récompenses qu'ils ne le pourraient. -Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. +Par conséquent, un délégant doit toujours prendre en compte la capacité de délégation d’un indexeur et en tenir compte dans sa prise de décision. -## Delegator FAQs and Bugs +## FAQ et bugs pour les délégants -### MetaMask "Pending Transaction" Bug +### Bug MetaMask « Transaction en attente » -**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. What should I do?** +**Lorsque j'essaie de déléguer ma transaction dans MetaMask, elle apparaît comme « En attente » ou « En file d'attente » plus longtemps que prévu. Que dois-je faire ?** -At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transactions attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browsesr (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users that have encountered this issue and have reported successful transactions after restarting their browser and attempting to delegate. +Parfois, les tentatives de délégation aux indexeurs via MetaMask peuvent échouer et entraîner des périodes prolongées de tentatives de transactions "en attente" ou "en file d'attente". Par exemple, un utilisateur peut tenter de déléguer avec des frais de gaz insuffisants par rapport aux prix actuels, ce qui fait que la tentative de transaction s'affiche comme "en attente" dans son portefeuille MetaMask pendant plus de 15 minutes. Dans ce cas, l'utilisateur peut tenter d'effectuer des transactions ultérieures, mais celles-ci ne seront pas traitées tant que la transaction initiale n'aura pas été minée, car les transactions pour une adresse doivent être traitées dans l'ordre. Dans ce cas, ces transactions peuvent être annulées dans MetaMask, mais les tentatives de transactions accumuleront des frais de gaz sans aucune garantie que les tentatives suivantes aboutissent. Une solution plus simple consiste à redémarrer le navigateur (par exemple en utilisant "abort:restart" dans la barre d'adresse), ce qui annulera toutes les tentatives précédentes sans que le gaz ne soit soustrait du portefeuille. Plusieurs utilisateurs qui ont rencontré ce problème ont signalé des transactions réussies après avoir redémarré leur navigateur et tenté de déléguer.
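To make the expected-return reasoning from the sections above concrete, here is a worked sketch of the share and cut arithmetic; every figure below (reward amounts, pool sizes, cut percentages, the Delegator's 10,000 GRT) is hypothetical and chosen only for illustration.

```latex
% Illustrative only: all figures below are hypothetical.
\[ \text{share} = \frac{\text{GRT deposited by the Delegator}}{\text{total GRT in the Indexer's delegation pool}} \]
\[ \text{Delegator rewards} = \text{(portion offered to Delegators)} \times \text{(total rewards)} \times \text{share} \]
% Indexer A offers 90% to Delegators, earns 1,000 GRT of rewards, and has a 1,000,000 GRT pool.
% A Delegator with 10,000 GRT delegated to A receives:
\[ 0.9 \times 1000 \times \tfrac{10000}{1000000} = 9 \text{ GRT} \]
% Indexer B offers only 20%, earns the same 1,000 GRT of rewards, but has a 20,000 GRT pool:
\[ 0.2 \times 1000 \times \tfrac{10000}{20000} = 100 \text{ GRT} \]
```

The same back-of-the-envelope style applies to delegation capacity: with the Delegation Ratio at 16, any GRT delegated beyond 16 times an Indexer's own stake earns nothing and only dilutes the pool.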
-## Video guide for the network UI +## Guide vidéo pour l'interface utilisateur du réseau -This guide provides a full review of this document, and how to consider everything in this document while interacting with the UI. +Ce guide présente un examen complet de ce document et explique comment tenir compte de tous les éléments qu'il contient lors de l'utilisation de l'interface utilisateur. diff --git a/website/pages/fr/network/developing.mdx b/website/pages/fr/network/developing.mdx index e40701fe1849..1b47b0ae079e 100644 --- a/website/pages/fr/network/developing.mdx +++ b/website/pages/fr/network/developing.mdx @@ -1,12 +1,12 @@ --- -title: Développement +title: Le Développement --- Developers are the demand side of The Graph ecosystem. Developers build subgraphs and publish them to The Graph Network. Then, they query live subgraphs with GraphQL in order to power their applications. -## Flux du cycle de vie des subgraphes +## Flux du cycle de vie des subgraphs -Subgraphs deployed to the network have a defined lifecycle. +Les subgraphs déployés sur le réseau ont un cycle de vie défini. ### Build locally diff --git a/website/pages/fr/network/explorer.mdx b/website/pages/fr/network/explorer.mdx index 8a826eeb1b84..7d9abbc8eb3e 100644 --- a/website/pages/fr/network/explorer.mdx +++ b/website/pages/fr/network/explorer.mdx @@ -1,30 +1,30 @@ --- -title: Graph Explorer +title: Explorateur Graph --- -Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below): +Bienvenue sur le Graph Explorer, ou comme nous aimons l'appeler, votre portail décentralisé dans le monde des subgraphs et des données de réseau. 👩🏽‍🚀 L'Explorateur de graphs se compose de plusieurs parties où vous pouvez interagir avec d'autres développeurs de subgraphs, développeurs de dapps, curateurs, indexeurs et délégués. Pour un aperçu général de l'explorateur de graphs, regardez la vidéo ci-dessous (ou continuez à lire ci-dessous) : -## Subgraphes +## Subgraphs -First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name. +Tout d'abord, si vous venez de terminer le déploiement et la publication de votre subgraph dans Subgraph Studio, l'onglet Subgraphs en haut de la barre de navigation est l'endroit où vous pouvez afficher vos propres subgraphs terminés (et ceux des autres) sur le réseau décentralisé. Ici, vous pourrez trouver le subgraph exact que vous recherchez en fonction de la date de création, du montant du signal ou du nom. ![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) -When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. 
This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries. +Lorsque vous cliquerez sur un subgraph, vous pourrez tester des requêtes dans l'aire de jeu et exploiter les détails du réseau pour prendre des décisions éclairées. Vous pourrez également signaler le GRT sur votre propre subgraph ou sur les subgraphs d'autres personnes afin de sensibiliser les indexeurs à son importance et à sa qualité. Ceci est essentiel car le fait de signaler un subgraph incite à l'indexer, ce qui signifie qu'il fera surface sur le réseau pour éventuellement répondre à des requêtes. ![Explorer Image 2](/img/Subgraph-Details.png) -On each subgraph’s dedicated page, several details are surfaced. These include: +Sur la page dédiée à chaque subgraph, plusieurs détails font surface. Il s'agit notamment de: -- Signal/Un-signal on subgraphs -- View more details such as charts, current deployment ID, and other metadata -- Switch versions to explore past iterations of the subgraph -- Query subgraphs via GraphQL +- Signal/Un-signal sur les subgraphs +- Afficher plus de détails tels que des graphs, l'ID de déploiement actuel et d'autres métadonnées +- Passer d'une version à l'autre pour explorer les itérations passées du subgraph +- Interroger les subgraphs via GraphQL - Test subgraphs in the playground -- View the Indexers that are indexing on a certain subgraph -- Subgraph stats (allocations, Curators, etc) +- Afficher les indexeurs qui indexent sur un certain subgraph +- Statistiques du subgraph (allocations, conservateurs, etc.) - View the entity who published the subgraph ![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) diff --git a/website/pages/fr/network/indexing.mdx b/website/pages/fr/network/indexing.mdx index 0210b9447fd9..8cb1418aa24d 100644 --- a/website/pages/fr/network/indexing.mdx +++ b/website/pages/fr/network/indexing.mdx @@ -1,8 +1,8 @@ --- -title: Indexing +title: Indexage --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -10,21 +10,21 @@ Indexers select subgraphs to index based on the subgraph’s curation signal, wh -## FAQ +## Questions fréquemment posées ### What is the minimum stake required to be an Indexer on the network? -The minimum stake for an Indexer is currently set to 100K GRT. +La mise minimale pour un indexeur est actuellement fixée à 100 000 GRT. -### What are the revenue streams for an Indexer? +### Quelles sont les sources de revenus pour un indexeur ? -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. 
Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. +**Remises sur les frais de requête** : paiements pour le traitement des requêtes sur le réseau. Ces paiements sont acheminés via des canaux d'état entre un indexeur et une passerelle. Chaque demande de requête provenant d'une passerelle contient un paiement et la réponse correspondante, une preuve de validité du résultat de la requête. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Récompenses d'indexation** : générées via une inflation annuelle de 3 % à l'échelle du protocole, les récompenses d'indexation sont distribuées aux indexeurs qui indexent les déploiements de subgraphs pour le réseau. -### How are indexing rewards distributed? +### Comment sont distribuées les récompenses d’indexation ? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Les récompenses d'indexation proviennent de l'inflation du protocole, qui est fixée à 3 % par an. Elles sont réparties entre les subgraphs en fonction de la proportion de tous les signaux de curation sur chacun, puis distribuées proportionnellement aux indexeurs en fonction de leur participation allouée sur ce subgraph. **Une allocation doit être clôturée avec une preuve d'indexation (POI) valide et répondant aux normes fixées par la charte d'arbitrage afin d'être éligible aux récompenses.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries.
It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -147,7 +147,7 @@ Note: To support agile scaling, it is recommended that query and indexing concer > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. -#### Nœud The Graph +#### Nœud de The Graph | Port | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | @@ -295,7 +295,7 @@ kubectl config use-context $(kubectl config get-contexts --output='name' Deploy all resources with `kubectl apply -k $dir`. -### Nœud The Graph +### Nœud de The Graph [Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. 
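Since the GraphQL endpoint is what most consumers of a Graph Node actually touch, a quick smoke test against a locally running node can be useful; this is only a sketch, and `my-org/my-subgraph` is a placeholder for whatever name the subgraph was deployed under.

```bash
# Smoke-test a locally deployed subgraph via Graph Node's HTTP query endpoint (default port 8000).
# "my-org/my-subgraph" is a placeholder; substitute the name used at deployment time.
curl -s -X POST http://localhost:8000/subgraphs/name/my-org/my-subgraph \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ _meta { block { number } } }"}'
```

While the subgraph is syncing, the `_meta.block.number` value should keep climbing as new blocks are ingested.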
@@ -338,7 +338,7 @@ cargo run -p graph-node --release -- \ #### Getting started using Docker -#### Prerequisites +#### Conditions préalables - **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. @@ -457,7 +457,7 @@ docker run -p 18000:8000 -it indexer-agent:latest ... See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### Utilisation +#### Usage > **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). @@ -529,7 +529,7 @@ graph indexer status The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Utilisation +#### Usage The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. 
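For orientation, a first session with the Indexer CLI usually just connects to the agent's management API and lists the current indexing rules; the URL below assumes the management port has been forwarded to localhost and is only an example.

```bash
# Connect the Indexer CLI to the agent's Indexer Management API
# (http://localhost:18000 is an assumed port-forward; adjust to your setup).
graph indexer connect http://localhost:18000

# Inspect the indexing rules that currently drive allocation decisions.
graph indexer rules get all
```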
@@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -739,7 +739,7 @@ Example query costing using the above model: #### Applying the cost model -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. +Les modèles de coûts sont appliqués via la CLI Indexer, qui les transmet à l'API de gestion de l'indexeur de l'agent Indexer pour les stocker dans la base de données. Le service d'indexation les récupérera ensuite et fournira les modèles de coûts aux passerelles chaque fois qu'elles les demanderont. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' @@ -762,7 +762,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/networ 3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Sous Environnement, sélectionnez `Injected Web3` et sous `Compte` sélectionnez votre adresse d'indexeur. 5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. @@ -776,7 +776,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/networ 3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Sous Environnement, sélectionnez `Injected Web3` et sous `Compte` sélectionnez votre adresse d'indexeur. 5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. 
- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/fr/network/overview.mdx b/website/pages/fr/network/overview.mdx index b0d9d932dc9d..a09b708c81d9 100644 --- a/website/pages/fr/network/overview.mdx +++ b/website/pages/fr/network/overview.mdx @@ -2,11 +2,11 @@ title: Network Overview --- -Le réseau The Graph est un protocole d'indexation décentralisé qui a pour but d'organiser les données de la blockchain. Les applications utilisent alors GraphQL pour interroger des API ouvertes appelées subgraphes, afin de récupérer les données indexées sur le réseau. Par conséquent, les développeurs peuvent créer des applications qui ne nécessitent pas de serveur et qui fonctionnent entièrement sur l'infrastructure publique. +Le réseau The Graph est un protocole d'indexation décentralisé qui a pour but d'organiser les données de la blockchain. Les applications utilisent alors GraphQL pour interroger des API ouvertes appelées subgraphs, afin de récupérer les données indexées sur le réseau. Par conséquent, les développeurs peuvent créer des applications qui ne nécessitent pas de serveur et qui fonctionnent entièrement sur l'infrastructure publique. -## Vue d'ensemble +## Aperçu -Le réseau The Graph se compose d'indexeurs, de curateurs et de délegateurs qui fournissent des services au réseau et servent des données aux applications Web3. Les consommateurs, quant à eux, utilisent les applications et consomment les données. +Le réseau Graph se compose d'indexeurs, de conservateurs et de délégués qui fournissent des services au réseau et servent des données aux applications Web3. Les clients utilisent les applications et consomment les données. ![Token Economics](/img/Network-roles@2x.png) diff --git a/website/pages/fr/new-chain-integration.mdx b/website/pages/fr/new-chain-integration.mdx index c5934efa6f87..55784afce054 100644 --- a/website/pages/fr/new-chain-integration.mdx +++ b/website/pages/fr/new-chain-integration.mdx @@ -11,21 +11,21 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. 
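To make "Graph Node configuration and testing" concrete, the following sketch shows roughly what pointing a locally built Graph Node at a new EVM chain's JSON-RPC endpoint looks like; the network name `mynet`, the Postgres URL and the RPC URL are placeholders, and the IPFS endpoint is the public one referenced later on this page.

```bash
# Sketch: index a new EVM chain with a locally built Graph Node.
# "mynet", the Postgres URL and the RPC URL are placeholders for your own setup.
cargo run -p graph-node --release -- \
  --postgres-url postgresql://graph:graph@localhost:5432/graph-node \
  --ethereum-rpc mynet:http://localhost:8545 \
  --ipfs https://api.thegraph.com/ipfs/
```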
-If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then integrating that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +Les nouveaux intégrateurs de chaîne EVM peuvent également envisager l'approche basée sur Firehose, compte tenu des avantages de Substreams et de ses capacités d'indexation massivement parallélisées. La prise en charge des deux permet aux développeurs de choisir entre la création de Substreams ou de subgraphs pour la nouvelle chaîne. > **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph.
Some options are below: diff --git a/website/pages/fr/operating-graph-node.mdx b/website/pages/fr/operating-graph-node.mdx index a191f347021b..677f17908d8e 100644 --- a/website/pages/fr/operating-graph-node.mdx +++ b/website/pages/fr/operating-graph-node.mdx @@ -1,36 +1,36 @@ --- -title: Operating Graph Node +title: Exploitation de Graph Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Node est le composant qui indexe les subgraphs et rend les données résultantes disponibles pour interrogation via une API GraphQL. En tant que tel, il est au cœur de la pile de l’indexeur, et le bon fonctionnement de Graph Node est crucial pour exploiter un indexeur avec succès. -This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). +Cela fournit un aperçu contextuel de Graph Node et de certaines des options les plus avancées disponibles pour les indexeurs. Une documentation et des instructions détaillées sont disponibles dans le [référentiel Graph Node](https://github.com/graphprotocol/graph-node). -## Nœud The Graph +## Nœud de The Graph -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Node](https://github.com/graphprotocol/graph-node) est l'implémentation de référence pour l'indexation des subgraphs sur The Graph Network : il se connecte aux clients blockchain, indexe les subgraphs et rend les données indexées disponibles pour interrogation. -Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Node (et l'ensemble de la pile d'indexation) peut être exécuté sur du matériel physique (bare metal) ou dans un environnement cloud. Cette flexibilité du composant d’indexation central est cruciale pour la robustesse du Graph Protocol. De même, Graph Node peut être [créé à partir des sources](https://github.com/graphprotocol/graph-node), ou les indexeurs peuvent utiliser l'une des [images Docker fournies](https://hub.docker.com/r/graphprotocol/graph-node). ### PostgreSQL database -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +Le magasin principal du nœud de graph, c'est là que les données des sous-graphes sont stockées, ainsi que les métadonnées sur les subgraphs et les données réseau indépendantes des subgraphs telles que le cache de blocs et le cache eth_call. -### Network clients +### Clients réseau In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple.
While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Les métadonnées de déploiement de subgraphs sont stockées sur le réseau IPFS. Le nœud Graph accède principalement au nœud IPFS pendant le déploiement du sous-graphe pour récupérer le manifeste du subgraph et tous les fichiers liés. Les indexeurs de réseau n'ont pas besoin d'héberger leur propre nœud IPFS. Un nœud IPFS pour le réseau est hébergé sur https://ipfs.network.thegraph.com. -### Prometheus metrics server +### Serveur de métriques Prometheus -To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. +Pour activer la surveillance et la création de rapports, Graph Node peut éventuellement enregistrer les métriques sur un serveur de métriques Prometheus. ### Getting started from source @@ -71,11 +71,11 @@ cargo run -p graph-node --release -- \ ### Getting started with Kubernetes -A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +Un exemple complet de configuration de Kubernetes est disponible dans le [dépôt de l'indexeur](https://github.com/graphprotocol/indexer/tree/main/k8s). ### Ports -When it is running Graph Node exposes the following ports: +Lorsqu'il est en cours d'exécution, Graph Node expose les ports suivants : | Port | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | @@ -85,21 +85,21 @@ When it is running Graph Node exposes the following ports: | 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | | 8040 | Prometheus metrics | /metrics | --metrics-port | - | -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. 
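As a quick sanity check of the non-administrative ports in the table above, the sketch below queries the indexing status API and the Prometheus metrics endpoint on their default ports; the exact `indexingStatuses` fields are an assumption based on the index-node schema linked later on this page.

```bash
# Query the subgraph indexing status API (default port 8030).
curl -s -X POST http://localhost:8030/graphql \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatuses { subgraph synced health } }"}'

# Scrape the Prometheus metrics endpoint (default port 8040).
curl -s http://localhost:8040/metrics | head
```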
+> **Important** : Soyez prudent lorsque vous exposez les ports publiquement : les **ports d'administration** doivent rester verrouillés. Cela inclut le point de terminaison Graph Node JSON-RPC. -## Advanced Graph Node configuration +## Configuration avancée du nœud graph -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +Dans sa forme la plus simple, Graph Node peut être utilisé avec une seule instance de Graph Node, une seule base de données PostgreSQL, un nœud IPFS et les clients réseau selon les besoins des subgraphs à indexer. -This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. +Cette configuration peut être mise à l'échelle horizontalement, en ajoutant plusieurs nœuds graphs et plusieurs bases de données pour prendre en charge ces nœuds graphs. Les utilisateurs avancés voudront peut-être profiter de certaines des capacités de mise à l'échelle horizontale de Graph Node, ainsi que de certaines des options de configuration les plus avancées, via le fichier `config.toml` et les variables d'environnement de Graph Node. ### `config.toml` -A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. +Un fichier de configuration [TOML](https://toml.io/en/) peut être utilisé pour définir des configurations plus complexes que celles exposées dans la CLI. L'emplacement du fichier est transmis avec le commutateur de ligne de commande --config. -> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. +> Lors de l'utilisation d'un fichier de configuration, il n'est pas possible d'utiliser les options --postgres-url, --postgres-secondary-hosts et --postgres-host-weights. -A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: +Un fichier `config.toml` minimal peut être fourni ; le fichier suivant équivaut à l'utilisation de l'option de ligne de commande --postgres-url : ```toml [store] @@ -107,22 +107,22 @@ A minimal `config.toml` file can be provided; the following file is equivalent t connection="<.. postgres-url argument ..>" [deployment] [[deployment.rule]] -indexers = [ "<.. list of all indexing nodes ..>" ] +indexers = [ "<.. liste de tous les nœuds d'indexation ..>" ] ``` -Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +La documentation complète de `config.toml` est disponible dans la [documentation Graph Node](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). #### Multiple Graph Nodes -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. 
in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestor), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +L'indexation de Graph Node peut évoluer horizontalement, en exécutant plusieurs instances de Graph Node pour diviser l'indexation et les requêtes sur différents nœuds. Cela peut être fait simplement en exécutant des nœuds graphs configurés avec un `node_id` différent au démarrage (par exemple dans le fichier Docker Compose), qui peut ensuite être utilisé dans le fichier `config.toml`. pour spécifier des [nœuds de requête dédiés](#dedicated-query-nodes), des [ingesteurs de blocs](#dedicated-block-ingestor) et diviser les subgraphs entre les nœuds avec des [règles de déploiement](#deployment-rules). -> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. +> Notez que plusieurs nœuds de graph peuvent tous être configurés pour utiliser la même base de données, qui elle-même peut être mise à l'échelle horizontalement via le partitionnement. -#### Deployment rules +#### Règles de déploiement -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Étant donné plusieurs nœuds de graph, il est nécessaire de gérer le déploiement de nouveaux subgraphs afin qu'un même subgraph ne soit pas indexé par deux nœuds différents, ce qui entraînerait des collisions. Cela peut être fait à l'aide de règles de déploiement, qui peuvent également spécifier dans quelle `partition` les données d'un subgraph doivent être stockées, si la partition de base de données est utilisée. Les règles de déploiement peuvent correspondre au nom du subgraph et au réseau que le déploiement indexe afin de prendre une décision. -Example deployment rule configuration: +Exemple de configuration de règle de déploiement : ```toml [deployment] @@ -150,51 +150,51 @@ indexers = [ ] ``` -Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +En savoir plus sur les règles de déploiement [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). -#### Dedicated query nodes +#### Nœuds de requête dédiés -Nodes can be configured to explicitly be query nodes by including the following in the configuration file: +Les nœuds peuvent être configurés pour être explicitement des nœuds de requête en incluant les éléments suivants dans le fichier de configuration : ```toml [general] query = "" ``` -Any node whose --node-id matches the regular expression will be set up to only respond to queries. +Tout nœud dont --node-id correspond à l'expression régulière sera configuré pour répondre uniquement aux requêtes. -#### Database scaling via sharding +#### Mise à l'échelle de la base de données via le partitionnement -For most use cases, a single Postgres database is sufficient to support a graph-node instance. 
When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. +Pour la plupart des cas d'utilisation, une seule base de données Postgres suffit pour prendre en charge une instance de nœud graph. Lorsqu'une instance de nœud graph dépasse une seule base de données Postgres, il est possible de diviser le stockage des données de nœud graph sur plusieurs bases de données Postgres. Toutes les bases de données forment ensemble le magasin de l’instance de nœud graph. Chaque base de données individuelle est appelée une partition (shard). -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Les partitions peuvent être utilisées pour répartir les déploiements de subgraphs sur plusieurs bases de données, et peuvent également être utilisées pour utiliser des réplicas afin de répartir la charge des requêtes entre les bases de données. Cela inclut la configuration du nombre de connexions de base de données disponibles que chaque `graph-node` doit conserver dans son pool de connexions pour chaque base de données, ce qui devient de plus en plus important à mesure que de plus en plus de subgraphs sont indexés. -Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. +Le partitionnement devient utile lorsque votre base de données existante ne peut pas suivre la charge que Graph Node lui impose et lorsqu'il n'est plus possible d'augmenter la taille de la base de données. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> Il est généralement préférable de créer une base de données unique aussi grande que possible avant de commencer avec des partitions. Une exception est lorsque le trafic des requêtes est réparti de manière très inégale entre les subgraphs ; dans ces situations, cela peut être considérablement utile si les subgraphs à volume élevé sont conservés dans une partition et tout le reste dans une autre, car cette configuration rend plus probable que les données des subgraphs à volume élevé restent dans le cache interne de la base de données et ne soient pas remplacées par des données moins demandées provenant de subgraphs à faible volume. -In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics.
Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. +En termes de configuration des connexions, commencez par max_connections dans postgresql.conf défini sur 400 (ou peut-être même 200) et regardez les métriques store_connection_wait_time_ms et store_connection_checkout_count Prometheus. Des temps d'attente notables (tout ce qui dépasse 5 ms) indiquent qu'il y a trop peu de connexions disponibles ; des temps d'attente élevés seront également dus au fait que la base de données est très occupée (comme une charge CPU élevée). Cependant, si la base de données semble par ailleurs stable, des temps d'attente élevés indiquent la nécessité d'augmenter le nombre de connexions. Dans la configuration, le nombre de connexions que chaque instance de nœud graph peut utiliser constitue une limite supérieure, et Graph Node ne maintiendra pas les connexions ouvertes s'il n'en a pas besoin. -Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +En savoir plus sur la configuration du magasin [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). -#### Dedicated block ingestion +#### Ingestion de blocs dédiés -If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: +Si plusieurs nœuds sont configurés, il sera nécessaire de spécifier un nœud responsable de l'ingestion de nouveaux blocs, afin que tous les nœuds d'index configurés n'interrogent pas la tête de chaîne. Cela se fait dans le cadre de l'espace de noms `chains`, en spécifiant le `node_id` à utiliser pour l'ingestion de bloc : ```toml [chains] ingestor = "block_ingestor_node" ``` -#### Supporting multiple networks +#### Prise en charge de plusieurs réseaux -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +Le protocole Graph augmente le nombre de réseaux pris en charge pour l'indexation des récompenses, et il existe de nombreux subgraphs indexant des réseaux non pris en charge qu'un indexeur aimerait traiter. Le fichier `config.toml` permet une configuration expressive et flexible de : -- Multiple networks -- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). 
-- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) +- Plusieurs réseaux +- Plusieurs fournisseurs par réseau (cela peut permettre de répartir la charge entre les fournisseurs, et peut également permettre la configuration de nœuds complets ainsi que de nœuds d'archives, Graph Node préférant les fournisseurs moins chers si une charge de travail donnée le permet). +- Détails supplémentaires sur le fournisseur, tels que les fonctionnalités, l'authentification et le type de fournisseur (pour la prise en charge expérimentale de Firehose) -The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. +La section `[chains]` contrôle les fournisseurs Ethereum auxquels graph-node se connecte et où sont stockés les blocs et autres métadonnées de chaque chaîne. L'exemple suivant configure deux chaînes, mainnet et kovan, où les blocs pour le réseau principal sont stockés dans la partition vip et les blocs pour kovan sont stockés dans la partition principale. La chaîne du mainnet peut utiliser deux fournisseurs différents, alors que kovan n'a qu'un seul fournisseur. ```toml [chains] @@ -210,136 +210,136 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +En savoir plus sur la configuration du fournisseur [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). -### Environment variables +### Variables d'environnement -Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node prend en charge une gamme de variables d'environnement qui peuvent activer des fonctionnalités ou modifier le comportement de Graph Node. Ceux-ci sont documentés [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). -### Continuous deployment +### Déploiement continu -Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. +Les utilisateurs qui utilisent une configuration d'indexation à grande échelle avec une configuration avancée peuvent bénéficier de la gestion de leurs nœuds graph avec Kubernetes. -- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. +- Le dépôt de l'indexeur contient un [exemple de référence Kubernetes](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) est une boîte à outils permettant d'exécuter un indexeur de protocole graph sur Kubernetes géré par GraphOps. 
Il fournit un ensemble de charts Helm et une CLI pour gérer un déploiement de Graph Node. -### Managing Graph Node +### Gestion du nœud de graph -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Étant donné un nœud de graph en cours d'exécution (ou des nœuds de graph !), le défi consiste alors à gérer les subgraphs déployés sur ces nœuds. Graph Node propose une gamme d'outils pour vous aider à gérer les subgraphs. -#### Journal de bord +#### Logging -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Les journaux de Graph Node peuvent fournir des informations utiles pour le débogage et l'optimisation de Graph Node et de subgraphs spécifiques. Graph Node prend en charge différents niveaux de journalisation via la variable d'environnement `GRAPH_LOG`, avec les niveaux suivants : error, warn, info, debug ou trace. -In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). +De plus, définir `GRAPH_LOG_QUERY_TIMING` sur `gql` fournit plus de détails sur la façon dont les requêtes GraphQL sont exécutées (bien que cela génère un grand volume de journaux). #### Monitoring & alerting -Graph Node provides the metrics via Prometheus endpoint on 8040 port by default. Grafana can then be used to visualise these metrics. +Graph Node fournit les métriques via le point de terminaison Prometheus sur le port 8040 par défaut. Grafana peut ensuite être utilisé pour visualiser ces métriques. -The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +Le référentiel de l'indexeur fournit un [exemple de configuration Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. +`graphman` est un outil de maintenance pour Graph Node, aidant au diagnostic et à la résolution de différentes tâches quotidiennes et exceptionnelles. -The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. +La commande graphman est incluse dans les conteneurs officiels et vous pouvez docker exec dans votre conteneur graph-node pour l'exécuter. Elle nécessite un fichier `config.toml`. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +La documentation complète des commandes `graphman` est disponible dans le référentiel Graph Node.
Voir \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) dans le répertoire `/docs` de Graph Node -### Working with subgraphs +### Travailler avec des subgraphs -#### Indexing status API +#### API d'état d'indexation -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Disponible sur le port 8030/graphql par défaut, l'API d'état d'indexation expose une gamme de méthodes pour vérifier l'état d'indexation de différents subgraphs, vérifier les preuves d'indexation, inspecter les fonctionnalités des subgraphs et bien plus encore. -The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +Le schéma complet est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). -#### Indexing performance +#### Performances d'indexation -There are three separate parts of the indexing process: +Le processus d'indexation comporte trois parties distinctes : -- Fetching events of interest from the provider -- Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) -- Writing the resulting data to the store +- Récupération des événements d'intérêt auprès du fournisseur +- Traiter les événements dans l'ordre avec les gestionnaires appropriés (cela peut impliquer d'appeler la chaîne pour connaître l'état et de récupérer les données du magasin) +- Écriture des données résultantes dans le magasin -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +Ces étapes forment un pipeline (c’est-à-dire qu’elles peuvent être exécutées en parallèle), mais elles dépendent les unes des autres. Lorsque les subgraphs sont lents à indexer, la cause sous-jacente dépendra du subgraph spécifique. -Common causes of indexing slowness: +Causes courantes de lenteur d’indexation : -- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) -- Making large numbers of `eth_calls` as part of handlers -- A large amount of store interaction during execution -- A large amount of data to save to the store -- A large number of events to process -- Slow database connection time, for crowded nodes -- The provider itself falling behind the chain head -- Slowness in fetching new receipts at the chain head from the provider +- Temps nécessaire pour trouver les événements pertinents de la chaîne (les gestionnaires d'appels en particulier peuvent être lents, étant donné le recours à `trace_filter`) +- Effectuer un grand nombre d'`eth_calls` dans le cadre des gestionnaires +- Une grande quantité d'interactions avec le magasin pendant l'exécution +- Une grande quantité de données à sauvegarder dans le magasin +- Un grand nombre d'événements à traiter +- Temps de connexion à la base de données lent, pour les nœuds encombrés +- Le fournisseur lui-même prend du retard sur la tête de la chaîne +- Lenteur dans la récupération des nouveaux reçus (receipts) en tête de chaîne auprès du fournisseur -Subgraph indexing metrics can help diagnose the root cause of indexing slowness.
In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Les métriques d’indexation de subgraphs peuvent aider à diagnostiquer la cause première de la lenteur de l’indexation. Dans certains cas, le problème réside dans le subgraph lui-même, mais dans d'autres, des fournisseurs de réseau améliorés, une réduction des conflits de base de données et d'autres améliorations de configuration peuvent améliorer considérablement les performances d'indexation. -#### Failed subgraphs +#### Subgraphs ayant échoué -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +Lors de l'indexation, les subgraphs peuvent échouer s'ils rencontrent des données inattendues, si certains composants ne fonctionnent pas comme prévu ou s'il y a un bogue dans les gestionnaires d'événements ou la configuration. Il existe deux types généraux de pannes : -- Deterministic failures: these are failures which will not be resolved with retries -- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. +- Échecs déterministes : ce sont des échecs qui ne seront pas résolus par de nouvelles tentatives +- Échecs non déterministes : ils peuvent être dus à des problèmes avec le fournisseur ou à une erreur inattendue de Graph Node. Lorsqu'un échec non déterministe se produit, Graph Node réessaiera les gestionnaires défaillants, avec un délai croissant entre les tentatives. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +Dans certains cas, un échec peut être résolu par l'indexeur (par exemple, si l'erreur est due au fait de ne pas disposer du bon type de fournisseur, l'ajout du fournisseur requis permettra de poursuivre l'indexation). Cependant, dans d'autres cas, une modification du code du subgraph est requise. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Les échecs déterministes sont considérés comme « définitifs », avec une preuve d'indexation générée pour le bloc défaillant, tandis que les échecs non déterministes ne le sont pas, car le subgraph peut réussir à « se rétablir » (unfail) et continuer l'indexation. Dans certains cas, l'étiquette non déterministe est incorrecte et le subgraph ne surmontera jamais l'erreur ; ces échecs doivent être signalés en tant que problèmes sur le référentiel Graph Node. -#### Block and call cache +#### Cache de blocs et d'appels -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block).
This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Node met en cache certaines données dans le magasin afin d'économiser la récupération auprès du fournisseur. Les blocs sont mis en cache, tout comme les résultats de `eth_calls` (ces derniers étant mis en cache à partir d'un bloc spécifique). Cette mise en cache peut augmenter considérablement la vitesse d'indexation lors de la « resynchronisation » d'un subgraph légèrement modifié. However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. -If a block cache inconsistency is suspected, such as a tx receipt missing event: +Si une incohérence du cache de blocs est suspectée, par exemple un reçu de transaction (tx receipt) auquel il manque un événement : -1. `graphman chain list` to find the chain name. -2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. - 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. - 2. If the block matches the provider, then the issue can be debugged directly against the provider. +1. `graphman chain list` pour trouver le nom de la chaîne. +2. `graphman chain check-blocks by-number ` vérifiera si le bloc mis en cache correspond au fournisseur et supprimera le bloc du cache si ce n'est pas le cas. + 1. S'il y a une différence, il peut être plus sûr de tronquer tout le cache avec `graphman chain truncate `. + 2. Si le bloc correspond au fournisseur, le problème peut être débogué directement auprès du fournisseur. -#### Querying issues and errors +#### Problèmes et erreurs de requête -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Une fois qu'un subgraph a été indexé, les indexeurs peuvent s'attendre à traiter les requêtes via le point de terminaison de requête dédié du subgraph. Si l'indexeur espère traiter un volume de requêtes important, un nœud de requête dédié est recommandé, et en cas de volumes de requêtes très élevés, les indexeurs peuvent souhaiter configurer des fragments de réplique afin que les requêtes n'aient pas d'impact sur le processus d'indexation. However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. -There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. +Il n'existe pas de solution miracle, mais une gamme d'outils permettant de prévenir, de diagnostiquer et de traiter les requêtes lentes. -##### Query caching +##### Mise en cache des requêtes -Graph Node caches GraphQL queries by default, which can significantly reduce database load.
This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Node met en cache les requêtes GraphQL par défaut, ce qui peut réduire considérablement la charge de la base de données. Cela peut être configuré davantage avec les paramètres `GRAPH_QUERY_CACHE_BLOCKS` et `GRAPH_QUERY_CACHE_MAX_MEM` - pour en savoir plus [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). -##### Analysing queries +##### Analyser les requêtes -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Les requêtes problématiques apparaissent le plus souvent de deux manières. Dans certains cas, les utilisateurs eux-mêmes signalent qu'une requête donnée est lente. Dans ce cas, le défi consiste à diagnostiquer la raison de la lenteur, qu'il s'agisse d'un problème général ou spécifique à ce subgraph ou à cette requête. Et puis bien sûr de le résoudre, si possible. -In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. +Dans d'autres cas, le déclencheur peut être une utilisation élevée de la mémoire sur un nœud de requête, auquel cas le défi consiste d'abord à identifier la requête à l'origine du problème. -Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. +Les indexeurs peuvent utiliser [qlog](https://github.com/graphprotocol/qlog/) pour traiter et résumer les journaux de requêtes de Graph Node. `GRAPH_LOG_QUERY_TIMING` peut également être activé pour aider à identifier et déboguer les requêtes lentes. -Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. +Étant donné une requête lente, les indexeurs disposent de quelques options. Bien entendu, ils peuvent modifier leur modèle de coûts pour augmenter considérablement le coût d’envoi de la requête problématique. Cela peut entraîner une réduction de la fréquence de cette requête. Cependant, cela ne résout souvent pas la cause première du problème. -##### Account-like optimisation +##### Optimisation de type compte -Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. 
Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) +Les tables de base de données qui stockent les entités semblent généralement se décliner en deux variétés : les tables de type « transaction », où les entités, une fois créées, ne sont jamais mises à jour, c'est-à-dire qu'elles stockent quelque chose qui s'apparente à une liste de transactions financières, et les tables « de type compte », où les entités sont mises à jour très souvent, c'est-à-dire qu'elles stockent quelque chose comme des comptes financiers qui sont modifiés à chaque fois qu'une transaction est enregistrée. Les tables de type compte se caractérisent par le fait qu'elles contiennent un grand nombre de versions d'entités, mais relativement peu d'entités distinctes. Souvent, dans de telles tables, le nombre d'entités distinctes représente 1 % du nombre total de lignes (versions d'entités) -For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. +Pour les tables de type compte, `graph-node` peut générer des requêtes qui tirent parti des détails de la façon dont Postgres finit par stocker les données avec un taux de changement si élevé, à savoir que toutes les versions des blocs récents se trouvent dans une petite sous-section du stockage global pour une telle table. -The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. +La commande `graphman stats show` indique, pour chaque type d'entité/table dans un déploiement, le nombre d'entités distinctes et le nombre de versions d'entité que chaque table contient. Ces données sont basées sur des estimations internes à Postgres et sont donc nécessairement imprécises et peuvent être erronées d'un ordre de grandeur. Un `-1` dans la colonne `entities` signifie que Postgres estime que toutes les lignes contiennent une entité distincte. In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. Once a table has been determined to be account-like, running `graphman stats account-like .
` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +Pour les subgraphs de type Uniswap, les tables `pair` et `token` sont les meilleurs candidats pour cette optimisation et peuvent avoir un effet considérable sur la charge de la base de données. #### Removing subgraphs -> This is new functionality, which will be available in Graph Node 0.29.x +> Il s'agit d'une nouvelle fonctionnalité qui sera disponible dans Graph Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +À un moment donné, un indexeur souhaitera peut-être supprimer un subgraph donné. Cela peut être facilement fait via `graphman drop`, qui supprime un déploiement et toutes ses données indexées. Le déploiement peut être spécifié sous la forme d'un nom de subgraph, d'un hachage IPFS `Qm..` ou de l'espace de noms de base de données `sgdNNN`. Une documentation supplémentaire est disponible [ici](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). diff --git a/website/pages/fr/publishing/publishing-a-subgraph.mdx b/website/pages/fr/publishing/publishing-a-subgraph.mdx index 102ff5ecf6a5..1a2992b1c93d 100644 --- a/website/pages/fr/publishing/publishing-a-subgraph.mdx +++ b/website/pages/fr/publishing/publishing-a-subgraph.mdx @@ -1,24 +1,24 @@ --- -title: Publication d'un subgraphe sur le réseau décentralisé +title: Publication d'un subgraph sur le réseau décentralisé --- -Une fois que votre subgraphe a été [déployé dans Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), que vous l'avez testé et que vous êtes prêt à le mettre en production, vous pouvez le déployer sur le réseau décentralisé. +Une fois que votre subgraph a été [déployé dans Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), que vous l'avez testé et que vous êtes prêt à le mettre en production, vous pouvez le déployer sur le réseau décentralisé. -La publication d'un subgraphe sur le réseau décentralisé permet aux [curateurs](/network/curating) de commencer à signaler et aux [indexeurs](/network/indexing) de l'indexer. +La publication d'un subgraph sur le réseau décentralisé permet aux [curateurs](/network/curating) de commencer à signaler et aux [indexeurs](/network/indexing) de l'indexer. -Afin d'obtenir de plus amples informations sur la manière de déployer sur le réseau décentralisé, consultez cette [vidéo](https://youtu.be/HfDgC2oNnwo?t=580). + Vous pouvez trouver la liste des réseaux supportés [ici](/developing/supported-networks). 
-## Publishing a subgraph +## Publier un subgraph -Les subgraphes peuvent être publiés sur le réseau décentralisé directement depuis le tableau de bord du subgraphe Studio en cliquant sur le bouton **Publier**. Une fois qu'un subgraphe est publié, il sera disponible dans l'[explorateur The Graph](https://thegraph.com/explorer/). +Les subgraphs peuvent être publiés sur le réseau décentralisé directement depuis le tableau de bord du subgraph Studio en cliquant sur le bouton **Publier**. Une fois qu'un subgraph est publié, il sera disponible dans l'[explorateur The Graph](https://thegraph.com/explorer/). - Subgraphs can be published to Goerli, Arbitrum goerli, Arbitrum One, or Ethereum mainnet. - Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). -- Lors de la publication d'une nouvelle version pour un subgraphe existant, les mêmes règles que ci-dessus s'appliquent. +- Lors de la publication d'une nouvelle version pour un subgraph existant, les mêmes règles que ci-dessus s'appliquent. ## Curating your subgraph @@ -30,4 +30,4 @@ Subgraph Studio enables you to be the first to curate your subgraph by adding GR ## Updating metadata for a published subgraph -Une fois que votre subgraphe a été publié sur le réseau décentralisé, vous pouvez modifier les métadonnées à tout moment en effectuant la mise à jour dans le tableau de bord Subgraph Studio du subgraphe. Après avoir enregistré les modifications et publié vos mises à jour sur le réseau, elles seront reflétées dans l'Explorateur The Graph. Cela ne créera pas de nouvelle version, puisque votre déploiement n'a pas changé. +Une fois que votre subgraph a été publié sur le réseau décentralisé, vous pouvez modifier les métadonnées à tout moment en effectuant la mise à jour dans le tableau de bord Subgraph Studio du subgraphe. Après avoir enregistré les modifications et publié vos mises à jour sur le réseau, elles seront reflétées dans l'Explorateur The Graph. Cela ne créera pas de nouvelle version, puisque votre déploiement n'a pas changé. diff --git a/website/pages/fr/querying/distributed-systems.mdx b/website/pages/fr/querying/distributed-systems.mdx index 85337206bfd3..eb1f30013c17 100644 --- a/website/pages/fr/querying/distributed-systems.mdx +++ b/website/pages/fr/querying/distributed-systems.mdx @@ -8,24 +8,24 @@ Connections fail. Requests arrive out of order. Different computers with out-of- Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. -1. Indexer ingests block 8 -2. Request served to the client for block 8 +1. L'indexeur ingère le bloc 8 +2. Demande transmise au client pour le bloc 8 3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +4. L'indexeur ingère le bloc 10A +5. Demande transmise au client pour le bloc 10A +6. L'indexeur détecte la réorganisation à 10B et annule 10A +7. Demande transmise au client pour le bloc 9 +8. L'indexeur ingère le bloc 10B +9. L'indexeur ingère le bloc 11 +10. Demande transmise au client pour le bloc 11 -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. 
Along the way, the Indexer serves requests using the latest state it knows about at that time. +Du point de vue de l'indexeur, les choses progressent logiquement. Le temps avance, bien que nous ayons dû revenir en arrière sur un bloc oncle et faire avancer le bloc faisant l'objet d'un consensus par-dessus. En cours de route, l'indexeur répond aux demandes en utilisant le dernier état dont il a connaissance à ce moment-là. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +Mais du point de vue du client, les choses semblent chaotiques. Le client observe que les réponses concernaient les blocs 8, 10, 9 et 11 dans cet ordre. Nous appelons cela le problème de « l’oscillation du bloc ». Lorsqu'un client subit une oscillation de blocage, les données peuvent sembler se contredire au fil du temps. La situation s'aggrave lorsque l'on considère que les indexeurs n'ingèrent pas tous les derniers blocs simultanément et que vos requêtes peuvent être acheminées vers plusieurs indexeurs. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +Il est de la responsabilité du client et du serveur de travailler ensemble pour fournir des données cohérentes à l'utilisateur. Différentes approches doivent être utilisées en fonction de la cohérence souhaitée, car il n’existe pas de programme adapté à chaque problème. -Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Il est difficile de raisonner sur les implications des systèmes distribués, mais la solution ne l'est pas nécessairement ! Nous avons établi des API et des modèles pour vous aider à naviguer dans certains cas d'utilisation courants. Les exemples suivants illustrent ces modèles tout en éludant les détails requis par le code de production (comme la gestion des erreurs et l'annulation) afin de ne pas obscurcir les idées principales. ## Polling for updated data @@ -78,7 +78,7 @@ async function updateProtocolPaused() { Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Ici, nous utiliserons l'argument `block: { hash: $blockHash }` pour épingler tous nos résultats dans le même bloc. 
```javascript /// Gets a list of domain names from a single block using pagination diff --git a/website/pages/fr/querying/graphql-api.mdx b/website/pages/fr/querying/graphql-api.mdx index 89cda460d58f..7dca36ee0f9e 100644 --- a/website/pages/fr/querying/graphql-api.mdx +++ b/website/pages/fr/querying/graphql-api.mdx @@ -2,13 +2,13 @@ title: GraphQL API --- -This guide explains the GraphQL Query API that is used for the Graph Protocol. +Ce guide explique l'API de requête GraphQL utilisée pour le protocole Graph. ## Queries In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph. -### Examples +### Exemples Query for a single `Token` entity defined in your schema: @@ -38,7 +38,7 @@ Query all `Token` entities: When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. -#### Example +#### Exemple ```graphql { @@ -121,7 +121,7 @@ The first time, it would send the query with `lastID = ""`, and for subsequent r ### Filtering -You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. +Vous pouvez utiliser le paramètre `where` dans vos requêtes pour filtrer différentes propriétés. Vous pouvez filtrer sur plusieurs valeurs dans le paramètre `where`. #### Example using `where` @@ -282,7 +282,7 @@ The result of such a query will not change over time, i.e., querying at a certai Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. -#### Example +#### Exemple ```graphql { @@ -298,7 +298,7 @@ Note that the current implementation is still subject to certain limitations tha This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. -#### Example +#### Exemple ```graphql { @@ -329,7 +329,7 @@ Fulltext search operators: | `<->` | `Follow by` | Specify the distance between two words. | | `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | -#### Examples +#### Exemples Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. @@ -376,9 +376,9 @@ Graph Node implements [specification-based](https://spec.graphql.org/October2021 ## Schema -The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). 
+Le schéma de votre source de données, c'est-à-dire les types d'entités, les valeurs et les relations disponibles pour l'interrogation, est défini par le [Langage de définition d'interface GraphQL (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. +Les schémas GraphQL définissent généralement des types racine pour les `requêtes`, les `abonnements` et les `mutations`. Le graphe ne prend en charge que les `requêtes`. Le type racine `Query` pour votre subgraph est automatiquement généré à partir du schéma GraphQL inclus dans le manifeste de votre subgraph. > **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. @@ -406,14 +406,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +Si un bloc est fourni, les métadonnées sont celles de ce bloc, sinon le dernier bloc indexé est utilisé. S'il est fourni, le bloc doit être postérieur au bloc de départ du subgraph et inférieur ou égal au bloc indexé le plus récent. -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` est un identifiant unique, correspondant au CID IPFS du fichier `subgraph.yaml`. -`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` fournit des informations sur le dernier bloc (en tenant compte des contraintes de bloc transmises à `_meta`) : -- hash: the hash of the block +- hash : le hash du bloc - number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- timestamp : l'horodatage du bloc, si disponible (ceci n'est actuellement disponible que pour les subgraphs indexant les réseaux EVM) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` est un booléen identifiant si le subgraph a rencontré des erreurs d'indexation au cours d'un bloc passé diff --git a/website/pages/fr/querying/managing-api-keys.mdx b/website/pages/fr/querying/managing-api-keys.mdx index ee7c274bca10..c878d79d19c9 100644 --- a/website/pages/fr/querying/managing-api-keys.mdx +++ b/website/pages/fr/querying/managing-api-keys.mdx @@ -1,12 +1,12 @@ --- -title: Managing your API keys +title: Gérer vos clés API --- -Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. +Que vous soyez un développeur dapp ou un développeur de subgraphs, vous devrez gérer vos clés API. Ceci est important pour que vous puissiez interroger les subgraphs, car les clés API garantissent que les connexions entre les services d'application sont valides et autorisées. 
Cela inclut l'authentification de l'utilisateur final et de l'appareil utilisant l'application. -The Studio will list out existing API keys, which will give you the ability to manage or delete them. +Le Studio répertoriera les clés API existantes, ce qui vous donnera la possibilité de les gérer ou de les supprimer. -1. The **Overview** section will allow you to: +1. La section **Vue d'ensemble** vous permettra de : - Edit your key name - Regenerate API keys - View the current usage of the API key with stats: diff --git a/website/pages/fr/querying/querying-best-practices.mdx b/website/pages/fr/querying/querying-best-practices.mdx index 7aedc9979470..10d504b7c708 100644 --- a/website/pages/fr/querying/querying-best-practices.mdx +++ b/website/pages/fr/querying/querying-best-practices.mdx @@ -2,15 +2,15 @@ title: Querying Best Practices --- -The Graph provides a decentralized way to query data from blockchains. +Le Graph fournit un moyen décentralisé d’interroger les données des blockchains. -The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. +Les données du réseau Graph sont exposées via une API GraphQL, ce qui facilite l'interrogation des données avec le langage GraphQL. -This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. +Cette page vous guidera à travers les règles essentielles du langage GraphQL et les meilleures pratiques en matière de requêtes GraphQL. --- -## Querying a GraphQL API +## Interroger une API GraphQL ### The anatomy of a GraphQL query @@ -63,7 +63,7 @@ While the list of syntactic do's and don'ts is long, here are the essential rule Failing to follow the above rules will end with an error from the Graph API. -For a complete list of rules with code examples, please look at our GraphQL Validations guide. +Pour une liste complète des règles avec des exemples de code, veuillez consulter notre guide de validations GraphQL. ### Sending a query to a GraphQL API @@ -73,7 +73,7 @@ It means that you can query a GraphQL API using standard `fetch` (natively or vi However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query +- Gestion des subgraphs inter-chaînes : interrogation à partir de plusieurs subgraphs en une seule requête - [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - Fully typed result @@ -104,7 +104,7 @@ main() More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +Maintenant que nous avons couvert les règles de base de la syntaxe des requêtes GraphQL, examinons maintenant les meilleures pratiques d'écriture de requêtes GraphQL. --- @@ -356,7 +356,7 @@ fragment MyFragment on BigInt { Fragments are defined on specific types and should be used accordingly in queries. 
-Exemple: +L'exemple: ```graphql query { diff --git a/website/pages/fr/querying/querying-from-an-application.mdx b/website/pages/fr/querying/querying-from-an-application.mdx index 30b6c2264d64..50f6fc2fb140 100644 --- a/website/pages/fr/querying/querying-from-an-application.mdx +++ b/website/pages/fr/querying/querying-from-an-application.mdx @@ -2,7 +2,7 @@ title: Querying from an Application --- -Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: +Une fois qu'un subgraph est déployé dans Subgraph Studio ou dans The Graph Explorer, vous recevrez le point de terminaison de votre API GraphQL qui devrait ressembler à ceci : **Subgraph Studio (testing endpoint)** @@ -11,7 +11,7 @@ Queries (HTTP) https://api.studio.thegraph.com/query/// ``` -**Graph Explorer** +**Explorateur Graph** ```sh Queries (HTTP) @@ -28,7 +28,7 @@ Here are a couple of the more popular GraphQL clients in the ecosystem and how t The Graph is providing it own GraphQL client, `graph-client` that supports unique features such as: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query +- Gestion des subgraphs inter-chaînes : interrogation à partir de plusieurs subgraphs en une seule requête - [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - Fully typed result diff --git a/website/pages/fr/querying/querying-the-graph.mdx b/website/pages/fr/querying/querying-the-graph.mdx index af9dcaaf2477..0c044b152622 100644 --- a/website/pages/fr/querying/querying-the-graph.mdx +++ b/website/pages/fr/querying/querying-the-graph.mdx @@ -6,7 +6,7 @@ With the subgraph deployed, visit the [Graph Explorer](https://thegraph.com/expl An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. -## Example +## Exemple This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: diff --git a/website/pages/fr/querying/querying-the-hosted-service.mdx b/website/pages/fr/querying/querying-the-hosted-service.mdx index cbccaee1112c..3ba2019be83a 100644 --- a/website/pages/fr/querying/querying-the-hosted-service.mdx +++ b/website/pages/fr/querying/querying-the-hosted-service.mdx @@ -1,28 +1,28 @@ --- -title: Interroger le Service Hébergé +title: Interrogation du Service Hébergé --- -Une fois le subgraphe déployé, visitez le [Service Hébergé](https://thegraph.com/hosted-service/) pour ouvrir une interface [GraphiQL](https://github.com/graphql/graphiql) où vous pouvez explorer l'API GraphQL déployée pour le subgraphe en émettant des requêtes et en visualisant le schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. -Un exemple est fourni ci-dessous, mais veuillez consulter l'[API de requête](/querying/graphql-api) pour une référence complète sur la façon d'interroger les entités du subgraphe. 
+Un exemple est fourni ci-dessous, mais veuillez consulter l'[API de requête](/querying/graphql-api) pour une référence complète sur la façon d'interroger les entités du subgraph. -## Example +## Exemple -Cette requête liste tous les compteurs que notre mapping a créés. Comme nous n'en créons qu'un seul, le résultat ne contiendra que notre seul `compteur par défaut` : +Cette requête répertorie tous les compteurs créés par notre mappage. Puisque nous n'en créons qu'un, le résultat ne contiendra qu'un seul `default-counter` : ```graphql { - counters { - id - value + counters { + id + value } } ``` -## Using The Hosted Service +## Using the hosted service -Le Graph Explorer et son terrain de jeu GraphQL est un moyen utile d'explorer et d'interroger les subgraphes déployés sur le service hébergé. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. -Certaines des principales fonctionnalités sont détaillées ci-dessous : +Certaines des principales caractéristiques sont détaillées ci-dessous : -![Explorer Playground](/img/explorer-playground.png) +![Explorer Playground](/img/explorer-playground.png) diff --git a/website/pages/fr/querying/querying-with-python.mdx b/website/pages/fr/querying/querying-with-python.mdx new file mode 100644 index 000000000000..9927754617b2 --- /dev/null +++ b/website/pages/fr/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Démarrage + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds.
+- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seamlessly save your data as CSVs for further analysis. diff --git a/website/pages/fr/quick-start.mdx b/website/pages/fr/quick-start.mdx new file mode 100644 index 000000000000..936794363ad8 --- /dev/null +++ b/website/pages/fr/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Début rapide +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +Ce guide est rédigé en supposant que vous possédez : + +- Une adresse de smart contract sur le réseau de votre choix +- GRT to curate your subgraph +- A crypto wallet + +## 1. Créez un subgraph sur Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Une fois connecté, vous pouvez commencer par cliquer sur « créer un subgraph ». Sélectionnez le réseau de votre choix et cliquez sur Continuer. + +## 2. Installez la CLI Graph + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +Sur votre machine locale, exécutez l'une des commandes suivantes : + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialiser votre subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +Lorsque vous initialisez votre subgraph, l'outil CLI vous demande les informations suivantes : + +- Protocole : choisissez le protocole à partir duquel votre subgraph indexera les données +- Slug de subgraph : créez un nom pour votre subgraph. Votre slug de subgraph est un identifiant pour votre subgraph. +- Répertoire dans lequel créer le subgraph : choisissez votre répertoire local +- Réseau Ethereum (facultatif) : vous devrez peut-être spécifier à partir de quel réseau compatible EVM votre subgraph indexera les données +- Adresse du contrat : localisez l'adresse du contrat intelligent à partir de laquelle vous souhaitez interroger les données +- ABI : si l'ABI n'est pas renseigné automatiquement, vous devrez le saisir manuellement sous forme de fichier JSON +- Bloc de démarrage : il est suggéré de saisir le bloc de démarrage pour gagner du temps pendant que votre subgraph indexe les données de la blockchain. Vous pouvez localiser le bloc de démarrage en recherchant le bloc dans lequel votre contrat a été déployé.
+- Nom du contrat : saisissez le nom de votre contrat +- Indexer les événements de contrat en tant qu'entités : il est suggéré de définir cette valeur sur true car cela ajoutera automatiquement des mappages à votre subgraph pour chaque événement émis +- Ajouter un autre contrat (facultatif) : vous pouvez ajouter un autre contrat + +Initialisez votre subgraph à partir d'un contrat existant en exécutant la commande suivante : + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Rédigez votre subgraph + +Les commandes précédentes créent un subgraph d'échafaudage que vous pouvez utiliser comme point de départ pour construire votre propre subgraph. Lorsque vous apporterez des modifications au subgraph, vous travaillerez principalement avec trois fichiers : + +- Manifest (subgraph.yaml) : le manifeste définit les sources de données que vos subgraphs indexeront. +- Schéma (schema.graphql) - Le schéma GraphQL définit les données que vous souhaitez récupérer du subgraph. +- Mappages AssemblyScript (mapping.ts) - Il s'agit du code qui traduit les données de vos sources de données vers les entités définies dans le schéma. + +Pour plus d'informations sur la façon d'écrire votre subgraph, voir [Création d'un subgraph](/developing/creating-a-subgraph). + +## 5. Déployer vers le Subgraph Studio + +Une fois votre subgraph écrit, exécutez les commandes suivantes : + +```sh +$ graph codegen +$ graph build +``` + +- Authentifiez et déployez votre subgraph. La clé de déploiement se trouve sur la page du subgraph dans Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as: `v1`, `version1`, `asdf`. + +## 6. Testez votre subgraph + +Vous pouvez tester votre subgraph en effectuant une requête type dans la section de l'aire de jeux. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publier votre subgraph sur le réseau décentralisé de The Graph + +Une fois que votre subgraph a été déployé dans le Subgraph Studio, que vous l'avez testé et que vous êtes prêt à le mettre en production, vous pouvez alors le publier sur le réseau décentralisé. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right.
+ +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Avant de pouvoir interroger votre subgraph, les indexeurs doivent commencer à servir des requêtes sur celui-ci. Afin de rationaliser ce processus, vous pouvez curer votre propre subgraph à l'aide de GRT. + +Au moment de la rédaction, il est recommandé de curer votre propre subgraph avec 10 000 GRT pour vous assurer qu'il est indexé et disponible pour interrogation dès que possible. + +Pour économiser sur les coûts de gaz, vous pouvez curer votre subgraph dans la même transaction que celle où vous le publiez en sélectionnant ce bouton lorsque vous publiez votre subgraph sur le réseau décentralisé de The Graph : + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Interrogez votre subgraph + +Désormais, vous pouvez interroger votre subgraph en envoyant des requêtes GraphQL à l'URL de requête de votre subgraph, que vous pouvez trouver en cliquant sur le bouton de requête. + +Si vous n'avez pas votre clé API, vous pouvez effectuer une requête à partir de votre dapp via l'URL de requête temporaire gratuite et limitée dans le temps, qui peut être utilisée pour le développement et la mise à l'essai. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/fr/release-notes/assemblyscript-migration-guide.mdx b/website/pages/fr/release-notes/assemblyscript-migration-guide.mdx index 46617e76303a..19878b0ca508 100644 --- a/website/pages/fr/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/fr/release-notes/assemblyscript-migration-guide.mdx @@ -132,7 +132,7 @@ You'll need to rename your duplicate variables if you had variable shadowing. ### Null Comparisons -By doing the upgrade on your subgraph, sometimes you might get errors like these: +En effectuant la mise à niveau sur votre subgraph, vous pouvez parfois obtenir des erreurs comme celles-ci : ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -167,7 +167,7 @@ However this only works in two scenarios: - Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); - Upcasting on class inheritance (subclass → superclass) -Exemples: +Exemples : ```typescript // primitive casting @@ -249,7 +249,7 @@ Also we've added a few more static methods in some types to ease casting, they a ### Nullability check with property access -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +Pour utiliser la [fonctionnalité de vérification de nullité](https://www.assemblyscript.org/basics.html#nullability-checks), vous pouvez utiliser soit les instructions `if`, soit l'opérateur ternaire (`?` et `:`) comme ceci : ```typescript let something: string | null = 'data' @@ -520,5 +520,5 @@ This changed because of nullability differences between AssemblyScript versions, - Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - The result of a `**` binary operation is now the common denominator integer if both operands are integers.
Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Lors du décalage d'une petite valeur entière de type `i8`/`u8` ou `i16`/`u16`, seuls les 3 (respectivement 4) bits les moins significatifs de la valeur RHS affectent le résultat, de la même manière que le résultat d'un `i32.shl` n'est affecté que par les 5 bits les moins significatifs de la valeur RHS. Exemple : `someI8 << 8` produisait auparavant la valeur `0`, mais produit désormais `someI8` en raison du masquage du RHS comme `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) - Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/pages/fr/release-notes/graphql-validations-migration-guide.mdx b/website/pages/fr/release-notes/graphql-validations-migration-guide.mdx index 0c8bfdbb49fb..86d4f151207b 100644 --- a/website/pages/fr/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/fr/release-notes/graphql-validations-migration-guide.mdx @@ -60,7 +60,7 @@ You can try out queries by sending them to: - `https://api-next.thegraph.com/subgraphs/id/` -ou +ou bien - `https://api-next.thegraph.com/subgraphs/name//` @@ -256,7 +256,7 @@ query { } ``` -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** +**Champs en conflit avec des arguments (#OverlappingFieldsCanBeMergedRule)** ```graphql # Different arguments might lead to different data, diff --git a/website/pages/fr/substreams.mdx b/website/pages/fr/substreams.mdx index 3911bdf4273d..ff1e1abf13e0 100644 --- a/website/pages/fr/substreams.mdx +++ b/website/pages/fr/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams est une nouvelle technologie créée par les développeurs principaux du protocole The Graph. Elle est conçue pour permettre une consommation et un traitement extrêmement rapides des données indexées de la blockchain. Actuellement en version bêta ouverte, substream est disponible pour les tests et le développement sur plusieurs blockchains. +![Substreams Logo](/img/substreams-logo.png) -N'hésitez pas à consulter [la documentation de Substreams](https://substreams.streamingfast.io/) pour en savoir plus et pour commencer à construire des Substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph).
+ +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Démarrage + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/fr/sunrise.mdx b/website/pages/fr/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/fr/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. 
All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? 
+ +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? 
+ +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/fr/tokenomics.mdx b/website/pages/fr/tokenomics.mdx index 949796a99983..adade52a8925 100644 --- a/website/pages/fr/tokenomics.mdx +++ b/website/pages/fr/tokenomics.mdx @@ -7,11 +7,11 @@ description: The Graph Network is incentivized by powerful tokenomics. Here’s - GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -The Graph is a decentralized protocol that enables easy access to blockchain data. +The Graph est un protocole décentralisé qui facilite l'accès aux données de la blockchain. It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. 
These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/ha/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ha/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..bdb0053bdc70 100644 --- a/website/pages/ha/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ha/arbitrum/l2-transfer-tools-faq.mdx @@ -2,281 +2,359 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. -## Subgraph Transfer +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### Me zai faru idan ban gama canja wuri na ba a cikin kwanaki bakwai? + +I- Kayan aikin Canja wurin L2 suna amfani da yan qasar inji na Arbitrum don aika saƙo daga L1 zuwa a alama, duk da gada na Arbitrum a GRT. Kuna iya karanta ƙara game da tikitin sake gwadawa a cikin \[Arbitrum docs (https://docs.arbitrum.io/arbos/11-to-12-messaging). 
+ +Lokacin da kuka canja wurin dukiyan ku (subgraph, stake, delegation or curation) zuwa L2, Ana aika sako ta gadar Arbitrum GRT wanda ke halitta da tikitin sake gwadawa a L2. Kayan aikin canja wuri ya haɗa da wasu darajar ETH a cikin ma'amala, wanda ake amfani dashi 1) biya don halitta tikitin da 2) biya gas don aiwatar da tikitin a L2. Duk da Haka, saboda farashin gas na iya bambanta a cikin lokacin har sai tikitin ya shirya don aiwatar wa a L2, mai yiyuwa ne wannan yunƙurin aiwatar da kai ya gaza.- Lokacin da wannan ya faru, gada na Arbitrum zai ci gaba da tikitin sake gwadawa da za'a iya dawowa har zuwa kwanaki bakwai, kuma kowa zai iya dawo da "fansa" tikitin (wanda ke buƙatar walat tare da wasu gada na ETH zuwa Arbitrum). + +Wannan shine abin da muke kira matakin "Tabbatar" a cikin duk kayan aikin canja wuri - zai gudu ta atomatik a mafi yawan lokuta, kamar yadda aiwatar ta atomatik ya fi yawan nasara, amma yana da mahimmanci ku sake dubawa don tabbatar da ta wuce.Idan ba ta yi nasara ba kuma ba a sami nasara sake gwadawa cikin kwanaki bakwai ba, gada Arbitrum za ta watsar da tikitin, kuma kadarorin ku (Subgraph, gungumen azaba, Delegation or curation) za su ɓace kuma ba za a iya dawo dasu baThe Graph cibiya devs suna da tsarin sa ido don gano waɗannan yanayi kuma suyi ƙoƙarin fansar tikitin kafin lokaci ya kure, amma yana da alhakin ku don tabbatar da an cikakken canja wurin ku cikin lokaci.Idan kuna fuskantar matsala don tabbatar da cinikin ku, da fatan za a tuntuɓi ta amfani da \[this form (https://noteforms.com/forms/notionform-12-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) kuma core devs za su kasance a can suna taimaka muku. -## How do I transfer my subgraph? +### Na fara kawo delegation/gargadi/curation da na samu lambar sabon hutu, kuma ban sani me yake nema da shi zuwa L2, me ya sa na iya tabbatar da cewa an gano da shi ne aiki ne? -To transfer your subgraph, you will need to complete the following steps: +Idan ba ka ga talauci akan damuwan da kake bukatar tabbata ga niyyar tabbatar da shi, to a yayin da ya yi tattalin hutu damuwan ya samu wutar L2 da ba a bukatar tabbata aikin ba. Idan kuma ku duba, za ku iya duba idan tattalin delegation, gargadi, curation na dauka a Arbitrum One ta nuni. -1. Initiate the transfer on Ethereum mainnet +Idan akwai lamba na damuwan L1 (wanda za ku iya samu ta bayyana damuwa a lambar damuwa na kwanan damuwa a kofar damuwa na wallet), za ku iya tabbatar da cewa "retryable ticket" da ya dauki bayanin zuwa L2 ya samu sabon hutu a nan: https://retryable-dashboard.arbitrum.io/ - idan auto-redeem ba ya daina, za ku iya kawo lambar damuwa a nan da kuma dauko wallet na ku da aka yi redeem. Kuna da damuwa cewa matakin abubuwan da suka sauko, mai raba suna ganin su gudu, kuma za su iya dauki su kafin su gudu. + +## Canja wurin Subgraph + +### Ta yaya zan canja wurin subgraph? + + + +Don canja wurin subgraph ku, kuna buƙatar cikakken matakai masu zuwa: + +1. Fara canja wuri akan Ethereum mainnet 2. Wait 20 minutes for confirmation -3. Confirm subgraph transfer on Arbitrum\* +3. Tabbatar da canja wurin subgraph akan Arbitrum -4. Finish publishing subgraph on Arbitrum +4. Gama da bugu subgraph akan Arbitrum -5. Update Query URL (recommended) +5. Sabunta URL ɗin tambaya (an shawarta) -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +\*Lura cewa dole ne ka tabbatar da canja wurin a cikin kwanaki bakwai in ba haka ba za a iya rasa subgraph ku.A mafi yawan lokuta, wannan matakin zai gudu ta atomatik, amma ana iya buƙatar tabbatarwa ta hannu idan an sami hauhawar farashin gas akan Arbitrum.Idan akwai wasu batutuwa yayin wannan aikin, za a sami albarkatu don taimakawa: tuntuɓar tallafi a support@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). -## Where should I initiate my transfer from? +### Daga ina zan fara canja wuri na? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Kuna iya fara canja wurin ku daga wurin [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) ko kowane subgraph cikakken bayani. Danna maballin "Transfer Subgraph" a cikin cikakken bayanin subgraph don fara canja wuri. -## How long do I need to wait until my subgraph is transferred +### Har yaushe zan jira har sai an canjawa wurin subgraph dina -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +Lokacin canja wuri yana ɗaukar kusan minti ashirin. Gadar Arbitrum tana aiki a bango don cikakken canja wurin gadan ta atomatik. A wasu lokuta, farashin gas na iya ƙaruwa kuma kuna buƙatar sake tabbatar da ciniki. -## Will my subgraph still be discoverable after I transfer it to L2? +### Shin har yanzu za'a iya gano subgraph na bayan na canja wurin shi zuwa L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Subgraph ɗinku za a iya gano shi kawai akan hanyar cibiyar sadarwar da aka buga zuwa. Alal misali, idan ka subgraph ne a kan Arbitrum Daya, to za ka iya kawai samun shi a Explorer a kan Arbitrum One kuma ba za su iya samun shi a kan Ethereum. Don Allah a fatan za a tabbatar cewa kana da Arbitrum Daya da aka zaɓa a cikin cibiyar sadarwa a saman shafin don tabbatar da cewa kana kan hanyar cibiyar sadarwa daidai. Bayan canja wuri, subgraph L1 zai bayyana kamar yadda aka yanke. -## Does my subgraph need to be published to transfer it? +### Shin yana buƙatar buga subgraph na don canja wurin shi? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. 
+Don cin amfani kayan aikin canja wuri, subgraph naku dole ne a riga an buga shi zuwa Ethereum mainnet kuma dole ne ya sami sigina curation mallakin walat ɗin da ya mallaki subgraph. Idan ba a buga subgraph ku ba, ana ba da shawarar ku kawai ku buga kai tsaye akan Arbitrum Daya- kudaden gas masu alaƙa za su yi ƙasa sosai. Idan kuna son canja wurin subgraph aka buga amma asusun mai shi bai curated wata sigina akansa ba, zaku iya sigina ƙaramin adadin (e.g.1 GRT) daga wannan asusun; tabbatar da zaba sigina "auto-migrating". -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Me zai faru da sigar na Ethereum mainnet akan subgraph bayan na canja wurin zuwa Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +Bayan canja wurin subgraph zuwa Arbitrum, sigar mainnet Ethereum za ta ƙare. Muna ba da shawarar ku sabunta URL ɗin tambayar ku a cikin awanni arba'in da takwas. Duk da Haka, akwai lokacin alheri a wurin da ke kiyaye mainnet URL ɗin ku don a iya sabunta kowane tallafin dapp na ɓangare na uku. -## After I transfer, do I also need to re-publish on Arbitrum? +### Bayan na canja wurin, shin ina kuma buƙatar sake bugawa akan Arbitrum? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Bayan taga canja wurin minti ashirin, kuna buƙatar tabbatar da canja wuri tare da ma'amala a cikin UI don gama canja wurin, amma kayan aikin canja wuri zai shiryar ku ta wannan. Za a ci gaba da samun goyan bayan ƙarshen L1 ɗinku yayin taga canja wuri da lokacin alheri bayan. Ana ƙarfafa ka sabunta ƙarshen ƙarshen lokacin da ya dace da kai. -## Will there be a down-time to my endpoint while re-publishing? +### Shin za a sami ƙarancin lokaci zuwa ƙarshen ƙarshena yayin sake bugawa? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Shin bugu da sigar iri ɗaya ne akan L2 Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Tabbatar da zaɓin Arbitrum Days azaman hanyar cibiyar sadarwar ku da aka buga lokacin bugawa a Studio Subgraph. A cikin Studio, sabon wurin ƙarshe zai kasance wanda ke nuna sabon sigar subgraph. -## Will my subgraph's curation move with my subgraph? +### Shin subgraph na curation zai motsa da subgraph nawa? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. 
All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Idan kun zaba sigina ƙaura ta atomatik, 100% na curation ku zai matsa tare da subgraph zuwa Arbitrum Daya. Duk sigina curation na subgraph za a canja shi zuwa GRT a lokacin canja wuri, kuma GRT ɗin da ke daidai da sigina curation ɗin ku za a yi amfani da shi don mint sigina akan subgraph L2. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Sauran Curators na iya zaɓar ko za su janye sulusi da murabba'i na GRT, ko kuma su tura shi zuwa L2 zuwa sigina mint akan wannan subgraph. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Zan iya matsar da subgraph na zuwa Ethereum mainnet bayan na canja wurin? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Da zarar an canjawa wuri, za a soke sigar subgraph ku na Ethereum mainnet. Idan kuna son komawa zuwa mainnet, kuna buƙatar sake aiki da buga baya zuwa mainnet. Duk da Haka, komawa zuwa Ethereum mainnet yana da ƙarfi sosai saboda za a rarraba ladan indexing gaba ɗaya akan Arbitrum Daya. -## Why do I need bridged ETH to complete my transfer? +### Me yasa nake buƙatar gada ETH don cikakken canja wuri na? -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +Ana biyan kuɗin gas akan Arbitrum Daya ta hanyar amfani da gada ETH (i.e. watau ETH wanda aka gada haɗa zuwa Arbitrum One) Duk da haka, biyan kudi gas ya ragu sosai idan aka kwatanta da Ethereum mainnet. -## Curation Signal +## Delegation -## How do I transfer my curation? +### Ta yaya zan canja wurin delegation na? -To transfer your curation, you will need to complete the following steps: + -1. Initiate signal transfer on Ethereum mainnet +Don canja wurin delegation ku, kuna buƙatar cikakken matakai masu zuwa: -2. Specify an L2 Curator address\* +1. Fara canja wurin delegation akan Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Tabbatar da canja wurin delegation akan Arbitrum -3. Wait 20 minutes for confirmation +\*\*\*\*Dole ne ku tabbatar da ma'amala don cikakken canja wuri delegation akan Arbitrum. Dole ne a cikakken wannan matakin a cikin kwanaki bakwai ko kuma a rasa delegation.A mafi yawan lokuta, wannan matakin zai gudu ta atomatik, amma ana yin buƙatar tabbatarwa ta hannu idan akwai tashin farashin gas akan Arbitrum.Idan akwai wasu batutuwa yayin wannan tsari, za a sami albarkatu don taimakawa: tuntuɓar tallafi a tallafi@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). -\*If necessary - i.e. you are using a contract address. +### Menene zai faru da lada na idan na fara canja wuri tare da buɗe kasafi akan Ethereum mainnet? -## How will I know if the subgraph I curated has moved to L2? +Idan indexer wanda kuke delegating yana ci gaba da aiki akan L1, lokacin da kuka canja wurin zuwa Arbitrum zaku rasa duk wani ladan delegation daga buɗe kasafi akan Ethereum mainnet. Wannan yana nufin cewa za ku rasa lada daga, a mafi yawan, kwanakin ashirin da takwas na ƙarshe. 
Idan kun sanya lokacin canja wuri daidai bayan indexer ya rufe kasafi za ku iya tabbatar da wannan shine mafi ƙarancin adadin da zai yiwu. Idan kuna da tashar sadarwa tare da indexer(s), yi la'akari da tattauna da su don nemo mafi kyau lokacin canja wurin ku. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Menene zai faru idan Indexer da nake delegate a halin yanzu baya kan Arbitrum One? -## What if I do not wish to move my curation to L2? +Za a kunna kayan aikin canja wurin L2 ne kawai idan Indexer da kuka delegated ya canjawa wuri nasu stake zuwa Arbitrum. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Shin Delegators suna da zaɓi don delegate zuwa wani Indexer? -## How do I know my curation successfully transferred? +Idan kuna son delegate zuwa wani Indexer, zaku iya canja wurin zuwa mai indexer iri ɗaya akan Arbitrum, sannan undelegate kuma ku jira lokacin narkewa. Bayan wannan, zaku iya zaɓar wani Indexer mai aiki don delegate zuwa. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +### Idan na kasa samun Indexer da nake delegating akan L2 fa? -## Can I transfer my curation on more than one subgraph at a time? +Kayan aikin Canja wurin L2 zai gani Indexer ta atomatik wanda kuka delegated a baya. -There is no bulk transfer option at this time. +### Shin zan iya haɗawa da wasa ko 'baza' delegation a kan sabo ko da yawa Indexers maimakon Indexers da ta gaba? -## Indexer Stake +Kayan aikin canja wurin L2 koyaushe zai motsa delegation ku zuwa Indexer iri ɗaya da kuka delegated a baya. Da zarar kun ƙoma zuwa L2, zaku iya undelegate, jira lokacin narkewa, sannan ku yanke shawara idan kuna son raba delegation ku. -## How do I transfer my stake to Arbitrum? +### Shin ina batun lokacin kwantar da hankali ko zan iya janyewa nan da nan bayan amfani da kayan aikin canja wuri delegation L2? -To transfer your stake, you will need to complete the following steps: +Kayan aikin canja wuri yana ba ku damar matsawa zuwa L2 nan da nan. Idan kuna son undelegate za ku jira lokacin narkewa. Duk da Haka, idan Indexer ya canjawa wuri stake din su zuwa L2, zaku iya janye kan Ethereum mainnet nan da nan. -1. Initiate stake transfer on Ethereum mainnet +### Shin za a iya yin barnatar tasiri ga lada na idan ban canja wurin delegation na ba? -2. Wait 20 minutes for confirmation +Ana sa ran cewa duk haɗin sa hannu cibiyar sadarwa zai matsa zuwa Arbitrum One a nan gaba. -3. Confirm stake transfer on Arbitrum +### Tsawon wane lokaci ake ɗauka kafin a cikakken canja wurin delegation na zuwa L2? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Ana buƙatar tabbatarwa na minti ashirin don canja wurin delegation. 
Lura cewa bayan lokacin minti ashirin, dole ne ku dawo kuma ku cikakken mataki na uku na tsarin canja wuri a cikin kwanaki bakwai.Idan kun kasa yin wannan, delegation ku na iya ɓacewa. Lura cewa a mafi yawan lokuta kayan aikin canja wuri zai cikakken muku wannan matakin ta atomatik.Idan an kaza ƙoƙarin yin-kai, kuna buƙatar cikakken shi da hannu. Idan wata matsala ta taso yayin wannan aikin, kada ku damu, za mu kasance a nan don taimakawa: tuntuɓe mu a support@thegraph.com ko a kunne.[Discord] \(https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Zan iya canja wurin delegation ta idan ina amfani da kwangila na vesting GRT/walat kulle alama? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Ee! Tsarin ya ɗan bambanta saboda kwangila na vesting ba zai iya tura ETH da ake buƙata don biyan L2 gas ba, don haka kuna buƙatar saka shi tukuna. Idan kwangila ku na vesting ba ta cika ba, za ku fara kwangila akan L2 vesting kuma kawai za ku iya canja wurin delegation zuwa wannan kwangila hannun L2. UI akan Explorer zai iya shiryar ku ta wannan tsari lokacin da kuka haɗa zuwa Explorer ta amfani da walat ɗin kulle vesting. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## How much time do I have to confirm my stake transfer to Arbitrum? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### Akwai wani harajin delegation? -## What if I have open allocations? +A'a. Ana ba da alamun da aka karɓa akan L2 zuwa ajali Indexer a madadin ajali Delegator ba tare da caji harajin wakilai ba. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +### Will my unrealized rewards be transferred when I transfer my delegation? -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +​Yes! 
The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## How long will it take to transfer my stake? +### Is moving delegations to L2 mandatory? Is there a deadline? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## Do I have to index on Arbitrum before I transfer my stake? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## Can Delegators move their delegation before I move my indexing stake? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### I don't see a button to transfer my delegation. Why is that? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. 
If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -## How do I transfer my delegation? +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -To transfer your delegation, you will need to complete the following steps: +## Curation Signal + +### Ta yaya zan canja wurin curation na? + +Fara canja wurin sigina akan Ethereum mainnet: + +1. Fara canja wurin sigina akan Ethereum mainnet + +2. Saka adireshin L2 Curator + +3. Wait 20 minutes for confirmation -1. Initiate delegation transfer on Ethereum mainnet +\*Idan ya wajibi-i.e. kana amfani da adireshin kwangila. + +### Ta yaya zan san idan subgraph da na curated ya koma L2? + +Lokacin duba cikakken bayanin shafi na subgraph, banner zai sanar da kai cewa an canja wurin wannan subgraph. Kuna iya bin saƙon don canja wurin curation ku. Hakanan zaka iya samun wannan bayanin na subgraph akan cikakken subgraph na kowane da ya motsa. + +### Idan bana so in matsar da curation na zuwa L2 fa? + +Lokacin da subgraph ya ƙare kuna da zazi don janye sigina ku. Kamar wancan, idan subgraph ya koma L2, zaku iya zaɓa janye sigina ku a cikin Ethereum mainnet ko aika sigina zuwa L2. + +### Ta yaya zan san curation na cikin nasarar canjawa wuri? + +Cikakkun bayani na sigina za a sami ta hanyar Explorer kamar minti ashirin bayan an qaddamar da kayan aikin canja wurin L2. + +### Zan iya canja wurin curation na akan fiye subgraph ɗaya a lokaci guda? + +Babu zaɓin canja wuri mai yawa a wannan lokacin. + +## Indexer gungumen azaba + +### Ta yaya zan canja wuri gugumen azaba na zuwa Arbitrum? + +> Disclaimer: Idan a halin yanzu kuna kwance kowane yanki na GRT ɗinku akan Indexer ɗinku, ba za ku iya amfani da Kayan aikin Canja wurin L2 ba. + + + +Don canja wurin stake, kuna buƙatar cikakken matakai masu zuwa: + +1. Fara canja wurin stake akan Ethereum mainnet 2. Wait 20 minutes for confirmation -3. Confirm delegation transfer on Arbitrum +3. Tabbatar da canja wurin stake akan Arbitrum -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Lura cewa dole ne ku tabbatar da canja wurin a cikin kwanaki bakwai in ba haka ba na iya rasa stake. A mafi yawan lokuta, wannan matakin zai gudi ta atomatik, amma ana iya buƙatar tabbatarwa ta hannu idan akwai tashin farashin gas akan Arbitrum. Idan akwai wasu batutuwa yayin wannan tsari, za a sami albarkatu don taimakawa: tuntuɓar tallafi a tallafi@thegraph.com or on [Discord](https://discord.gg/vtvv7FP). -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +### Duk stake na za a canja wuri? -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. 
This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +Kuna iya zaɓa nawa na stake don canja wurin. Idan ka zaɓa canja wurin duk stake din ku a lokaci ɗaya, kuna buƙatar rufe duk wani buɗaɗɗen kasafi da farko. -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +Idan kuna shirin canja wurin sassa stake din ku akan ma'amaloli da yawa, dole ne ku saka adireshin mai amfana koyaushe. -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +Dole ne ku cika minimum buƙatun stake akan L2 a karon farko da kuka yi amfani da kayan aikin canja wuri. Dole ne Indexers su aika minimum 100k GRT (lokacin kiran wannan aikin a karon farko). Idan barin wani yanki na stake akan L1, dole ne kuma ya wuce minimum 100k GRT kuma ya isa (tare da delegations ku) don rufe kasafi da kuke buɗe. -## Do Delegators have the option to delegate to another Indexer? +### Yaya tsawon lokaci zan tabbatar da canja wurin stake na zuwa Arbitrum? -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +\*\*\* Dole ne ku tabbatar da ma’amala ku don cikakken canja wurin stake akan Arbitrum. Dole ne a cikakken wannan matakin a cikin kwanaki bakwai ko kuma za a iya stake. -## What if I can't find the Indexer I'm delegating to on L2? +### Idan ina da buɗaɗɗen kasafi? -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +Idan ba ku aika duk stake ba, kayan aikin canja wuri na L2 zai tabbatar da cewa kadan minimum 100k GRT ya rage a cikin Ethereum mainnet kuma ragowar stake din ku da delegation sun isa su rufe duk wani buɗaɗɗen kasafi. Kuna iya buƙatar rufe kasafi buɗewa idan ma'auni na GRT ɗinku bai rufe minimums kasafi buɗewa ba. -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +### Yin amfani da kayan aikin canja wuri, shin ya zama dole a jira kwanaki ashirin da takwas don unstake akan Ethereum mainnet kafin canja wurin? -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +A'a, zaku iya canja wurin stake zuwa L2 nan da nan, babu buƙatar unstake kuma jira kafin amfani da kayan aikin canja wuri. Jiran kwana ashirin da takwas ya shafi kawai idan kuna son janye stake zuwa walat ɗin ku, akan Ethereum mainnet ko L2. -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +### Yaya tsawon lokacin da za a ɗauka don canja wurin stake na? -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +Zai ɗauki kusan minti ashirin don kayan aikin canja wuri na L2 don cikakken canja wurin stake ku. -## Can my rewards be negatively impacted if I do not transfer my delegation? 
+### Shin dole ne in yi index akan Arbitrum kafin in canja wuri stake na? -It is anticipated that all network participation will move to Arbitrum One in the future. +Kuna iya fara canja wurin stake na ku yadda ya kamata kafin kafa indexing, amma ba za ku iya neman kowane lada akan L2 ba har sai kun ware subgraghs akan L2, index su, da ba POIs. -## How long does it take to complete the transfer of my delegation to L2? +### Delegators za su iya motsa delegation sun kafin in motsa indexing stake na? -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +A'a, domin Delegators su canja wurin GRT da aka delegated zuwa Arbitrum, Indexer da suke delegating dole ne yayi aiki akan L2. -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +### Zan iya canja wurin stake na idan ina amfani da kwangila na vesting na GRT/walat ɗin kulle alama? -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +Ee! Tsarin yana ɗa bambanta, saboda kwangila na vesting bai iya tura ETH gaba ɗin da ake buƙata don biyan kuɗin gas na L2 ba, don haka kuna buƙatar saka shi tukuna. Idan kwangila na vesting ba ta cika ba, za ku kuma fara kwangila na vesting sa hannun takwaran vesting akan L2 kuma kawai za ku iya canja wurin stake zuwa wannan kwangila sanya hannun L2 vesting. UI akan Explorer na iya shiryar ku ta wannan tsari lokacin da kuka haɗa zuwa Explorer ta amfani da walat ɗin kulle vesting. -## Is there any delegation tax? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -## Vesting Contract Transfer +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -## How do I transfer my vesting contract? +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -To transfer your vesting, you will need to complete the following steps: +## Canja wurin Kwangila Vesting -1. Initiate the vesting transfer on Ethereum mainnet +### Ta yaya zan canza wurin kwangila na vesting na? + +Don canja wurin vesting ku, kuna buƙatar kammala matakai masu zuwa: + +1. Fara da vesting na canja wuri Ethereum mainnet 2. Wait 20 minutes for confirmation -3. 
Confirm vesting transfer on Arbitrum +3. Tabbatar da vesting na canja wuri a kan Arbitrum + +### Ta yaya zan canza wurin kwangila na vesting na idan an ba ni wani bangare kawai? + + -## How do I transfer my vesting contract if I am only partially vested? +1. Sanya wasu ETH a cikin kwangila kayan aikin canja wuri (UI zai iya taimaka wajen adadin kuɗi) -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +2. Aika wasu GRT da aka kulle ta hanyar kwangila kayan aikin canja wuri, zuwa L2 don fara kulle L2 a vesting. Wannan kuma zai saita adireshin masu cin amfani L2. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +3. Aika stake / delegation zuwa L2 ta hanyar "kulle" ayyuka kayan aikin canja wuri a cikin kwangila Staking na L1. -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +4. Jare duk saura ETH daga kwangila kayan aikin canja wuri -4. Withdraw any remaining ETH from the transfer tool contract +### Ta yaya zan canja wurin kwangila na vesting idan na cika? -## How do I transfer my vesting contract if I am fully vested? + -For those that are fully vested, the process is similar: +Domin waɗan ke cikakken vested, tsarin yana kama da haka: -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +1. Sanya wasu ETH a cikin kwangila kayan aikin canja wuri (UI zai iya taimaka wajen adadin kuɗi) -2. Set your L2 address with a call to the transfer tool contract +2. Saita adireshin L2 ɗinku tare da kira zuwa kwangila kayan aikin canja wuri -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +3. Aika stake / delegation ku zuwa L2 ta hanyar "kulle" ayyuka kayan aikin canja wuri a cikin kwangila Staking na L1. -4. Withdraw any remaining ETH from the transfer tool contract +4. Jare duk saura ETH daga kwangila kayan aikin canja wuri -## Can I transfer my vesting contract to Arbitrum? +### Zan iya canza wurin kwangila na vesting nawa zuwa Arbitrum? -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +Kuna iya canja wurin ma'auni na GRT kwangila na vesting ku zuwa kwangilar a cikin L2. Wannan abin da ake bukata ne don canja wurin stake ko delegation daga kwangila na vesting zuwa L2. Dole ne kwangila na vesting ta riƙe adadin GRT mara sifili (zaku iya canja wurin ƙaramin adadin kamar GRT daya zuwa gare shi idan an buƙata). -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +Lokacin da ka canja wurin GRT daga kwangila na vesting na L1 zuwa L2, za ka iya zaɓar adadin da za ka aika kuma za ka iya yin haka sau da yawa yadda kake so. Za a fara vesting da kwangilar na L2 a farkon lokacin da kuka canja wurin GRT. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +Ana yin canja wurin ta amfani da Kayan aikin Canja wurin da za a iya gani akan Explorer profile lokacin da kuka haɗa tare da asusun kwangila na vesting. 
-Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Don Allah za a lura cewa ba za ku iya sakin / janye GRT daga kwangila na vesting na L2 ba har zuwa ƙarshen lokacin vesting ku da kwangila ta cika. Idan kuna buƙatar sakin GRT kafin wannan lokacin, zaku iya canja wurin GRT zuwa kwangila na vesting ta L1 ta amfani da wani kayan aikin canja wuri wanda yake akwai don wannan dalili. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Idan baku canza ma'aunin kwangila na vesting ba zuwa L2, kuma kwangila ku na vesting da cikakkiyar vested, bai kamata ku canza wurin kwangila na vesting ku zuwa L2 ba. Madadin haka, zaku iya amfani da kayan aikin canja wuri don saita adireshin L2 walat, da kuma canja wurin stake ko delegation zuwa wannan walat na yau da kullun akan L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### Ina amfani da kwangila na vesting akan mainnet don stake. Zan iya canja wuri stake na zuwa Arbitrum? -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +Ee, amma idan har yanzu kwangila ku tana ci gaba, zaku iya canja wurin stake domin ya zama mallakin kwangila na vesting ku na L2. Dole ne ku fara wannan kwangila L2 ta hanyar canja wurin wasu ma'auni na GRT ta amfani da kayan aikin canja wurin kwangila na vesting a kan Explorer. Idan kwangila ku ta cika vested, za ku iya canja wurin stake din ku zuwa kowane adireshi a cikin L2, amma dole ne ku saita shi a gaba kuma ku saka wasu ETH don kayan aikin canja wuri na L2 don biyan gas na L2. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. 
If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. @@ -294,22 +372,40 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi 6. Confirm the balance transfer on L2 -\*If necessary - i.e. you are using a contract address. +\*Idan ya wajibi-i.e. kana amfani da adireshin kwangila. \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. 
+ +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/ha/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ha/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..ebaca7d1a7e6 100644 --- a/website/pages/ha/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ha/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. @@ -74,7 +74,7 @@ If you execute this step, **make sure you proceed until completing step 3 in les After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. 
+Da zarar wannan lokacin jira ya ƙare, Arbitrum zai yi ƙoƙarin aiwatar da canja wurin kai tsaye akan kwangila L2. ![Wait screen](/img/screenshotOfWaitScreenL2.png) @@ -148,7 +148,7 @@ Starting the transfer: After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Da zarar wannan lokacin jira ya ƙare, Arbitrum zai yi ƙoƙarin aiwatar da canja wurin kai tsaye akan kwangila L2. ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) diff --git a/website/pages/ha/billing.mdx b/website/pages/ha/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/ha/billing.mdx +++ b/website/pages/ha/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). 
+ - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/ha/chain-integration-overview.mdx b/website/pages/ha/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/ha/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. 
Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. 
For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ha/cookbook/arweave.mdx b/website/pages/ha/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/ha/cookbook/arweave.mdx +++ b/website/pages/ha/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ha/cookbook/grafting.mdx b/website/pages/ha/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/ha/cookbook/grafting.mdx +++ b/website/pages/ha/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. 
While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/ha/cookbook/near.mdx b/website/pages/ha/cookbook/near.mdx index 879e8e5c15aa..304e1202e278 100644 --- a/website/pages/ha/cookbook/near.mdx +++ b/website/pages/ha/cookbook/near.mdx @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/ha/cookbook/upgrading-a-subgraph.mdx b/website/pages/ha/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/ha/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ha/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ha/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. 
+> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/ha/deploying/hosted-service.mdx b/website/pages/ha/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/ha/deploying/hosted-service.mdx +++ b/website/pages/ha/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ha/deploying/subgraph-studio.mdx b/website/pages/ha/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/ha/deploying/subgraph-studio.mdx +++ b/website/pages/ha/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/ha/developing/creating-a-subgraph.mdx b/website/pages/ha/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/ha/developing/creating-a-subgraph.mdx +++ b/website/pages/ha/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. 
This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. 
This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/ha/developing/developer-faqs.mdx b/website/pages/ha/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/ha/developing/developer-faqs.mdx +++ b/website/pages/ha/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. 
This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/ha/developing/graph-ts/api.mdx b/website/pages/ha/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/ha/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
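For example, here is a minimal sketch of converting between hex strings and `Bytes` using the constructors and conversion helpers documented below (the values are placeholders for illustration):

```typescript
import { Bytes } from '@graphprotocol/graph-ts'

// Parse a hex string into Bytes (the `0x` prefix is optional)
let data = Bytes.fromHexString('0xdeadbeef')

// Convert back to a `0x`-prefixed hex string, e.g. for use as an entity ID
let hex = data.toHex()

// Base58 encoding, as commonly used for IPFS hashes
let b58 = data.toBase58()
```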
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created within a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0, the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0, the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entities derived from the `Holder` entity: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straightforward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ...
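// Finally, persist the updated values back to the store, as described in the two update paths above
transfer.save()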
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding of Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of.
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
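Where a sensible default exists, a natural follow-up to the `try_` pattern above is to substitute a fallback value instead of only logging. A minimal sketch, reusing `Gravity` and `gravatar` from the previous example (the zero-address fallback is an illustrative choice, not prescribed by the contract):

```typescript
import { Address } from '@graphprotocol/graph-ts'

let gravity = Gravity.bind(event.address)
let callResult = gravity.try_gravatarToOwner(gravatar)
// Fall back to the zero address when the call reverts
let owner = callResult.reverted
  ? Address.fromString('0x0000000000000000000000000000000000000000')
  : callResult.value
```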
+
+#### Encoding/Decoding ABI
+
+Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module.
+
+```typescript
+import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts'
+
+let tupleArray: Array<ethereum.Value> = [
+  ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')),
+  ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)),
+]
+
+let tuple = tupleArray as ethereum.Tuple
+
+let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))!
+
+let decoded = ethereum.decode('(address,uint256)', encoded)
+```
+
+For more information:
+
+- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types)
+- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi)
+- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72).
+
+### Logging API
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+```
+
+The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments.
+
+The `log` API includes the following functions:
+
+- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message.
+- `log.info(fmt: string, args: Array<string>): void` - logs an informational message.
+- `log.warning(fmt: string, args: Array<string>): void` - logs a warning.
+- `log.error(fmt: string, args: Array<string>): void` - logs an error message.
+- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph.
+
+The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value, and so on.
+
+```typescript
+log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string'])
+```
+
+#### Logging one or more values
+
+##### Logging a single value
+
+In the example below, the string value "A" is passed into an array to become `['A']` before being logged:
+
+```typescript
+let myValue = 'A'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A"
+  log.info('My value is: {}', [myValue])
+}
+```
+
+##### Logging a single entry from an existing array
+
+In the example below, only the first value of the argument array is logged, despite the array containing three values.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A" (Even though three values are passed to `log.info`)
+  log.info('My value is: {}', myArray)
+}
+```
+
+##### Logging multiple entries from an existing array
+
+Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Logging a specific entry from an existing array
+
+To display a specific value in the array, the indexed value must be provided.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My third value is C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Logging event information
+
+The example below logs the block number, block hash and transaction hash from an event:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+Given an IPFS hash or path, reading a file from IPFS is done as follows:
+
+```typescript
+// Put this inside an event handler in the mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// that include files in directories are also supported
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+let data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { ipfs, JSONValue, Value } from '@graphprotocol/graph-ts'
+
+import { Item } from '../generated/schema'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
+Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.
+
+On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed.
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:
+
+- `crypto.keccak256(input: ByteArray): ByteArray`
+
+### JSON API
+
+```typescript
+import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+```
+
+JSON data can be parsed using the `json` API:
+
+- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence
+- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed
+- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String`
+- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`, it returns an error variant if the parsing failed
+
+The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value:
+
+```typescript
+let value = json.fromBytes(...)
+if (value.kind == JSONValueKind.BOOL) {
+  ...
+}
+```
+
+In addition, there is a method to check if the value is `null`:
+
+- `value.isNull(): boolean`
+
+When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods:
+
+- `value.toBool(): boolean`
+- `value.toI64(): i64`
+- `value.toF64(): f64`
+- `value.toBigInt(): BigInt`
+- `value.toString(): string`
+- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above)
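+
+As a minimal sketch of the typical flow (the helper name and the JSON keys are assumptions for this example): parse `Bytes` with the safe variant, check the result and its kind, then convert individual fields with the methods above.
+
+```typescript
+import { Bytes, json, JSONValueKind, log } from '@graphprotocol/graph-ts'
+
+// Hypothetical helper, e.g. for data returned by `ipfs.cat`
+export function parseMetadata(data: Bytes): void {
+  let result = json.try_fromBytes(data)
+  if (result.isError) {
+    log.warning('Could not parse JSON', [])
+    return
+  }
+
+  let value = result.value
+  if (value.kind != JSONValueKind.OBJECT) {
+    return
+  }
+
+  // Pull fields out of the object and convert them to built-in types
+  let obj = value.toObject()
+  let name = obj.get('name')
+  let decimals = obj.get('decimals')
+
+  if (name && !name.isNull()) {
+    log.info('name: {}', [name.toString()])
+  }
+  if (decimals && !decimals.isNull()) {
+    log.info('decimals: {}', [decimals.toBigInt().toString()])
+  }
+}
+```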
+
+### Type Conversions Reference
+
+| Source(s)            | Destination          | Conversion function          |
+| -------------------- | -------------------- | ---------------------------- |
+| Address              | Bytes                | none                         |
+| Address              | String               | s.toHexString()              |
+| BigDecimal           | String               | s.toString()                 |
+| BigInt               | BigDecimal           | s.toBigDecimal()             |
+| BigInt               | String (hexadecimal) | s.toHexString() or s.toHex() |
+| BigInt               | String (unicode)     | s.toString()                 |
+| BigInt               | i32                  | s.toI32()                    |
+| Boolean              | Boolean              | none                         |
+| Bytes (signed)       | BigInt               | BigInt.fromSignedBytes(s)    |
+| Bytes (unsigned)     | BigInt               | BigInt.fromUnsignedBytes(s)  |
+| Bytes                | String (hexadecimal) | s.toHexString() or s.toHex() |
+| Bytes                | String (unicode)     | s.toString()                 |
+| Bytes                | String (base58)      | s.toBase58()                 |
+| Bytes                | i32                  | s.toI32()                    |
+| Bytes                | u32                  | s.toU32()                    |
+| Bytes                | JSON                 | json.fromBytes(s)            |
+| int8                 | i32                  | none                         |
+| int32                | i32                  | none                         |
+| int32                | BigInt               | BigInt.fromI32(s)            |
+| uint24               | i32                  | none                         |
+| int64 - int256       | BigInt               | none                         |
+| uint32 - uint256     | BigInt               | none                         |
+| JSON                 | boolean              | s.toBool()                   |
+| JSON                 | i64                  | s.toI64()                    |
+| JSON                 | u64                  | s.toU64()                    |
+| JSON                 | f64                  | s.toF64()                    |
+| JSON                 | BigInt               | s.toBigInt()                 |
+| JSON                 | string               | s.toString()                 |
+| JSON                 | Array                | s.toArray()                  |
+| JSON                 | Object               | s.toObject()                 |
+| String               | Address              | Address.fromString(s)        |
+| Bytes                | Address              | Address.fromBytes(s)         |
+| String               | BigInt               | BigInt.fromString(s)         |
+| String               | BigDecimal           | BigDecimal.fromString(s)     |
+| String (hexadecimal) | Bytes                | ByteArray.fromHexString(s)   |
+| String (UTF-8)       | Bytes                | ByteArray.fromUTF8(s)        |
+
+### Data Source Metadata
+
+You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace:
+
+- `dataSource.address(): Address`
+- `dataSource.network(): string`
+- `dataSource.context(): DataSourceContext`
+
+### Entity and DataSourceContext
+
+The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields:
+
+- `setString(key: string, value: string): void`
+- `setI32(key: string, value: i32): void`
+- `setBigInt(key: string, value: BigInt): void`
+- `setBytes(key: string, value: Bytes): void`
+- `setBoolean(key: string, value: bool): void`
+- `setBigDecimal(key: string, value: BigDecimal): void`
+- `getString(key: string): string`
+- `getI32(key: string): i32`
+- `getBigInt(key: string): BigInt`
+- `getBytes(key: string): Bytes`
+- `getBoolean(key: string): boolean`
+- `getBigDecimal(key: string): BigDecimal`
+
+### DataSourceContext in Manifest
+
+The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`.
+
+Here is a YAML example illustrating the usage of various types in the `context` section:
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: ContractName
+    network: mainnet
+    context:
+      bool_example:
+        type: Bool
+        data: true
+      string_example:
+        type: String
+        data: 'hello'
+      int_example:
+        type: Int
+        data: 42
+      int8_example:
+        type: Int8
+        data: 127
+      big_decimal_example:
+        type: BigDecimal
+        data: '10.99'
+      bytes_example:
+        type: Bytes
+        data: '0x68656c6c6f'
+      list_example:
+        type: List
+        data:
+          - type: Int
+            data: 1
+          - type: Int
+            data: 2
+          - type: Int
+            data: 3
+      big_int_example:
+        type: BigInt
+        data: '1000000000000000000000000'
+```
+
+- `Bool`: Specifies a Boolean value (`true` or `false`).
+- `String`: Specifies a String value.
+- `Int`: Specifies a 32-bit integer.
+- `Int8`: Specifies an 8-bit integer.
+- `BigDecimal`: Specifies a decimal number. Must be quoted.
+- `Bytes`: Specifies a hexadecimal string.
+- `List`: Specifies a list of items. Each item needs to specify its type and data.
+- `BigInt`: Specifies a large integer value. Must be quoted due to its large size.
+
+This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs.
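+
+As a hedged sketch of reading these values in a mapping (the handler name and `SomeEvent` class are assumptions, mirroring the logging examples earlier on this page), the context defined above can be accessed through `dataSource.context()`:
+
+```typescript
+import { dataSource, log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  let context = dataSource.context()
+
+  // Keys correspond to the manifest example above
+  let greeting = context.getString('string_example') // 'hello'
+  let bigNumber = context.getBigInt('big_int_example') // 1000000000000000000000000
+
+  log.info('network: {}, data source: {}, greeting: {}, big number: {}', [
+    dataSource.network(),
+    dataSource.address().toHexString(),
+    greeting,
+    bigNumber.toString(),
+  ])
+}
+```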
diff --git a/website/pages/ha/developing/graph-ts/common-issues.mdx b/website/pages/ha/developing/graph-ts/common-issues.mdx
new file mode 100644
index 000000000000..5b99efa8f493
--- /dev/null
+++ b/website/pages/ha/developing/graph-ts/common-issues.mdx
@@ -0,0 +1,8 @@
+---
+title: Common AssemblyScript Issues
+---
+
+There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty; however, being aware of them may help. The following is a non-exhaustive list of these issues:
+
+- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object.
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s).
diff --git a/website/pages/ha/developing/supported-networks.mdx b/website/pages/ha/developing/supported-networks.mdx
index 58ce56345f7c..cd82305bfce2 100644
--- a/website/pages/ha/developing/supported-networks.mdx
+++ b/website/pages/ha/developing/supported-networks.mdx
@@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks'

The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints.

-Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli.
+Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli.

Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs.

@@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su

## Graph Node

-If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration.
+If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration.

Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks.
diff --git a/website/pages/ha/firehose.mdx b/website/pages/ha/firehose.mdx
new file mode 100644
index 000000000000..02f0d63c72db
--- /dev/null
+++ b/website/pages/ha/firehose.mdx
@@ -0,0 +1,22 @@
+---
+title: Firehose
+---
+
+![Firehose Logo](/img/firehose-logo.png)
+
+Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach.
+
+Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer.
+ +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ha/glossary.mdx b/website/pages/ha/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/ha/glossary.mdx +++ b/website/pages/ha/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. 
Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. 
-- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). 
diff --git a/website/pages/ha/graphcast.mdx b/website/pages/ha/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/ha/graphcast.mdx +++ b/website/pages/ha/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/ha/index.json b/website/pages/ha/index.json index 9e28e13d5001..ddbbb68445fe 100644 --- a/website/pages/ha/index.json +++ b/website/pages/ha/index.json @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ha/mips-faqs.mdx b/website/pages/ha/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/ha/mips-faqs.mdx +++ b/website/pages/ha/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
diff --git a/website/pages/ha/network/benefits.mdx b/website/pages/ha/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/ha/network/benefits.mdx +++ b/website/pages/ha/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/ha/network/indexing.mdx b/website/pages/ha/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/ha/network/indexing.mdx +++ b/website/pages/ha/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. 
It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. 
- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more).

-- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**.
-
-- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed.
-
 Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically.
diff --git a/website/pages/ha/new-chain-integration.mdx b/website/pages/ha/new-chain-integration.mdx
index c5934efa6f87..b5492d5061af 100644
--- a/website/pages/ha/new-chain-integration.mdx
+++ b/website/pages/ha/new-chain-integration.mdx
@@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types:

If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing.

-If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node.
+If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then integrating that Firehose with Graph Node. More info below.

-** 1. EVM JSON-RPC**
+**1. EVM JSON-RPC**

If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc).

**2. Firehose**

-For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach.
+For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach.

Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development.
## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/ha/operating-graph-node.mdx b/website/pages/ha/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/ha/operating-graph-node.mdx +++ b/website/pages/ha/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes diff --git a/website/pages/ha/publishing/publishing-a-subgraph.mdx b/website/pages/ha/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/ha/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ha/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). 
diff --git a/website/pages/ha/querying/querying-the-hosted-service.mdx b/website/pages/ha/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/ha/querying/querying-the-hosted-service.mdx +++ b/website/pages/ha/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/ha/querying/querying-with-python.mdx b/website/pages/ha/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/ha/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/ha/quick-start.mdx b/website/pages/ha/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/ha/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
Publish Your Subgraph to The Graph’s Decentralized Network
+
+Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network.
+
+In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right.
+
+Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq).
+
+Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT.
+
+At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible.
+
+To save on gas costs, you can curate your subgraph in the same transaction that you published it by selecting this button when you publish your subgraph to The Graph’s decentralized network:
+
+![Subgraph publish](/img/publish-and-signal-tx.png)
+
+## 8. Query your Subgraph
+
+Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button.
+
+If you don't have your API key, you can query your subgraph from your dapp via the free, rate-limited temporary query URL, which can be used for development and staging.
+
+For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/).
diff --git a/website/pages/ha/substreams.mdx b/website/pages/ha/substreams.mdx
new file mode 100644
index 000000000000..2a06de8ac868
--- /dev/null
+++ b/website/pages/ha/substreams.mdx
@@ -0,0 +1,44 @@
+---
+title: Substreams
+---
+
+![Substreams Logo](/img/substreams-logo.png)
+
+Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion.
+
+With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph).
+
+## How Substreams Works in 4 Steps
+
+1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash).
+
+```rust
+fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> {
+    let header = blk.header.as_ref().unwrap();
+
+    Ok(MyBlock {
+        number: blk.number,
+        hash: Hex::encode(&blk.hash),
+        parent_hash: Hex::encode(&header.parent_hash),
+    })
+}
+```
+
+2. **You wrap up your Rust program into a WASM module just by running a single CLI command.**
+
+3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied.
+
+4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example).
+ +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ha/sunrise.mdx b/website/pages/ha/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ha/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. 
+ +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. 
+
+The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, and at serving as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published.
+
+### What chains will the upgrade Indexer support?
+
+The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced.
+
+Find a comprehensive list of supported chains [here](/developing/supported-networks/).
+
+### Why is Edge & Node running the upgrade Indexer?
+
+Edge & Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs.
+
+Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that the upgrade Indexer is operated largely as a public service, supporting new subgraphs and additional chains that lack indexing rewards before they are approved by The Graph Council.
+
+### What does this mean for existing Indexers?
+
+Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain.
+
+The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network.
+
+### What does this mean for Delegators?
+
+The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity.
+
+### Will the upgrade Indexer compete with existing Indexers for rewards?
+
+No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards.
+
+It operates on an “as needed” basis and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for the respective chains and subgraphs.
+
+### How will this affect subgraph developers?
+
+Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing.
+
+### How does this benefit data consumers?
+
+The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network.
+
+### How will the upgrade Indexer price queries?
+
+The upgrade Indexer will price queries at the market rate so as not to influence the query fee market.
+
+### What are the criteria for the upgrade Indexer to stop supporting a subgraph?
+
+The upgrade Indexer will serve a subgraph until at least 3 other Indexers sufficiently and consistently serve the queries made to it.
+
+Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days.
+
+Other Indexers are incentivized to support subgraphs with ongoing query volume. Since the upgrade Indexer keeps a small allocation size and other Indexers are chosen for queries ahead of it, the query volume served by the upgrade Indexer should trend towards zero.
diff --git a/website/pages/ha/tokenomics.mdx b/website/pages/ha/tokenomics.mdx
index 949796a99983..b87200dc6b04 100644
--- a/website/pages/ha/tokenomics.mdx
+++ b/website/pages/ha/tokenomics.mdx
@@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat

 It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively.

-By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today.
+By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today.

 The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph!

@@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so

 Indexers can earn GRT rewards in two ways:

-1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers.
+1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)); a purely illustrative sketch of this kind of curve follows after this list.

 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately.
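+
+To build intuition only: the sketch below is **not** the actual GIP-0051 rebate function or its parameters (see the GIP linked above for the real definition). It merely illustrates the general shape of a saturating exponential curve, where the rebate approaches the full query fees as an Indexer's allocated stake grows relative to the fees collected. The constant `K` is made up for the example.
+
+```rust
+/// Purely illustrative: a saturating exponential curve of rebate vs. stake.
+/// This is NOT the protocol's actual formula or parameters.
+fn illustrative_rebate(query_fees_grt: f64, allocated_stake_grt: f64) -> f64 {
+    const K: f64 = 1.0; // hypothetical steepness parameter
+    if query_fees_grt == 0.0 {
+        return 0.0;
+    }
+    query_fees_grt * (1.0 - (-(K * allocated_stake_grt / query_fees_grt)).exp())
+}
+
+fn main() {
+    // Ample stake behind the allocation: the rebate is close to the full fees.
+    println!("{:.2}", illustrative_rebate(100.0, 10_000.0)); // ≈ 100.00
+    // Little stake behind the allocation: the rebate is much smaller.
+    println!("{:.2}", illustrative_rebate(100.0, 50.0)); // ≈ 39.35
+}
+```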
diff --git a/website/pages/ha/translations.ts b/website/pages/ha/translations.ts new file mode 100644 index 000000000000..340f7eeea339 --- /dev/null +++ b/website/pages/ha/translations.ts @@ -0,0 +1,13 @@ +import supportedNetworks from './developing/supported-networks.json' +import docsearch from './docsearch.json' +import global from './global.json' +import index from './index.json' + +const translations = { + global, + index, + docsearch, + supportedNetworks, +} + +export default translations diff --git a/website/pages/hi/arbitrum/arbitrum-faq.mdx b/website/pages/hi/arbitrum/arbitrum-faq.mdx index c810787cf922..b15e01146e35 100644 --- a/website/pages/hi/arbitrum/arbitrum-faq.mdx +++ b/website/pages/hi/arbitrum/arbitrum-faq.mdx @@ -1,22 +1,22 @@ --- -title: आर्बिट्रम अक्सर पूछे जाने वाले प्रश्न +title: आर्बिट्रम एफएक्यू --- -यदि आप आर्बिट्रम बिलिंग एफएक्यू पर जाना चाहते हैं तो [here] \(#बिलिंग-ऑन-आर्बिट्रम-एफएक्यू) पर क्लिक करें। +Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. -## ग्राफ़ L2 समाधान क्यों लागू कर रहा है? +## ग्राफ़ एक L2 समाधान क्यों लागू कर रहा है? L2 पर ग्राफ़ को स्केल करके, नेटवर्क प्रतिभागी उम्मीद कर सकते हैं: -- गैस शुल्क पर 26 गुना से अधिक की बचत +- Upwards of 26x savings on gas fees -- तेज़ लेनदेन गति +- Faster transaction speed -- सुरक्षा एथेरियम से विरासत में मिली है +- Security inherited from Ethereum -प्रोटोकॉल स्मार्ट कॉन्ट्रैक्ट्स को L2 पर स्केल करने से नेटवर्क के सहभागियों को गैस शुल्क में कम खर्च पर अधिक बार इंटरैक्ट करने की अनुमति मिलती है। उदाहरण के लिए, इंडेक्सर्स अधिक संख्या के सबग्राफ को अधिक बार इंडेक्स करने के लिए आवंटन खोल सकते हैं और बंद कर सकते हैं, डेवलपर्स आसानी से अधिक सुबग्राफ को डिप्लॉय और अपडेट कर सकते हैं, डेलीगेटर्स अधिक बार GRT का डेलीगेशन कर सकते हैं, और क्यूरेटर्स एक बड़ी संख्या के सबग्राफ को सिग्नल जोड़ सकते हैं या हटा सकते हैं - जो पहले गैस के कारण नियमित रूप से करने के लिए बहुत महंगे माने जाते थे। +Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. -ग्राफ समुदाय ने पिछले साल [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) चर्चा के नतीजे के बाद आर्बिट्रम के साथ आगे बढ़ने का फैसला किया। +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. ## L2 पर ग्राफ़ का उपयोग करने के लिए मुझे क्या करना होगा? @@ -29,7 +29,7 @@ L2 पर ग्राफ़ को स्केल करके, नेटव L2 पर द ग्राफ़ का उपयोग करने का लाभ उठाने के लिए, इस ड्रॉपडाउन स्विचर का उपयोग जंजीरों के बीच टॉगल करने के लिए करें। -![आर्बिट्रम को टॉगल करने के लिए ड्रॉपडाउन स्विचर](/img/arbitrum-screenshot-toggle.png) +![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) ## सबग्राफ डेवलपर, डेटा उपभोक्ता, इंडेक्सर, क्यूरेटर, या डेलिगेटर के रूप में, अब मुझे क्या करने की आवश्यकता है? 
@@ -37,11 +37,11 @@ L2 पर द ग्राफ़ का उपयोग करने का ल कोर डेवलपर टीमें एल2 ट्रांसफर टूल बनाने के लिए काम कर रही हैं, जिससे डेलिगेशन, क्यूरेशन और सबग्राफ को आर्बिट्रम में स्थानांतरित करना काफी आसान हो जाएगा। नेटवर्क प्रतिभागी उम्मीद कर सकते हैं कि 2023 की गर्मियों तक एल2 ट्रांसफर टूल उपलब्ध हो जाएंगे। -10 अप्रैल, 2023 तक, सभी अनुक्रमण पुरस्कारों का 5% आर्बिट्रम पर डाला जा रहा है। जैसे-जैसे नेटवर्क भागीदारी बढ़ती है, और जैसे ही परिषद इसे मंजूरी देती है, अनुक्रमण पुरस्कार धीरे-धीरे एथेरियम से आर्बिट्रम में स्थानांतरित हो जाएगा, अंततः पूरी तरह से आर्बिट्रम में स्थानांतरित हो जाएगा। +10 अप्रैल, 2023 तक, सभी इंडेक्सिंग पुरस्कारों का 5% आर्बिट्रम पर खनन किया जा रहा है। जैसे-जैसे नेटवर्क की भागीदारी बढ़ती है, और जैसे ही परिषद इसे मंजूरी देती है, अनुक्रमण पुरस्कार धीरे-धीरे एथेरियम से आर्बिट्रम में स्थानांतरित हो जाएंगे, अंततः पूरी तरह से आर्बिट्रम में चले जाएंगे। ## यदि मैं L2 पर नेटवर्क में भाग लेना चाहता हूँ, तो मुझे क्या करना चाहिए? -कृपया L2 पर [नेटवर्क का परीक्षण करने में](https://testnet.thegraph.com/explorer) मदद करें और [Discord](https://discord.gg/graphprotocol) में अपने अनुभव के बारे में फीडबैक दें। +Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). ## क्या नेटवर्क को L2 तक स्केल करने से जुड़े कोई जोखिम हैं? @@ -55,11 +55,11 @@ L2 पर द ग्राफ़ का उपयोग करने का ल ## क्या जीआरटी के पास आर्बिट्रम पर एक नया स्मार्ट अनुबंध होगा? -हां, जीआरटी के पास एक अतिरिक्त [आर्बिट्रम पर स्मार्ट अनुबंध](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) है। हालाँकि, एथेरियम मेननेट [जीआरटी अनुबंध](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) चालू रहेगा। +Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. ## आर्बिट्रम एफएक्यू पर बिलिंग -## मुझे अपने बिलिंग शेष में जीआरटी के बारे में क्या करने की आवश्यकता है? +## मुझे अपने बिलिंग बैलेंस में GRT के बारे में क्या करना होगा? कुछ नहीं! आपके जीआरटी को आर्बिट्रम में सुरक्षित रूप से माइग्रेट कर दिया गया है और जब आप इसे पढ़ रहे हैं तो इसका उपयोग प्रश्नों के भुगतान के लिए किया जा रहा है। @@ -69,10 +69,10 @@ L2 पर द ग्राफ़ का उपयोग करने का ल ## मुझे कैसे पता चलेगा कि आर्बिट्रम ब्रिज सुरक्षित है? -सभी उपयोगकर्ताओं के लिए सुरक्षा सुनिश्चित करने के लिए पुल का [भारी ऑडिट](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) किया गया है। +The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. ## अगर मैं अपने एथेरियम मेननेट वॉलेट से ताजा जीआरटी जोड़ रहा हूं तो मुझे क्या करना होगा? आपके आर्बिट्रम बिलिंग बैलेंस में जीआरटी जोड़ना [सबग्राफ स्टूडियो] \(https://thegraph.com/studio/) में एक-क्लिक अनुभव के साथ किया जा सकता है। आप आसानी से अपने जीआरटी को आर्बिट्रम से जोड़ सकेंगे और एक लेनदेन में अपनी एपीआई कुंजी भर सकेंगे। -जीआरटी जोड़ने, निकालने या प्राप्त करने के बारे में अधिक विस्तृत निर्देशों के लिए [बिलिंग पृष्ठ](https://thegraph.com/docs/en/billing/) पर जाएं। +Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. 
diff --git a/website/pages/hi/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/hi/arbitrum/l2-transfer-tools-faq.mdx index 70c7b6557165..890f8bb4628b 100644 --- a/website/pages/hi/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/hi/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 स्थानांतरण उपकरण अक्सर पूछे जाने वाले प्रश्न +title: L2 Transfer Tools FAQ --- -> L2 स्थानांतरण उपकरण अभी तक जारी नहीं किये गये हैं। इनके 2023 की गर्मियों में उपलब्ध होने की उम्मीद है। +## आम -## L2 स्थानांतरण उपकरण क्या हैं? +### What are L2 Transfer Tools? -ग्राफ़ ने आर्बिट्रम वन में प्रोटोकॉल लागू करके योगदानकर्ताओं के लिए नेटवर्क में भाग लेना 26 गुना सस्ता कर दिया है। L2 ट्रांसफर टूल्स को कोर डेवलपर्स द्वारा L2 पर ले जाना आसान बनाने के लिए बनाया गया था। प्रत्येक प्रोटोकॉल भागीदार के लिए, स्थानांतरण सहायकों का एक सेट साझा किया जाएगा ताकि एल2 में जाने पर अनुभव को सहज बनाया जा सके, पिघलने की अवधि से बचा जा सके या जीआरटी को मैन्युअल रूप से वापस लेने और ब्रिज करने की आवश्यकता पड़े। इन उपकरणों के लिए आपको चरणों के एक विशिष्ट सेट का पालन करने की आवश्यकता होगी जो इस बात पर निर्भर करेगा कि ग्राफ़ के भीतर आपकी भूमिका क्या है और आप एल2 में क्या स्थानांतरित कर रहे हैं। +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## क्या मैं उसी वॉलेट का उपयोग कर सकता हूँ जिसका उपयोग मैं एथेरियम मेननेट पर करता हूँ? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -यदि आप [EOA](https://etherum.org/en/developers/docs/accounts/#types-of-account) वॉलेट का उपयोग कर रहे हैं तो आप उसी पते का उपयोग कर सकते हैं। यदि आपका एथेरियम मेननेट वॉलेट एक अनुबंध है (उदाहरण के लिए एक मल्टीसिग) तो आपको एक [आर्बिट्रम वॉलेट पता]\(/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the- निर्दिष्ट करना होगा) ग्राफ-ऑन-एल2) जहां आपका स्थानांतरण भेजा जाएगा। कृपया पते की सावधानीपूर्वक जांच करें क्योंकि गलत पते पर किसी भी स्थानांतरण के परिणामस्वरूप स्थायी नुकसान हो सकता है। यदि आप L2 पर एक मल्टीसिग का उपयोग करना चाहते हैं, तो सुनिश्चित करें कि आप आर्बिट्रम वन पर एक मल्टीसिग अनुबंध तैनात करें। +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? + +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. + +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. 
If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### यदि मैं अपना स्थानांतरण 7 दिनों में पूरा नहीं कर पाता तो क्या होगा? + +L2 ट्रांसफर टूल L1 से L2 तक संदेश भेजने के लिए आर्बिट्रम के मूल तंत्र का उपयोग करते हैं। इस तंत्र को "पुनर्प्रयास योग्य टिकट" कहा जाता है और इसका उपयोग आर्बिट्रम जीआरटी ब्रिज सहित सभी देशी टोकन ब्रिजों द्वारा किया जाता है। आप पुनः प्रयास योग्य टिकटों के बारे में अधिक जानकारी [आर्बिट्रम डॉक्स](https://docs.arbitrum.io/arbos/l1-to-l2-messageing) में पढ़ सकते हैं। + +जब आप अपनी संपत्ति (सबग्राफ, हिस्सेदारी, प्रतिनिधिमंडल या क्यूरेशन) को एल2 में स्थानांतरित करते हैं, तो आर्बिट्रम जीआरटी ब्रिज के माध्यम से एक संदेश भेजा जाता है जो एल2 में एक पुनः प्रयास योग्य टिकट बनाता है। ट्रांसफ़र टूल में लेन-देन में कुछ ETH मान शामिल होते हैं, जिनका उपयोग 1) टिकट बनाने के लिए भुगतान करने और 2) L2 में टिकट निष्पादित करने के लिए गैस का भुगतान करने के लिए किया जाता है। हालाँकि, क्योंकि गैस की कीमतें L2 में निष्पादित होने के लिए टिकट तैयार होने तक के समय में भिन्न हो सकती हैं, यह संभव है कि यह ऑटो-निष्पादन प्रयास विफल हो जाए। जब ऐसा होता है, तो आर्बिट्रम ब्रिज पुनः प्रयास योग्य टिकट को 7 दिनों तक जीवित रखेगा, और कोई भी टिकट को "रिडीम" करने का पुनः प्रयास कर सकता है (जिसके लिए आर्बिट्रम में ब्रिज किए गए कुछ ईटीएच के साथ वॉलेट की आवश्यकता होती है)। + +इसे हम सभी स्थानांतरण टूल में "पुष्टि करें" चरण कहते हैं - यह ज्यादातर मामलों में स्वचालित रूप से चलेगा, क्योंकि ऑटो-निष्पादन अक्सर सफल होता है, लेकिन यह महत्वपूर्ण है कि आप यह सुनिश्चित करने के लिए वापस जांचें कि यह पूरा हो गया है। यदि यह सफल नहीं होता है और 7 दिनों में कोई सफल पुनर्प्रयास नहीं होता है, तो आर्बिट्रम ब्रिज टिकट को खारिज कर देगा, और आपकी संपत्ति (सबग्राफ, हिस्सेदारी, प्रतिनिधिमंडल या क्यूरेशन) खो जाएगी और पुनर्प्राप्त नहीं की जा सकेगी। ग्राफ़ कोर डेवलपर्स के पास इन स्थितियों का पता लगाने और बहुत देर होने से पहले टिकटों को भुनाने की कोशिश करने के लिए एक निगरानी प्रणाली है, लेकिन यह सुनिश्चित करना अंततः आपकी ज़िम्मेदारी है कि आपका स्थानांतरण समय पर पूरा हो जाए। यदि आपको अपने लेनदेन की पुष्टि करने में परेशानी हो रही है, तो कृपया [इस फॉर्म](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) और कोर डेव का उपयोग करके संपर्क करें आपकी मदद के लिए वहाँ मौजूद रहूँगा. + +### मैंने अपना डेलिगेशन/स्टेक/क्यूरेशन ट्रांसफर शुरू कर दिया है और मुझे यकीन नहीं है कि यह एल2 तक पहुंच गया है या नहीं, मैं कैसे पुष्टि कर सकता हूं कि इसे सही तरीके से ट्रांसफर किया गया था? + +यदि आपको अपनी प्रोफ़ाइल पर स्थानांतरण पूरा करने के लिए कहने वाला कोई बैनर नहीं दिखता है, तो संभव है कि लेन-देन सुरक्षित रूप से L2 पर पहुंच गया है और किसी और कार्रवाई की आवश्यकता नहीं है। यदि संदेह है, तो आप जांच सकते हैं कि एक्सप्लोरर आर्बिट्रम वन पर आपका प्रतिनिधिमंडल, हिस्सेदारी या क्यूरेशन दिखाता है या नहीं। + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. ## सबग्राफ स्थानांतरण -## मैं अपना सबग्राफ कैसे स्थानांतरित करूं? +### मैं अपना सबग्राफ कैसे स्थानांतरित करूं? -अपना सबग्राफ़ स्थानांतरित करने के लिए, आपको निम्नलिखित चरण पूरे करने होंगे: + -1. 
एथेरियम मेननेट पर स्थानांतरण आरंभ करें +अपने सबग्राफ को स्थानांतरित करने के लिए, आपको निम्नलिखित चरणों को पूरा करने होंगे: -2. पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +1. Ethereum mainnet वर हस्तांतरण सुरू करा -3. आर्बिट्रम पर सबग्राफ़ स्थानांतरण की पुष्टि करें\* +2. पुष्टि के लिए 20 मिनट का इंतजार करें: + +3. आर्बिट्रमवर सबग्राफ हस्तांतरणाची पुष्टी करा\* 4. आर्बिट्रम पर सबग्राफ का प्रकाशन समाप्त करें 5. क्वेरी यूआरएल अपडेट करें (अनुशंसित) -\*ध्यान दें कि आपको 7 दिनों के भीतर स्थानांतरण की पुष्टि करनी होगी अन्यथा आपका सबग्राफ खो सकता है। ज्यादातर मामलों में, यह चरण स्वचालित रूप से चलेगा, लेकिन आर्बिट्रम पर गैस की कीमत बढ़ने पर मैन्युअल पुष्टि की आवश्यकता हो सकती है। यदि इस प्रक्रिया के दौरान कोई समस्या आती है, तो मदद के लिए संसाधन मौजूद होंगे: support@thegraph.com पर या [Discord](https://discord.gg/graphprotocol) पर सहायता से संपर्क करें। +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## मुझे अपना स्थानांतरण कहाँ से करना चाहिए? +### मुझे अपना स्थानांतरण कहाँ से आरंभ करना चाहिए? -आप अपना स्थानांतरण [सबग्राफ स्टूडियो](https://thegraph.com/studio/), [एक्सप्लोरर,](https://thegraph.com/explorer) या किसी सबग्राफ विवरण पृष्ठ से शुरू कर सकते हैं। स्थानांतरण शुरू करने के लिए सबग्राफ विवरण पृष्ठ में "ट्रांसफर सबग्राफ" बटन पर क्लिक करें। +आप[Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) या किसी भी सबग्राफ विवरण पृष्ठ से अपने अंतरण को प्रारंभ कर सकते हैं। सबग्राफ विवरण पृष्ठ में "Transfer Subgraph" बटन पर क्लिक करके अंतरण आरंभ करें। -## मेरा सबग्राफ़ स्थानांतरित होने तक मुझे कितने समय तक प्रतीक्षा करनी होगी +### मेरा सबग्राफ़ स्थानांतरित होने तक मुझे कितने समय तक प्रतीक्षा करनी होगी? -स्थानांतरण समय में लगभग 20 मिनट लगते हैं। ब्रिज ट्रांसफर को स्वचालित रूप से पूरा करने के लिए आर्बिट्रम ब्रिज पृष्ठभूमि में काम कर रहा है। कुछ मामलों में, गैस की लागत बढ़ सकती है और आपको लेनदेन की दोबारा पुष्टि करनी होगी। +अंतरण करने में लगभग 20 मिनट का समय लगता है। आर्बिट्रम सेतु स्वचालित रूप से सेतु अंतरण पूरा करने के लिए पृष्ठभूमि में काम कर रहा है। कुछ मामलों में, गैस लागत में वृद्धि हो सकती है और आपको लेन-देन की पुष्टि फिर से करनी होगी। -## क्या मैं अपने सबग्राफ़ को L2 पर स्केल करने के बाद भी ढूंढ पाऊंगा? +### क्या मेरा सबग्राफ L2 में स्थानांतरित करने के बाद भी खोजा जा सकेगा? -आपका सबग्राफ केवल उसी नेटवर्क पर खोजा जा सकेगा जिस पर इसे प्रकाशित किया गया है। उदाहरण के लिए, यदि आपका सबग्राफ आर्बिट्रम वन पर है, तो आप इसे केवल आर्बिट्रम वन पर एक्सप्लोरर में पा सकते हैं और एथेरियम पर नहीं पा सकेंगे। कृपया सुनिश्चित करें कि आपने पृष्ठ के शीर्ष पर नेटवर्क स्विचर में आर्बिट्रम वन का चयन किया है ताकि यह सुनिश्चित हो सके कि आप सही नेटवर्क पर हैं। स्थानांतरण के बाद, L1 सबग्राफ अप्रचलित के रूप में दिखाई देगा। +आपका सबग्राफ केवल उस नेटवर्क पर खोजने योग्य होगा जिस पर यह प्रकाशित किया गया है। उदाहरण स्वरूप, यदि आपका सबग्राफ आर्बिट्रम वन पर है, तो आपकेंद्रीय तंत्र पर केवल आर्बिट्रम वन के खोजक में ही ढूंढा जा सकता है और आप इथेरियम पर इसे नहीं खोज पाएंगे। कृपया सुनिश्चित करें कि आपने पृष्ठ के शीर्ष में नेटवर्क स्विचर में आर्बिट्रम वन को चुना है ताकि आप सही नेटवर्क पर हों। अंतरण के बाद, L1 सबग्राफ को पुराना किया गया माना जाएगा। -## क्या इसे स्थानांतरित करने के लिए मेरे सबग्राफ को प्रकाशित करने की आवश्यकता है? 
+### क्या मेरे सबग्राफ को स्थानांतरित करने के लिए इसे प्रकाशित किया जाना आवश्यक है? -सबग्राफ ट्रांसफर टूल का लाभ उठाने के लिए, आपका सबग्राफ पहले से ही एथेरियम मेननेट पर प्रकाशित होना चाहिए और सबग्राफ के मालिक वॉलेट के पास कुछ क्यूरेशन सिग्नल होना चाहिए। यदि आपका सबग्राफ प्रकाशित नहीं हुआ है, तो यह अनुशंसा की जाती है कि आप सीधे आर्बिट्रम वन पर प्रकाशित करें - संबंधित गैस शुल्क काफी कम होगा। यदि आप किसी प्रकाशित सबग्राफ को स्थानांतरित करना चाहते हैं, लेकिन मालिक खाते ने उस पर कोई सिग्नल क्यूरेट नहीं किया है, तो आप उस खाते से एक छोटी राशि (उदाहरण के लिए 1 जीआरटी) का संकेत दे सकते हैं; "ऑटो-माइग्रेटिंग" सिग्नल चुनना सुनिश्चित करें। +सबग्राफ अंतरण उपकरण का लाभ उठाने के लिए, आपके सबग्राफ को पहले ही ईथेरियम मेननेट पर प्रकाशित किया जाना चाहिए और सबग्राफ के मालिक वॉलेट द्वारा स्वामित्व सिग्नल का कुछ होना चाहिए। यदि आपका सबग्राफ प्रकाशित नहीं है, तो सिफ़ारिश की जाती है कि आप सीधे आर्बिट्रम वन पर प्रकाशित करें - जुड़े गए गैस शुल्क काफी कम होंगे। यदि आप किसी प्रकाशित सबग्राफ को अंतरण करना चाहते हैं लेकिन मालिक खाता ने उस पर कोई सिग्नल क्यूरेट नहीं किया है, तो आप उस खाते से थोड़ी सी राशि (जैसे 1 GRT) के सिग्नल कर सकते हैं; सुनिश्चित करें कि आपने "ऑटो-माइग्रेटिंग" सिग्नल को चुना है। -## आर्बिट्रम में स्थानांतरित होने के बाद मेरे सबग्राफ के एथेरियम मेननेट संस्करण का क्या होता है? +### मी आर्बिट्रममध्ये हस्तांतरित केल्यानंतर माझ्या सबग्राफच्या इथरियम मेननेट आवृत्तीचे काय होते? -आपके सबग्राफ को आर्बिट्रम में स्थानांतरित करने के बाद, एथेरियम मेननेट संस्करण को हटा दिया जाएगा। हमारा सुझाव है कि आप 48 घंटों के भीतर अपना क्वेरी यूआरएल अपडेट करें। हालाँकि, एक छूट अवधि मौजूद है जो आपके मेननेट यूआरएल को कार्यशील बनाए रखती है ताकि किसी भी तृतीय-पक्ष डीएपी समर्थन को अपडेट किया जा सके। +अपने सबग्राफ को आर्बिट्रम पर अंतरण करने के बाद, ईथेरियम मेननेट संस्करण को पुराना किया जाएगा। हम आपको 48 घंटों के भीतर अपनी क्वेरी URL को अद्यतन करने की सिफारिश करते हैं। हालांकि, एक ग्रेस पीरियड लागू होता है जिसके तहत आपकी मुख्यनेट URL को कार्यरत रखा जाता है ताकि किसी तिसरी पक्ष डैप समर्थन को अपडेट किया जा सके। -## स्थानांतरण के बाद, क्या मुझे आर्बिट्रम पर पुनः प्रकाशित करने की भी आवश्यकता है? +### स्थानांतरण करने के बाद, क्या मुझे आर्बिट्रम पर पुनः प्रकाशन की आवश्यकता होती है? -20 मिनट की ट्रांसफर विंडो के बाद, आपको ट्रांसफर पूरा करने के लिए यूआई में लेनदेन के साथ ट्रांसफर की पुष्टि करने की आवश्यकता होगी, लेकिन ट्रांसफर टूल इसके माध्यम से आपका मार्गदर्शन करेगा। आपका L1 समापन बिंदु स्थानांतरण विंडो और उसके बाद एक अनुग्रह अवधि के दौरान समर्थित रहेगा। यह प्रोत्साहित किया जाता है कि जब आपके लिए सुविधाजनक हो तो आप अपना समापन बिंदु अपडेट करें। +20 मिनट के अंतराल के बाद, आपको अंतरण को पूरा करने के लिए UI में एक लेन-देन की पुष्टि करनी होगी, लेकिन अंतरण उपकरण आपको इसके माध्यम से मार्गदर्शन करेगा। आपकी L1 इंड पॉइंट ट्रांसफर विंडो के दौरान और एक ग्रेस पीरियड के बाद भी समर्थित रहेगा। आपको यह सुझाव दिया जाता है कि आप अपनी इंड पॉइंट को अपनी सुविधा के अनुसार अपडेट करें। -## क्या पुनः प्रकाशित करते समय मेरे समापन बिंदु पर कोई डाउन-टाइम होगा? +### Will my endpoint experience downtime while re-publishing? -आपके सबग्राफ को L2 पर ले जाने के लिए ट्रांसफर टूल का उपयोग करते समय कोई डाउन टाइम नहीं होना चाहिए। आपका L1 एंडपॉइंट ट्रांसफर विंडो के दौरान और उसके बाद एक अनुग्रह अवधि के दौरान समर्थित रहेगा। यह प्रोत्साहित किया जाता है कि जब आपके लिए सुविधाजनक हो तो आप अपना एंडपॉइंट अपडेट करें। +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## क्या L2 पर एथेरियम एथेरियम मेननेट का प्रकाशन और संस्करणीकरण समान है? 
+### क्या L2 पर प्रकाशन और संस्करणीकरण Ethereum मेननेट के समान होते हैं? -हाँ। सबग्राफ स्टूडियो में प्रकाशित करते समय अपने प्रकाशित नेटवर्क के रूप में आर्बिट्रम वन का चयन करना सुनिश्चित करें। स्टूडियो में, नवीनतम समापन बिंदु उपलब्ध होगा जो सबग्राफ के नवीनतम अद्यतन संस्करण की ओर इशारा करता है। +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## क्या मेरे सबग्राफ का क्यूरेशन मेरे सबग्राफ के साथ चलेगा? +### क्या मेरे सबग्राफ की संरचना उसके साथ चलेगी जब मैं सबग्राफ को स्थानांतरित करूँगा? -यदि आपने ऑटो-माइग्रेटिंग सिग्नल चुना है, तो आपका अपना 100% क्यूरेशन आपके सबग्राफ के साथ आर्बिट्रम वन में चला जाएगा। स्थानांतरण के समय सबग्राफ के सभी क्यूरेशन सिग्नल को जीआरटी में परिवर्तित कर दिया जाएगा, और आपके क्यूरेशन सिग्नल के अनुरूप जीआरटी का उपयोग एल2 सबग्राफ पर सिग्नल को ढालने के लिए किया जाएगा। +यदि आपने "ऑटो-माइग्रेटिंग" सिग्नल का चयन किया है, तो आपके खुद के क्यूरेशन का 100% आपके सबग्राफ के साथ आर्बिट्रम वन पर जाएगा। सबग्राफ के सभी क्यूरेशन सिग्नल को अंतरण के समय GRT में परिवर्तित किया जाएगा, और आपके क्यूरेशन सिग्नल के समर्थन में उत्पन्न होने वाले GRT का उपयोग L2 सबग्राफ पर सिग्नल मिंट करने के लिए किया जाएगा। -अन्य क्यूरेटर यह चुन सकते हैं कि जीआरटी का अपना अंश वापस लेना है या नहीं, या इसे उसी सबग्राफ पर मिंट सिग्नल के लिए एल2 में स्थानांतरित करना है। +अन्य क्यूरेटर यह चुन सकते हैं कि जीआरटी का अपना अंश वापस लेना है या नहीं, या इसे उसी सबग्राफ पर मिंट सिग्नल के लिए एल2 में स्थानांतरित करना है या नहीं। -## क्या मैं स्थानांतरण के बाद अपने सबग्राफ को एथेरियम मेननेट पर वापस ले जा सकता हूँ? +### क्या मैं स्थानांतरण के बाद अपने सबग्राफ को एथेरियम मेननेट पर वापस ले जा सकता हूं? -एक बार स्थानांतरित होने के बाद, इस सबग्राफ का आपका एथेरियम मेननेट संस्करण अप्रचलित कर दिया जाएगा। यदि आप मेननेट पर वापस जाना चाहते हैं, तो आपको मेननेट पर पुनः तैनात और प्रकाशित करना होगा। हालाँकि, एथेरियम मेननेट पर वापस स्थानांतरित करने को दृढ़ता से हतोत्साहित किया जाता है क्योंकि अनुक्रमण पुरस्कार अंततः पूरी तरह से आर्बिट्रम वन पर वितरित किए जाएंगे। +एक बार अंतरित होने के बाद, आपके ईथेरियम मेननेट संस्करण को पुराना मान दिया जाएगा। अगर आप मुख्यनेट पर वापस जाना चाहते हैं, तो आपको पुनः डिप्लॉय और प्रकाशित करने की आवश्यकता होगी। हालांकि, वापस ईथेरियम मेननेट पर लौटने को मजबूरी से अनुशंसित किया जाता है क्योंकि सूचीकरण रिवॉर्ड आखिरकार पूरी तरह से आर्बिट्रम वन पर ही वितरित किए जाएंगे। -## मुझे अपना स्थानांतरण पूरा करने के लिए ब्रिजित ईटीएच की आवश्यकता क्यों है? +### मेरे स्थानांतरण को पूरा करने के लिए मुझे ब्रिज़्ड ईथ की आवश्यकता क्यों है? -आर्बिट्रम वन पर गैस शुल्क का भुगतान ब्रिज किए गए ईटीएच (यानी ईटीएच जिसे आर्बिट्रम वन से ब्रिज किया गया है) का उपयोग करके किया जाता है। हालाँकि, एथेरियम मेननेट की तुलना में गैस शुल्क काफी कम है। +आर्बिट्रम वन पर गैस शुल्क ब्रिज्ड ईथरियम (यानी ईथरियम जो आर्बिट्रम वन के लिए ब्रिज किया गया है) का उपयोग करके दिए जाते हैं। हालांकि, ईथेरियम मेननेट की तुलना में गैस शुल्क बहुत कम होते हैं। -## क्यूरेशन सिग्नल +## प्रतिनिधि -## मैं अपना क्यूरेशन कैसे स्थानांतरित करूं? +### मैं अपना प्रतिनिधिमंडल कैसे स्थानांतरित करूं? -अपना क्यूरेशन स्थानांतरित करने के लिए, आपको निम्नलिखित चरण पूरे करने होंगे: + -1. एथेरियम मेननेट पर सिग्नल ट्रांसफर शुरू करें +अपनी समर्पण को स्थानांतरित करने के लिए, आपको निम्नलिखित चरणों को पूरा करने की आवश्यकता होगी: -2. L2 क्यूरेटर पता निर्दिष्ट करें\* +1. ईथेरियम मेननेट पर समर्पण स्थानांतरण प्रारंभ करें: +2. पुष्टि के लिए 20 मिनट का इंतजार करें: +3. आर्बिट्रम पर समर्पण स्थानांतरण की पुष्टि करें: -3. 
पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*यदि आवश्यक हो - यानी आप अनुबंध पते का उपयोग कर रहे हैं। +### अगर मैं ईथेरियम मेननेट पर खुली आवंटन के साथ स्थानांतरण प्रारंभ करता हूँ, तो मेरे पुरस्कारों के साथ क्या होता है? -## मुझे कैसे पता चलेगा कि मेरे द्वारा क्यूरेट किया गया सबग्राफ L2 पर चला गया है? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -सबग्राफ विवरण पृष्ठ को देखने पर, एक बैनर आपको सूचित करेगा कि यह सबग्राफ स्थानांतरित किया गया है। आप समायोजन को स्थानांतरित करने के लिए प्रोम्प्ट का पालन कर सकते हैं। आप इस सूचना को भी पा सकते हैं किसी भी सबग्राफ के सबग्राफ विवरण पृष्ठ पर जो स्थानांतरित हो गया है। +### यदि मैं जिस इंडेक्सर को वर्तमान में सौंप रहा हूं वह आर्बिट्रम वन पर नहीं है तो क्या होगा? -## यदि मैं अपने क्यूरेशन को एल2 पर ले जाना नहीं चाहता तो क्या होगा? +L2 हस्तांतरण उपकरण को केवल तब सक्षम किया जाएगा अगर वह इंडेक्सर जिसे आपने डेलीगेट किया है, अपना स्टेक Arbitrum पर हस्तांतरित कर चुका है। -जब एक सबग्राफ प्रतिष्ठानित किया जाता है, तो आपको अपनी सिग्नल वापस लेने का विकल्प होता है। उसी तरह, यदि कोई सबग्राफ L2 पर स्थानांतरित हो गया है, तो आपको एथेरियम मेननेट में अपनी सिग्नल वापस लेने या सिग्नल को L2 पर भेजने का विकल्प चुनने की सुविधा होती है। +### क्या सहायक व्यक्तियों को किसी अन्य इंडेक्सर को सहायकता देने का विकल्प होता है? -## मुझे कैसे पता चलेगा कि मेरा क्यूरेशन सफलतापूर्वक स्थानांतरित हो गया है? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -एल2 ट्रांसफर टूल शुरू होने के लगभग 20 मिनट बाद एक्सप्लोरर के माध्यम से सिग्नल विवरण पहुंच योग्य होगा। +### यदि मुझे L2 पर वह इंडेक्सर नहीं मिल रहा है जिसे मैं सौंप रहा हूँ तो क्या होगा? -## क्या मैं एक समय में एक से अधिक सबग्राफ पर अपना क्यूरेशन स्थानांतरित कर सकता हूं? +L2 हस्तांतरण उपकरण स्वचालित रूप से डिटेक्ट करेगा कि आप पहले किस इंडेक्सर को डेलीगेट कर चुके हैं। -इस समय कोई थोक स्थानांतरण विकल्प नहीं है. +### क्या मैं नए या कई इंडेक्सरों के बीच अपनी सौंपन को मिला कर देने की क्षमता रखूंगा, या पहले इंडेक्सर के बजाय। -## इंडेक्सर हिस्सेदारी +L2 हस्तांतरण उपकरण हमेशा आपकी डेलीगेशन को उसी इंडेक्सर के पास हस्तांतरित करेगा, जिसे आपने पहले से डेलीगेट किया था। जब आप L2 पर हस्तांतरित हो जाते हैं, तो आप अनडेलीगेट कर सकते हैं, थॉविंग पीरियड का इंतजार कर सकते हैं, और फिर तय कर सकते हैं कि क्या आप अपनी डेलीगेशन को विभाजित करना चाहते हैं। + +### क्या मैं कूलडाउन अवधि के अधीन हूं या क्या मैं L2 डेलिगेशन ट्रांसफर टूल का उपयोग करने के तुरंत बाद वापस ले सकता हूं? 
+ +ट्रांसफर टूल आपको तुरंत L2 पर हस्तांतरित होने की अनुमति देता है। यदि आप अनडेलीगेट करना चाहते हैं, तो आपको थॉविंग पीरियड का इंतजार करना होगा। हालांकि, अगर किसी इंडेक्सर ने अपने सभी स्टेक को L2 पर हस्तांतरित कर दिया है, तो आप इसे Ethereum मुख्यनेट पर तुरंत निकाल सकते हैं। + +### यदि मैं अपने प्रतिनिधित्व को ट्रांसफर नहीं करता हूँ, क्या मेरे प्रतिष्ठान पर नकारात्मक प्रभाव पड़ सकता है? + +आंतिकाल में यह आनुमानित है कि सभी नेटवर्क प्रतिभागन आर्बिट्रम वन पर स्थानांतरित होगा। + +### मेरे प्रतिनिधित्व को L2 में ट्रांसफर करने का पूरा काम कितने समय तक लगता है? -## मैं अपनी हिस्सेदारी आर्बिट्रम में कैसे स्थानांतरित करूं? +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -अपनी हिस्सेदारी हस्तांतरित करने के लिए, आपको निम्नलिखित चरण पूरे करने होंगे: +### क्या मैं अपनी सौंपन को स्थानांतरित कर सकता हूँ अगर मैं एक जीआरटी वेस्टिंग अनुबंध/टोकन लॉक वॉलेट का उपयोग कर रहा हूँ? -1. एथेरियम मेननेट पर हिस्सेदारी हस्तांतरण शुरू करें +हाँ! प्रक्रिया थोड़ी अलग है क्योंकि वेस्टिंग कॉन्ट्रैक्ट्स आवश्यक L2 गैस के लिए आवश्यक ETH को फॉरवर्ड नहीं कर सकते, इसलिए आपको पहले ही इसे जमा करना होगा। यदि आपका वेस्टिंग कॉन्ट्रैक्ट पूरी तरह से वेस्ट नहीं होता है, तो आपको पहले L2 पर एक समकक्ष वेस्टिंग कॉन्ट्रैक्ट को प्रारंभ करना होगा और आप केवल इस L2 वेस्टिंग कॉन्ट्रैक्ट पर डेलीगेशन को हस्तांतरित कर सकेंगे। जब आप वेस्टिंग लॉक वॉलेट का उपयोग करके एक्सप्लोरर से जुड़ते हैं, तो यह प्रक्रिया आपको एक्सप्लोरर पर कनेक्ट करने के लिए गाइड कर सकती है। -2. पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -3. आर्बिट्रम पर हिस्सेदारी हस्तांतरण की पुष्टि करें +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -\*ध्यान दें कि आपको 7 दिनों के भीतर स्थानांतरण की पुष्टि करनी होगी अन्यथा आपकी हिस्सेदारी खो सकती है। ज्यादातर मामलों में, यह चरण स्वचालित रूप से चलेगा, लेकिन आर्बिट्रम पर गैस की कीमत बढ़ने पर मैन्युअल पुष्टि की आवश्यकता हो सकती है। यदि इस प्रक्रिया के दौरान कोई समस्या आती है, तो मदद के लिए संसाधन मौजूद होंगे: support@thegraph.com पर या [Discord](https://discord.gg/graphprotocol) पर सहायता से संपर्क करें। +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -## क्या मेरी सारी हिस्सेदारी हस्तांतरित हो जाएगी? +### क्या कोई प्रतिनिधिमंडल कर है? 
-आप चुन सकते हैं कि आपको अपनी कितनी हिस्सेदारी हस्तांतरित करनी है। यदि आप अपनी सारी हिस्सेदारी एक साथ हस्तांतरित करना चुनते हैं, तो आपको पहले किसी भी खुले आवंटन को बंद करना होगा। +नहीं, L2 पर प्राप्त टोकनों को निर्दिष्ट इंडेक्सर की ओर से निर्दिष्ट डेलीगेटर के प्रतिनिधि रूप में डेलीगेट किया जाता है और डेलीगेशन टैक्स का कोई भुगतान नहीं होता है। -यदि आप अपनी हिस्सेदारी के कुछ हिस्सों को कई लेनदेन में स्थानांतरित करने की योजना बना रहे हैं, तो आपको हमेशा एक ही लाभार्थी का पता निर्दिष्ट करना होगा। +### Will my unrealized rewards be transferred when I transfer my delegation? -ध्यान दें: जब आप पहली बार ट्रांसफर टूल का उपयोग करते हैं तो आपको L2 पर न्यूनतम हिस्सेदारी आवश्यकताओं को पूरा करना होगा। इंडेक्सर्स को न्यूनतम 100k GRT भेजना होगा (पहली बार इस फ़ंक्शन को कॉल करते समय)। यदि हिस्सेदारी का एक हिस्सा एल1 पर छोड़ा जाता है, तो यह न्यूनतम 100k जीआरटी से अधिक होना चाहिए और आपके खुले आवंटन को कवर करने के लिए पर्याप्त (आपके प्रतिनिधिमंडलों के साथ) होना चाहिए। +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -## आर्बिट्रम में अपनी हिस्सेदारी हस्तांतरण की पुष्टि करने के लिए मेरे पास कितना समय है? +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -\*\*\* आर्बिट्रम पर हिस्सेदारी हस्तांतरण को पूरा करने के लिए आपको अपने लेनदेन की पुष्टि करनी होगी। यह चरण 7 दिनों के भीतर पूरा किया जाना चाहिए अन्यथा हिस्सेदारी खो सकती है। +### Is moving delegations to L2 mandatory? Is there a deadline? -## यदि मेरे पास खुला आवंटन हो तो क्या होगा? +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -यदि आप अपनी पूरी हिस्सेदारी नहीं भेज रहे हैं, तो एल2 ट्रांसफर टूल सत्यापित करेगा कि एथेरियम मेननेट में कम से कम 100k जीआरटी शेष है और आपकी शेष हिस्सेदारी और प्रतिनिधिमंडल किसी भी खुले आवंटन को कवर करने के लिए पर्याप्त है। यदि आपका जीआरटी शेष न्यूनतम + खुले आवंटन को कवर नहीं करता है तो आपको खुले आवंटन बंद करने की आवश्यकता हो सकती है। +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -## ट्रांसफर टूल का उपयोग करते हुए, क्या ट्रांसफर करने से पहले एथेरियम मेननेट पर स्टेक हटाने के लिए 28 दिनों तक इंतजार करना आवश्यक है? +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -नहीं, आप अपनी हिस्सेदारी तुरंत L2 में स्थानांतरित कर सकते हैं, स्थानांतरण टूल का उपयोग करने से पहले हिस्सेदारी हटाने और प्रतीक्षा करने की कोई आवश्यकता नहीं है। 28 दिन की प्रतीक्षा केवल तभी लागू होती है जब आप एथेरियम मेननेट या एल2 पर हिस्सेदारी को अपने वॉलेट में वापस लेना चाहते हैं। +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. 
​ -## मेरी हिस्सेदारी हस्तांतरित करने में कितना समय लगेगा? +### I don't see a button to transfer my delegation. Why is that? -L2 ट्रांसफर टूल को आपकी हिस्सेदारी को पूरा करने में लगभग 20 मिनट का समय लगेगा। +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -## क्या मुझे अपनी हिस्सेदारी हस्तांतरित करने से पहले आर्बिट्रम पर इंडेक्स करना होगा? +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -आप अनुक्रमण स्थापित करने से पहले अपनी हिस्सेदारी को प्रभावी ढंग से स्थानांतरित कर सकते हैं, लेकिन जब तक आप L2 पर सबग्राफ को आवंटित नहीं करते, उन्हें अनुक्रमित नहीं करते, और POI प्रस्तुत नहीं करते, तब तक आप L2 पर किसी भी पुरस्कार का दावा नहीं कर पाएंगे। +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -## क्या मेरे अनुक्रमण हिस्सेदारी को स्थानांतरित करने से पहले प्रतिनिधि अपना प्रतिनिधिमंडल स्थानांतरित कर सकते हैं? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -नहीं, प्रतिनिधियों को अपने प्रत्यायोजित जीआरटी को आर्बिट्रम में स्थानांतरित करने के लिए, जिस इंडेक्सर को वे प्रत्यायोजित कर रहे हैं उसे एल2 पर सक्रिय होना चाहिए। +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -## यदि मैं जीआरटी वेस्टिंग अनुबंध/टोकन लॉक वॉलेट का उपयोग कर रहा हूं तो क्या मैं अपनी हिस्सेदारी हस्तांतरित कर सकता हूं? +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -हाँ! प्रक्रिया थोड़ी अलग है, क्योंकि निहित अनुबंध एल2 गैस के भुगतान के लिए आवश्यक ईटीएच को अग्रेषित नहीं कर सकते हैं, इसलिए आपको इसे पहले ही जमा करना होगा। यदि आपका निहित अनुबंध पूरी तरह से निहित नहीं है, तो आपको पहले L2 पर एक समकक्ष निहित अनुबंध शुरू करना होगा और केवल इस L2 निहित अनुबंध में हिस्सेदारी स्थानांतरित करने में सक्षम होंगे। जब आप वेस्टिंग लॉक वॉलेट का उपयोग करके एक्सप्लोरर से कनेक्ट होते हैं तो एक्सप्लोरर पर यूआई इस प्रक्रिया में आपका मार्गदर्शन कर सकता है। +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -## डैलिगेटर +## क्यूरेशन सिग्नल + +### मैं अपना क्यूरेशन कैसे स्थानांतरित करूं? -## मैं अपना प्रतिनिधिमंडल कैसे स्थानांतरित करूं? +तुमचे क्युरेशन हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: -अपना प्रतिनिधिमंडल स्थानांतरित करने के लिए, आपको निम्नलिखित चरण पूरे करने होंगे: +1. एथेरियम मेननेट पर सिग्नल ट्रांसफर शुरू करें -1. एथेरियम मेननेट पर प्रतिनिधिमंडल स्थानांतरण आरंभ करें +2. L2 क्यूरेटर पता निर्दिष्ट करें\* -2. पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +3. पुष्टि के लिए 20 मिनट का इंतजार करें: -3. 
आर्बिट्रम पर प्रतिनिधिमंडल स्थानांतरण की पुष्टि करें +\*यदि आवश्यक हो - अर्थात्, आप एक कॉन्ट्रैक्ट पते का उपयोग कर रहे हैं | -\*\*\*\*आर्बिट्रम पर प्रतिनिधिमंडल हस्तांतरण को पूरा करने के लिए आपको लेनदेन की पुष्टि करनी होगी। यह चरण 7 दिनों के भीतर पूरा किया जाना चाहिए अन्यथा प्रतिनिधिमंडल खो सकता है। ज्यादातर मामलों में, यह चरण स्वचालित रूप से चलेगा, लेकिन आर्बिट्रम पर गैस की कीमत बढ़ने पर मैन्युअल पुष्टि की आवश्यकता हो सकती है। यदि इस प्रक्रिया के दौरान कोई समस्या आती है, तो मदद के लिए संसाधन मौजूद होंगे: support@thegraph.com पर या [Discord](https://discord.gg/graphprotocol) पर सहायता से संपर्क करें। +### मी क्युरेट केलेला सबग्राफ L2 वर गेला असल्यास मला कसे कळेल? -## यदि मैं एथेरियम मेननेट पर खुले आवंटन के साथ स्थानांतरण शुरू करता हूं तो मेरे पुरस्कारों का क्या होगा? +सबग्राफ विवरण पृष्ठ को देखते समय, एक बैनर आपको सूचित करेगा कि यह सबग्राफ अंतरण किया गया है। आप प्रोंप्ट का पालन करके अपने क्यूरेशन को अंतरण कर सकते हैं। आप इस जानकारी को भी उन सभी सबग्राफों के विवरण पृष्ठ पर पा सकते हैं जिन्होंने अंतरण किया है। -यदि जिस इंडेक्सर को आप सौंप रहे हैं वह अभी भी एल1 पर काम कर रहा है, तो जब आप आर्बिट्रम में स्थानांतरित होते हैं तो आप एथेरियम मेननेट पर खुले आवंटन से किसी भी प्रतिनिधिमंडल पुरस्कार को जब्त कर लेंगे। इसका मतलब यह है कि आप अधिकतम 28 दिनों की अवधि से पुरस्कार खो देंगे। यदि आप अनुक्रमणिका द्वारा आवंटन बंद करने के ठीक बाद स्थानांतरण का समय तय करते हैं तो आप यह सुनिश्चित कर सकते हैं कि यह न्यूनतम संभव राशि है। यदि आपके पास अपने अनुक्रमणिका(ओं) के साथ संचार चैनल है, तो अपना स्थानांतरण करने के लिए सबसे अच्छा समय खोजने के लिए उनके साथ चर्चा करने पर विचार करें। +### अगर मैं अपनी संरचना को L2 में स्थानांतरित करना नहीं चाहता हूँ तो क्या होगा? -## यदि मैं जिस इंडेक्सर को वर्तमान में सौंप रहा हूं वह आर्बिट्रम वन पर नहीं है तो क्या होगा? +जब एक सबग्राफ पुराना होता है, तो आपके पास सिग्नल वापस लेने का विकल्प होता है। उसी तरह, अगर कोई सबग्राफ L2 पर चल रहा है, तो आपको चुनने का विकल्प होता है कि क्या आप ईथेरियम मेननेट से सिग्नल वापस लेना चाहेंगे या सिग्नल को L2 पर भेजें। -L2 ट्रांसफर टूल केवल तभी सक्षम किया जाएगा जब आपने जिस इंडेक्सर को सौंपा है, उसने अपनी हिस्सेदारी आर्बिट्रम में स्थानांतरित कर दी है। +### माझे क्युरेशन यशस्वीरित्या हस्तांतरित झाले हे मला कसे कळेल? -## क्या प्रतिनिधि किसी अन्य अनुक्रमणिका को सौंप सकते हैं +एल2 स्थानांतरण उपकरण को प्रारंभ करने के बाद, सिग्नल विवरण एक्सप्लोरर के माध्यम से लगभग 20 मिनट के बाद उपलब्ध होंगे। -यदि आप किसी अन्य इंडेक्सर को प्रत्यायोजित करना चाहते हैं, तो आप आर्बिट्रम पर उसी इंडेक्सर को स्थानांतरित कर सकते हैं, फिर प्रत्यायोजित करें और विगलन अवधि की प्रतीक्षा करें। इसके बाद, आप प्रत्यायोजित करने के लिए किसी अन्य सक्रिय अनुक्रमणिका का चयन कर सकते हैं। +### क्या मैं एक समय पर एक से अधिक सबग्राफ पर अपनी संरचना को स्थानांतरित कर सकता हूँ? + +वर्तमान में कोई थोक स्थानांतरण विकल्प उपलब्ध नहीं है। + +## इंडेक्सर हिस्सेदारी -## यदि मुझे L2 पर वह इंडेक्सर नहीं मिल रहा है जिसे मैं सौंप रहा हूँ तो क्या होगा? +### मैं अपनी स्थानांतरण को कैसे आर्बिट्रम में स्थानांतरित कर सकता हूँ? -L2 ट्रांसफर टूल स्वचालित रूप से उस इंडेक्सर का पता लगाएगा जिसे आपने पहले सौंपा था। +> अस्वीकरण: यदि आप अपने इंडेक्सर पर ग्राफ टोकन का कोई हिस्सा अनस्थित कर रहे हैं, तो आप L2 ट्रांसफर टूल का उपयोग नहीं कर सकेंगे। -## क्या मैं पूर्व इंडेक्सर के बजाय नए या कई इंडेक्सर्स में अपने प्रतिनिधिमंडल को मिश्रित और मिलान या 'फैला' सकूंगा? 
+ -L2 स्थानांतरण उपकरण हमेशा आपके प्रतिनिधिमंडल को उसी इंडेक्सर पर ले जाएगा जिसे आपने पहले सौंपा था। एक बार जब आप एल2 में चले जाते हैं, तो आप प्रत्यायोजन को रद्द कर सकते हैं, पिघलने की अवधि की प्रतीक्षा कर सकते हैं, और निर्णय ले सकते हैं कि क्या आप अपने प्रतिनिधिमंडल को विभाजित करना चाहते हैं। +अपने स्टेक को स्थानांतरित करने के लिए, आपको निम्नलिखित चरणों को पूरा करने होंगे: -## क्या मैं कूलडाउन अवधि के अधीन हूं या क्या मैं एल2 डेलिगेशन ट्रांसफर टूल का उपयोग करने के तुरंत बाद वापस ले सकता हूं? +1. ईथेरियम मेननेट पर स्थानांतरण प्रारंभ करें: -स्थानांतरण उपकरण आपको तुरंत L2 पर जाने की अनुमति देता है। यदि आप प्रत्यायोजित करना चाहते हैं तो आपको पिघलने की अवधि तक इंतजार करना होगा। हालाँकि, यदि किसी इंडेक्सर ने अपनी सारी हिस्सेदारी L2 में स्थानांतरित कर दी है, तो आप एथेरियम मेननेट पर तुरंत निकासी कर सकते हैं। +2. पुष्टि के लिए 20 मिनट का इंतजार करें: -## यदि मैं अपना प्रतिनिधिमंडल स्थानांतरित नहीं करता तो क्या मेरे पुरस्कारों पर नकारात्मक प्रभाव पड़ सकता है? +3. आर्बिट्रम पर स्थानांतरण की पुष्टि करें: -यह अनुमान है कि भविष्य में सभी नेटवर्क भागीदारी आर्बिट्रम वन में स्थानांतरित हो जाएगी। +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## मेरे प्रतिनिधिमंडल का L2 में स्थानांतरण पूरा होने में कितना समय लगेगा? +### क्या मेरा सम्पूर्ण स्थानांतरण हो जाएगा? -प्रतिनिधिमंडल स्थानांतरण के लिए 20 मिनट की पुष्टि आवश्यक है। कृपया ध्यान दें कि 20 मिनट की अवधि के बाद, आपको वापस आना होगा और 7 दिनों के भीतर स्थानांतरण प्रक्रिया के चरण 3 को पूरा करना होगा। यदि आप ऐसा करने में विफल रहते हैं, तो आपका प्रतिनिधिमंडल खो सकता है। ध्यान दें कि अधिकांश मामलों में स्थानांतरण उपकरण आपके लिए यह चरण स्वचालित रूप से पूरा कर देगा। असफल ऑटो-प्रयास के मामले में, आपको इसे मैन्युअल रूप से पूरा करना होगा। यदि इस प्रक्रिया के दौरान कोई समस्या आती है, तो चिंता न करें, हम मदद के लिए यहां मौजूद रहेंगे: support@thegraph.com पर या [Discord](https://discord.gg/graphprotocol) पर हमसे संपर्क करें। +आप यह चुन सकते हैं कि आपके स्टेक का कितना हिस्सा हस्तांतरित करना चाहते हैं। अगर आप एक साथ अपने पूरे स्टेक को हस्तांतरित करने का चयन करते हैं, तो आपको पहले किसी भी खुली आवंटन को बंद करने की आवश्यकता होगी। -## यदि मैं जीआरटी वेस्टिंग अनुबंध/टोकन लॉक वॉलेट का उपयोग कर रहा हूं तो क्या मैं अपना प्रतिनिधिमंडल स्थानांतरित कर सकता हूं? +अगर आप अपने स्टेक के भिन्न भिन्न हिस्सों को कई लेन-देनों के माध्यम से हस्तांतरित करने की योजना बना रहे हैं, तो आपको हमेशा एक ही प्राप्ति कर्ता पता निर्दिष्ट करना आवश्यक होगा। -हाँ! 
प्रक्रिया थोड़ी अलग है क्योंकि निहित अनुबंध एल2 गैस के भुगतान के लिए आवश्यक ईटीएच को अग्रेषित नहीं कर सकते हैं, इसलिए आपको इसे पहले ही जमा करना होगा। यदि आपका निहित अनुबंध पूरी तरह से निहित नहीं है, तो आपको पहले L2 पर एक समकक्ष निहित अनुबंध शुरू करना होगा और केवल इस L2 निहित अनुबंध में प्रतिनिधिमंडल को स्थानांतरित करने में सक्षम होंगे। जब आप वेस्टिंग लॉक वॉलेट का उपयोग करके एक्सप्लोरर से कनेक्ट होते हैं तो एक्सप्लोरर पर यूआई इस प्रक्रिया में आपका मार्गदर्शन कर सकता है। +\*ध्यान दें: आपको पहली बार ट्रांसफर टूल का उपयोग करते समय L2 पर न्यूनतम स्टेक आवश्यकताओं को पूरा करना होगा। इंडेक्सर्स को न्यूनतम 100k GRT भेजना होगा (जब वे पहली बार इस संचालन को बुलाते हैं)। यदि L1 पर स्टेक का कुछ हिस्सा छोड़ रहे हैं, तो यह भी 100k GRT की न्यूनतम आवश्यकता को पूरा करना चाहिए और आपके खुले आवंटनों को कवर करने के लिए पर्याप्त होना चाहिए (आपके डेलीगेशन के साथ मिलकर)। -## क्या कोई प्रतिनिधिमंडल कर है? +### आर्बिट्रममध्ये माझे स्टेक हस्तांतरण निश्चित करण्यासाठी मला किती वेळ लागेल? -नहीं, L2 पर प्राप्त टोकन निर्दिष्ट प्रतिनिधि की ओर से प्रतिनिधिमंडल कर लगाए बिना निर्दिष्ट इंडेक्सर को सौंप दिए जाते हैं। +\*\*\* आपको आर्बिट्रम पर स्थानांतरण पूरा करने के लिए अपने लेन-देन की पुष्टि करनी होगी। यह कदम 7 दिन के भीतर पूरा किया जाना चाहिए, अन्यथा स्थान खो सकता है। -## वेस्टिंग अनुबंध स्थानांतरण +### अगर मेरे पास खुली आवंटनें हैं तो क्या होगा? -## मैं अपना वेस्टिंग अनुबंध कैसे स्थानांतरित करूं? +अगर आप अपना पूरा स्टेक नहीं भेज रहे हैं, तो L2 ट्रांसफर टूल यह सत्यापित करेगा कि Ethereum मुख्यनेट में कम से कम 100k GRT बचे रहें और आपके बचे हुए स्टेक और डेलीगेशन खुले आवंटनों को कवर करने के लिए पर्याप्त हैं। अगर आपका GRT शेष राशि न्यूनतम + खुले आवंटनों को कवर नहीं करता है, तो आपको खुले आवंटनों को बंद करने की आवश्यकता हो सकती है। -अपना वेस्टिंग ट्रांसफर करने के लिए, आपको निम्नलिखित चरण पूरे करने होंगे: +### हस्तांतरण साधने वापरून, हस्तांतरण करण्यापूर्वी Ethereum mainnet वर अनस्टेक करण्यासाठी 28 दिवस प्रतीक्षा करणे आवश्यक आहे का? -1. एथेरियम मेननेट पर वेस्टिंग हस्तांतरण आरंभ करें +नहीं, आप अपने स्टेक को तुरंत L2 पर हस्तांतरित कर सकते हैं, ट्रांसफर टूल का उपयोग करने से पहले अनस्थित करने और प्रतीक्षा करने की कोई आवश्यकता नहीं है। 28-दिन की प्रतीक्षा केवल तब लागू होती है अगर आप अपने स्टेक को वापस अपने वॉलेट पर, Ethereum मुख्यनेट या L2 पर वापस लेना चाहते हैं। -2. पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +### स्थान को स्थानांतरित करने में कितना समय लगेगा? -3. आर्बिट्रम पर निहित हस्तांतरण की पुष्टि करें +L2 ट्रान्स्फर टूलला तुमचा स्टेक हस्तांतरित करणे पूर्ण होण्यासाठी अंदाजे 20 मिनिटे लागतील. -## यदि मैं केवल आंशिक रूप से निहित हूं तो मैं अपने निहितीकरण अनुबंध को कैसे स्थानांतरित करूं? +### मी माझा हिस्सा हस्तांतरित करण्यापूर्वी मला आर्बिट्रमवर इंडेक्स करावे लागेल का? -1. ट्रांसफर टूल के संबंधित अनुबंध में कुछ ईथर जमा करें (यूआई एक संभावित राशि का अनुमान लगा सकता है)। +आप पहले ही अपने स्टेक को प्रभावी रूप से हस्तांतरित कर सकते हैं, लेकिन आप L2 पर किसी भी पुरस्कार का दावा नहीं कर पाएंगे जब तक आप L2 पर सबग्राफ्स को आवंटित नहीं करते हैं, उन्हें इंडेक्स करते हैं, और पॉइंट ऑफ इंटरेस्ट (POI) प्रस्तुत नहीं करते। -2. ट्रांसफर टूल अनुबंध के माध्यम से कुछ लॉक्ड GRT को L2 में भेजें, L2 वेस्टिंग लॉक को प्रारंभ करने के लिए। इसके साथ ही उनका L2 लाभार्थी पता भी सेट हो जाएगा। +### मी माझा इंडेक्सिंग स्टेक हलवण्यापूर्वी प्रतिनिधी त्यांचे प्रतिनिधी हलवू शकतात का? -3. L1स्टेकिंग अनुबंध में "लॉक" ट्रांसफर टूल फ़ंक्शंस के माध्यम से अपनी हिस्सेदारी/प्रतिनिधिमंडल L2 को भेजें। +नहीं, डेलीगेटर्स को अपने डेलीगेटेड GRT को Arbitrum पर हस्तांतरित करने के लिए, उनके डेलीगेट कर रहे इंडेक्सर को L2 पर सक्रिय होना चाहिए। -4. 
ट्रांसफर टूल अनुबंध से किसी भी शेष ईटीएच को वापस ले लें +### मी GRT वेस्टिंग कॉन्ट्रॅक्ट/टोकन लॉक वॉलेट वापरत असल्यास मी माझा स्टेक ट्रान्सफर करू शकतो का? -## यदि मैं पूरी तरह से निहित हूं तो मैं अपने निहित अनुबंध को कैसे स्थानांतरित करूं? +हाँ! प्रक्रिया कुछ अलग है, क्योंकि वेस्टिंग कॉन्ट्रैक्ट्स L2 गैस के लिए आवश्यक ETH को फॉरवर्ड नहीं कर सकते, इसलिए आपको पहले ही इसे जमा करना होगा। यदि आपका वेस्टिंग कॉन्ट्रैक्ट पूरी तरह से वेस्ट नहीं होता है, तो आपको पहले L2 पर एक समकक्ष वेस्टिंग कॉन्ट्रैक्ट को प्रारंभ करना होगा और आपको केवल इस L2 वेस्टिंग कॉन्ट्रैक्ट पर स्टेक को हस्तांतरित करने की अनुमति होगी। जब आप वेस्टिंग लॉक वॉलेट का उपयोग करके एक्सप्लोरर से जुड़ते हैं, तो यह प्रक्रिया आपको एक्सप्लोरर पर कनेक्ट करने के लिए गाइड कर सकती है। + +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? + +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ + +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? + +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. + +## निहित अनुबंध स्थानांतरण + +### मैं अपना वेस्टिंग अनुबंध कैसे ट्रांसफर करूं? + +अपने वेस्टिंग को ट्रांसफर करने के लिए, आपको निम्नलिखित चरणों को पूरा करने की आवश्यकता होगी: + +1. ईथिरियम मेननेट पर वेस्टिंग ट्रांसफर प्रारंभ करें। + +2. पुष्टि के लिए 20 मिनट का इंतजार करें: + +3. आर्बिट्रम पर वेस्टिंग ट्रांसफर की पुष्टि करें। + +### यदि मैं केवल आंशिक रूप से निहित हूं तो मैं अपना निहित अनुबंध कैसे स्थानांतरित करूं? + + + +1. स्थानांतरण उपकरण अनुबंध में कुछ ईटीएच जमा करें (यूआई उचित राशि का अनुमान लगाने में मदद कर सकता है) + +2. L2 वेस्टिंग लॉक को आरंभ करने के लिए ट्रांसफर टूल अनुबंध के माध्यम से कुछ लॉक किए गए GRT को L2 पर भेजें। इससे उनका L2 लाभार्थी पता भी सेट हो जाएगा। + +3. "लॉक्ड" स्थानांतरण उपकरण कार्यों के माध्यम से उनका स्टेक/सहायकता एल2 में "L1Staking" अनुबंध के माध्यम से भेजें। + +4. ट्रांसफर उपकरण अनुबंध से शेषित ETH को निकालें। + +### यदि मैं पूरी तरह से निहित हूं तो मैं अपने निहित अनुबंध को कैसे स्थानांतरित करूं? + + जो लोग पूरी तरह से निहित हैं, उनके लिए प्रक्रिया समान है: -1. ट्रांसफर टूल के संबंधित अनुबंध में कुछ ईथर जमा करें (यूआई एक संभावित राशि का अनुमान लगा सकता है)। +1. स्थानांतरण उपकरण अनुबंध में कुछ ईटीएच जमा करें (यूआई उचित राशि का अनुमान लगाने में मदद कर सकता है) -2. ट्रांसफर टूल अनुबंध पर कॉल के साथ अपना L2 पता सेट करें +2. ट्रांसफर उपकरण अनुबंध को कॉल करके अपना L2 पता सेट करें। -3. L1 स्टेकिंग अनुबंध में "लॉक" ट्रांसफर टूल फ़ंक्शंस के माध्यम से अपनी हिस्सेदारी/प्रतिनिधिमंडल L2 को भेजें। +3. आपके स्टेक/डिलीगेशन को "लॉक्ड" ट्रांसफर टूल के फ़ंक्शन के माध्यम से एल1 स्टेकिंग अनुबंध में एल2 में भेज दें। -4. ट्रांसफर टूल अनुबंध से किसी भी शेष ईटीएच को वापस ले लें +4. ट्रांसफर उपकरण अनुबंध से शेषित ETH को निकालें। -## क्या मैं अपना अंतर्निहित अनुबंध आर्बिट्रम में स्थानांतरित कर सकता हूँ? +### क्या मैं अपने निहित अनुबंध को आर्बिट्रम में स्थानांतरित कर सकता हूँ? 
-आप अपने निहित अनुबंध के जीआरटी शेष को एल2 में निहित अनुबंध में स्थानांतरित कर सकते हैं। यह आपके निहित अनुबंध से हिस्सेदारी या प्रतिनिधिमंडल को एल2 में स्थानांतरित करने के लिए एक शर्त है। निहित अनुबंध में जीआरटी की एक गैर-शून्य राशि होनी चाहिए (यदि आवश्यक हो तो आप इसमें 1 जीआरटी जैसी छोटी राशि स्थानांतरित कर सकते हैं)। +आप अपने वेस्टिंग कॉन्ट्रैक्ट के GRT शेष राशि को L2 में एक वेस्टिंग कॉन्ट्रैक्ट में हस्तांतरित कर सकते हैं। यह आपके वेस्टिंग कॉन्ट्रैक्ट से L2 पर स्टेक या डेलीगेशन को हस्तांतरित करने के लिए एक पूर्विशेष आवश्यकता है। वेस्टिंग कॉन्ट्रैक्ट को एक गैरशून्य राशि GRT को धारण करना चाहिए (यदि आवश्यक हो तो आप उसमें 1 GRT जैसी छोटी राशि को हस्तांतरित कर सकते हैं)। -जब आप अपने L1 निहित अनुबंध से GRT को L2 में स्थानांतरित करते हैं, तो आप भेजने के लिए राशि चुन सकते हैं और आप इसे जितनी बार चाहें उतनी बार कर सकते हैं। जब आप पहली बार जीआरटी स्थानांतरित करेंगे तो एल2 निहित अनुबंध आरंभ किया जाएगा। +जब आप अपने L1 वेस्टिंग कॉन्ट्रैक्ट से GRT को L2 में हस्तांतरित करते हैं, तो आप भेजने के लिए राशि का चयन कर सकते हैं और आप इसे जितनी बार चाहें कर सकते हैं। L2 वेस्टिंग कॉन्ट्रैक्ट को पहली बार जब आप GRT को हस्तांतरित करते हैं, तो इसे आरंभ किया जाएगा | -स्थानांतरण एक ट्रांसफर टूल का उपयोग करके किया जाता है जो आपके वेस्टिंग अनुबंध खाते से जुड़ने पर आपके एक्सप्लोरर प्रोफ़ाइल पर दिखाई देगा। +स्थानांतरण एक ट्रांसफर टूल का उपयोग करके किया जाता है जो आपके एक्सप्लोरर प्रोफ़ाइल पर तब दिखाई देगा जब आप वेस्टिंग अनुबंध खाते से जुड़ेंगे। -कृपया ध्यान दें कि जब आपका अनुबंध पूरी तरह से निहित हो जाता है, तब तक आप अपनी निहित समयसीमा के अंत तक L2 निहित अनुबंध से जीआरटी जारी/निकासी नहीं कर पाएंगे। यदि आपको उससे पहले जीआरटी जारी करने की आवश्यकता है, तो आप उस उद्देश्य के लिए उपलब्ध किसी अन्य स्थानांतरण उपकरण का उपयोग करके जीआरटी को एल1 निहित अनुबंध में वापस स्थानांतरित कर सकते हैं। +कृपया ध्यान दें कि आप अपने L2 वेस्टिंग कॉन्ट्रैक्ट से GRT को जब तक आपका कॉन्ट्रैक्ट पूरी तरह से वेस्ट नहीं होता है, तब तक रिलीज/विद्रोहित करने/निकालने के लिए नहीं कर सकेंगे। यदि आपको इससे पहले GRT को रिलीज करने की आवश्यकता है, तो आप उसके लिए उपलब्ध एक अन्य ट्रांसफर टूल का उपयोग करके GRT को L1 वेस्टिंग कॉन्ट्रैक्ट में वापस हस्तांतरित कर सकते हैं। -यदि आपने किसी भी निहित अनुबंध शेष को L2 में स्थानांतरित नहीं किया है, और आपका निहित अनुबंध पूरी तरह से निहित है, तो आपको अपने निहित अनुबंध को L2 में स्थानांतरित नहीं करना चाहिए। इसके बजाय, आप L2 वॉलेट पता सेट करने के लिए ट्रांसफर टूल का उपयोग कर सकते हैं, और अपनी हिस्सेदारी या प्रतिनिधिमंडल को सीधे L2 पर इस नियमित वॉलेट में स्थानांतरित कर सकते हैं। +अगर आपने किसी भी वेस्टिंग कॉन्ट्रैक्ट शेष राशि को L2 में हस्तांतरित नहीं किया है, और आपका वेस्टिंग कॉन्ट्रैक्ट पूरी तरह से वेस्ट हो गया है, तो आपको अपने वेस्टिंग कॉन्ट्रैक्ट को L2 में हस्तांतरित नहीं करना चाहिए। इसके बजाय, आप ट्रांसफर टूल का उपयोग करके एक L2 वॉलेट पता सेट करने और इस सामान्य वॉलेट पर L2 पर स्टेक या डेलीगेशन को सीधे हस्तांतरित कर सकते हैं। -## मैं मेननेट पर हिस्सेदारी के लिए अपने निहित अनुबंध का उपयोग कर रहा हूं। क्या मैं अपनी हिस्सेदारी आर्बिट्रम में स्थानांतरित कर सकता हूँ? 
+### मैं मेननेट पर हिस्सेदारी के लिए अपने निहित अनुबंध का उपयोग कर रहा हूं। क्या मैं अपनी हिस्सेदारी आर्बिट्रम में स्थानांतरित कर सकता हूँ | -हाँ, लेकिन यदि आपका अनुबंध अभी भी निहित है, तो आप केवल हिस्सेदारी हस्तांतरित कर सकते हैं ताकि यह आपके L2 निहित अनुबंध के स्वामित्व में हो। आपको पहले एक्सप्लोरर पर वेस्टिंग कॉन्ट्रैक्ट ट्रांसफर टूल का उपयोग करके कुछ जीआरटी बैलेंस ट्रांसफर करके इस एल2 कॉन्ट्रैक्ट को आरंभ करना होगा। यदि आपका अनुबंध पूरी तरह से निहित है, तो आप अपनी हिस्सेदारी एल2 में किसी भी पते पर स्थानांतरित कर सकते हैं, लेकिन आपको इसे पहले से सेट करना होगा और एल2 गैस के भुगतान के लिए एल2 ट्रांसफर टूल के लिए कुछ ईटीएच जमा करना होगा। +हां, लेकिन यदि आपका कॉन्ट्रैक्ट अब भी वेस्टिंग कर रहा है, तो आप केवल उस स्टेक को हस्तांतरित कर सकते हैं ताकि यह आपके L2 वेस्टिंग कॉन्ट्रैक्ट के स्वामित्व में हो। आपको सबसे पहले इस L2 कॉन्ट्रैक्ट को इनिशियलाइज़ करना होगा, एक्सप्लोरर पर वेस्टिंग कॉन्ट्रैक्ट ट्रांसफर टूल का उपयोग करके कुछ GRT बैलेंस को हस्तांतरित करके। यदि आपका कॉन्ट्रैक्ट पूरी तरह से वेस्टेड है, तो आप अपने स्टेक को L2 में किसी भी पते पर हस्तांतरित कर सकते हैं, लेकिन आपको पहले से सेट करना होगा और L2 गैस के लिए कुछ ETH जमा करना होगा ताकि L2 ट्रांसफर टूल इसे भुगतान कर सके। -## मैं मेननेट पर प्रत्यायोजित करने के लिए अपने निहित अनुबंध का उपयोग कर रहा हूं। क्या मैं अपने प्रतिनिधिमंडलों को आर्बिट्रम में स्थानांतरित कर सकता हूँ? +### मैं अपने वेस्टिंग अनुबंध का उपयोग मुख्य नेट पर प्रतिनियुक्ति के लिए कर रहा हूँ। क्या मैं अपनी प्रतिनियुक्तियों को आर्बिट्रम पर स्थानांतरित कर सकता हूँ? -हाँ, लेकिन यदि आपका अनुबंध अभी भी निहित है, तो आप केवल प्रतिनिधिमंडल को स्थानांतरित कर सकते हैं ताकि यह आपके L2 निहित अनुबंध के स्वामित्व में हो। आपको पहले एक्सप्लोरर पर वेस्टिंग कॉन्ट्रैक्ट ट्रांसफर टूल का उपयोग करके कुछ जीआरटी बैलेंस ट्रांसफर करके इस एल2 कॉन्ट्रैक्ट को आरंभ करना होगा। यदि आपका अनुबंध पूरी तरह से निहित है, तो आप अपने प्रतिनिधिमंडल को एल2 में किसी भी पते पर स्थानांतरित कर सकते हैं, लेकिन आपको इसे पहले से सेट करना होगा और एल2 गैस के भुगतान के लिए एल2 ट्रांसफर टूल के लिए कुछ ईटीएच जमा करना होगा। +हाँ, लेकिन यदि आपका कॉन्ट्रैक्ट अब भी वेस्टिंग कर रहा है, तो आप केवल डेलीगेशन को हस्तांतरित कर सकते हैं ताकि यह आपके L2 वेस्टिंग कॉन्ट्रैक्ट के स्वामित्व में हो। आपको सबसे पहले इस L2 कॉन्ट्रैक्ट को इनिशियलाइज़ करना होगा, एक्सप्लोरर पर वेस्टिंग कॉन्ट्रैक्ट ट्रांसफर टूल का उपयोग करके कुछ GRT बैलेंस को हस्तांतरित करके। अगर आपका कॉन्ट्रैक्ट पूरी तरह से वेस्ट हो गया है, तो आप अपनी डेलीगेशन को L2 में किसी भी पते पर हस्तांतरित कर सकते हैं, लेकिन आपको पहले से सेट करना होगा और L2 गैस के लिए कुछ ETH जमा करना होगा ताकि L2 ट्रांसफर टूल इसे भुगतान कर सके। -## क्या मैं L2 पर अपने वेस्टिंग अनुबंध के लिए एक अलग लाभार्थी निर्दिष्ट कर सकता हूँ? +### क्या मैं L2 पर अपने निहित अनुबंध के लिए एक अलग लाभार्थी निर्दिष्ट कर सकता हूँ? 
-हां, जब आप पहली बार बैलेंस ट्रांसफर करते हैं और अपना L2 निहित अनुबंध सेट करते हैं, तो आप एक L2 लाभार्थी निर्दिष्ट कर सकते हैं। सुनिश्चित करें कि यह लाभार्थी एक वॉलेट है जो आर्बिट्रम वन पर लेनदेन कर सकता है, यानी यह एक ईओए या आर्बिट्रम वन पर तैनात मल्टीसिग होना चाहिए। +हाँ, पहली बार जब आप बैलेंस को हस्तांतरित करते हैं और अपना L2 वेस्टिंग कॉन्ट्रैक्ट सेट करते हैं, तो आप एक L2 लाभार्थी को निर्दिष्ट कर सकते हैं। सुनिश्चित करें कि इस लाभार्थी का एक वॉलेट है जो Arbitrum One पर लेनदेन कर सकता है, अर्थात् यह EOA या Arbitrum One पर डिप्लॉय किया गया मल्टिसिग होना चाहिए। -यदि आपका अनुबंध सम्पूर्ण रूप से प्रदान किया गया है, तो आप L2 पर एक वेस्टिंग अनुबंध स्थापित नहीं करेंगे; बल्कि, आप एक L2 वॉलेट पता सेट करेंगे और यह आरबिट्रम पर आपके स्टेक या डिलीगेशन के लिए ग्राहक वॉलेट होगा। +अगर आपका कॉन्ट्रैक्ट पूरी तरह से वेस्ट हो गया है, तो आप L2 पर वेस्टिंग कॉन्ट्रैक्ट सेट नहीं करेंगे; इसके बजाय, आप एक L2 वॉलेट पता सेट करेंगे और यह आपके स्टेक या डेलीगेशन के लिए Arbitrum पर विद्वेषण करने वाले वॉलेट का भुगतान करने वाला होगा। -## मेरा अनुबंध पूर्णतः निहित है. क्या मैं अपनी हिस्सेदारी या प्रतिनिधिमंडल को किसी अन्य पते पर स्थानांतरित कर सकता हूं जो L2 निहित अनुबंध नहीं है? +### मेरा अनुबंध पूरी तरह से निहित है. क्या मैं अपनी हिस्सेदारी या प्रतिनिधिमंडल को किसी अन्य पते पर स्थानांतरित कर सकता हूं जो L2 निहित अनुबंध नहीं है? -हाँ। यदि आपने किसी भी निहित अनुबंध शेष को L2 में स्थानांतरित नहीं किया है, और आपका निहित अनुबंध पूरी तरह से निहित है, तो आपको अपने निहित अनुबंध को L2 में स्थानांतरित नहीं करना चाहिए। इसके बजाय, आप L2 वॉलेट पता सेट करने के लिए ट्रांसफर टूल का उपयोग कर सकते हैं, और अपनी हिस्सेदारी या प्रतिनिधिमंडल को सीधे L2 पर इस नियमित वॉलेट में स्थानांतरित कर सकते हैं। +हाँ। अगर आपने L2 में कोई वेस्टिंग कॉन्ट्रैक्ट शेष राशि हस्तांतरित नहीं की है और आपका वेस्टिंग कॉन्ट्रैक्ट पूरी तरह से वेस्ट हो गया है, तो आपको अपने वेस्टिंग कॉन्ट्रैक्ट को L2 में हस्तांतरित नहीं करना चाहिए। इसके बजाय, आप ट्रांसफर टूल्स का उपयोग करके एक L2 वॉलेट पता सेट करने के लिए और स्टेक या डेलीगेशन को इस सामान्य वॉलेट पर सीधे L2 पर हस्तांतरित कर सकते हैं। यह आपको अपनी हिस्सेदारी या प्रतिनिधिमंडल को किसी भी L2 पते पर स्थानांतरित करने की अनुमति देता है। -## मेरा निहितीकरण अनुबंध अभी भी निहित है। मैं अपने निहित अनुबंध शेष को एल2 में कैसे स्थानांतरित करूं? +### मेरा वेस्टिंग कॉन्ट्रैक्ट अभी भी प्रतिनियुक्ति के अधीन है। मैं अपने वेस्टिंग कॉन्ट्रैक्ट शेष को L2 में कैसे स्थानांतरित करूँ? -ये चरण केवल तभी लागू होते हैं यदि आपका अनुबंध अभी भी निहित है, या यदि आपने पहले इस प्रक्रिया का उपयोग किया है जब आपका अनुबंध अभी भी निहित था। +ये कदम केवल तब लागू होते हैं अगर आपका कॉन्ट्रैक्ट अब भी वेस्टिंग कर रहा है, या यदि आपने अपने कॉन्ट्रैक्ट का उपयोग किया है जब आपका कॉन्ट्रैक्ट अब भी वेस्टिंग कर रहा था। -अपने वेस्टिंग अनुबंध को L2 पर स्थानांतरित करने के लिए, आप ट्रांसफर टूल का उपयोग करके किसी भी GRT शेष राशि को L2 पर भेजेंगे, जिससे आपका L2 वेस्टिंग अनुबंध प्रारंभ हो जाएगा: +अपने वेस्टिंग कॉन्ट्रैक्ट को L2 में हस्तांतरित करने के लिए, आप ट्रांसफर टूल्स का उपयोग करके L2 में किसी भी GRT बैलेंस को हस्तांतरित करेंगे, जिससे आपका L2 वेस्टिंग कॉन्ट्रैक्ट आरंभ होगा: -1. ट्रांसफर टूल अनुबंध में कुछ ETH जमा करें (इसका उपयोग L2 गैस के भुगतान के लिए किया जाएगा) +1. कृपया स्थानांतरण उपकरण अनुबंध में कुछ ईथर जमा करें (यह एल2 गैस के लिए भुगतान करने के लिए उपयोग किया जाएगा)। -2. वेस्टिंग अनुबंध तक प्रोटोकॉल पहुंच रद्द करें (अगले चरण के लिए आवश्यक) +2. निहित अनुबंध तक प्रोटोकॉल पहुंच रद्द करें (अगले चरण के लिए आवश्यक) -3. निहित अनुबंध तक प्रोटोकॉल पहुंच प्रदान करें (आपके अनुबंध को स्थानांतरण टूल के साथ इंटरैक्ट करने की अनुमति देगा) +3. 
वेस्टिंग अनुबंध को प्रोटोकॉल एक्सेस दें (यह आपके अनुबंध को स्थानांतरण उपकरण के साथ इंटरैक्ट करने की अनुमति देगा)। -4. एक L2 लाभार्थी पता निर्दिष्ट करें\* और एथेरियम मेननेट पर शेष राशि हस्तांतरण आरंभ करें +4. एक एल2 लाभार्थी पता निर्दिष्ट करें\* और इथेरियम मेननेट पर शेष राशि का ट्रांसफर प्रारंभ करें। -5. पुष्टि के लिए 20 मिनट तक प्रतीक्षा करें +5. पुष्टि के लिए 20 मिनट का इंतजार करें: 6. L2 पर बैलेंस ट्रांसफर की पुष्टि करें -\*यदि आवश्यक हो - यानी आप अनुबंध पते का उपयोग कर रहे हैं। +\*यदि आवश्यक हो - अर्थात्, आप एक कॉन्ट्रैक्ट पते का उपयोग कर रहे हैं | + +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. -\*\*\*\*आर्बिट्रम पर शेष राशि हस्तांतरण को पूरा करने के लिए आपको अपने लेनदेन की पुष्टि करनी होगी। यह चरण 7 दिनों के भीतर पूरा किया जाना चाहिए अन्यथा शेष राशि खो सकती है। ज्यादातर मामलों में, यह चरण स्वचालित रूप से चलेगा, लेकिन आर्बिट्रम पर गैस की कीमत बढ़ने पर मैन्युअल पुष्टि की आवश्यकता हो सकती है। यदि इस प्रक्रिया के दौरान कोई समस्या आती है, तो मदद के लिए संसाधन मौजूद होंगे: support@thegraph.com पर या [Discord](https://discord.gg/graphprotocol) पर सहायता से संपर्क करें। +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. -## क्या मैं अपने निहित अनुबंध को वापस L1 पर ले जा सकता हूँ? +### क्या मैं अपने निहित अनुबंध को वापस L1 पर ले जा सकता हूँ? 
-ऐसा करने की कोई आवश्यकता नहीं है क्योंकि आपका निहित अनुबंध अभी भी L1 में है। जब आप स्थानांतरण टूल का उपयोग करते हैं, तो आप बस L2 में एक नया अनुबंध बनाते हैं जो आपके L1 निहित अनुबंध से जुड़ा होता है, और आप दोनों के बीच जीआरटी को आगे और पीछे भेज सकते हैं। +ऐसा करने की कोई आवश्यकता नहीं है क्योंकि आपका वेस्टिंग कॉन्ट्रैक्ट अब भी L1 में है। जब आप ट्रांसफर टूल्स का उपयोग करते हैं, तो आप सिर्फ एक नया कॉन्ट्रैक्ट L2 में बनाते हैं जो आपके L1 वेस्टिंग कॉन्ट्रैक्ट से जुड़ा होता है, और आप उन दोनों के बीच GRT को आगे-पीछे भेज सकते हैं। -## शुरुआत में मुझे अपने निहित अनुबंध को आगे बढ़ाने की आवश्यकता क्यों है? +### पहले से मेरे वेस्टिंग कॉन्ट्रैक्ट को क्यों स्थानांतरित करने की आवश्यकता है? -आपको एक L2 निहित अनुबंध स्थापित करने की आवश्यकता है ताकि यह खाता L2 पर आपकी हिस्सेदारी या प्रतिनिधिमंडल का मालिक बन सके। अन्यथा, आपके पास निहित अनुबंध से "बचने" के बिना हिस्सेदारी/प्रतिनिधिमंडल को L2 में स्थानांतरित करने का कोई रास्ता नहीं होगा। +आपको एक L2 वेस्टिंग कॉन्ट्रैक्ट सेट करने की आवश्यकता है ताकि इस खाता को L2 पर आपके स्टेक या डेलीगेशन का स्वामित्व हो सके। अन्यथा, आपके पास वेस्टिंग कॉन्ट्रैक्ट को "बाहर निकलने" के बिना स्टेक/डेलीगेशन को L2 पर हस्तांतरित करने का कोई तरीका नहीं होगा। -## यदि मैं अपने अनुबंध को केवल आंशिक रूप से निहित होने पर भुनाने का प्रयास करता हूं तो क्या होगा? क्या यह संभव है? +### अगर मैं कोशिश करता हूँ कि जब मेरा अनुबंध केवल आंशिक रूप से प्रतिनियुक्त है, तो क्या होता है? क्या यह संभव है? -यह कोई संभावना नहीं है. आप धनराशि को वापस L1 में ले जा सकते हैं और वहां से निकाल सकते हैं। +यह एक संभावना नहीं है। आप फंड्स को L1 में वापस स्थानांतरित कर सकते हैं और वहां से उन्हें निकाल सकते हैं। -## यदि मैं अपने निहित अनुबंध को एल2 में स्थानांतरित नहीं करना चाहता तो क्या होगा? +### अगर मुझे अपने वेस्टिंग कॉन्ट्रैक्ट को L2 में स्थानांतरित नहीं करना है तो क्या होगा? -आप L1 पर स्टेकिंग/प्रतिनिधित्व जारी रख सकते हैं। समय के साथ, आप आर्बिट्रम पर प्रोटोकॉल स्केल के रूप में पुरस्कारों को सक्षम करने के लिए एल2 पर जाने पर विचार कर सकते हैं। ध्यान दें कि ये स्थानांतरण उपकरण निहित अनुबंधों के लिए हैं जिन्हें प्रोटोकॉल में दांव लगाने और सौंपने की अनुमति है। यदि आपका अनुबंध हिस्सेदारी या प्रत्यायोजन की अनुमति नहीं देता है, या रद्द करने योग्य है, तो कोई स्थानांतरण उपकरण उपलब्ध नहीं है। उपलब्ध होने पर भी आप L1 से अपना GRT निकाल सकेंगे। +आप L1 पर स्टेकिंग/डिलीगेशन करते रह सकते हैं। समय के साथ, प्रोटोकॉल Arbitrum पर स्केल करने के साथ-साथ आपको L2 पर रिवॉर्ड्स सक्षम करने के लिए वहां माइग्रेट करने का विचार करना चाहिए। यह ध्यान दें कि ये हस्तांतरण उपकरण उन वेस्टिंग कॉन्ट्रैक्ट्स के लिए हैं जिन्हें प्रोटोकॉल में स्टेकिंग और डिलीगेशन की अनुमति है। यदि आपका कॉन्ट्रैक्ट स्टेकिंग या डिलीगेशन की अनुमति नहीं देता है, या वापस लिया जा सकता है, तो उपकरण उपलब्ध नहीं होगा। आपको फिर भी जब भी उपलब्ध हो, अपने GRT को L1 से निकाल सकेंगे। diff --git a/website/pages/hi/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/hi/arbitrum/l2-transfer-tools-guide.mdx index 16c77fb0152d..8315c648f6a0 100644 --- a/website/pages/hi/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/hi/arbitrum/l2-transfer-tools-guide.mdx @@ -1,144 +1,144 @@ --- -title: L2 स्थानांतरण उपकरण गाइड +title: L2 Transfer Tools Guide --- -> L2 स्थानांतरण उपकरण अभी तक जारी नहीं किये गये हैं। इनके 2023 की गर्मियों में उपलब्ध होने की उम्मीद है। +The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. 
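
Several of the steps described further down (confirming a transfer on L2, publishing the subgraph, finalizing vesting-contract or curation transfers) assume the connected wallet can transact on Arbitrum One. The Explorer and Subgraph Studio transfer tools prompt for this network switch themselves; purely as an illustration of what that prompt does, here is a minimal TypeScript sketch that asks a browser wallet (e.g. MetaMask) to switch to Arbitrum One. The chain parameters below (chain ID 42161, public RPC, Arbiscan explorer) are the commonly published values and should be verified against official Arbitrum documentation before use — this is not part of the transfer tools themselves.

```typescript
// Hypothetical helper, not part of The Graph's transfer tools:
// request that an injected EIP-1193 wallet switch to Arbitrum One.
const ARBITRUM_ONE = {
  chainId: '0xa4b1', // 42161 in hex
  chainName: 'Arbitrum One',
  nativeCurrency: { name: 'Ether', symbol: 'ETH', decimals: 18 },
  rpcUrls: ['https://arb1.arbitrum.io/rpc'], // commonly published public RPC; verify before use
  blockExplorerUrls: ['https://arbiscan.io'],
};

async function switchToArbitrumOne(): Promise<void> {
  const ethereum = (window as any).ethereum; // injected provider (e.g. MetaMask)
  if (!ethereum) {
    throw new Error('No browser wallet detected');
  }

  try {
    // EIP-3326: ask the wallet to switch the active network
    await ethereum.request({
      method: 'wallet_switchEthereumChain',
      params: [{ chainId: ARBITRUM_ONE.chainId }],
    });
  } catch (error: any) {
    // Error code 4902 conventionally means the chain is not added yet;
    // fall back to EIP-3085 and add it, which also switches to it.
    if (error?.code === 4902) {
      await ethereum.request({
        method: 'wallet_addEthereumChain',
        params: [ARBITRUM_ONE],
      });
    } else {
      throw error;
    }
  }
}
```

In practice you would call `switchToArbitrumOne()` before executing any of the L2-side confirmation transactions; if you use the transfer tool UIs as described below, the equivalent prompt appears automatically.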
-ग्राफ़ ने आर्बिट्रम वन पर L2 पर जाना आसान बना दिया है। प्रत्येक प्रोटोकॉल प्रतिभागी के लिए, सभी नेटवर्क प्रतिभागियों के लिए L2 में स्थानांतरण को निर्बाध बनाने के लिए L2 ट्रांसफर टूल का एक सेट है। आप जो स्थानांतरित कर रहे हैं उसके आधार पर इन उपकरणों के लिए आपको कुछ विशिष्ट चरणों का पालन करना होगा। - -इन उपकरणों के बारे में कुछ सामान्य प्रश्नों के उत्तर [L2 ट्रांसफर टूल्स FAQ](/arbitrum/l2-transfer-tools-faq) में दिए गए हैं। अक्सर पूछे जाने वाले प्रश्नों में टूल का उपयोग कैसे करें, वे कैसे काम करते हैं, और उनका उपयोग करते समय ध्यान में रखने योग्य बातों की गहन व्याख्या होती है। +Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## अपने सबग्राफ को आर्बिट्रम (L2) में कैसे स्थानांतरित करें + + ## अपने सबग्राफ़ स्थानांतरित करने के लाभ -ग्राफ़ का समुदाय और मुख्य डेवलपर पिछले वर्ष से आर्बिट्रम में जाने की तैयारी कर रहे हैं। आर्बिट्रम, एक परत 2 या "एल2" ब्लॉकचेन, एथेरियम से सुरक्षा प्राप्त करता है लेकिन काफी कम गैस शुल्क प्रदान करता है। +ग्राफ़ का समुदाय और मुख्य डेवलपर पिछले वर्ष से आर्बिट्रम में जाने की तैयारी कर रहे हैं (https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)। आर्बिट्रम, एक परत 2 या "एल2" ब्लॉकचेन, एथेरियम से सुरक्षा प्राप्त करता है लेकिन काफी कम गैस शुल्क प्रदान करता है। -जब आप अपने सबग्राफ को दी गई ग्राफ नेटवर्क पर प्रकाशित या अपग्रेड करते हैं, तो आप प्रोटोकॉल पर स्मार्ट कॉन्ट्रैक्ट्स के साथ इंटरैक्ट कर रहे होते हैं और इसके लिए ईथरियम (ETH) का उपयोग करके गैस के लिए भुगतान करने की आवश्यकता होती है। लेकिन अर्बिट्रम पर अपने सबग्राफ को ले जाने से भविष्य में आपके सबग्राफ के लिए अपडेट करने के लिए बहुत कम गैस शुल्क देने की आवश्यकता होगी। निचले शुल्क और एल2 पर क्युरेशन बॉन्डिंग कर्व भी यह आसान बनाते हैं कि अन्य क्युरेटर्स आपके सबग्राफ पर क्युरेट करें, जिससे आपके सबग्राफ पर इंडेक्सर्स को अधिक इन्सेंटिव मिलता है। यह कम कॉस्ट वातावरण इंडेक्सर्स को आपके सबग्राफ को इंडेक्स और सर्व करने के लिए सस्ते बनाता है। अर्बिट्रम पर इंडेक्सिंग इन्सेंटिव आगामी महीनों में बढ़ रहे हैं, जबकि ईथरियम मेननेट पर कम हो रहे हैं, इसलिए अधिक से अधिक इंडेक्सर्स अपने स्टेक को स्थानांतरित करेंगे और एल2 पर अपने ऑपरेशन स्थापित करेंगे। +जब आप अपने सबग्राफ को दी ग्राफ नेटवर्क पर प्रकाशित या अपग्रेड करते हैं, तो आप प्रोटोकॉल पर स्मार्ट कॉन्ट्रैक्ट्स के साथ इंटरैक्ट कर रहे होते हैं और इसके लिए ईथरियम (ETH) का उपयोग करके गैस के लिए भुगतान करना आवश्यक होता है। अपने सबग्राफ को Arbitrum पर स्थानांतरित करके, आपके सबग्राफ के किसी भी भविष्य के अपडेट के लिए गैस शुल्क बहुत कम होगा। कम शुल्कों के साथ, और L2 पर क्यूरेशन बॉन्डिंग कर्व्स फ्लैट होने के कारण, अन्य क्यूरेटर्स को भी आपके सबग्राफ पर क्यूरेट करने में आसानी होगी, जिससे आपके सबग्राफ पर इंडेक्सर्स के लिए पुरस्कार बढ़ेंगे। इस कम लागत वाले वातावरण से इंडेक्सर्स को आपके सबग्राफ को इंडेक्स करने और सेव करने में सस्तापन होगा। आगामी महीनों में Arbitrum पर इंडेक्सिंग पुरस्कार बढ़ जाएगा और ईथिरियम मेननेट पर कम हो जाएगा, इसलिए और भी अधिक इंडेक्सर्स अपने स्टेक को स्थानांतरित करेंगे और उनके संचालन को L2 पर सेटअप करेंगे। -## यह समझना कि सिग्नल, आपके एल1 सबग्राफ और क्वेरी यूआरएल के साथ क्या होता है +## सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ जो होता है, उसे समझने की प्रक्रिया: -एक सबग्राफ को आर्बिट्रम में स्थानांतरित करने के लिए आर्बिट्रम जीआरटी ब्रिज का उपयोग किया जाता है, जो बदले में सबग्राफ को एल2 पर भेजने के लिए देशी आर्बिट्रम ब्रिज का उपयोग करता है। "ट्रांसफर" मेननेट पर सबग्राफ को हटा देगा और ब्रिज का उपयोग करके L2 पर सबग्राफ को फिर से बनाने के लिए जानकारी भेज देगा। इसमें सबग्राफ 
स्वामी का संकेतित जीआरटी भी शामिल होगा, जो स्थानांतरण को स्वीकार करने के लिए पुल के लिए शून्य से अधिक होना चाहिए। +सबग्राफ को Arbitrum पर स्थानांतरित करने के लिए Arbitrum GRT ब्रिज का उपयोग किया जाता है, जिसमें प्राकृतिक अर्बिट्रम ब्रिज का उपयोग उस सबग्राफ को L2 पर भेजने के लिए किया जाता है। "स्थानांतरण" मुख्यनेट पर सबग्राफ को विलीन कर देगा और सबग्राफ को L2 पर ब्रिज का उपयोग करके पुनर्सृजन करने की जानकारी भेजेगा। इसमें सबग्राफ के मालिक के सिगनल किए गए GRT को भी शामिल किया जाएगा, जिसका प्रोत्साहन ब्रिज को स्थानांतरण स्वीकार करने के लिए शून्य से अधिक होना चाहिए। -जब आप सबग्राफ को स्थानांतरित करना चुनते हैं, तो यह सबग्राफ के सभी क्यूरेशन सिग्नल को जीआरटी में बदल देगा। यह मेननेट पर सबग्राफ को "बहिष्कृत" करने के बराबर है। आपके क्यूरेशन के अनुरूप जीआरटी को सबग्राफ के साथ एल2 पर भेजा जाएगा, जहां उनका उपयोग आपकी ओर से सिग्नल बनाने के लिए किया जाएगा। +जब आप सबग्राफ को स्थानांतरित करने का विकल्प चुनते हैं, तो यह सबग्राफ के सभी क्यूरेशन सिग्नल को GRT में रूपांतरित कर देगा। इसका मतलब है कि मुख्यनेट पर सबग्राफ को "विलीन" किया जाएगा। आपके क्यूरेशन के अनुरूप GRT को सबग्राफ के साथ L2 पर भेजा जाएगा, जहां वे आपके प्रतिनिधित्व में सिग्नल निर्माण करने के लिए उपयोग होंगे। -अन्य क्यूरेटर अपने अंशित GRT को वापस लेने का विकल्प चुन सकते हैं, या उसे L2 पर स्थानांतरित करके उसी सबग्राफ पर सिग्नल मिन्ट करने का विकल्प चुन सकते हैं। यदि कोई सबग्राफ स्वामी अपने सबग्राफ को L2 पर स्थानांतरित नहीं करता है और इसे एक कॉन्ट्रैक्ट कॉल के माध्यम से मैन्युअली विलोपित करता है, तो क्यूरेटर को सूचित किया जाएगा और वे अपनी समायोजन को वापस ले सकेंगे। +अन्य क्यूरेटर्स का विकल्प होता है कि क्या वे अपने अंशिक GRT को विद्वेष्टित करें या उसे भी L2 पर स्थानांतरित करें ताकि वे उसी सबग्राफ पर सिग्नल निर्मित कर सकें। अगर कोई सबग्राफ का मालिक अपने सबग्राफ को L2 पर स्थानांतरित नहीं करता है और अधिकारिक रूप से उसे एक कॉन्ट्रैक्ट कॉल के माध्यम से विलीन करता है, तो क्यूरेटर्स को सूचित किया जाएगा और उन्हें उनके क्यूरेशन को वापस लेने का अधिकार होगा। -सबग्राफ स्थानांतरित होते ही, क्योंकि सभी समायोजन GRT में परिवर्तित हो जाते हैं, इंडेक्सर्स को सबग्राफ को अनुक्रमण करने के लिए अब और पुरस्कार प्राप्त नहीं होंगे। हालांकि, इंडेक्सर्स होंगे जो 1) स्थानांतरित सबग्राफ को 24 घंटे तक सेवा करते रहेंगे, और 2) तुरंत L2 पर सबग्राफ को अनुक्रमण करना शुरू कर देंगे। क्योंकि इन इंडेक्सर्स के पास पहले से ही सबग्राफ अनुक्रमित होते हैं, इसलिए सबग्राफ को सिंक होने का इंतजार करने की कोई आवश्यकता नहीं होगी, और L2 सबग्राफ का क्वेरी करना लगभग तत्काल होगा। +सबग्राफ को स्थानांतरित करते ही, क्यूरेशन को GRT में रूपांतरित किये जाने के कारण इंडेक्सर्स को सबग्राफ को इंडेक्स करने के लिए अब और रिवॉर्ड नहीं मिलेगा। हालांकि, ऐसे इंडेक्सर्स भी होंगे जो 1) स्थानांतरित सबग्राफ की सेवा 24 घंटे तक करते रहेंगे, और 2) तुरंत L2 पर सबग्राफ को इंडेक्स करने की प्रारंभ करेंगे। क्योंकि इन इंडेक्सर्स ने पहले से ही सबग्राफ को इंडेक्स किया होता है, इसलिए सबग्राफ को सिंक करने की प्रतीक्षा करने की आवश्यकता नहीं होगी, और L2 सबग्राफ को तकनीकी रूप से तुरंत क्वेरी किया जा सकेगा। -L2 सबग्राफ के लिए क्वेरी एक अलग URL ( arbitrum-gateway.thegraph.com पर) पर की जानी चाहिए, लेकिन L1 URL कम से कम 48 घंटे तक काम करता रहेगा। उसके बाद, L1 गेटवे क्वेरी को L2 गेटवे को अग्रेषित करेगा (कुछ समय के लिए), लेकिन इससे लैटेंसी बढ़ेगी, इसलिए संभाविततः जितना जल्दी हो सके, अपने सभी क्वेरी को नए URL पर स्विच करने की सिफारिश की जाती है। +L2 सबग्राफ के क्वेरी को एक विभिन्न URL पर ( 'arbitrum-gateway.thegraph.com' पर) किया जाना चाहिए, लेकिन L1 URL काम करना जारी रखेगा कम से कम 48 घंटे तक। उसके बाद, L1 गेटवे क्वेरी को L2 गेटवे के लिए आगे प्रेषित करेगा (कुछ समय के लिए), लेकिन इससे लैटेंसी बढ़ सकती है, इसलिए संभावना है कि आपको सभी क्वेरी को 
नए URL पर जल्द से जल्द स्विच कर लेने की सिफारिश की जाए। ## अपना L2 वॉलेट चुनना -जब आपने मुख्यनेट पर अपने सबग्राफ को प्रकाशित किया था, तो आपने एक कनेक्टेड वॉलेट का उपयोग करके सबग्राफ बनाया था, और यह वॉलेट उस सबग्राफ को प्रतिनिधित्व करने वाला एनएफटी आवंटित है और आपको अपडेट प्रकाशित करने की अनुमति देता है। +जब आपने मुख्यनेट पर अपने सबग्राफ को प्रकाशित किया, तो आपने एक कनेक्टेड वॉलेट का उपयोग सबग्राफ बनाने के लिए किया और यह वॉलेट वह NFT स्वामित्व करता है जो इस सबग्राफ का प्रतिनिधित्व करता है और आपको अपडेट प्रकाशित करने की अनुमति देता है। -सबग्राफ को आर्बिट्रम में स्थानांतरित करते समय, आप एक अलग वॉलेट चुन सकते हैं जो L2 पर इस सबग्राफ एनएफटी का मालिक होगा। +सबग्राफ को Arbitrum पर स्थानांतरित करते समय, आप एक विभिन्न वॉलेट का चयन कर सकते हैं जो L2 पर इस सबग्राफ NFT का स्वामित्व करेगा। -यदि आप "साधारण" वॉलेट जैसे MetaMask (एक Externally Owned Account या EOA, अर्थात एक वॉलेट जो स्मार्ट कॉन्ट्रैक्ट नहीं है) का उपयोग कर रहे हैं, तो यह वैकल्पिक है और सलाह दी जाती है कि आप L1 में जैसा ही मालिक पता रखें। +अगर आप "सामान्य" वॉलेट जैसे MetaMask का उपयोग कर रहे हैं (जिसे बाह्यिक अधिकारित खाता या EOA कहा जाता है, यानी एक वॉलेट जो स्मार्ट कॉन्ट्रैक्ट नहीं है), तो यह वैकल्पिक है और सिफारिश की जाती है कि आप एल1 में के समान मालिक पता बनाए रखें। -यदि आप स्मार्ट कॉन्ट्रैक्ट वॉलेट, जैसे मल्टिसिग (उदाहरण के लिए, एक सेफ) का उपयोग कर रहे हैं, तो एक अलग L2 वॉलेट पता चुनना अनिवार्य है, क्योंकि यह अकाउंट संभाविततः केवल मेननेट पर मौजूद है और आप इस वॉलेट का उपयोग करके आरबिट्रम पर लेनदेन नहीं कर पाएंगे। यदि आप स्मार्ट कॉन्ट्रैक्ट वॉलेट या मल्टिसिग का उपयोग जारी रखना चाहते हैं, तो आरबिट्रम पर एक नया वॉलेट बनाएं और अपने सबग्राफ के L2 मालिक के रूप में इसका पता उपयोग करें। +अगर आप स्मार्ट कॉन्ट्रैक्ट वॉलेट का उपयोग कर रहे हैं, जैसे कि मल्टिसिग (उदाहरणस्वरूप, एक सेफ), तो एक विभिन्न L2 वॉलेट पता चुनना अनिवार्य है, क्योंकि यह बहुत संभावना है कि यह खाता केवल मुख्यनेट पर मौजूद है और आप इस वॉलेट का उपयोग अर्बिट्रम पर लेन-देन करने के लिए नहीं कर सकते हैं। अगर आप स्मार्ट कॉन्ट्रैक्ट वॉलेट या मल्टिसिग का उपयोग करना चाहते हैं, तो अर्बिट्रम पर एक नया वॉलेट बनाएं और उसका पता अपने सबग्राफ के L2 मालिक के रूप में उपयोग करें। -**उस वॉलेट पते का उपयोग करना बहुत महत्वपूर्ण है जिसे आप नियंत्रित करते हैं, और जो आर्बिट्रम पर लेनदेन कर सकता है। अन्यथा, सबग्राफ़ खो जाएगा और पुनर्प्राप्त नहीं किया जा सकेगा।** +**यह महत्वपूर्ण है कि आप एक वॉलेट पता का उपयोग करें जिस पर आपका नियंत्रण है, और जिससे आप अर्बिट्रम पर लेन-देन कर सकते हैं। अन्यथा, सबग्राफ हानि हो जाएगा और उसे पुनः प्राप्त नहीं किया जा सकता।** -## स्थानांतरण की तैयारी: कुछ ईटीएच को पाटना +## स्थानांतरण के लिए तैयारी: कुछ ETH को ब्रिज करना -सबग्राफ को स्थानांतरित करने के लिए, एक ट्रांजेक्शन को ब्रिज के माध्यम से भेजा जाता है, और फिर आर्बिट्रम पर एक और ट्रांजेक्शन को कार्यान्वित किया जाता है। पहली ट्रांजेक्शन में मुख्यनेट पर ETH का उपयोग किया जाता है, और L2 पर संदेश प्राप्त होने पर गैस के भुगतान के लिए कुछ ETH शामिल किए जाते हैं। हालांकि, यदि यह गैस पर्याप्त नहीं है, तो आपको ट्रांजेक्शन को पुनः प्रयास करना होगा और L2 पर सीधे गैस के लिए भुगतान करना होगा (यह "चरण 3: स्थानांतरण की पुष्टि करना" है नीचे)। इस चरण को स्थानांतरण शुरू करने के 7 दिनों के भीतर कार्यान्वित किया जाना चाहिए। इसके अलावा, दूसरी ट्रांजेक्शन ("चरण 4: L2 पर स्थानांतरण पूरा करना") को सीधे आर्बिट्रम पर किया जाएगा। इन कारणों से, आपको एक आर्बिट्रम वॉलेट पर कुछ ETH की आवश्यकता होगी। यदि आप मल्टिसिग या स्मार्ट कॉन्ट्रैक्ट खाता का उपयोग कर रहे हैं, तो ETH को आम (EOA) वॉलेट में होनी चाहिए, जिसे आप ट्रांजेक्शनों को कार्यान्वित करने के लिए उपयोग कर रहे हैं, मल्टिसिग वॉलेट स्वयं पर नहीं। +सबग्राफ को स्थानांतरित करने में एक लेन-देन को ब्रिज के 
माध्यम से भेजना शामिल है, और फिर अर्बिट्रम पर एक और लेन-देन को प्रारंभ करना। पहली लेन-देन मुख्यनेट पर ETH का उपयोग करता है, और जब संदेश L2 पर प्राप्त होता है, तो गैस के भुगतान के लिए कुछ ETH को शामिल करता है। हालांकि, अगर यह गैस पर्याप्त नहीं होता है, तो आपको लेन-देन को पुनः प्रयास करना होगा और गैस के लिए सीधे L2 पर भुगतान करना होगा (यह "चरण 3: स्थानांतरण की पुष्टि करना" है, नीचे दिए गए हैं)। यह कदम **स्थानांतरण की प्रारंभिक करने के 7 दिनों के भीतर कार्यान्वित किया जाना चाहिए।** इसके अलावा, दूसरी लेन-देन ("चरण 4: L2 पर स्थानांतरण को समाप्त करना") को सीधे अर्बिट्रम पर किया जाएगा। इन कारणों से, आपको किसी एक Arbitrum वॉलेट पर कुछ ETH की आवश्यकता होगी। यदि आप मल्टिसिग या स्मार्ट कॉन्ट्रैक्ट खाता का उपयोग कर रहे हैं, तो ETH को उन्हीं सामान्य (EOA) वॉलेट में होना चाहिए जिसका आप लेन-देन कार्यान्वित करने के लिए उपयोग कर रहे हैं, मल्टिसिग वॉलेट में नहीं। -आप कुछ एक्सचेंज पर ETH खरीद सकते हैं और इसे सीधे आर्बिट्रम पर निकाल सकते हैं, या आप आर्बिट्रम ब्रिज का उपयोग करके मुख्यनेट वॉलेट से L2 में ETH भेज सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io).। आर्बिट्रम पर गैस शुल्क कम होने के कारण, आपको केवल थोड़ी सी राशि की आवश्यकता होगी। आपको अपनी ट्रांजेक्शन को मंजूरी प्राप्त करने के लिए निम्न सीमा (जैसे 0.01 ETH) से शुरू करने की सिफारिश की जाती है। +आप कुछ एक्सचेंजों पर ETH खरीद सकते हैं और उसे सीधे अर्बिट्रम में विद्वेष्टित कर सकते हैं, या आप अर्बिट्रम ब्रिज का उपयोग करके ETH को मुख्यनेट वॉलेट से L2 में भेज सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io)। क्योंकि अर्बिट्रम पर गैस शुल्क कम होते हैं, आपको केवल थोड़ी सी राशि की आवश्यकता होनी चाहिए। यह सिफारिश की जाती है कि आप अपने लेन-देन को स्वीकृति प्राप्त करने के लिए कम थ्रेशहोल्ड (उदाहरणस्वरूप 0.01 ETH) से प्रारंभ करें। ## सबग्राफ ट्रांसफर टूल ढूँढना -जब आप सबग्राफ स्टूडियो पर अपने सबग्राफ के पेज को देख रहे हों तो आप एल2 ट्रांसफर टूल पा सकते हैं: +आप सबग्राफ स्टूडियो पर अपने सबग्राफ के पेज को देखते समय L2 ट्रांसफर टूल पा सकते हैं: ![transfer tool](/img/L2-transfer-tool1.png) -यह एक्सप्लोरर पर भी उपलब्ध है यदि आप उस वॉलेट से जुड़े हैं जिसके पास एक सबग्राफ है और एक्सप्लोरर पर उस सबग्राफ के पेज पर: +यह भी उपलब्ध है एक्सप्लोरर पर अगर आप ऐसे वॉलेट से कनेक्ट हो जाते हैं जिसका सबग्राफ का स्वामित्व है, और उस सबग्राफ के पेज पर एक्सप्लोरर पर: ![Transferring to L2](/img/transferToL2.png) -ट्रांसफर टू एल2 बटन पर क्लिक करने से ट्रांसफर टूल खुल जाएगा जहां आप ट्रांसफर प्रक्रिया शुरू कर सकते हैं। +"L2 पर स्थानांतरित करें" बटन पर क्लिक करने से स्थानांतरण टूल खुल जाएगा, जहाँ आप स्थानांतरण प्रक्रिया को आरंभ कर सकते हैं | -## चरण 1: स्थानांतरण प्रारंभ करना +## चरण 1: स्थानांतरण की प्रारंभिक कदम -स्थानांतरण शुरू करने से पहले, आपको निर्धारित करना होगा कि L2 पर सबग्राफ के मालिक पते कौन होगा (ऊपर "L2 वॉलेट चुनना" देखें) और यह मजबूती से अनुशंसित किया जाता है कि आपके पास आर्बिट्रम पर गैस के लिए कुछ ETH पहले से ही ब्रिज किया हुआ हो (ऊपर "स्थानांतरण के लिए तैयारी: कुछ ETH को ब्रिज करना" देखें)। +स्थानांतरण की प्रारंभिक करने से पहले, आपको तय करना होगा कि L2 पर सबग्राफ का स्वामित्व किस पते पर होगा (ऊपर "अपने L2 वॉलेट का चयन करना" देखें), और यह मजबूती से सिफारिश की जाती है कि अर्बिट्रम पर गैस के लिए कुछ ETH ब्रिज कर दिया गया हो (ऊपर "स्थानांतरण की तैयारी: कुछ ETH को ब्रिज करना" देखें)। -यहां ध्यान दें कि सबग्राफ को स्थानांतरित करने के लिए, सबग्राफ के मालिक खाते के साथ सबग्राफ पर कुछ अवैध राशि होना आवश्यक है; यदि आपने सबग्राफ पर संकेत किया नहीं है, तो आपको क्यूरेशन में थोड़ी सी राशि जोड़नी होगी (1 GRT जैसी छोटी राशि काफी होगी)। +यह भी ध्यान दें कि सबग्राफ को स्थानांतरित करने के लिए सबग्राफ के साथ एक ही खाते में कोई भी सिग्नल की गई राशि होनी चाहिए; अगर आपने सबग्राफ 
पर सिग्नल नहीं किया है तो आपको थोड़ी सी क्यूरेशन जोड़नी होगी (एक छोटी राशि जैसे 1 GRT जोड़ना काफी होगा)। -Transfer Tool खोलने के बाद, आप "प्राप्ति वॉलेट पता" के फ़ील्ड में L2 वॉलेट पता दर्ज कर सकेंगे - यहां सही पता दर्ज किया गया होने की सुनिश्चित करें। सबग्राफ को स्थानांतरित करने के लिए क्लिक करने पर आपको अपने वॉलेट पर ट्रांजेक्शन कार्यान्वित करने के लिए कहेगा (ध्यान दें कि L2 गैस के भुगतान के लिए कुछ ETH मान्यता शामिल है)। यह स्थानांतरण शुरू करेगा और आपके L1 सबग्राफ को विलोपित करेगा (जानकारी के लिए "संकेत के साथ क्या होता है, आपके L1 सबग्राफ और क्वेरी URL के बारे में अधिक विवरण के लिए ऊपर देखें)। +स्थानांतरण टूल खोलने के बाद, आपको "प्राप्ति वॉलेट पता" फ़ील्ड में L2 वॉलेट पता दर्ज करने की अनुमति मिलेगी - **सुनिश्चित करें कि आपने यहाँ सही पता डाला है।** "सबग्राफ स्थानांतरित करें" पर क्लिक करने से आपको अपने वॉलेट पर लेन-देन कार्यान्वित करने के लिए प्रोम्प्ट किया जाएगा (ध्यान दें कि L2 गैस के भुगतान के लिए कुछ ETH मान शामिल है)। इससे स्थानांतरण प्रारंभ होगा और आपका L1 सबग्राफ विलीन हो जाएगा (इसके पीछे के प्रक्रिया के बारे में अधिक जानकारी के लिए "सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ क्या होता है की समझ" देखें)। -इस कदम को कार्यान्वित करने की स्थिति में, सुनिश्चित करें कि आप 7 दिनों से कम समय में चरण 3 को पूरा करें, इससे अन्यथा सबग्राफ और आपका सिग्नल GRT हो सकता हैं। यह आरबिट्रम पर L1-L2 संदेश आपस में कैसे काम करते हैं के कारण है: ब्रिज के माध्यम से भेजे गए संदेश "पुनः प्रयास किए जा सकने वाले टिकट" होते हैं जिन्हें 7 दिनों के भीतर कार्यान्वित किया जाना चाहिए, और प्रारंभिक कार्यान्वयन को पुनः प्रयास करने की आवश्यकता हो सकती है अगर आरबिट्रम पर गैस कीमत में तेजी हो। +इस कदम को कार्यान्वित करते समय, **सुनिश्चित करें कि आप 7 दिन से कम समय में चरण 3 को पूरा करने जाते हैं, अन्यथा सबग्राफ और आपका सिग्नल GRT हानि हो सकते हैं।** यह अर्बिट्रम पर L1-L2 संदेशिकरण कैसे काम करता है के कारण है: ब्रिज के माध्यम से भेजे गए संदेश "पुनः प्रयासनीय टिकट" होते हैं जिन्हें 7 दिन के भीतर कार्यान्वित किया जाना चाहिए, और पहले कार्यान्वयन में अगर अर्बिट्रम पर गैस की मूल्य में वृद्धि होती है तो पुनः प्रयास की आवश्यकता हो सकती है। ![Start the trnasfer to L2](/img/startTransferL2.png) -## चरण 2: सबग्राफ के L2 पर पहुंचने की प्रतीक्षा की जा रही है +## चरण 2: सबग्राफ को L2 तक पहुँचने की प्रतीक्षा करना -जब आप स्थानांतरण की प्रक्रिया शुरू करते हैं, तो आपके L1 सबग्राफ को L2 पर भेजने वाला संदेश आरबिट्रम ब्रिज के माध्यम से प्रसारित होना चाहिए। इसमें लगभग 20 मिनट का समय लगता है (ब्रिज मुख्यनेट ब्लॉक की प्रतीक्षा करता है जिसमें लेनदेन समाप्त हो जाता है और संभावित चेन रिओर्ग से सुरक्षित होता है)। +जब आप स्थानांतरण की प्रारंभिक करते हैं, तो आपके L1 सबग्राफ को L2 भेजने वाले संदेश को अर्बिट्रम ब्रिज के माध्यम से प्रसारित होना चाहिए। यह लगभग 20 मिनट लगता है (ब्रिज मुख्यनेट ब्लॉक को "सुरक्षित" बनाने के लिए प्रत्येक लेनदेन के मुख्यनेट ब्लॉक के लिए प्रतीक्षा करता है, जिसमें संभावित चेन रीआर्ग से बचाया जा सकता है)। -एक बार यह प्रतीक्षा समय समाप्त हो जाने पर, आर्बिट्रम L2 अनुबंधों पर स्थानांतरण को स्वचालित रूप से निष्पादित करने का प्रयास करेगा। +इस प्रतीक्षा काल के बाद, अर्बिट्रम ल2 अनुबंधों पर स्थानांतरण को स्वतः कार्यान्वित करने का प्रयास करेगा। ![Wait screen](/img/screenshotOfWaitScreenL2.png) ## चरण 3: स्थानांतरण की पुष्टि करना -अधिकांश मामलों में, चरण 1 में शामिल ल2 गैस काफी होने के कारण यह कदम स्वचालित रूप से कार्यान्वित हो जाएगा, जिससे आरबिट्रम कॉन्ट्रैक्ट पर सबग्राफ प्राप्त करने वाली ट्रांजेक्शन कार्यान्वित हो सकती है। हालांकि, कुछ मामलों में, आरबिट्रम पर गैस कीमतों में एक वृद्धि के कारण यह स्वचालित कार्यान्वयन विफल हो सकता है। इस मामले में, आपके सबग्राफ को एल 2 पर भेजने वाले "टिकट" को पेंडिंग माना जाएगा और 7 दिनों के भीतर पुनः 
प्रयास की आवश्यकता होगी। +अधिकांश मामलों में, यह कदम स्वचालित रूप से क्रियान्वित हो जाएगा क्योंकि स्टेप 1 में शामिल एल2 गैस काफी होता है ताकि आर्बिट्रम कॉन्ट्रैक्ट पर सबग्राफ प्राप्त करने वाले लेनदेन को क्रियान्वित किया जा सके। हालांकि, कुछ मामलों में, यह संभावित है कि आर्बिट्रम पर गैस मूल्यों में एक उछाल के कारण यह स्वचालित क्रियान्वित होने में विफल हो सकता है। इस मामले में, जो "टिकट" आपके सबग्राफ को एल2 पर भेजता है, वह लंबित हो जाएगा और 7 दिनों के भीतर पुनः प्रयास की आवश्यकता होगी। -यदि यह मामला है, तो आपको एक एल2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसमें आर्बिट्रम पर कुछ ईटीएच है, अपने वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करें, और लेनदेन का पुनः प्रयास करने के लिए "ट्रांसफर की पुष्टि करें" पर क्लिक करें। +यदि यह मामला आपके साथ होता है, तो आपको ऐसे L2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसमें आर्बिट्रम पर कुछ ETH हो, अपनी वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करना होगा, और "पुनः प्रायोग की पुष्टि करें" पर क्लिक करके लेन-देन को पुनः प्रयास करने के लिए। ![Confirm the transfer to L2](/img/confirmTransferToL2.png) ## चरण 4: L2 पर स्थानांतरण समाप्त करना -इस बिंदु पर, आपके सबग्राफ और GRT को आरबिट्रम पर प्राप्ति मिल चुकी है, लेकिन सबग्राफ अभी तक प्रकाशित नहीं हुआ है। आपको प्राप्ति वॉलेट के रूप में चयनित L2 वॉलेट का उपयोग करके आरबिट्रम के लिए वॉलेट नेटवर्क कनेक्ट करना होगा, और "सबग्राफ प्रकाशित करें" पर क्लिक करें। +इस बिंदु पर, आपका सबग्राफ और GRT आर्बिट्रम पर प्राप्त हो चुके हैं, लेकिन सबग्राफ अबतक प्रकाशित नहीं हुआ है। आपको वह एल2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसे आपने प्राप्ति वॉलेट के रूप में चुना है, अपने वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करना होगा, और "पब्लिश सबग्राफ" पर क्लिक करना होगा। ![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) ![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -यह सबग्राफ प्रकाशित करेगा ताकि आर्बिट्रम पर काम करने वाले इंडेक्सर्स इसे परोसना शुरू कर सकें। यह L1 से स्थानांतरित किए गए GRT का उपयोग करके क्यूरेशन सिग्नल भी तैयार करेगा। +इससे सबग्राफ प्रकाशित हो जाएगा ताकि Arbitrum पर काम करने वाले इंडेक्सर उसकी सेवा करना शुरू कर सकें। यह भी उसी GRT का करेशन सिग्नल मिन्ट करेगा जो L1 से स्थानांतरित हुए थे। ## चरण 5: क्वेरी यूआरएल को अपडेट करना -आपका सबग्राफ सफलतापूर्वक आर्बिट्रम में स्थानांतरित कर दिया गया है! सबग्राफ को क्वेरी करने के लिए, नया यूआरएल होगा: +आपकी सबग्राफ सफलतापूर्वक Arbitrum में स्थानांतरित की गई है! 
सबग्राफ का प्रश्न करने के लिए, नया URL होगा: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -ध्यान दें कि आरबिट्रम पर सबग्राफ आईडी मुख्यनेट पर आपके पास होने वाले सबग्राफ आईडी से अलग होगा, लेकिन आप हमेशा इसे एक्सप्लोरर या स्टूडियो पर खोज सकते हैं। ऊपर उल्लिखित ( "संकेत के साथ क्या होता है, आपके L1 सबग्राफ और क्वेरी URL के बारे में समझें") के अनुसार पुराने L1 URL को कुछ समय तक समर्थित किया जाएगा, लेकिन आपको सबग्राफ L2 पर सिंक होने के बाद अपने क्वेरी को नए पते पर स्विच करना चाहिए। +ध्यान दें कि आर्बिट्रम पर सबग्राफ आईडी मुख्यनेट पर जितना भिन्न होगा, लेकिन आप हमेशा इसे एक्सप्लोरर या स्टूडियो पर ढूंढ सकते हैं। जैसा कि पहले उल्लिखित किया गया है ("सिग्नल, आपके L1 सबग्राफ और क्वेरी URL के साथ क्या होता है" देखें), पुराना L1 URL कुछ समय तक समर्थित किया जाएगा, लेकिन आपको सबग्राफ को L2 पर सिंक होने के बाद नए पते पर अपने क्वेरी को स्विच कर देना चाहिए। ## अपने क्यूरेशन को आर्बिट्रम (L2) में कैसे स्थानांतरित करें ## यह समझना कि एल2 में सबग्राफ़ स्थानांतरण पर क्यूरेशन का क्या होता है -जब सबग्राफ के मालिक सबग्राफ को आरबिट्रम पर स्थानांतरित करते हैं, तब सबग्राफ का सभी सिग्नल एक साथ जीआरटी में परिवर्तित हो जाता है। यह "स्वचालित स्थानांतरित" सिग्नल के लिए लागू होता है, अर्थात सबग्राफ संस्करण या डिप्लॉयमेंट के साथ संबंधित नहीं है, लेकिन यह सबसे नवीन संस्करण का पालन करता है। +जब कोई सबग्राफ के मालिक सबग्राफ को आर्बिट्रम पर ट्रांसफर करते हैं, तो सबग्राफ की सभी सिग्नल को एक साथ GRT में रूपांतरित किया जाता है। यह "ऑटो-माइग्रेटेड" सिग्नल के लिए भी लागू होता है, अर्थात्, सिग्नल जो सबग्राफ के किसी वर्शन या डिप्लॉयमेंट के लिए विशिष्ट नहीं है, लेकिन जो सबग्राफ के नवीनतम संस्करण का पालन करते हैं। -यदि सबग्राफ मालिक एल1 में सबग्राफ को विलोपित करता है, तो सिग्नल से जीआरटी में यही परिवर्तन होता है। जब सबग्राफ विलोपित या स्थानांतरित होता है, तो सभी क्यूरेशन सिग्नल साथ ही "जला" दिया जाता है (क्यूरेशन बॉन्डिंग कर्व का उपयोग करके) और परिणित जीआरटी जीएनएस स्मार्ट कॉन्ट्रैक्ट द्वारा रखा जाता है (यह स्मार्ट कॉन्ट्रैक्ट सबग्राफ अपग्रेड और स्वचालित स्थानांतरित सिग्नल का संचालन करता है)। इस प्रकार, सबग्राफ पर प्रत्येक क्यूरेटर का दावा उस जीआरटी के लिए होता है जो उनके द्वारा सबग्राफ के लिए आवंटित शेयरों के अनुपात में होता है। +सिग्नल से GRT में इस परिवर्तन को वही होता है जो होता है अगर सबग्राफ के मालिक ने L1 में सबग्राफ को विच्छेद किया होता। जब सबग्राफ को विच्छेदित या स्थानांतरित किया जाता है, तो सभी क्यूरेशन सिग्नल को समयानुसार "जलाया" जाता है (क्यूरेशन बॉन्डिंग कर्व का उपयोग करके) और परिणित GRT को GNS स्मार्ट कॉन्ट्रैक्ट द्वारा रखा जाता है (जो सबग्राफ अपग्रेड और ऑटो-माइग्रेटेड सिग्नल को संभालता है)। इस प्रकार, उस सबग्राफ के प्रत्येक क्यूरेटर के पास उस GRT का दावा होता है जो उनके लिए उपग्रहानुशासित था। -सबग्राफ स्वामी के अनुरूप इन जीआरटी का एक अंश सबग्राफ के साथ एल2 को भेजा जाता है +इन जीआरटी की एक भाग, जो सबग्राफ के मालिक के संवर्ग के साथ मेल खाते हैं, वह एल2 में भेजे जाते हैं। -इस बिंदु पर, नियंत्रित जीआरटी को अब और क्वेरी शुल्क नहीं मिलेगा, इसलिए क्यूरेटर्स अपनी जीआरटी निकाल सकते हैं या उसे एल2 पर एक ही सबग्राफ में स्थानांतरित कर सकते हैं, जहां इसे नई क्यूरेशन सिग्नल बनाने के लिए उपयोग किया जा सकता है। इसे करने के लिए कोई जल्दी नहीं है क्योंकि जीआरटी अविच्छिन्न रूप से संचयित की जा सकती है और हर कोई अपने शेयर के अनुपात में एक राशि प्राप्त करता है, इसके लिए यह मायने नहीं रखता कि वे इसे कब करते हैं। +इस बिंदु पर, क्यूरेटेड GRT को अब और क्वेरी शुल्क नहीं बढ़ेंगे, इसलिए क्यूरेटर्स अपने GRT को वापस निकालने का चयन कर सकते हैं या उसे L2 पर उसी सबग्राफ में ट्रांसफर कर सकते हैं, जहां उसे नई क्यूरेशन सिग्नल बनाने के लिए उपयोग किया जा सकता है। इसे करने के लिए कोई जल्दी नहीं है क्योंकि GRT को अनिश्चितकाल 
तक रखा जा सकता है और हर कोई अपने हिस्से के अनुपात में एक निश्चित राशि प्राप्त करता है, चाहे वो जब भी करे। ## अपना L2 वॉलेट चुनना -यदि आप अपने क्यूरेटेड जीआरटी को एल2 में स्थानांतरित करने का निर्णय लेते हैं, तो आप एक अलग वॉलेट चुन सकते हैं जिसके पास एल2 पर क्यूरेशन सिग्नल होगा। +अगर आप निर्णय लेते हैं कि आप अपने करेशित जीआरटी को एल2 में स्थानांतरित करना चाहते हैं, तो आप एक ऐसे वॉलेट का चयन कर सकते हैं जो एल2 में करेशन सिग्नल के मालिक होगा। -यदि आप Metamask जैसी "साधारण" वॉलेट का उपयोग कर रहे हैं (एक बाह्य निजी खाता या EOA, अर्थात एक वॉलेट जो एक स्मार्ट कॉन्ट्रैक्ट नहीं है), तो यह वैकल्पिक है और यह सिफारिश की जाती है कि आप एल1 में जैसे ही क्यूरेटर पता रखें। +अगर आप "सामान्य" वॉलेट जैसे Metamask का उपयोग कर रहे हैं (एक बाहरी स्वामित संकेतक खाता या EOA, अर्थात एक वॉलेट जो एक स्मार्ट कॉन्ट्रैक्ट नहीं है), तो यह वैकल्पिक है और सिफारिश की जाती है कि आप वैसे ही करेशक पता रखें जैसा L1 में है। -यदि आप स्मार्ट कॉन्ट्रैक्ट वॉलेट, जैसे मल्टीसिग (उदा। एक सेफ) का उपयोग कर रहे हैं, तो एल2 वॉलेट पते को अलग चुनना अनिवार्य है, क्योंकि यह संभावित है कि यह खाता केवल मुख्यनेट पर मौजूद होता है और आप इस वॉलेट का उपयोग करके आरबिट्रम पर लेनदेन नहीं कर पाएंगे। यदि आप स्मार्ट कॉन्ट्रैक्ट वॉलेट या मल्टीसिग का उपयोग करना जारी रखना चाहते हैं, तो आरबिट्रम पर एक नया वॉलेट बनाएं और इसका पता उपयोग करें जैसा कि एल2 प्राप्ति वॉलेट पता। +अगर आप एक स्मार्ट कॉन्ट्रैक्ट वॉलेट का उपयोग कर रहे हैं, जैसे कि मल्टिसिग (उदाहरणस्वरूप, एक सेफ), तो एक विभिन्न L2 वॉलेट पता चुनना अनिवार्य है, क्योंकि यह संभावना है कि यह खाता केवल मुख्यनेट पर मौजूद होता है और आप इस वॉलेट का उपयोग अर्बिट्रम पर लेनदेन करने के लिए नहीं कर सकेंगे। यदि आप एक स्मार्ट कॉन्ट्रैक्ट वॉलेट या मल्टिसिग का उपयोग करना चाहते हैं, तो अर्बिट्रम पर एक नया वॉलेट बनाएं और इसका पता L2 प्राप्ति वॉलेट पते के रूप में उपयोग करें। -**उस वॉलेट पते का उपयोग करना बहुत महत्वपूर्ण है जिसे आप नियंत्रित करते हैं, और जो आर्बिट्रम पर लेनदेन कर सकता है, अन्यथा अवधि खो जाएगी और पुनर्प्राप्त नहीं की जा सकेगी।** +**यह बहुत महत्वपूर्ण है कि आप एक ऐसे वॉलेट पता का उपयोग करें जिसे आप नियंत्रण में रखते हैं, और जो Arbitrum पर लेन-देन कर सकता है, क्योंकि अन्यथा करेशन खो जाएगा और इसे पुनर्प्राप्त नहीं किया जा सकता है।** ## क्यूरेशन को L2 पर भेजा जा रहा है: चरण 1 -स्थानांतरण शुरू करने से पहले, आपको तय करना होगा कि एल2 पर क्यूरेशन किस पते के स्वामी होगा (ऊपर "अपने एल2 वॉलेट का चयन करें" देखें), और सलाह दी जाती है कि आपके पास पहले से ही आरबिट्रम पर गैस के लिए कुछ ETH होना चाहिए, यदि आपको एल2 पर संदेश के कार्यान्वयन को पुनः प्रयास करने की आवश्यकता हो। आप कुछ एक्सचेंजों से ETH खरीद सकते हैं और इसे सीधे आरबिट्रम पर विद्युत रूप में निकाल सकते हैं, या आप आरबिट्रम ब्रिज का उपयोग करके मुख्यनेट वॉलेट से ETH को एल2 पर भेजने के लिए आरबिट्रम ब्रिज का उपयोग कर सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io) - क्योंकि आरबिट्रम पर गैस शुल्क बहुत कम हैं, इसलिए आपको केवल थोड़ी सी मात्रा में आवश्यकता होगी, जैसे 0.01 ETH, यह काफी होने की संभावना है। +ट्रांसफर शुरू करने से पहले, आपको निर्णय लेना होगा कि L2 पर क्यूरेशन किस पते का स्वामित्व करेगा (ऊपर "अपने L2 वॉलेट का चयन करना" देखें), और संदेश को L2 पर पुनः क्रियान्वित करने की आवश्यकता पड़ने पर आपके पास गैस के लिए पहले से ही कुछ ETH होने की सिफारिश की जाती है। आप कुछ एक्सचेंजों पर ETH खरीद सकते हैं और उसे सीधे Arbitrum पर निकाल सकते हैं, या आप मुख्यनेट वॉलेट से L2 में ETH भेजने के लिए आर्बिट्रम ब्रिज का उपयोग कर सकते हैं: [bridge.arbitrum.io](http://bridge.arbitrum.io) - क्योंकि आर्बिट्रम पर गैस शुल्क इतने कम होते हैं, तो आपको केवल थोड़ी सी राशि की आवश्यकता होगी, जैसे कि 0.01 ETH शायद पर्याप्त हो। -यदि आप जिस सबग्राफ को क्यूरेट करते हैं उसे एल2 में स्थानांतरित कर 
दिया गया है, तो आपको एक्सप्लोरर पर एक संदेश दिखाई देगा जो आपको बताएगा कि आप एक स्थानांतरित सबग्राफ को क्यूरेट कर रहे हैं। +अगर वह सबग्राफ जिसे आप करेशन कर रहे हैं L2 पर स्थानांतरित किया गया है, तो आपको एक संदेश दिखाई देगा जो आपको एक स्थानांतरित सबग्राफ करेशन की जानकारी देगा। -सबग्राफ पेज को देखते समय, आप क्यूरेशन को वापस लेने या स्थानांतरित करने का विकल्प चुन सकते हैं। "ट्रांसफर सिग्नल टू आर्बिट्रम" पर क्लिक करने से ट्रांसफर टूल खुल जाएगा। +सबग्राफ पेज को देखते समय, आपको करेशन को वापस लेने या स्थानांतरित करने का चयन करने का विकल्प होता है। "Transfer Signal to Arbitrum" पर क्लिक करने से स्थानांतरण उपकरण खुल जाता है। ![Transfer signal](/img/transferSignalL2TransferTools.png) -इंस्ट्रूमेंट टूल के बाद, यदि आपके पास कोई ETH नहीं है, तो आप अपनी मशीन में कुछ ETH कनेक्शन के लिए जा सकते हैं। फिर आप "रिसिविंग वैगन पता" को समुद्र तट में L2 में दर्ज कर लें - यहां सही पता दर्ज किया गया है यह सुनिश्चित करें। "सिग्नल ट्रांसफर" पर क्लिक करने पर आपको अपने आइडिया पर ट्रांज़ेक्शन को कार्यान्वित करने के लिए कहा जाएगा (ध्यान दें कि L2 गैस के लिए कुछ ETH शामिल है)। यह नियुक्ति प्रारंभ है। +ट्रांसफर टूल खोलने के बाद, आपको यदि आपके पास कोई ETH नहीं है तो अपने वॉलेट में कुछ ETH जोड़ने के लिए कह सकता है। फिर आप "प्राप्ति वॉलेट पता" क्षेत्र में L2 वॉलेट पता दर्ज कर सकेंगे - **यहाँ सही पता दर्ज किया है यह सुनिश्चित करें।** "सिग्नल ट्रांसफर" पर क्लिक करने पर आपको अपने वॉलेट पर लेनदेन को पूरा करने के लिए कहा जा सकता है (ध्यान दें कि L2 गैस के लिए कुछ ETH मूल्य शामिल होता है); यह ट्रांसफर को प्रारंभ करेगा। -इस कदम को कार्यान्वित करने पर, सुनिश्चित करें कि आप 7 दिन से कम समय में कदम 3 को पूरा करने तक बढ़ें, अन्यथा आपका सिग्नल जीआरटी खो जाएगा। यह आरबिट्रम पर L1-L2 संदेशन कार्य के कारण होता है: ब्रिज के माध्यम से भेजे गए संदेश "पुनः प्रयास योग्य टिकट" होते हैं जिन्हें 7 दिन के भीतर कार्यान्वित किया जाना चाहिए, और यदि आरबिट्रम पर गैस मूल्य में उछाल होती है तो प्रारंभिक कार्यान्वयन को पुनः प्रयास की आवश्यकता हो सकती है। +अगर आप इस कदम को पूरा करते हैं, ध्यान दें कि 7 दिनों से कम समय में स्टेप 3 पूरा करना आवश्यक है, अन्यथा आपका सिग्नल GRT खो जाएगा। यह आर्बिट्रम पर L1-L2 संदेशन काम कैसे करता है के कारण है: जो संदेश ब्रिज के माध्यम से भेजे जाते हैं, वे "पुनर्प्रयासी टिकट" होते हैं जिन्हें 7 दिनों के भीतर क्रियान्वित किया जाना चाहिए, और प्रारंभिक क्रियान्वयन के लिए यदि आर्बिट्रम पर गैस मूल्य में वृद्धि होती है, तो पुनर्प्रयास की आवश्यकता हो सकती है। ## क्यूरेशन को L2 पर भेजा जा रहा है: चरण 2 @@ -146,20 +146,20 @@ Transfer Tool खोलने के बाद, आप "प्राप्ति ![Send signal to L2](/img/sendingCurationToL2Step2First.png) -जब आप स्थानांतरण को शुरू करते हैं, जो आपके L1 क्यूरेशन को L2 में भेजता है, उस संदेश को आरबिट्रम ब्रिज के माध्यम से प्रसारित होना चाहिए। इसमें लगभग 20 मिनट का समय लगता है (ब्रिज मुख्यनेट ब्लॉक के प्रसंस्करण को "सुरक्षित" मानने के लिए संभावित चेन रीऑर्ग से पहले इंतजार करता है)। +जब आप ट्रांसफर को प्रारंभ करते हैं, आपके L1 क्यूरेशन को L2 में भेजने वाले संदेश को आर्बिट्रम ब्रिज के माध्यम से प्रसारित होने की आवश्यकता होती है। यह लगभग 20 मिनट लेता है (ब्रिज मुख्यनेट ब्लॉक को देखता है जिसमें लेनदेन शामिल है, और संभावित चेन रीआर्ग से "सुरक्षित" होता है)। -एक बार यह प्रतीक्षा समय समाप्त हो जाने पर, आर्बिट्रम L2 अनुबंधों पर स्थानांतरण को स्वचालित रूप से निष्पादित करने का प्रयास करेगा। +इस प्रतीक्षा काल के बाद, अर्बिट्रम ल2 अनुबंधों पर स्थानांतरण को स्वतः कार्यान्वित करने का प्रयास करेगा। ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) ## क्यूरेशन को L2 पर भेजा जा रहा है: चरण 3 -अधिकांश मामलों में, यह कदम स्वतः कार्यान्वित हो जाएगा क्योंकि स्टेप 1 में शामिल एल2 गैस कार्यान्वयन के लिए पर्याप्त माना जाना 
चाहिए जो आरबिट्रम अनुबंधों पर क्षेत्राधिकार प्राप्त करने वाले लेनदेन को कार्यान्वित करने के लिए पर्याप्त होगा। हालांकि, कुछ मामलों में, आरबिट्रम पर गैस मूल्य में एक तेजी संभव है जो इस स्वतः कार्यान्वयन को असफल बना सकती है। इस मामले में, आपकी क्यूरेशन को L2 पर भेजने वाली "टिकट" लंबित रहेगी और 7 दिन के भीतर पुनः प्रयास की आवश्यकता होगी। +अधिकांश मामलों में, यह कदम स्वतः क्रियान्वित हो जाएगा क्योंकि स्टेप 1 में शामिल L2 गैस की पर्याप्तता होनी चाहिए जो सौदे पर क्यूरेशन प्राप्ति क्रियान्वित करता है। हालांकि, कुछ मामलों में, आर्बिट्रम पर गैस मूल्यों में वृद्धि के कारण यह स्वतः क्रियान्वित नहीं हो सकता है। इस मामले में, जो "टिकट" आपके क्यूरेशन को L2 पर भेजता है, वह पैंडिंग हो जाएगा और 7 दिनों के भीतर पुनः प्रयास की आवश्यकता होगी। -यदि यह मामला है, तो आपको एक एल2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसमें आर्बिट्रम पर कुछ ईटीएच है, अपने वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करें, और लेनदेन का पुनः प्रयास करने के लिए "ट्रांसफर की पुष्टि करें" पर क्लिक करें। +यदि यह मामला आपके साथ होता है, तो आपको ऐसे L2 वॉलेट का उपयोग करके कनेक्ट करना होगा जिसमें आर्बिट्रम पर कुछ ETH हो, अपनी वॉलेट नेटवर्क को आर्बिट्रम पर स्विच करना होगा, और "पुनः प्रायोग की पुष्टि करें" पर क्लिक करके लेन-देन को पुनः प्रयास करने के लिए। ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) -## L1 पर क्यूरेशन ले रहा हूँ +## L1 पर अपना कार्यकाल वापस ले रहा हूँ -यदि आप अपना GRT L2 पर नहीं भेजना चाहते हैं या आप प्राथमिकता देते हैं कि आप अपना GRT अद्यतन रूप में भेजें, तो आप अपने L1 पर संगठनित GRT को निकाल सकते हैं। सबग्राफ पृष्ठ पर बैनर पर, "Withdraw Signal" चुनें और सौदे की पुष्टि करें; GRT आपके क्यूरेटर पते पर भेजा जाएगा। +अगर आप चाहते हैं कि आप अपने GRT को L2 पर नहीं भेजें, या फिर आप पसंद करते हैं कि GRT को मैन्युअल रूप से ब्रिज करें, तो आप अपने क्यूरेटेड GRT को L1 पर निकाल सकते हैं। सबग्राफ पृष्ठ पर बैनर पर, "सिग्नल निकालें" चुनें और लेनदेन की पुष्टि करें; GRT आपके क्यूरेटर पते पर भेज दिया जाएगा। diff --git a/website/pages/hi/billing.mdx b/website/pages/hi/billing.mdx index ae1dde8fd2b3..1b81a32005de 100644 --- a/website/pages/hi/billing.mdx +++ b/website/pages/hi/billing.mdx @@ -37,8 +37,12 @@ title: बिलिंग ### क्रिप्टो वॉलेट का उपयोग करके जीआरटी जोड़ना + + > यह खंड यह मानते हुए लिखा गया है कि आपके क्रिप्टो वॉलेट में पहले से ही जीआरटी है, और आप एथेरियम मेननेट पर हैं। यदि आपके पास जीआरटी नहीं है, तो आप [यहां](#getting-grt) जीआरटी प्राप्त करने का तरीका सीख सकते हैं। +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. [सबग्राफ स्टूडियो बिलिंग पेज](https://thegraph.com/studio/billing/) पर जाएं। 2. पृष्ठ के ऊपरी दाएं कोने पर "कनेक्ट वॉलेट" बटन पर क्लिक करें। आपको बटुआ चयन पृष्ठ पर पुनर्निर्देशित किया जाएगा। अपना बटुआ चुनें और "कनेक्ट" पर क्लिक करें। @@ -71,13 +75,15 @@ title: बिलिंग ### मल्टीसिग वॉलेट का उपयोग करके जीआरटी जोड़ना + + 1. [सबग्राफ स्टूडियो बिलिंग पेज](https://thegraph.com/studio/billing/) पर जाएं। 2. पृष्ठ के ऊपरी दाएं कोने पर "कनेक्ट वॉलेट" बटन पर क्लिक करें। अपना बटुआ चुनें और "कनेक्ट" पर क्लिक करें। यदि आप [Gnosis-Safe](https://gnosis-safe.io/) का उपयोग कर रहे हैं, तो आप अपने मल्टीसिग के साथ-साथ अपने साइनिंग वॉलेट को भी कनेक्ट कर सकेंगे। फिर, संबंधित संदेश पर हस्ताक्षर करें। इससे कोई गैस खर्च नहीं होगी। 3. Click the 'Add GRT' button at the center of the page. A side panel will appear. -4. एक बार लेन-देन की पुष्टि हो जाने पर, आप एक घंटे के भीतर अपने खाते की शेष राशि में जीआरटी जोड़ हुआ देखेंगे। +4. 
एक बार लेन-देन की पुष्टि हो जाने के बाद, आप एक घंटे के भीतर अपने खाते की शेष राशि में जोड़ा गया जीआरटी देखेंगे। ### मल्टीसिग वॉलेट का उपयोग करके जीआरटी निकालना @@ -85,7 +91,7 @@ title: बिलिंग 1. [सबग्राफ स्टूडियो बिलिंग पेज](https://thegraph.com/studio/billing/) पर जाएं। -2. पृष्ठ के ऊपरी दाएं कोने पर "कनेक्ट वॉलेट" बटन पर क्लिक करें। अपना वॉलेट चुनें और "कनेक्ट" पर क्लिक करें। +2. पृष्ठ के ऊपरी दाएं कोने पर "कनेक्ट वॉलेट" बटन पर क्लिक करें। अपना बटुआ चुनें और "कनेक्ट" पर क्लिक करें। 3. पृष्ठ के मध्य में 'जीआरटी जोड़ें' बटन के बगल में स्थित ड्रॉपडाउन पर क्लिक करें। जीआरटी वापस लें चुनें। एक साइड पैनल दिखाई देगा। @@ -97,18 +103,18 @@ title: बिलिंग ## क्रिप्टो वॉलेट का उपयोग करके जीआरटी निकालना -यह खंड आपको दिखाएगा कि प्रश्न शुल्क के भुगतान के लिए जीआरटी कैसे प्राप्त करें। +This section will show you how to get GRT to pay for query fees. ### कॉइनबेस -यह कॉइनबेस पर जीआरटी खरीदने के लिए एक कदम दर कदम गाइड होगा। +This will be a step by step guide for purchasing GRT on Coinbase. 1. Go to [Coinbase](https://www.coinbase.com/) and create an account. 2. एक बार जब आप एक खाता बना लेते हैं, तो आपको केवाईसी (या अपने ग्राहक को जानें) नामक एक प्रक्रिया के माध्यम से अपनी पहचान सत्यापित करने की आवश्यकता होगी। यह सभी केंद्रीकृत या कस्टोडियल क्रिप्टो एक्सचेंजों के लिए एक मानक प्रक्रिया है। 3. एक बार जब आप अपनी पहचान सत्यापित कर लेते हैं, तो आप जीआरटी खरीद सकते हैं। आप पृष्ठ के शीर्ष दाईं ओर "खरीदें/बेचें" बटन पर क्लिक करके ऐसा कर सकते हैं। 4. वह मुद्रा चुनें जिसे आप खरीदना चाहते हैं। जीआरटी का चयन करें। 5. भुगतान विधि का चयन करें। अपनी पसंदीदा भुगतान विधि चुनें। -6. जीआरटी की वह मात्रा चुनें जिसे आप खरीदना चाहते हैं। +6. जीआरटी की वह राशि चुनें जिसे आप खरीदना चाहते हैं। 7. अपनी खरीदारी की समीक्षा करें। अपनी खरीद की समीक्षा करें और "जीआरटी खरीदें" पर क्लिक करें। 8. अपनी खरीद की पुष्टि करें। अपनी खरीदारी की पुष्टि करें और आपने सफलतापूर्वक GRT खरीद लिया होगा। 9. आप अपने खाते से जीआरटी को [MetaMask](https://metamask.io/) जैसे अपने क्रिप्टो वॉलेट में स्थानांतरित कर सकते हैं। @@ -117,11 +123,11 @@ title: बिलिंग - आप जिस जीआरटी को भेजना चाहते हैं उसकी राशि दर्ज करें और जिस वॉलेट पते पर आप इसे भेजना चाहते हैं। - "जारी रखें" पर क्लिक करें और अपने लेन-देन की पुष्टि करें। -कृपया ध्यान दें कि बड़ी खरीद राशि के लिए, कॉइनबेस को क्रिप्टो वॉलेट में पूरी राशि स्थानांतरित करने से पहले आपको 7-10 दिनों तक प्रतीक्षा करने की आवश्यकता हो सकती है। -कॉइनबेस पर जीआरटी प्राप्त करने के बारे में आप [यहां](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) अधिक जान सकते हैं। +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -यह Binance पर GRT खरीदने के लिए चरण-दर-चरण मार्गदर्शिका होगी। +This will be a step by step guide for purchasing GRT on Binance. 1. [Binance](https://www.binance.com/en) पर जाएं और एक खाता बनाएं। 2. 
एक बार जब आप एक खाता बना लेते हैं, तो आपको केवाईसी (या अपने ग्राहक को जानें) नामक एक प्रक्रिया के माध्यम से अपनी पहचान सत्यापित करने की आवश्यकता होगी। यह सभी केंद्रीकृत या कस्टोडियल क्रिप्टो एक्सचेंजों के लिए एक मानक प्रक्रिया है। @@ -137,11 +143,11 @@ title: बिलिंग - आप जिस जीआरटी को भेजना चाहते हैं उसकी राशि दर्ज करें और जिस श्वेतसूची वाले वॉलेट पते पर आप इसे भेजना चाहते हैं। - "जारी रखें" पर क्लिक करें और अपने लेन-देन की पुष्टि करें। -आप Binance पर GRT प्राप्त करने के बारे में [यहां](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) अधिक जान सकते हैं। +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -इस प्रकार आप Uniswap पर GRT खरीद सकते हैं। +This is how you can purchase GRT on Uniswap. 1. [Uniswap](https://app.uniswap.org/#/swap) पर जाएं और अपना वॉलेट कनेक्ट करें। 2. उस टोकन का चयन करें जिससे आप स्वैप करना चाहते हैं। ईटीएच का चयन करें। @@ -151,8 +157,52 @@ title: बिलिंग 5. "स्वैप" पर क्लिक करें। 6. अपने वॉलेट में लेन-देन की पुष्टि करें और आप लेन-देन के संसाधित होने की प्रतीक्षा करें। -आप Uniswap पर GRT प्राप्त करने के बारे में अधिक जानकारी [यहां](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) से प्राप्त कर सकते हैं। +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### कॉइनबेस + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - "जारी रखें" पर क्लिक करें और अपने लेन-देन की पुष्टि करें। + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. [Binance](https://www.binance.com/en) पर जाएं और एक खाता बनाएं। +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. 
Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - "जारी रखें" पर क्लिक करें और अपने लेन-देन की पुष्टि करें। + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Arbitrum Bridge -बिलिंग अनुबंध केवल एथेरियम मेननेट से आर्बिट्रम नेटवर्क तक जीआरटी को पाटने के लिए बनाया गया है। अगर आप अपने जीआरटी को आर्बिट्रम से वापस एथेरियम मेननेट में स्थानांतरित करना चाहते हैं, तो आपको [आर्बिट्रम ब्रिज](https://bridge.arbitrum.io/?l2ChainId=42161) का उपयोग करना होगा। +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/hi/chain-integration-overview.mdx b/website/pages/hi/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/hi/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). 
+- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. 
diff --git a/website/pages/hi/cookbook/arweave.mdx b/website/pages/hi/cookbook/arweave.mdx index 950e6cc71e00..c9411377d829 100644 --- a/website/pages/hi/cookbook/arweave.mdx +++ b/website/pages/hi/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: आरवीव पर सब-ग्राफ्र्स बनाना --- -> ग्राफ नोड और होस्टेड सेवाओं पर आरवीव अभी बीटा डेवलपमेंट फेज में है | आरवीव सब ग्राफ बनाते वक़्त किसी भी प्रकार की सहायता हेतु [ डिस्कॉर्ड ](https://discord.gg/graphprotocol) पर संपर्क करें | +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! इस गाइड में आप आरवीव ब्लॉकचेन पर सब ग्राफ्स बनाना और डेप्लॉय करना सीखेंगे! @@ -83,7 +83,7 @@ dataSources: ``` - आरवीव सब-ग्राफ्स के नए प्रकार का डाटा सोर्स लाते हैं (`आरवीव`) -- नेटवर्क को ग्राफ नोड होस्ट करने वाले नेटवर्क के अनुरूप होना चाहिए| होस्टेड सर्विस पर आरवीव का मेन नेट `arweave-mainnet` है +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - अरवीव डाटा सोर्स द्वारा एक वैकल्पिक source.owner फील्ड लाया गया, जो की एक आरवीव वॉलेट का मपब्लिक key है| आरवीव डाटा सोर्स द्वारा दो प्रकार के हैंडलर्स उपयोग किये जा सकते हैं: @@ -150,9 +150,9 @@ class Transaction { आरवीवे सब-ग्राफ की मैपिंग लिखना एथेरेयम के लिए सब-ग्राफ लिखने जैसा ही है| अधिक जानकारी [यहां](/developing/creating-a-subgraph/#writing-mappings) क्लिक करें| -## आरवीव सब-ग्राफ को होस्टेड सर्विस पर डेप्लॉय करना +## Deploying an Arweave Subgraph on the hosted service -एक बार होस्टेड सर्विस डैशबोर्ड पर सब-ग्राफ बन जाने के बाद उसको `graph deploy` CLI कमांड से डेप्लॉय किया जा सकता है| +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/hi/cookbook/base-testnet.mdx b/website/pages/hi/cookbook/base-testnet.mdx index caf0b047fd97..4d31dc39037e 100644 --- a/website/pages/hi/cookbook/base-testnet.mdx +++ b/website/pages/hi/cookbook/base-testnet.mdx @@ -13,7 +13,7 @@ title: आधार पर सबग्राफ बनाना ### 1. ग्राफ़ सीएलआई इनस्टॉल करें -ग्राफ़ सीएलआई (>=v0.41.0) जावास्क्रिप्ट में लिखा गया है और इसका उपयोग करने के लिए आपको या तो `npm` या `यार्न` स्थापित करने की आवश्यकता होगी। +ग्राफ़ सीएलआई (>=v0.41.0) JavaScript में लिखा गया है और इसका उपयोग करने के लिए आपको या तो `npm` या `यार्न` स्थापित करने की आवश्यकता होगी। ```sh # NPM diff --git a/website/pages/hi/cookbook/cosmos.mdx b/website/pages/hi/cookbook/cosmos.mdx index 96cfe877b4f4..b98bba42b139 100644 --- a/website/pages/hi/cookbook/cosmos.mdx +++ b/website/pages/hi/cookbook/cosmos.mdx @@ -178,7 +178,7 @@ class Any { यह गौर करना अत्यंत जरुरी है कि कॉसमॉस के मैसेज चेन-विशिष्ट हैं और उन्हें सब-ग्राफ्स में एक क्रमबद्ध [प्रोटोकॉल बफर्स](https://developers.google.com/protocol-buffers/) की तरह पास किया जाता है| परिणामस्वरूप, मैसेज के डाटा को प्रोसेस करने से पहले मैपिंग फंक्शन में डिकोड करने की आवश्यकता होती है| -सबग्राफ में संदेश डेटा को डीकोड करने का एक उदाहरण पाया जा सकता है [यहां](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). 
## कॉसमॉस सब-ग्राफ्स बनाना और निर्माण करना diff --git a/website/pages/hi/cookbook/grafting.mdx b/website/pages/hi/cookbook/grafting.mdx index 8b9d8aaf9244..b1e3a33c3759 100644 --- a/website/pages/hi/cookbook/grafting.mdx +++ b/website/pages/hi/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: एक कॉन्ट्रैक्ट बदलें और उसक इस अनुशिक्षण में हम एक बुनियादी उदहारण देखेंगे| हम एक मौजूदा कॉन्ट्रैक्ट को एक समान कॉन्ट्रैक्ट से बदल देंगे( नए एड्रेस के साथ, मगर सामान कोड). उसके बाद हम एक मौजूदा सब-ग्राफ एक "बेस" सब-ग्राफ में ग्राफ्ट कर देंगे नए कॉन्ट्रैक्ट की निगरानी करेगा| +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## एक मौजूदा सब-ग्राफ बनाना सब-ग्राफ बनाना द ग्राफ का एक अहम हिस्सा है, गहराई में [यहां](http://localhost:3000/en/cookbook/quick-start/)बताया गया है| इस अनुशिक्षां में दिए -ग्राफ को बनाने और डेप्लोये करने में सक्षम होने के लिए यह रेपो प्रदान की गयी है: @@ -131,7 +147,7 @@ graft: 1. [द ग्राफ स्टूडियो यू आई](https://thegraph.com/studio/) पर जायें और एक गोएर्ली टेस्ट नेट पर एक सब-ग्राफ बनाएं जिसका नाम `graft-replacement` होना चाहिए| 2. एक नया मैनिफेस्ट बनाएं| `subgraph.yaml`, `graph-replacement` के लिए एक अलग कॉन्ट्रैक्ट एड्रेस और नयी जानकारी, कि उसे कैसे ग्राफ्ट किया जाना चाहिए रखता है | यह `block` पुराने कॉन्ट्रैक्ट द्वारा [आखिरी बार मापी गयी गतिविधि](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) जिसकी फिलहाल हमें ज़रूरत है और पुराने कॉन्ट्रैक्ट का `बेस` हैं| `base` सब-ग्राफ आईडी मूल `graph-example` सब-ग्राफ उदहारण के लिए `डेप्लॉयमेंट आईडी` है| आप इसे द ग्राफ स्टूडियो यूआई में भी देख सकते है| 3. अपने सब-ग्राफ पेज पर `AUTH & DEPLOY` भाग में `graft-replacement` फोल्डर में दिए गए दिशा निर्देशों का पालन करें| -4. एक बार समाप्त होने पर, सत्यापित करें कि सबग्राफ ठीक से अनुक्रमित हो रहा है। यदि आप द ग्राफ़ प्लेग्राउंड में निम्न कमांड चलाते हैं +4. 
एक बार पूरा होने पर, सत्यापित करें की इंडेक्सिंग सही ढंग से हो गयी है| यदि आप निम्न कमांड ग्राफ प्लेग्राउंड में चलाते हैं ```graphql { diff --git a/website/pages/hi/cookbook/near.mdx b/website/pages/hi/cookbook/near.mdx index edb4dc0400e9..96ef313be0b4 100644 --- a/website/pages/hi/cookbook/near.mdx +++ b/website/pages/hi/cookbook/near.mdx @@ -245,7 +245,7 @@ NEAR सबग्राफ के लिए ग्राफक्यूएल NEAR समर्थन बीटा में है, जिसका मतलब है कि एपीआई में बदलाव हो सकते हैं क्योंकि हम इंटीग्रेशन में सुधार पर काम करना जारी रखेंगे। कृपया near@thegraph.com पर ईमेल करें ताकि हम NEAR सबग्राफ बनाने में आपकी सहायता कर सकें, और आपको नवीनतम विकासों के बारे में अपडेट रख सकें! -### क्या एक सबग्राफ एनईएआर और ईवीएम दोनों श्रृंखलाओं को अनुक्रमित कर सकता है? +### Can a subgraph index both NEAR and EVM chains? नहीं, एक सबग्राफ केवल एक श्रृंखला/नेटवर्क से डेटा स्रोतों का समर्थन कर सकता है। @@ -271,13 +271,13 @@ accounts: यह वर्तमान में समर्थित नहीं है। हम मूल्यांकन कर रहे हैं कि अनुक्रमण के लिए यह कार्यक्षमता आवश्यक है या नहीं। -### एथेरियम सबग्राफ "लंबित" और "वर्तमान" संस्करणों का समर्थन करते हैं, मैं NEAR सबग्राफ के "लंबित" संस्करण को कैसे तैनात कर सकता हूं? +### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? NEAR सबग्राफ के लिए पेंडिंग कार्यक्षमता अभी तक समर्थित नहीं है। अंतरिम में, आप एक अलग "नामित" सबग्राफ के लिए एक नया संस्करण तैनात कर सकते हैं, और फिर जब वह चेन हेड के साथ सिंक हो जाता है, तो आप अपने प्राथमिक "नामित" सबग्राफ में फिर से तैनात कर सकते हैं, जो उसी अंतर्निहित डेप्लॉयमेंट आईडी का उपयोग करेगा, इसलिए मुख्य सबग्राफ तुरंत सिंक हो जाएगा। -### मेरे प्रश्न का उत्तर नहीं दिया गया है, मुझे NEAR सबग्राफ बनाने में अधिक सहायता कहाँ से मिल सकती है? +### My question hasn't been answered, where can I get more help building NEAR subgraphs? -यदि यह सबग्राफ विकास के बारे में एक सामान्य प्रश्न है, तो शेष [डेवलपर डॉक्यूमेंटेशन](/cookbook/quick-start) में बहुत अधिक जानकारी है। अन्यथा कृपया [द ग्राफ प्रोटोकॉल डिस्कॉर्ड](https://discord.gg/graphprotocol) से जुड़ें और #नियर चैनल या ईमेल near@thegraph.com पर पूछें। +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## संदर्भ diff --git a/website/pages/hi/cookbook/substreams-powered-subgraphs.mdx b/website/pages/hi/cookbook/substreams-powered-subgraphs.mdx index 47f01ea9e064..6b84c84358c8 100644 --- a/website/pages/hi/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/hi/cookbook/substreams-powered-subgraphs.mdx @@ -1,12 +1,12 @@ --- -title: सबस्ट्रीम-संचालित सबग्राफ +title: Substreams-powered subgraphs --- -[Substreams](/सबस्ट्रीम/रीडमी) ब्लॉकचेन डेटा को संसाधित करने के लिए एक नया ढांचा है, जिसे ग्राफ़ नेटवर्क के लिए स्ट्रीमिंगफ़ास्ट द्वारा विकसित किया गया है। एक सबस्ट्रीम मॉड्यूल इकाई परिवर्तनों को आउटपुट कर सकता है, जो सबग्राफ इकाइयों के साथ संगत हैं। एक सबग्राफ ऐसे सबस्ट्रीम मॉड्यूल को डेटा स्रोत के रूप में उपयोग कर सकता है, जो सबग्राफ डेवलपर्स के लिए सबस्ट्रीम की अनुक्रमण गति और अतिरिक्त डेटा लाता है। +[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. 
A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. -## आवश्यकताएं +## Requirements -इस कुकबुक के लिए [yarn](https://yarnpkg.com/), [स्थानीय सबस्ट्रीम विकास के लिए आवश्यक निर्भरताएं](https://substreams.streamingfast.io/developers-guide/installation-requirements), और ग्राफ सीएलआई का नवीनतम संस्करण (>=0.52.0) की आवश्यकता है: +This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): ``` npm install -g @graphprotocol/graph-cli @@ -14,17 +14,17 @@ npm install -g @graphprotocol/graph-cli ## Get the cookbook -> यह कुकबुक इस [संदर्भ के रूप में सबस्ट्रीम-संचालित सबग्राफ] \(https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph) का उपयोग करती है। +> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). ``` graph init --from-example substreams-powered-subgraph ``` -## सबस्ट्रीम पैकेज को परिभाषित करना +## Defining a Substreams package -एक सबस्ट्रीम पैकेज प्रकारों से बना होता है ([प्रोटोकॉल बफ़र्स] \(https://protobuf.dev/) के रूप में परिभाषित), मॉड्यूल (रस्ट में लिखा गया), और एक `substreams.yaml` फ़ाइल जो प्रकारों को संदर्भित करती है, और निर्दिष्ट करती है कि मॉड्यूल कैसे ट्रिगर होते हैं। [सबस्ट्रीम विकास के बारे में अधिक जानने के लिए सबस्ट्रीम दस्तावेज़ पर जाएँ](/substreams), और अधिक उदाहरणों के लिए [awesome-substreams](https://github.com/pinax-network/awesome-substreams) और [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) देखें। +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. -विचाराधीन सबस्ट्रीम पैकेज मेननेट एथेरियम पर अनुबंध परिनियोजन का पता लगाता है, सभी नए तैनात अनुबंधों के लिए निर्माण ब्लॉक और टाइमस्टैम्प को ट्रैक करता है। ऐसा करने के लिए, `/proto/example.proto` में एक समर्पित `अनुबंध` प्रकार है ([प्रोटोकॉल बफ़र्स को परिभाषित करने के बारे में और जानें](https://protobuf.dev/programming-guides/proto3/#simple)): +The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. 
To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -सबस्ट्रीम पैकेज का मुख्य तर्क `lib.rs` में एक `map_contract` मॉड्यूल है, जो प्रत्येक ब्लॉक को संसाधित करता है, क्रिएट कॉल के लिए फ़िल्टर करता है जो वापस नहीं आया, और `कॉन्ट्रैक्ट` लौटाता है: +The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result `substreams_entity_change` क्रेट में केवल इकाई परिवर्तन उत्पन्न करने के लिए एक समर्पित `टेबल्स` फ़ंक्शन भी है ([documentation] \(https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html))। उत्पन्न इकाई परिवर्तन संबंधित सबग्राफ के `subgraph.graphql` में परिभाषित `schema.graphql` इकाइयों के साथ संगत होना चाहिए। +> The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. ``` #[substreams::handlers::map] @@ -88,7 +88,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -इस सबस्ट्रीम पैकेज को सबग्राफ द्वारा उपभोग के लिए तैयार करने के लिए, आपको निम्नलिखित कमांड चलाने होंगे: +To prepare this Substreams package for consumption by a subgraph, you must run the following commands: ```bash yarn substreams:protogen # generates types in /src/pb @@ -147,19 +147,19 @@ yarn substreams:package # packages the substreams in a .spkg file # alternatively, yarn substreams:prepare calls all of the above commands ``` -> यदि आप अंतर्निहित सबस्ट्रीम कमांड को समझना चाहते हैं तो इन स्क्रिप्ट को `package.json` फ़ाइल में परिभाषित किया गया है +> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands -यह `substreams.yaml` से पैकेज नाम और संस्करण के आधार पर एक `spkg` फ़ाइल उत्पन्न करता है। `spkg` फ़ाइल में वह सारी जानकारी है जो ग्राफ़ नोड को इस सबस्ट्रीम पैकेज को ग्रहण करने के लिए आवश्यक है। +This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. -> यदि आप सबस्ट्रीम पैकेज को अपडेट करते हैं, तो आपके द्वारा किए गए परिवर्तनों के आधार पर, आपको उपरोक्त कुछ या सभी कमांड चलाने की आवश्यकता हो सकती है ताकि `spkg` अद्यतित रहे। +> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. -## सबस्ट्रीम-संचालित सबग्राफ को परिभाषित करना +## Defining a Substreams-powered subgraph -सबस्ट्रीम-संचालित सबग्राफ एक नए प्रकार के डेटा स्रोत, "सबस्ट्रीम" का परिचय देते हैं। ऐसे सबग्राफ में केवल एक डेटा स्रोत हो सकता है। +Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. 
-इस डेटा स्रोत को अनुक्रमित नेटवर्क, सबस्ट्रीम पैकेज (`spkg`) को एक सापेक्ष फ़ाइल स्थान के रूप में निर्दिष्ट करना होगा, और उस सबस्ट्रीम पैकेज के भीतर मॉड्यूल जो सबग्राफ-संगत इकाई परिवर्तन उत्पन्न करता है (इस मामले में `map_entity_changes`, उपरोक्त सबस्ट्रीम पैकेज से)। मैपिंग निर्दिष्ट है, लेकिन केवल मैपिंग प्रकार ("सबस्ट्रीम/ग्राफ-एंटिटीज") और एपीवर्जन की पहचान करती है। +This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. -> वर्तमान में सबग्राफ स्टूडियो और ग्राफ नेटवर्क सबस्ट्रीम-संचालित सबग्राफ का समर्थन करते हैं जो `मेननेट` (मेननेट एथेरियम) को अनुक्रमित करते हैं। +> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). ```yaml specVersion: 0.0.4 @@ -180,7 +180,7 @@ dataSources: apiVersion: 0.0.5 ``` -`subgraph.yaml` एक स्कीमा फ़ाइल का भी संदर्भ देता है। इस फ़ाइल के लिए आवश्यकताएँ अपरिवर्तित हैं, लेकिन निर्दिष्ट इकाइयाँ `subgraph.yaml` में संदर्भित सबस्ट्रीम मॉड्यूल द्वारा उत्पादित इकाई परिवर्तनों के साथ संगत होनी चाहिए। +The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. ```graphql type Contract @entity { @@ -194,9 +194,9 @@ type Contract @entity { } ``` -उपरोक्त को देखते हुए, सबग्राफ डेवलपर्स इस सबस्ट्रीम-संचालित सबग्राफ को तैनात करने के लिए ग्राफ़ सीएलआई का उपयोग कर सकते हैं। +Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. -> मेननेट एथेरियम को अनुक्रमित करने वाले सबस्ट्रीम-संचालित सबग्राफ को [Subgraph Studio] \(https://thegraph.com/studio/) पर तैनात किया जा सकता है। +> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). ```bash yarn install # install graph-cli @@ -204,11 +204,11 @@ yarn subgraph:build # build the subgraph yarn subgraph:deploy # deploy the subgraph ``` -इतना ही! आपने एक सबस्ट्रीम-संचालित सबग्राफ बनाया और तैनात किया है। +That's it! You have built and deployed a Substreams-powered subgraph. -## सबस्ट्रीम-संचालित सबग्राफ की सेवा +## Serving Substreams-powered subgraphs -सबस्ट्रीम-संचालित सबग्राफ की सेवा के लिए, ग्राफ़ नोड को संबंधित नेटवर्क के लिए सबस्ट्रीम प्रदाता के साथ-साथ चेन हेड को ट्रैक करने के लिए फ़ायरहोज़ या आरपीसी के साथ कॉन्फ़िगर किया जाना चाहिए। इन प्रदाताओं को `config.toml` फ़ाइल के माध्यम से कॉन्फ़िगर किया जा सकता है: +In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. 
These providers can be configured via a `config.toml` file: ```toml [chains.mainnet] diff --git a/website/pages/hi/cookbook/upgrading-a-subgraph.mdx b/website/pages/hi/cookbook/upgrading-a-subgraph.mdx index bf5abf6c39e5..951cdba14406 100644 --- a/website/pages/hi/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/hi/cookbook/upgrading-a-subgraph.mdx @@ -1,25 +1,25 @@ --- -title: ग्राफ नेटवर्क को एक मौजूदा सबग्राफ को अपग्रेड करना +title: Upgrading an Existing Subgraph to The Graph Network --- ## परिचय -यह एक गाइड है जो आपको बताएगा कि होस्टेड सेवा से ग्राफ के डीसेंट्रलाइज्ड नेटवर्क में अपने सबग्राफ को कैसे अपग्रेड करें। 1,000 से अधिक सबग्राफ सफलतापूर्वक ग्राफ नेटवर्क में अपग्रेड किए गए हैं, जिनमें स्नैपशॉट, लूपरिंग, ऑडियस, प्रीमिया, लाइवपीर, यूमा, कर्व, लिडो, और बहुत से अन्य प्रोजेक्ट्स शामिल हैं! +This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! -अपग्रेड करने की प्रक्रिया तेज़ है और आपके सबग्राफ हमेशा के लिए विश्वसनीयता और प्रदर्शन का लाभ उठाएंगे, जिसे आप केवल ग्राफ नेटवर्क पर ही प्राप्त कर सकते हैं। +The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. ### आवश्यक शर्तें - आप पहले से ही होस्ट की गई सेवा पर एक सबग्राफ तैनात कर चुके हैं। -- सबग्राफ एक चैन को इंडेक्स कर रहा है जो ग्राफ नेटवर्क पर उपलब्ध है (या बीटा में उपलब्ध है)। -- आपके पास ईथर (ETH) वॉलेट है जिसका उपयोग ऑन-चेन सबग्राफ प्रकाशित करने के लिए किया जाता है। -- आपके पास लगभग 10,000 जीआरटी (GRT) है, जिसका उपयोग आपके सबग्राफ को क्यूरेट करने के लिए होता है, ताकि इंडेक्सर्स उसे इंडेक्स करना शुरू कर सकें। +- The subgraph is indexing a chain available on The Graph Network. +- You have a wallet with ETH to publish your subgraph on-chain. +- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. -## ग्राफ नेटवर्क को एक मौजूदा सबग्राफ को अपग्रेड करना +## Upgrading an Existing Subgraph to The Graph Network -> आप अपने सबग्राफ के लिए विशिष्ट कमांड [सबग्राफ स्टूडियो](https://thegraph.com/studio/) में पा सकते हैं। +> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). -1. ग्राफ-सीएलआई का नवीनतम संस्करण स्थापित करें: +1. Graph-cli का नवीनतम संस्करण प्राप्त करें: ```sh npm install -g @graphprotocol/graph-cli @@ -29,29 +29,29 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -सुनिश्चित करें कि सबग्राफ.yaml में आपका `apiVersion` `0.0.5` या इससे अधिक है। +Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. -2. सबग्राफ के मुख्य परियोजना रिपॉजिटरी में, सबग्राफ को स्टूडियो पर डिप्लॉय और बिल्ड करने के लिए प्रमाणित करें। +2. सबग्राफ के मुख्य प्रोजेक्ट रिपॉजिटरी के अंदर, सबग्राफ को स्टूडियो पर तैनात करने और बनाने के लिए प्रमाणित करें: ```sh graph auth --studio ``` -3. फ़ाइलें उत्पन्न करें और सबग्राफ बनाएं: +3. फाइलें उत्पन्न करें और सबग्राफ बनाएं: ```sh graph codegen && graph build ``` -यदि आपके सबग्राफ में बिल्ड त्रुटियाँ हैं, तो [असेंबलीस्क्रिप्ट माइग्रेशन गाइड](/release-notes/assemblyscript-migration-guide/) देखें। +If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). -4. 
अपने वॉलेट से [सबग्राफ स्टूडियो](https://thegraph.com/studio/) में साइन इन करें और सबग्राफ तैनात करें। आप अपना `` स्टूडियो यूआई में पा सकते हैं, जो आपके सबग्राफ के नाम पर आधारित है। +4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `` in the Studio UI, which is based on the name of your subgraph. ```sh graph deploy --studio ``` -5. स्टूडियो के खेल के मैदान पर परीक्षण प्रश्न। यहां [सुशी - मेननेट एक्सचेंज सबग्राफ] \(https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground) के लिए कुछ उदाहरण दिए गए हैं: +5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): ```sh { @@ -68,27 +68,27 @@ graph deploy --studio } ``` -6. इस बिंदु पर, आपका सबग्राफ अब सबग्राफ स्टूडियो पर डिप्लॉय हो गया है, लेकिन इसे अभी तक डीसेंट्रलाइज्ड नेटवर्क पर प्रकाशित नहीं किया गया है। आप अब सबग्राफ का टेस्ट कर सकते हैं ताकि यह आपकी इच्छा के अनुसार काम कर रहा है यह सुनिश्चित कर सकते हैं, जिसका टेम्पररी क्वेरी URL ऊपर दाएं स्तंभ में दिखाई देता है। जैसा कि नाम से स्पष्ट है, यह एक अस्थायी URL है और उत्पादन में उपयोग नहीं करना चाहिए। +6. इस समय पर, आपका सबग्राफ अब सबग्राफ स्टूडियो पर तैनात है, लेकिन अभी तक विकेंद्रीकृत नेटवर्क पर प्रकाशित नहीं हुआ है। अब आप यह सुनिश्चित करने के लिए सबग्राफ का परीक्षण कर सकते हैं कि यह अस्थायी क्वेरी URL का उपयोग करके काम कर रहा है, जैसा कि ऊपर दाएं कॉलम के शीर्ष पर दिखाया गया है। जैसा कि इस नाम से पहले ही पता चलता है, यह एक अस्थायी URL है और इसे उत्पादन में इस्तेमाल नहीं किया जाना चाहिए। -- अपडेट करना सिर्फ अपने मौजूदा सबग्राफ का एक और संस्करण ऑन-चेन प्रकाशित करना है। -- क्योंकि इससे लागत उत्पन्न होती है, इसलिए सबग्राफ को प्रकाशित करने से पहले "डेवलपमेंट क्वेरी URL" का उपयोग करके सबग्राफ को सबग्राफ स्टूडियो में डिप्लॉय और टेस्ट करना अत्यंत सिफारिश किया जाता है। एक उदाहरण लेन-देन को देखे [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). कीमतें लगभग 0.0425 ETH पर 100 ग्वेई परिवर्तन के आस-पास हैं। -- जब भी आपको अपने सबग्राफ को अपडेट करने की आवश्यकता होगी, तो आपको एक अपडेट शुल्क देना होगा। क्योंकि इससे लागत उत्पन्न होती है, इसलिए गोएर्ली पर अपने सबग्राफ को डिप्लॉय और टेस्ट करने की आपको सुझाव दिया जाता है इसे मेननेट पर डिप्लॉय करने से पहले। कुछ केसों में, यदि उस सबग्राफ पर कोई सिग्नल नहीं है, तो उसे आपको कुछ GRT भी चाहिए हो सकता है। उस सबग्राफ के उस संस्करण पर सिग्नल/क्युरेशन होने की स्थिति में (ऑटो-माइग्रेट का उपयोग करके) टैक्स विभाजित होंगे। +- Updating is just publishing another version of your existing subgraph on-chain. +- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. +- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. -7. "प्रकाशित करें" बटन दबाकर ग्राफ़ के विकेन्द्रीकृत नेटवर्क पर सबग्राफ प्रकाशित करें। +7. 
"प्रकाशित करें" बटन दबाकर ग्राफ़ के विकेंद्रीकृत नेटवर्क पर सबग्राफ प्रकाशित करें। -यह सुनिश्चित करने के लिए कि यह इंडेक्सर्स द्वारा अनुक्रमित है, आपको अपने सबग्राफ को जीआरटी के साथ क्यूरेट करना चाहिए। गैस की लागत बचाने के लिए, आप अपने सबग्राफ को उसी लेनदेन में क्यूरेट कर सकते हैं जिसे आप नेटवर्क पर प्रकाशित करते हैं। उच्च गुणवत्ता वाली सेवा के लिए अपने सबग्राफ को कम से कम 10,000 जीआरटी के साथ क्यूरेट करने की अनुशंसा की जाती है। +You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. -और बस! प्रकाशन पूरा करने के बाद, आप अपने सबग्राफ़ को [द ग्राफ़ एक्सप्लोरर](https://thegraph.com/explorer) के माध्यम से विकेंद्रीकृत नेटवर्क पर लाइव देख पाएंगे। +And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). -डिस्कॉर्ड पर बेझिझक [#क्यूरेटर चैनल](https://discord.gg/s5HfGMXmbW) का लाभ उठाएं ताकि क्यूरेटर को पता चल सके कि आपका सबग्राफ सिग्नल के लिए तैयार है। यदि आप अपनी अपेक्षित क्वेरी मात्रा उनके साथ साझा करते हैं तो यह भी सहायक होगा। इसलिए, वे अनुमान लगा सकते हैं कि उन्हें आपके सबग्राफ पर कितना जीआरटी संकेत देना चाहिए। +Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. ### एक एपीआई key बनाएँ -आप सबग्राफ स्टूडियो में एक एपीआई कुंजी उत्पन्न कर सकते हैं [here] \(https://thegraph.com/studio/apikeys/)। +You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). 
![API key creation page](/img/api-image.png) -प्रत्येक सप्ताह के अंत में, उस अवधि के दौरान हुए क्वेरी शुल्कों पर आधारित एक चालान तैयार किया जाएगा। यह चालान आपके शेष राशि में उपलब्ध GRT का उपयोग करके स्वचालित रूप से भुगतान किया जाएगा। आपके शेष राशि को आपके क्वेरी शुल्कों के खर्च के निकाले जाने के बाद अपडेट किया जाएगा। क्वेरी शुल्क GRT में आर्बिट्रम नेटवर्क के माध्यम से भुगतान किये जाएंगे। आपको अपने एपीआई कुंजी को सक्षम करने के लिए आर्बिट्रम बिलिंग अनुबंध में GRT जोड़ने की आवश्यकता होगी जिसके लिए निम्नलिखित कदमों का पालन करें: +प्रत्येक सप्ताह के अंत में, इस अवधि के दौरान किए गए क्वेरी शुल्क के आधार पर एक चालान जनरेट किया जाएगा। आपके बैलेंस में उपलब्ध GRT का उपयोग करके इस चालान का स्वचालित रूप से भुगतान किया जाएगा। आपकी क्वेरी शुल्क की लागत वापस लेने के बाद आपकी शेष राशि अपडेट की जाएगी। क्वेरी शुल्क का भुगतान आर्बिट्रम नेटवर्क के माध्यम से जीआरटी में किया जाता है। आपको निम्नलिखित चरणों के माध्यम से अपनी एपीआई key को सक्षम करने के लिए आर्बिट्रम बिलिंग कॉन्ट्रैक्ट में जीआरटी जोड़ने की आवश्यकता होगी: - अपनी पसंद के एक्सचेंज पर जीआरटी खरीदें। - अपने वॉलेट में जीआरटी भेजें। @@ -96,12 +96,12 @@ graph deploy --studio ![Add GRT in billing](/img/Add-GRT-New-Page.png) -- अपनी बिलिंग शेष राशि में अपना GRT जोड़ने के लिए steps का पालन करें। +- अपनी बिलिंग शेष राशि में अपना GRT जोड़ने के लिए चरणों का पालन करें। - आपका GRT स्वचालित रूप से आर्बिट्रम नेटवर्क से जुड़ जाएगा और आपके बिलिंग बैलेंस में जुड़ जाएगा। ![Billing pane](/img/New-Billing-Pane.png) -> नोट: अपने बिलिंग शेष में जीआरटी जोड़ने पर पूर्ण निर्देशों के लिए [आधिकारिक बिलिंग पृष्ठ](../billing.mdx) देखें। +> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. ### अपनी एपीआई key सुरक्षित करना @@ -110,13 +110,13 @@ graph deploy --studio 1. अधिकृत सबग्राफ 2. अधिकृत डोमेन -आप अपनी एपीआई कुंजी [here](https://thegraph.com/studio/apikeys/test/) सुरक्षित कर सकते हैं। +You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). ![Subgraph lockdown page](/img/subgraph-lockdown.png) -### विकेंद्रीकृत नेटवर्क पर आपके सबग्राफ को क्वेरी करना +### विकेंद्रीकृत नेटवर्क पर अपने सबग्राफ को क्वेरी करना -अब आप ग्राफ़ एक्सप्लोरर में नेटवर्क पर इंडेक्सर्स की अनुक्रमण स्थिति की जांच कर सकते हैं (उदाहरण [here] \(https://thegraph.com/explorer/subgraph?id=S9ihana8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers))। शीर्ष पर हरी रेखा इंगित करती है कि पोस्टिंग के समय 8 इंडेक्सर्स ने उस सबग्राफ को सफलतापूर्वक अनुक्रमित किया था। इसके अलावा इंडेक्सर टैब में आप देख सकते हैं कि किन इंडेक्सर्स ने आपका सबग्राफ उठाया है। +Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) @@ -124,13 +124,13 @@ graph deploy --studio `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -महत्वपूर्ण: `[api-key]` को उपरोक्त अनुभाग में उत्पन्न वास्तविक एपीआई कुंजी से बदलना सुनिश्चित करें। +Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. अब आप अपने ग्राफ़िकल अनुरोधों को भेजने के लिए अपने डैप में उस क्वेरी URL का उपयोग कर सकते हैं। बधाई हो! अब आप विकेंद्रीकरण के अग्रणी हैं! 
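> For reference, querying that gateway URL is a standard GraphQL-over-HTTP POST. The snippet below is a minimal sketch only: `<API_KEY>` is a placeholder for the key generated above, and the `_meta` query simply asks for the latest block indexed for the example subgraph ID shown in the URL.

```bash
# Sketch only: POST a GraphQL query to the gateway (replace <API_KEY> with your own key).
curl -X POST \
  -H 'Content-Type: application/json' \
  -d '{ "query": "{ _meta { block { number } } }" }' \
  'https://gateway.thegraph.com/api/<API_KEY>/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo'
```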
-> नोट: नेटवर्क की वितरित प्रकृति के कारण संभव है कि विभिन्न इंडेक्सर्स ने विभिन्न ब्लॉक तक इंडेक्स किया हो। केवल ताज़ा डेटा प्राप्त करने के लिए आप इंडेक्सर को स्पष्ट कर सकते हैं कि उसे किस न्यूनतम ब्लॉक तक इंडेक्स किया होना चाहिए जिसके साथ आपका क्वेरी सेव किया जाएगा, यह विकल्प दिए गए उदाहरण में दिखाया गया है: `{ number_gte: $minBlock }` (यहां $minBlock को वास्तविक न्यूनतम ब्लॉक नंबर से बदल दें)। +> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: ```graphql { @@ -140,13 +140,13 @@ graph deploy --studio } ``` -नेटवर्क की प्रकृति और पुनर्गठन को कैसे संभालना है, इसके बारे में अधिक जानकारी दस्तावेज़ीकरण लेख [डिस्ट्रीब्यूटेड सिस्टम्स](/querying/distributed-systems/) में वर्णित है। +More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). -## नेटवर्क पर एक सबग्राफ़ अपडेट करना +## Updating a Subgraph on the Network -यदि आप नेटवर्क पर किसी मौजूदा सबग्राफ को अपडेट करना चाहते हैं, तो आप ग्राफ सीएलआई का उपयोग करके अपने सबग्राफ के एक नए संस्करण को सबग्राफ स्टूडियो में तैनात करके ऐसा कर सकते हैं। +If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. -1. यदि आप नेटवर्क पर किसी मौजूदा सबग्राफ को अपडेट करना चाहते हैं, तो आप ग्राफ सीएलआई का उपयोग करके अपने सबग्राफ के एक नए संस्करण को सबग्राफ स्टूडियो में तैनात करके ऐसा कर सकते हैं। +1. अपने वर्तमान सबग्राफ में परिवर्तन करें। गोएर्ली में प्रकाशित करके सबग्राफ स्टूडियो पर छोटे सुधारों का परीक्षण करना एक अच्छा विचार है। 2. निम्नलिखित को तैनात करें और कमांड में नया संस्करण निर्दिष्ट करें (जैसे। v0.0.1, v0.0.2, आदि): ```sh @@ -154,72 +154,72 @@ graph deploy --studio ``` 3. खेल के मैदान में क्वेरी करके सबग्राफ स्टूडियो में नए संस्करण का परीक्षण करें -4. ग्राफ़ नेटवर्क पर नया संस्करण प्रकाशित करें। याद रखें कि इसके लिए गैस की आवश्यकता होती है (जैसा कि ऊपर अनुभाग में वर्णित है)। +4. ग्राफ़ नेटवर्क पर नया संस्करण प्रकाशित करें। याद रखें कि इसके लिए गैस की आवश्यकता होती है (जैसा कि ऊपर अनुभाग में बताया गया है)। -### मालिक अद्यतन शुल्क: डीप डाइव +### Owner Update Fee: Deep Dive -> ध्यान दें: आर्बिट्रम पर क्यूरेशन बॉन्डिंग कर्व्स का उपयोग नहीं करता है। आर्बिट्रम के बारे में और जानें [here] \(/ आर्बिट्रम/आर्बिट्रम-एफएक्यू/)। +> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). -अपडेट के लिए जीआरटी को सबग्राफ के पुराने संस्करण से नए संस्करण में स्थानांतरित करना आवश्यक है। इसका मतलब यह है कि प्रत्येक अपडेट के लिए, एक नया बॉन्डिंग कर्व बनाया जाएगा (बॉन्डिंग कर्व्स पर अधिक जानकारी [here](/network/curating#bonding-curve-101))। +An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). 
-नए बॉन्डिंग कर्व द्वारा नए संस्करण में माइग्रेट होने वाले सभी GRT पर 1% क्यूरेशन कर लगाया जाता है। स्वामी 1.25% या 1.25% तक का यह कर भुगतान करना होता है। दूसरे 1.25% को सभी क्यूरेटर्स के रूप में एक शुल्क के रूप में शामिल किया जाता है। यह प्रोत्साहन डिजाइन इसलिए है कि सबग्राफ के मालिक को पुनरावृत्ति अपडेट कॉल्स के साथ अपने क्यूरेटर्स के धन को सूखा नहीं सकता। यदि क्यूरेशन गतिविधि नहीं है, तो अपने ही सबग्राफ को सिग्नल करने के लिए आपको 100 GRT का न्यूनतम भुगतान करना होगा। +The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. आइए एक उदाहरण बनाते हैं, यह केवल तभी होता है जब आपका सबग्राफ सक्रिय रूप से क्यूरेट किया जा रहा हो: -- सबग्राफ के v1 पर ऑटो-माइग्रेट का उपयोग करके 100,000 जीआरटी का संकेत दिया जाता है -- स्वामी v2 पर अद्यतन करता है। 100,000 जीआरटी को एक नए बॉन्डिंग वक्र में स्थानांतरित कर दिया जाता है, जहां 97,500 जीआरटी को नए वक्र में डाल दिया जाता है और 2,500 जीआरटी को जला दिया जाता है -- उसके बाद, स्वामी को आधे कर के फीस के लिए 1250 GRT को दहन करना पड़ता है। स्वामी को इसे अपडेट से पहले अपने वॉलेट में होना चाहिए, अन्यथा अपडेट सफल नहीं होगा। यह अपडेट के साथी ट्रांजेक्शन में होता है। +- एक सबग्राफ के v1 पर ऑटो-माइग्रेट का उपयोग करके 100,000 GRT का संकेत दिया जाता है +- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned +- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. -_हालांकि यह तंत्र वर्तमान में नेटवर्क पर लाइव है, समुदाय वर्तमान में सबग्राफ डेवलपर्स के लिए अपडेट की लागत को कम करने के तरीकों पर चर्चा कर रहा है।_ +_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ ### एक सबग्राफ का एक स्थिर संस्करण बनाए रखना -यदि आप अपने सबग्राफ में बहुत सारे बदलाव कर रहे हैं, तो इसे लगातार अपडेट करना और अपडेट की लागत का सामना करना अच्छा विचार नहीं है। आपके सबग्राफ का एक स्थिर और सुसंगत संस्करण बनाए रखना न केवल लागत के नजरिए से महत्वपूर्ण है, बल्कि इसलिए भी कि इंडेक्सर्स अपने सिंकिंग समय में आत्मविश्वास महसूस कर सकें। जब आप अपडेट की योजना बनाते हैं तो इंडेक्सर्स को चिह्नित किया जाना चाहिए ताकि इंडेक्सर सिंकिंग समय प्रभावित न हो। जब आप अपने सबग्राफ का संस्करण बना रहे हों तो इंडेक्सर्स को यह बताने के लिए डिस्कॉर्ड पर [#इंडेक्सर्स चैनल](https://discord.gg/JexvtHa7dq) का बेझिझक लाभ उठाएं। +If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. 
-सबग्राफ हरिज APIs हैं जिन्हें बाहरी डेवलपर्स उपयोग कर रहे हैं। खुले APIs को सख्त मानकों का पालन करना चाहिए ताकि ये बाहरी डेवलपर्स के एप्लिकेशन को ख़राब ना करें। द ग्राफ नेटवर्क में, सबग्राफ डेवलपर को इंडेक्सर्स को ध्यान में रखना चाहिए और देखना चाहिए कि वे एक नए सबग्राफ को सिंक करने में कितना समय लगते हैं, साथ ही अन्य डेवलपर्स जो उनके सबग्राफ का उपयोग कर रहे हैं, को भी ध्यान में रखना चाहिए। +Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. ### सबग्राफ के मेटाडेटा को अपडेट करना -आप नई संस्करण प्रकाशित किए बिना अपने सबग्राफ की मेटाडेटा को अपडेट कर सकते हैं। मेटाडेटा में सबग्राफ का नाम, छवि, विवरण, वेबसाइट URL, स्रोत कोड URL, और श्रेणियां शामिल होती हैं। डेवलपर्स इसे सबग्राफ स्टूडियो में अपने सबग्राफ के विवरण को अपडेट करके कर सकते हैं, जहां आप सभी लागू फ़ील्ड को संपादित कर सकते हैं। +आप एक नया संस्करण प्रकाशित किए बिना अपने सबग्राफ के मेटाडेटा को अपडेट कर सकते हैं। मेटाडेटा में सबग्राफ नाम, छवि, विवरण, वेबसाइट URL, मुल कोड के URL और श्रेणियां शामिल हैं। डेवलपर सबग्राफ स्टूडियो में अपने सबग्राफ विवरण अपडेट करके ऐसा कर सकते हैं जहां आप सभी लागू फ़ील्ड संपादित कर सकते हैं। -सुनिश्चित करें कि **Explorer में सबग्राफ विवरण अपडेट करें** चेकबॉक्स चिह्नित है और **सेव** पर क्लिक करें। यदि यह चेकबॉक्स चिह्नित है, तो एक ऑन-चेन ट्रांजैक्शन उत्पन्न होगा जो नए डिप्लॉयमेंट के साथ नए संस्करण को प्रकाशित किए बिना, एक्सप्लोरर में सबग्राफ विवरण को अपडेट करेगा। +Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. -## ग्राफ़ नेटवर्क में एक सबग्राफ़ तैनात करने के लिए सर्वोत्तम अभ्यास +## ग्राफ़ नेटवर्क में एक सबग्राफ को तैनात करने के लिए सर्वोत्तम अभ्यास -1. सबग्राफ विकास के लिए ईएनएस नाम का लाभ उठाना: +1. सबग्राफ डेवलपमेंट के लिए ENS नाम का लाभ उठाना: - Set up your ENS [here](https://app.ens.domains/) -- अपना ENS नाम अपनी सेटिंग में जोड़ें [here](https://thegraph.com/explorer/settings?view=display-name)। +- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). -2. आपकी प्रोफ़ाइलें जितनी अधिक भरी होंगी, आपके सबग्राफ़ को अनुक्रमित और क्यूरेट किए जाने की संभावना उतनी ही बेहतर होगी। +2. आपके प्रोफाइल जितने अधिक भरे हुए हैं, आपके सबग्राफ के अनुक्रमित और क्यूरेट होने की संभावना उतनी ही बेहतर है। ## ग्राफ़ नेटवर्क पर एक सबग्राफ का बहिष्कार करना -अपने सबग्राफ को अस्वीकृत करने और उसे ग्राफ़ नेटवर्क से हटाने के लिए [here] \(/manageing/deprecating-a-subgraph) चरणों का पालन करें। +Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. ## ग्राफ़ नेटवर्क पर एक सबग्राफ + बिलिंग को क्वेरी करना -होस्टेड सेवा को उन्हें बिना किसी प्रतिबंध के अपने सबग्राफ डिप्लॉय करने की अनुमति देने के लिए स्थापित किया गया था। +The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. 
-द ग्राफ नेटवर्क को वास्तविक रूप से डिसेंट्रलाइज़ किया जाने के लिए, क्वेरी शुल्कों का भुगतान प्रोटोकॉल के प्रोत्साहनों का एक मुख्य हिस्सा होना आवश्यक है। एपीआईस की सदस्यता और क्वेरी शुल्क भुगतान करने के बारे में अधिक जानकारी के लिए, बिलिंग दस्तावेज़ीकरण पर जांच करें यहाँ [here](/billing/)। +In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). ### नेटवर्क पर क्वेरी शुल्क का अनुमान लगाना -यह उत्पाद UI में एक लाइव सुविधा नहीं होने के बावजूद, आप अपने प्रति क्वेरी के लिए अधिकतम बजट तय कर सकते हैं, मासिक रूप से देने के लिए राशि को लेकर जिसे आप प्रत्याशित क्वेरी वॉल्यूम से विभाजित कर सकते हैं। +जबकि यह उत्पाद UI में एक लाइव विशेषता नहीं है, आप प्रति माह भुगतान करने के इच्छुक राशि को लेकर और इसे अपनी अपेक्षित क्वेरी मात्रा से विभाजित करके अपना अधिकतम बजट प्रति क्वेरी निर्धारित कर सकते हैं। -जब आप अपने क्वेरी बजट का निर्धारण करते हैं, तो यह निश्चित नहीं है कि एक इंडेक्सर उसमें आपको सेव करने के लिए तैयार होगा। यदि गेटवे आपको एक इंडेक्सर से मिला देता है जो आपको आपके बजट या उससे कम कीमत पर क्वेरी सेव करने के लिए तैयार होता है, तो आपको अपने बजट और उनकी कीमत के अंतर का भुगतान करना होगा। इससे, एक कम क्वेरी कीमत आपको उपलब्ध इंडेक्सर्स का समूह कम कर देती है, जिससे आपको प्राप्त की गई सेवा की गुणवत्ता प्रभावित हो सकती है। उच्च क्वेरी शुल्क होना लाभकारी है, क्योंकि इससे क्युरेशन और प्रमुख इंडेक्सर्स को आपके सबग्राफ को आकर्षित कर सकता है। +While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. -ध्यान रखें कि यह एक गतिशील और विकसित हो रहा बाजार है, लेकिन आप इसके साथ कैसे इंटरैक्ट करते हैं वह आपके नियंत्रण में है। प्रोटोकॉल या गेटवे में कोई अधिकतम या न्यूनतम मूल्य निर्दिष्ट नहीं है। उदाहरण के लिए, नीचे दिए गए संदर्भ में नेटवर्क पर कुछ dapps द्वारा भुगतान किए गए मूल्यों को देख सकते हैं (प्रति सप्ताह आधार पर)। नीचे दिखाए गए अंतिम स्तंभ में, GRT में क्वेरी शुल्क दिखाया गया है। +याद रखें कि यह एक गतिशील और बढ़ता हुआ बाजार है, लेकिन आप इसके साथ कैसे इंटरैक्ट करते हैं यह आपके नियंत्रण में है। प्रोटोकॉल या गेटवे में निर्दिष्ट कोई अधिकतम या न्यूनतम मूल्य नहीं है। उदाहरण के लिए, आप नीचे नेटवर्क पर (प्रति सप्ताह के आधार पर) कुछ डैप द्वारा भुगतान की गई कीमत को देख सकते हैं। अंतिम कॉलम देखें, जो जीआरटी में क्वेरी फीस दिखाता है। ![QueryFee](/img/QueryFee.png) ## अतिरिक्त संसाधन -यदि आप अभी भी भ्रमित हैं, तो डरें नहीं! निम्नलिखित संसाधनों की जाँच करें या नीचे दिए गए विकेंद्रीकृत नेटवर्क में सबग्राफ को अपग्रेड करने पर हमारी वीडियो गाइड देखें: +If you're still confused, fear not! 
Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: -- [द ग्राफ़ नेटवर्क कॉन्ट्रैक्ट्स](https://github.com/graphprotocol/contracts) -- [क्यूरेशन कॉन्ट्रैक्ट](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - अंतर्निहित कॉन्ट्रैक्ट जिसे GNS लपेटता है +- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) +- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [सबग्राफ़ स्टूडियो दस्तावेज़ीकरण](/तैनाती/सबग्राफ़-स्टूडियो) +- [Subgraph Studio documentation](/deploying/subgraph-studio) diff --git a/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx index 4eb9b918adfd..28fc2c1b043e 100644 --- a/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/hi/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: सबग्राफ स्टूडियो में सबग्राफ तैनात करना --- -> सुनिश्चित करें कि आपका सबग्राफ जिस नेटवर्क से डेटा इंडेक्स कर रहा है वह विकेंद्रीकृत नेटवर्क पर [समर्थित](/Developing/supported-chains) हो। +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). ये आपके सबग्राफ को सबग्राफ स्टूडियो में तैनात करने के चरण हैं: diff --git a/website/pages/hi/deploying/hosted-service.mdx b/website/pages/hi/deploying/hosted-service.mdx index 57cbcd4e9c9e..795e9de62df6 100644 --- a/website/pages/hi/deploying/hosted-service.mdx +++ b/website/pages/hi/deploying/hosted-service.mdx @@ -2,11 +2,11 @@ title: होस्टेड सेवा क्या है? --- -> कृपया ध्यान दें, होस्ट की गई सेवा 2023 में समाप्त होनी शुरू हो जाएगी, लेकिन यह उन नेटवर्कों के लिए उपलब्ध रहेगी जो विकेंद्रीकृत नेटवर्क पर समर्थित नहीं हैं। डेवलपर्स को [अपने सबग्राफ को द ग्राफ़ नेटवर्क में अपग्रेड करने के लिए प्रोत्साहित किया जाता है](/cookbook/upgrading-a-subgraph) क्योंकि अधिक नेटवर्क समर्थित हैं। यह सुनिश्चित करने के लिए कि डेवलपर्स के पास विकेंद्रीकृत नेटवर्क में सबग्राफ को अपग्रेड करने के लिए पर्याप्त समय है, प्रत्येक नेटवर्क में उनके होस्ट किए गए सेवा समकक्ष धीरे-धीरे समाप्त हो जाएंगे। होस्ट की गई सेवा के बंद होने के बारे में [यहां](https://thegraph.com/blog/sunsetting-hosted-service) और अधिक पढ़ें। +> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). -यह अनुभाग आपको [होस्ट की गई सेवा](https://thegraph.com/hosted-service/) पर एक सबग्राफ तैनात करने के बारे में बताएगा। +This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). 
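> For orientation, the hosted-service flow this section walks through reduces to a few Graph CLI commands. This is a sketch under stated assumptions, not the section's exact steps: `<GITHUB_USER>/<SUBGRAPH_NAME>` and `<ACCESS_TOKEN>` are placeholders for the values from your own hosted-service dashboard.

```bash
# Sketch of the typical hosted-service workflow (placeholder values):
graph init --product hosted-service <GITHUB_USER>/<SUBGRAPH_NAME>    # scaffold a new subgraph
graph auth --product hosted-service <ACCESS_TOKEN>                   # authenticate with your dashboard access token
graph deploy --product hosted-service <GITHUB_USER>/<SUBGRAPH_NAME>  # build and deploy
```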
-यदि आपके पास होस्टेड सेवा पर खाता नहीं है, तो आप अपने GitHub खाते से साइन अप कर सकते हैं। एक बार प्रमाणीकरण करने के बाद, आप यूआई के माध्यम से सबग्राफ्स बनाना और अपने टर्मिनल से उन्हें डिप्लॉय करना शुरू कर सकते हैं। होस्टेड सेवा पॉलिगन, ज्ञानस चेन, बीएनबी चेन, ऑप्टिमिज़म, आर्बिट्रम, और अन्य कई नेटवर्कों का समर्थन करती है। +If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. विस्तृत सूची के लिए, [समर्थित नेटवर्क](/Developing/supported-networks/#hosted-service) देखें। @@ -16,7 +16,7 @@ title: होस्टेड सेवा क्या है? ### एक मौजूदा कॉन्ट्रैक्ट से -यदि आपके पास पहले से ही अपनी पसंद के नेटवर्क पर एक स्मार्ट अनुबंध तैनात है, तो इस अनुबंध से एक नया सबग्राफ बूटस्ट्रैप करना होस्टेड सेवा पर आरंभ करने का एक अच्छा तरीका हो सकता है। +If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. आप इस आदेश का उपयोग एक सबग्राफ बनाने के लिए कर सकते हैं जो मौजूदा कॉन्ट्रैक्ट से सभी घटनाओं को अनुक्रमित करता है। यह [Etherscan](https://etherscan.io/) से कॉन्ट्रैक्ट ABI प्राप्त करने का प्रयास करेगा। @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / उदाहरण सबग्राफ दानी ग्रांट द्वारा ग्रेविटी कॉन्ट्रैक्ट पर आधारित है जो उपयोगकर्ता अवतारों का प्रबंधन करता है और जब भी अवतार बनाए या अपडेट किए जाते हैं तो `NewGravatar` या `UpdateGravatar` ईवेंट उत्सर्जित करता है। सबग्राफ इन घटनाओं को ग्राफ़ नोड स्टोर में `Gravatar` संस्थाओं को लिखकर और सुनिश्चित करता है कि इन्हें घटनाओं के अनुसार अपडेट किया जाता है। बेहतर ढंग से समझने के लिए [सबग्राफ मेनिफ़ेस्ट](/developing/creating-a-subgraph#the-subgraph-manifest) पर जारी रखें कि आपके स्मार्ट कॉन्ट्रैक्ट से किन इवेंट्स पर ध्यान देना है, मैपिंग आदि। -## होस्ट की गई सेवा पर समर्थित नेटवर्क +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + +## Supported Networks on the hosted service आप समर्थित नेटवर्क की सूची पा सकते हैं [Here](/developing/supported-networks). diff --git a/website/pages/hi/deploying/subgraph-studio-faqs.mdx b/website/pages/hi/deploying/subgraph-studio-faqs.mdx index 7ad0db63ef85..2c77fe12eb51 100644 --- a/website/pages/hi/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/hi/deploying/subgraph-studio-faqs.mdx @@ -24,7 +24,7 @@ title: सबग्राफ स्टूडियो अक्सर पूछ ध्यान दें कि एक बार स्थानांतरित हो जाने के बाद आप स्टूडियो में सबग्राफ को देख या संपादित नहीं कर पाएंगे। -## 6. यदि मैं उस सबग्राफ का डेवलपर नहीं हूं जिसका मैं उपयोग करना चाहता हूं तो मैं सबग्राफ के लिए क्वेरी यूआरएल कैसे ढूंढूं? +## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? 
आप ग्राफ़ एक्सप्लोरर के सबग्राफ विवरण अनुभाग में प्रत्येक सबग्राफ का क्वेरी URL पा सकते हैं। जब आप "क्वेरी" बटन पर क्लिक करते हैं, तो आपको एक फलक पर निर्देशित किया जाएगा, जिसमें आप उस सबग्राफ का क्वेरी URL देख सकते हैं, जिसमें आप रुचि रखते हैं। फिर आप `` प्लेसहोल्डर को बदल सकते हैं एपीआई key के साथ आप सबग्राफ स्टूडियो में लाभ उठाना चाहते हैं। diff --git a/website/pages/hi/deploying/subgraph-studio.mdx b/website/pages/hi/deploying/subgraph-studio.mdx index 36c1e919d966..c1971237858a 100644 --- a/website/pages/hi/deploying/subgraph-studio.mdx +++ b/website/pages/hi/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ title: सबग्राफ स्टूडियो का उपयोग क 1. अपने वॉलेट से साइन इन करें - आप इसे मेटामास्क या वॉलेटकनेक्ट के माध्यम से कर सकते हैं 1. एक बार जब आप साइन इन कर लेते हैं, तो आप अपने खाते के होम पेज पर अपनी विशिष्ट तैनात key देखेंगे। इससे आप या तो अपने सबग्राफ प्रकाशित कर सकेंगे या अपनी एपीआई key + बिलिंग प्रबंधित कर सकेंगे। आपके पास एक अनोखा तैनाती key होगी जिसे यदि आपको लगता है कि इससे समझौता किया गया है तो इसे फिर से उत्पन्न किया जा सकता है। -## सबग्राफ स्टूडियो में अपना सबग्राफ कैसे बनाएं +## How to Create a Subgraph in Subgraph Studio -श्रेष्ठ भाग! जब आप पहली बार एक सबग्राफ बनाते हैं, तो आपको भरने के लिए निर्देशित किया जाएगा: - -- आपका सबग्राफ नाम -- छवि -- विवरण -- श्रेणियां (जैसे `DeFi`, `NFTs`, `Governance`) -- वेबसाइट + ## ग्राफ नेटवर्क के साथ सबग्राफ अनुकूलता diff --git a/website/pages/hi/developing/creating-a-subgraph.mdx b/website/pages/hi/developing/creating-a-subgraph.mdx index 7ce0fa4731cd..24cb6fd82442 100644 --- a/website/pages/hi/developing/creating-a-subgraph.mdx +++ b/website/pages/hi/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: मेनिफेस्ट के लिए अद्यतन करने के लिए महत्वपूर्ण प्रविष्टियां हैं: -- `विवरण`: सबग्राफ क्या है, इसका मानव-पठनीय विवरण। यह विवरण ग्राफ़ एक्सप्लोरर द्वारा प्रदर्शित किया जाता है जब सबग्राफ को होस्ट की गई सेवा में तैनात किया जाता है। +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `रिपॉजिटरी`: रिपॉजिटरी का URL जहां सबग्राफ मेनिफेस्ट पाया जा सकता है। यह द ग्राफ एक्सप्लोरर द्वारा भी प्रदर्शित किया गया है। @@ -146,6 +154,10 @@ dataSources: - `dataSources.source.startBlock`: उस ब्लॉक की वैकल्पिक संख्या जिससे डेटा स्रोत इंडेक्स करना शुरू करता है। ज्यादातर मामलों में, हम उस ब्लॉक का उपयोग करने का सुझाव देते हैं जिसमें अनुबंध बनाया गया था। +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. 
+ - `dataSources.mapping.entities`: वे निकाय जो डेटा स्रोत स्टोर को लिखते हैं। प्रत्येक इकाई के लिए स्कीमा को schema.graphql फ़ाइल में परिभाषित किया गया है। - `dataSources.mapping.abis`: स्रोत अनुबंध के साथ-साथ मैपिंग के भीतर से आपके द्वारा इंटरैक्ट किए जाने वाले किसी भी अन्य स्मार्ट अनुबंध के लिए एक या अधिक नामित एबीआई फाइलें। @@ -242,6 +254,7 @@ type GravatarDeclined @entity { | `String` | `स्ट्रिंग` मानों के लिए स्केलर। अशक्त वर्ण समर्थित नहीं हैं और स्वचालित रूप से हटा दिए जाते हैं। | | `Boolean` | `boolean` मानों के लिए स्केलर। | | `Int` | ग्राफक्लाइन स्पेक `Int` को 32 बाइट्स के आकार के रूप में परिभाषित करता है। | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | बड़े पूर्णांक। एथेरियम के `uint32`, `int64`, `uint64`, ..., `uint256` प्रकारों के लिए उपयोग किया जाता है। नोट: `uint32` के नीचे सब कुछ, जैसे `int32`, `uint24` या `int8` को `i32` के रूप में दर्शाया गया है। | | `BigDecimal` | `BigDecimal` उच्च परिशुद्धता दशमलव एक महत्व और एक प्रतिपादक के रूप में दर्शाया गया है। एक्सपोनेंट रेंज -6143 से +6144 तक है। 34 महत्वपूर्ण अंकों तक गोल। | @@ -770,6 +783,8 @@ The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a ### समर्थित फ़िल्टर +#### Call Filter + ```yaml filter: kind: call @@ -806,9 +821,48 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### मानचित्रण समारोह -मैपिंग फ़ंक्शन को इसके एकमात्र तर्क के रूप में `etherium.Block` प्राप्त होगा। इवेंट के लिए मैपिंग फ़ंक्शन की तरह, यह फ़ंक्शन स्टोर में मौजूदा सबग्राफ इकाइयों तक पहुंच सकता है, स्मार्ट कॉन्ट्रैक्ट्स को कॉल कर सकता है और इकाइयां बना या अपडेट कर सकता है। +The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -934,6 +988,8 @@ _meta { ### मौजूदा सबग्राफ पर ग्राफ्टिंग +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
+ When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. जब `subgraph.yaml` में सबग्राफ मेनिफेस्ट में `graft` ब्लॉक होता है तो एक सबग्राफ को बेस सबग्राफ पर ग्राफ्ट किया जाता है: @@ -963,7 +1019,7 @@ graft: ## फ़ाइल डेटा स्रोत -फ़ाइल डेटा स्रोत एक मजबूत, विस्तार योग्य तरीके से इंडेक्सिंग के दौरान ऑफ-चेन डेटा तक पहुँचने के लिए एक नई सबग्राफ कार्यक्षमता है, जो IPFS से शुरू होती है। +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > यह ऑफ-चेन डेटा के नियतात्मक अनुक्रमण के साथ-साथ स्वैच्छिक HTTP-स्रोत डेटा के संभावित परिचय के लिए आधार भी देता है। @@ -1030,7 +1086,7 @@ type TokenMetadata @entity { > आप इन नेस्टेड इकाइयों के आधार पर पैरेंट इकाइयों को फ़िल्टर करने के लिए [नेस्टेड फ़िल्टर](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) का उपयोग कर सकते हैं। -#### `kind: file/ipfs` के साथ एक नया टेम्पलेटेड डेटा स्रोत जोड़ें +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` यह वह डेटा स्रोत है जो ब्याज की फ़ाइल की पहचान होने पर उत्पन्न होगा। @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { अब आप चेन-आधारित हैंडलर के निष्पादन के दौरान फ़ाइल डेटा स्रोत बना सकते हैं: - ऑटो-जेनरेट किए गए `टेम्पलेट्स` से टेम्प्लेट आयात करें -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> वर्तमान में ग्राफ़ नोड [v0 और v1 सामग्री पहचानकर्ताओं](https://docs.ipfs.tech/concepts/content-addressing/) का समर्थन करता है, और निर्देशिकाओं के साथ सामग्री पहचानकर्ता (जैसे `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata).json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). उदाहरण: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -यह एक नया फ़ाइल डेटा स्रोत बनाएगा, जो ग्राफ़ नोड के कॉन्फ़िगर किए गए IPFS एंडपॉइंट को पोल करेगा, अगर यह नहीं मिला तो पुनः प्रयास करेगा। जब फ़ाइल मिल जाती है, फ़ाइल डेटा स्रोत हैंडलर निष्पादित किया जाएगा। +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. 
When the file is found, the file data source handler will be executed. यह उदाहरण CID का उपयोग पैरेंट `Token` इकाई और परिणामी `TokenMetadata` इकाई के बीच लुकअप के रूप में कर रहा है। diff --git a/website/pages/hi/developing/developer-faqs.mdx b/website/pages/hi/developing/developer-faqs.mdx index 15c80502b8b1..63c37db5b205 100644 --- a/website/pages/hi/developing/developer-faqs.mdx +++ b/website/pages/hi/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } वर्तमान में, डैप के लिए अनुशंसित तरीका फ्रंटएंड में कुंजी जोड़ना और अंतिम उपयोगकर्ताओं के लिए इसे उजागर करना है। उस ने कहा, आप उस कुंजी को होस्टनाम तक सीमित कर सकते हैं, जैसे _yourdapp.io_ और सबग्राफ। गेटवे वर्तमान में Edge & द्वारा चलाया जा रहा है; नोड। गेटवे की जिम्मेदारी का हिस्सा अपमानजनक व्यवहार की निगरानी करना और दुर्भावनापूर्ण ग्राहकों से आने वाले ट्रैफ़िक को ब्लॉक करना है। -## 25. मैं होस्ट की गई सेवा पर अपना वर्तमान सबग्राफ कहां ढूंढूं? +## 25. Where do I go to find my current subgraph on the hosted service? -आपके या अन्य लोगों द्वारा होस्ट की गई सेवा पर तैनात किए गए सबग्राफ ढूंढने के लिए होस्ट की गई सेवा पर जाएं। आप इसे [यहां](https://thegraph.com/hosted-service) पा सकते हैं। +Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. क्या होस्ट की गई सेवा क्वेरी शुल्क लेना शुरू कर देगी? +## 26. Will the hosted service start charging query fees? -द ग्राफ कभी भी होस्टेड सेवा के लिए शुल्क नहीं लेगा। द ग्राफ एक डिसेंट्रलाइज्ड प्रोटोकॉल है, और एक सेंट्रलाइज्ड सेवा के लिए शुल्क लेना द ग्राफ के मूल्यों के साथ संतुलित नहीं है। होस्टेड सेवा हमेशा से एक अस्थायी कदम थी जो डिसेंट्रलाइज्ड नेटवर्क को प्राप्त करने में मदद करने के लिए था। डेवलपर्स को संतुष्ट होने पर डिसेंट्रलाइज्ड नेटवर्क में अपग्रेड करने के लिए पर्याप्त समय मिलेगा। +The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. होस्टेड सर्विस कब बंद होगी? +## 27. How do I update a subgraph on mainnet? -होस्ट की गई सेवा 2023 में बंद हो जाएगी। घोषणा ब्लॉग पोस्ट [यहां](https://thegraph.com/blog/sunsetting-hosted-service) पढ़ें। होस्ट की गई सेवा का उपयोग करने वाले सभी डैप को विकेंद्रीकृत नेटवर्क में अपग्रेड करने के लिए प्रोत्साहित किया जाता है। डेवलपर्स के लिए उनके सबग्राफ़ को द ग्राफ़ नेटवर्क में अपग्रेड करने में सहायता के लिए नेटवर्क अनुदान उपलब्ध हैं। यदि आपका डैप किसी सबग्राफ को अपग्रेड कर रहा है तो आप [यहां](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com) आवेदन कर सकते हैं। - -## 28. मैं मेननेट पर सबग्राफ कैसे अपडेट करूं? - -यदि आप एक सबग्राफ डेवलपर हैं, तो आप CLI का उपयोग करके सबग्राफ स्टूडियो में एक नए संस्करण को डिप्लॉय कर सकते हैं। इस समय तक यह निजी होगा, लेकिन यदि आप इससे संतुष्ट हैं, तो आप इसे डिसेंट्रलाइज़ड ग्राफ एक्सप्लोरर में प्रकाशित कर सकते हैं। इससे आपके सबग्राफ का एक नया संस्करण बनाया जाएगा जिसमें क्यूरेटर्स साइनल करना शुरू कर सकते हैं। +If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. 
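A minimal sketch of that flow with the Graph CLI (assuming the CLI is installed and `<DEPLOY_KEY>` / `<SUBGRAPH_SLUG>` stand in for your own Studio deploy key and subgraph slug):

```sh
# Authenticate the Graph CLI against Subgraph Studio with your deploy key
graph auth --studio <DEPLOY_KEY>

# Build and deploy a new version to Subgraph Studio
# (the CLI prompts for a version label, e.g. v0.0.2)
graph deploy --studio <SUBGRAPH_SLUG>
```

The deployed version remains private until you publish it from Subgraph Studio, at which point Curators can begin signaling on it.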
diff --git a/website/pages/hi/developing/graph-ts/api.mdx b/website/pages/hi/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..4d7e5d751d23 --- /dev/null +++ b/website/pages/hi/developing/graph-ts/api.mdx @@ -0,0 +1,858 @@ +--- +title: असेंबलीस्क्रिप्ट एपीआई +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +यह पृष्ठ दस्तावेज करता है कि सबग्राफ मैपिंग लिखते समय किन अंतर्निहित एपीआई का उपयोग किया जा सकता है। बॉक्स से बाहर दो प्रकार के एपीआई उपलब्ध हैं: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## एपीआई संदर्भ + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- एथेरियम, JSON, ग्राफक्यूएल और असेंबलीस्क्रिप्ट जैसे विभिन्न प्रकार की प्रणालियों के बीच अनुवाद करने के लिए निम्न-स्तरीय आदिम। + +### संस्करणों + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| संस्करण | रिलीज नोट्स | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### अंतर्निहित प्रकार + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### स्टोर एपीआई + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### संस्थाओं का निर्माण + +एथेरियम घटनाओं से संस्थाओं को बनाने के लिए निम्नलिखित एक सामान्य पैटर्न है। + +```typescript +/ Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. 
+ +अन्य संस्थाओं के साथ टकराव से बचने के लिए प्रत्येक इकाई के पास एक विशिष्ट आईडी होनी चाहिए। ईवेंट पैरामीटर के लिए एक अद्वितीय पहचानकर्ता शामिल करना काफी सामान्य है जिसका उपयोग किया जा सकता है। नोट: आईडी के रूप में लेन-देन हैश का उपयोग करना मानता है कि एक ही लेन-देन में कोई अन्य घटना इस हैश के साथ आईडी के रूप में संस्था नहीं बनाती है। + +#### Loading entities from the store + +यदि कोई इकाई पहले से मौजूद है, तो इसे स्टोर से निम्न के साथ लोड किया जा सकता है: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### एक ब्लॉक के साथ बनाई गई संस्थाओं को देखना + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +स्टोर एपीआई उन संस्थाओं की पुनर्प्राप्ति की सुविधा देता है जो वर्तमान ब्लॉक में बनाई या अपडेट की गई थीं। इसके लिए एक विशिष्ट स्थिति यह है कि एक हैंडलर कुछ ऑन-चेन ईवेंट से लेन-देन बनाता है, और बाद में हैंडलर मौजूद होने पर इस लेनदेन तक पहुंचना चाहता है। ऐसे मामले में जहां लेन-देन मौजूद नहीं है, सबग्राफ को केवल यह पता लगाने के लिए डेटाबेस में जाना होगा कि इकाई मौजूद नहीं है; अगर सबग्राफ लेखक पहले से ही जानता है कि इकाई को उसी ब्लॉक में बनाया जाना चाहिए, तो loadInBlock का उपयोग करके इस डेटाबेस राउंडट्रिप से बचा जाता है। कुछ सबग्राफ के लिए, ये छूटे हुए लुकअप इंडेक्सिंग समय में महत्वपूर्ण योगदान दे सकते हैं। + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +किसी मौजूदा निकाय को अद्यतन करने के दो तरीके हैं: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +ज्यादातर मामलों में गुण बदलना सीधे आगे है, उत्पन्न संपत्ति सेटर्स के लिए धन्यवाद: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... 
+transfer.to = ... +transfer.amount = ... +``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### स्टोर से संस्थाओं को हटाना + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### एथेरियम एपीआई + +एथेरियम एपीआई स्मार्ट कॉन्ट्रैक्ट्स, पब्लिक स्टेट वेरिएबल्स, कॉन्ट्रैक्ट फ़ंक्शंस, इवेंट्स, ट्रांजेक्शन, ब्लॉक्स और एन्कोडिंग / डिकोडिंग एथेरियम डेटा तक पहुंच प्रदान करता है। + +#### एथेरियम प्रकार के लिए समर्थन + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### घटनाक्रम और ब्लॉक/लेनदेन डेटा + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +एक सामान्य पैटर्न उस अनुबंध का उपयोग करना है जिससे कोई घटना उत्पन्न होती है। यह निम्नलिखित कोड के साथ हासिल किया गया है: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +कोई अन्य अनुबंध जो सबग्राफ का हिस्सा है, उत्पन्न कोड से आयात किया जा सकता है और एक वैध पते के लिए बाध्य किया जा सकता है। + +#### रिवर्टेड कॉल्स को हैंडल करना + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. 
This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +ध्यान दें कि Geth या Infura क्लाइंट से जुड़ा एक ग्राफ़ नोड सभी रिवर्ट का पता नहीं लगा सकता है, अगर आप इस पर भरोसा करते हैं तो हम पैरिटी क्लाइंट से जुड़े ग्राफ़ नोड का उपयोग करने की सलाह देते हैं। + +#### एन्कोडिंग/डिकोडिंग एबीआई + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +अधिक जानकारी के लिए: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### लॉगिंग एपीआई + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. 
+ +```typescript +log.info('संदेश प्रदर्शित किया जाना है: {}, {}, {}', [ + value.toString(), + OtherValue.toString(), + 'पहले से ही एक स्ट्रिंग', +]) +``` + +#### एक या अधिक मान लॉग करना + +##### एकल मान लॉग करना + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### किसी मौजूदा सरणी से एकल प्रविष्टि लॉग करना + +नीचे दिए गए उदाहरण में, तीन मानों वाले सरणी के बावजूद, तर्क सरणी का केवल पहला मान लॉग किया गया है। + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### मौजूदा सरणी से एकाधिक प्रविष्टियां लॉग करना + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### मौजूदा सरणी से एक विशिष्ट प्रविष्टि लॉग करना + +सरणी में एक विशिष्ट मान प्रदर्शित करने के लिए, अनुक्रमित मान प्रदान किया जाना चाहिए। + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### लॉगिंग घटना की जानकारी + +नीचे दिया गया उदाहरण एक घटना से ब्लॉक संख्या, ब्लॉक हैश और लेनदेन हैश को लॉग करता है: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### आईपीएफएस एपीआई + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +IPFS हैश या पथ को देखते हुए, IPFS से फ़ाइल पढ़ना निम्नानुसार किया जाता है: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### क्रिप्टो एपीआई + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... 
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### प्रकार रूपांतरण संदर्भ + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() या s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() या s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### डेटा स्रोत मेटाडेटा + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### इकाई और डेटासोर्स कॉन्टेक्स्ट + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
+ +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/hi/developing/graph-ts/common-issues.mdx b/website/pages/hi/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..baa35e2d653c --- /dev/null +++ b/website/pages/hi/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: आम AssemblyScript मुद्दे +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/hi/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/hi/developing/substreams-powered-subgraphs-faq.mdx index 4709d033d3ab..02592fd21457 100644 --- a/website/pages/hi/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/hi/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: सबस्ट्रीम-संचालित सबग्राफ अक्सर पूछे जाने वाले प्रश्न +title: Substreams-powered subgraphs FAQ --- -## सबस्ट्रीम क्या हैं? +## What are Substreams? 
-[StreamingFast](https://www.streamingfast.io/) द्वारा विकसित, सबस्ट्रीम एक असाधारण शक्तिशाली प्रसंस्करण इंजन है जो ब्लॉकचेन डेटा की समृद्ध धाराओं का उपभोग करने में सक्षम है। सबस्ट्रीम आपको अंतिम-उपयोगकर्ता अनुप्रयोगों द्वारा तेज़ और निर्बाध पाचन के लिए ब्लॉकचेन डेटा को परिष्कृत और आकार देने की अनुमति देता है। अधिक विशेष रूप से, सबस्ट्रीम एक ब्लॉकचेन-अज्ञेयवादी, समानांतर और स्ट्रीमिंग-प्रथम इंजन है, जो ब्लॉकचेन डेटा परिवर्तन परत के रूप में कार्य करता है। [Firehose] \(https://firehose.streamingfast.io/) द्वारा संचालित, यह डेवलपर्स को रस्ट मॉड्यूल लिखने, सामुदायिक मॉड्यूल बनाने, अत्यधिक उच्च-प्रदर्शन अनुक्रमण प्रदान करने और [sink] \(/ सबस्ट्रीम/डेवलपर्स-) करने में सक्षम बनाता है। गाइड/सिंक-लक्ष्य/रीडमी/#सबस्ट्रीम-सिंक-अवलोकन उनका डेटा कहीं भी। +Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. -सबस्ट्रीम के बारे में अधिक जानने के लिए [Substreams Documentation](/substreams) पर जाएं। +Go to the [Substreams Documentation](/substreams) to learn more about Substreams. -## सबस्ट्रीम-संचालित सबग्राफ क्या हैं? +## What are Substreams-powered subgraphs? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) सबस्ट्रीम की शक्ति को सबग्राफ की क्वेरीबिलिटी के साथ जोड़ते हैं। सबस्ट्रीम-संचालित सबग्राफ प्रकाशित करते समय, सबस्ट्रीम परिवर्तनों द्वारा उत्पादित डेटा, [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), जो सबग्राफ संस्थाओं के साथ संगत हैं। +[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. -यदि आप पहले से ही सबग्राफ विकास से परिचित हैं, तो ध्यान दें कि सबस्ट्रीम-संचालित सबग्राफ को तब क्वेरी किया जा सकता है, जैसे कि यह असेंबलीस्क्रिप्ट परिवर्तन परत द्वारा उत्पादित किया गया था, सभी सबग्राफ लाभों के साथ, जैसे एक गतिशील और लचीला ग्राफक्यूएल एपीआई प्रदान करना। +If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. -## सबस्ट्रीम-संचालित सबग्राफ, सबग्राफ से किस प्रकार भिन्न हैं? +## How are Substreams-powered subgraphs different from subgraphs? 
-सबग्राफ डेटा स्रोतों से बने होते हैं जो ऑन-चेन घटनाओं को निर्दिष्ट करते हैं, और उन घटनाओं को असेंबलीस्क्रिप्ट में लिखे गए हैंडलर के माध्यम से कैसे परिवर्तित किया जाना चाहिए। इन घटनाओं को श्रृंखला पर घटनाओं के घटित होने के क्रम के आधार पर क्रमिक रूप से संसाधित किया जाता है। +Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. -इसके विपरीत, सबस्ट्रीम-संचालित सबग्राफ में एक एकल डेटास्रोत होता है जो सबस्ट्रीम पैकेज को संदर्भित करता है, जिसे ग्राफ़ नोड द्वारा संसाधित किया जाता है। पारंपरिक सबग्राफ की तुलना में सबस्ट्रीम के पास अतिरिक्त ग्रैन्युलर ऑन-चेन डेटा तक पहुंच होती है, और बड़े पैमाने पर समानांतर प्रसंस्करण से भी लाभ हो सकता है, जिसका मतलब बहुत तेज प्रसंस्करण समय हो सकता है। +By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. -## सबस्ट्रीम-संचालित सबग्राफ का उपयोग करने के क्या लाभ हैं? +## What are the benefits of using Substreams-powered subgraphs? -सबस्ट्रीम-संचालित सबग्राफ, सबस्ट्रीम के सभी लाभों को सबग्राफ की क्वेरीबिलिटी के साथ जोड़ते हैं। वे ग्राफ़ में अधिक संयोजनशीलता और उच्च-प्रदर्शन अनुक्रमण लाते हैं। वे नए डेटा उपयोग के मामलों को भी सक्षम करते हैं; उदाहरण के लिए, एक बार जब आप अपना सबस्ट्रीम-संचालित सबग्राफ बना लेते हैं, तो आप अलग-अलग [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) जैसे पोस्टग्रेएसक्यूएल, मोंगोडीबी, और काफ्का। +Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. -## सबस्ट्रीम के क्या लाभ हैं? +## What are the benefits of Substreams? -सबस्ट्रीम का उपयोग करने के कई लाभ हैं, जिनमें शामिल हैं: +There are many benefits to using Substreams, including: -- कंपोज़ेबल: आप लेगो ब्लॉक जैसे सबस्ट्रीम मॉड्यूल को स्टैक कर सकते हैं, और सार्वजनिक डेटा को और परिष्कृत करते हुए सामुदायिक मॉड्यूल का निर्माण कर सकते हैं। +- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. -- उच्च-प्रदर्शन अनुक्रमण: समानांतर संचालन के बड़े पैमाने पर समूहों के माध्यम से तीव्रता के आदेशों को तेजी से अनुक्रमित करना (BigQuery के बारे में सोचें)। +- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). -- कहीं भी सिंक करें: अपने डेटा को अपनी इच्छानुसार कहीं भी सिंक करें: PostgreSQL, MongoDB, Kafka, सबग्राफ, फ़्लैट फ़ाइलें, Google शीट्स। +- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. 
-- प्रोग्राम करने योग्य: निष्कर्षण को अनुकूलित करने, परिवर्तन-समय एकत्रीकरण करने और एकाधिक सिंक के लिए अपने आउटपुट को मॉडल करने के लिए कोड का उपयोग करें। +- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. -- अतिरिक्त डेटा तक पहुंच जो JSON RPC के भाग के रूप में उपलब्ध नहीं है +- Access to additional data which is not available as part of the JSON-RPC -- फ़ायरहोज़ के सभी लाभ. +- All the benefits of the Firehose. -## फ़ायरहोज़ क्या है? +## What is the Firehose? -[StreamingFast](https://www.streamingfast.io/) द्वारा विकसित, फ़ायरहोज़ एक ब्लॉकचेन डेटा निष्कर्षण परत है जिसे ब्लॉकचेन के पूरे इतिहास को उस गति से संसाधित करने के लिए डिज़ाइन किया गया है जो पहले नहीं देखी गई थी। फ़ाइल-आधारित और स्ट्रीमिंग-प्रथम दृष्टिकोण प्रदान करते हुए, यह स्ट्रीमिंगफ़ास्ट के ओपन-सोर्स तकनीकों के सुइट और सबस्ट्रीम की नींव का एक मुख्य घटक है। +Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. -फ़ायरहोज़ के बारे में अधिक जानने के लिए [documentation](https://firehose.streamingfast.io/) पर जाएँ। +Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. -## फ़ायरहोज़ के क्या लाभ हैं? +## What are the benefits of the Firehose? -फ़ायरहोज़ का उपयोग करने के कई लाभ हैं, जिनमें शामिल हैं: +There are many benefits to using the Firehose, including: -- सबसे कम विलंबता और कोई मतदान नहीं: स्ट्रीमिंग-फर्स्ट फैशन में, फ़ायरहोज़ नोड्स को पहले ब्लॉक डेटा को पुश करने की दौड़ के लिए डिज़ाइन किया गया है। +- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. -- डाउनटाइम को रोकें: उच्च उपलब्धता के लिए शुरू से ही डिज़ाइन किया गया। +- Prevents downtime: Designed from the ground up for high availability. -- कभी भी एक बीट मिस न करें: फ़ायरहोज़ स्ट्रीम कर्सर को फोर्क्स को संभालने और किसी भी स्थिति में वहीं जारी रखने के लिए डिज़ाइन किया गया है जहां आपने छोड़ा था। +- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. -- सबसे समृद्ध डेटा मॉडल: सर्वोत्तम डेटा मॉडल जिसमें शेष परिवर्तन, पूर्ण कॉल ट्री, आंतरिक लेनदेन, लॉग, भंडारण परिवर्तन, गैस लागत और बहुत कुछ शामिल है। +- Richest data model: The best data model available, including balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. -- फ़्लैट फ़ाइलों का लाभ उठाता है: ब्लॉकचेन डेटा को फ़्लैट फ़ाइलों में निकाला जाता है, जो उपलब्ध सबसे सस्ता और सबसे अनुकूलित कंप्यूटिंग संसाधन है। +- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. -## डेवलपर्स सबस्ट्रीम-संचालित सबग्राफ और सबस्ट्रीम के बारे में अधिक जानकारी कहां से प्राप्त कर सकते हैं? +## Where can developers access more information about Substreams-powered subgraphs and Substreams? -[Substreams documentation](/substreams) आपको सबस्ट्रीम मॉड्यूल बनाना सिखाएगा। +The [Substreams documentation](/substreams) will teach you how to build Substreams modules.
-[Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) आपको दिखाएगा कि ग्राफ़ पर तैनाती के लिए उन्हें कैसे पैकेज किया जाए। +The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. -## सबस्ट्रीम में रस्ट मॉड्यूल की क्या भूमिका है? +## What is the role of Rust modules in Substreams? -रस्ट मॉड्यूल सबग्राफ में असेंबलीस्क्रिप्ट मैपर्स के समतुल्य हैं। उन्हें समान तरीके से WASM में संकलित किया जाता है, लेकिन प्रोग्रामिंग मॉडल समानांतर निष्पादन की अनुमति देता है। वे उस प्रकार के परिवर्तनों और एकत्रीकरण को परिभाषित करते हैं जिन्हें आप कच्चे ब्लॉकचेन डेटा पर लागू करना चाहते हैं। +Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. -विवरण के लिए [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) देखें। +See the [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. -## क्या सबस्ट्रीम को रचना योग्य बनाता है? +## What makes Substreams composable? -सबस्ट्रीम का उपयोग करते समय, संरचना परिवर्तन परत पर होती है जिससे कैश्ड मॉड्यूल को फिर से उपयोग करने में सक्षम बनाया जाता है। +When using Substreams, the composition happens at the transformation layer, enabling cached modules to be reused. -उदाहरण के तौर पर, ऐलिस एक DEX मूल्य मॉड्यूल बना सकता है, बॉब अपनी रुचि के कुछ टोकन के लिए वॉल्यूम एग्रीगेटर बनाने के लिए इसका उपयोग कर सकता है, और लिसा एक मूल्य ऑरेकल बनाने के लिए चार अलग-अलग DEX मूल्य मॉड्यूल को जोड़ सकती है। एक एकल सबस्ट्रीम अनुरोध इन सभी व्यक्तिगत मॉड्यूल को पैकेज करेगा, डेटा की अधिक परिष्कृत स्ट्रीम की पेशकश करने के लिए उन्हें एक साथ लिंक करेगा। फिर उस स्ट्रीम का उपयोग सबग्राफ को पॉप्युलेट करने के लिए किया जा सकता है, और उपभोक्ताओं द्वारा पूछताछ की जा सकती है। +As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of interest to him, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual modules and link them together to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. -## आप सबस्ट्रीम-संचालित सबग्राफ कैसे बना और तैनात कर सकते हैं? +## How can you build and deploy a Substreams-powered Subgraph? -एक सबस्ट्रीम-संचालित सबग्राफ को [defining](/cookbook/substreams-powered-subgraphs/) करने के बाद, आप इसे [सबग्राफ स्टूडियो](https://thegraph.com/studio/) में तैनात करने के लिए ग्राफ सीएलआई का उपयोग कर सकते हैं। +After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). -## मुझे सबस्ट्रीम और सबस्ट्रीम-संचालित सबग्राफ के उदाहरण कहां मिल सकते हैं? +## Where can I find examples of Substreams and Substreams-powered subgraphs? -आप सबस्ट्रीम और सबस्ट्रीम-संचालित सबग्राफ के उदाहरण ढूंढने के लिए [this GitHub repo](https://github.com/pinax-network/awesome-substreams) पर जा सकते हैं। +You can visit [this GitHub repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. -## ग्राफ़ नेटवर्क के लिए सबस्ट्रीम और सबस्ट्रीम-संचालित सबग्राफ का क्या अर्थ है?
+## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? -एकीकरण कई लाभों का वादा करता है, जिसमें सामुदायिक मॉड्यूल का लाभ उठाकर और उन पर निर्माण करके अत्यधिक उच्च-प्रदर्शन अनुक्रमण और अधिक संयोजनशीलता शामिल है। +The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. diff --git a/website/pages/hi/developing/supported-networks.json b/website/pages/hi/developing/supported-networks.json index 5e12392b8c7d..d09515af0489 100644 --- a/website/pages/hi/developing/supported-networks.json +++ b/website/pages/hi/developing/supported-networks.json @@ -1,5 +1,5 @@ { - "network": "Network", + "network": "नेटवर्क", "cliName": "CLI Name", "chainId": "Chain ID", "studioAndHostedService": "Studio and Hosted Service", diff --git a/website/pages/hi/developing/supported-networks.mdx b/website/pages/hi/developing/supported-networks.mdx index 92914ac63d0c..cf872395ac16 100644 --- a/website/pages/hi/developing/supported-networks.mdx +++ b/website/pages/hi/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## ग्राफ नोड -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. 
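A minimal sketch of running your own node for an unsupported network, assuming the standard `graph-node` command-line flags; the network name `mynetwork`, the RPC URL, the Postgres connection string, and the IPFS address below are all placeholders:

```sh
# All values are placeholders; point them at your own database, RPC endpoint, and IPFS node.
graph-node \
  --postgres-url postgresql://graph:dbpassword@localhost:5432/graph-node \
  --ethereum-rpc mynetwork:https://rpc.example.com \
  --ipfs 127.0.0.1:5001
```

The name passed to `--ethereum-rpc` is the identifier that your subgraph manifests would then reference in their `network` field.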
फ़ायरहोज़ एकीकरण के माध्यम से ग्राफ़ नोड अन्य प्रोटोकॉल को भी अनुक्रमित कर सकता है। फायरहोज एकीकरण NEAR, Arweave और Cosmos-आधारित नेटवर्क के लिए बनाया गया है। diff --git a/website/pages/hi/developing/unit-testing-framework.mdx b/website/pages/hi/developing/unit-testing-framework.mdx index 5b8465d0545f..0d0605eab6fe 100644 --- a/website/pages/hi/developing/unit-testing-framework.mdx +++ b/website/pages/hi/developing/unit-testing-framework.mdx @@ -102,13 +102,13 @@ graph test path/to/file.test.ts **विकल्प:** ```sh --c, --कवरेज परीक्षणों को कवरेज मोड में चलाएँ --d, --docker एक डोकर कंटेनर में परीक्षण चलाएँ (नोट: कृपया सबग्राफ के रूट फ़ोल्डर से निष्पादित करें) --एफ, --फोर्स बाइनरी: बाइनरी को फिर से डाउनलोड करता है। डॉकर: डॉकरफ़ाइल को फिर से डाउनलोड करता है और डॉकर छवि को फिर से बनाता है। --h, --help उपयोग की जानकारी दिखाएं --एल, --लॉग्स ओएस, सीपीयू मॉडल और डाउनलोड यूआरएल (डीबगिंग उद्देश्यों) के बारे में कंसोल जानकारी पर लॉग करता है --r, --recompile परीक्षणों को पुनः संकलित करने के लिए बाध्य करता है --v, --version रस्ट बाइनरी का वह संस्करण चुनें जिसे आप डाउनलोड/उपयोग करना चाहते हैं +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### Docker @@ -989,9 +989,9 @@ test('Data source simple mocking example', () => { ## टेस्ट कवरेज -**माचिस** का उपयोग करके, सबग्राफ डेवलपर्स एक स्क्रिप्ट चलाने में सक्षम हैं जो लिखित इकाई परीक्षणों के परीक्षण कवरेज की गणना करेगा। +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -परीक्षण कवरेज उपकरण संकलित परीक्षण `wasm` बायनेरिज़ लेता है और उन्हें `wat` फ़ाइलों में परिवर्तित करता है, जिसे बाद में यह देखने के लिए आसानी से निरीक्षण किया जा सकता है कि हैंडलर `सबग्राफ में परिभाषित हैं या नहीं. subgraph.yaml` को बुलाया गया है। चूंकि असेंबलीस्क्रिप्ट और वेबअसेंबली में कोड कवरेज (और संपूर्ण परीक्षण) बहुत प्रारंभिक चरण में है, **Matchstick** शाखा कवरेज की जांच नहीं कर सकता है। इसके बजाय हम इस दावे पर भरोसा करते हैं कि यदि किसी दिए गए हैंडलर को बुलाया गया है, तो उसके लिए ईवेंट/फ़ंक्शन को उचित रूप से मॉक किया गया है। +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. ### आवश्यक शर्तें @@ -1028,7 +1028,7 @@ graph test -- -c }, ``` -वह कवरेज टूल निष्पादित करेगा और आपको टर्मिनल में कुछ इस तरह देखना चाहिए: +That will execute the coverage tool and you should see something like this in the terminal: ```sh $ graph test -c @@ -1085,7 +1085,7 @@ Global test coverage: 22.2% (2/9 handlers). > > in ~lib/matchstick-as/assembly/defaults.ts(18,12) > -> त्रुटि TS2554: अपेक्षित? तर्क, लेकिन मिल गया? +> त्रुटि TS2554: अपेक्षित? तर्क, लेकिन मिला ?. 
> > return new ethereum.Transaction(defaultAddressBytes, defaultBigInt, defaultAddress, defaultAddress, defaultBigInt, defaultBigInt, defaultBigInt, defaultAddressBytes, defaultBigInt); > diff --git a/website/pages/hi/firehose.mdx b/website/pages/hi/firehose.mdx index ffee461da643..90ce965a8b69 100644 --- a/website/pages/hi/firehose.mdx +++ b/website/pages/hi/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose ब्लॉकचैन डेटा को संसाधित करने के लिए फाइल-आधारित और स्ट्रीमिंग-प्रथम दृष्टिकोण प्रदान करता है। +![Firehose Logo](/img/firehose-logo.png) -एथेरियम (और कई ईवीएम श्रृंखला), एनईएआर, सोलाना, कॉसमॉस और अरविवे के लिए फायरहोज एकीकरण का निर्माण किया गया है, जिसमें अधिक काम किया गया है। +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -ग्राफ़ नोड एकीकरण कई श्रृंखलाओं के लिए बनाए गए हैं, इसलिए सबग्राफ फ़ायरहोज़ से डेटा को पावर परफ़ॉर्मेंट और स्केलेबल इंडेक्सिंग में स्ट्रीम कर सकते हैं। Firehose [सबस्ट्रीम](/substreams) को भी शक्ति प्रदान करता है, जो ग्राफ़ कोर डेवलपर्स द्वारा निर्मित एक नई रूपांतरण तकनीक है। +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -अधिक जानने के लिए [firehose दस्तावेज़ीकरण](https://firehose.streamingfast.io/) पर जाएं। +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### शुरू करना + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/hi/glossary.mdx b/website/pages/hi/glossary.mdx index a61d12fae9a3..455310a82342 100644 --- a/website/pages/hi/glossary.mdx +++ b/website/pages/hi/glossary.mdx @@ -12,7 +12,7 @@ title: शब्दकोष - **सबग्राफ**: ब्लॉकचैन डेटा पर निर्मित एक कस्टम एपीआई जिसे [ग्राफक्यूएल](https://graphql.org/) का उपयोग करके क्वेरी की जा सकती है। डेवलपर ग्राफ़ के विकेन्द्रीकृत नेटवर्क के लिए सबग्राफ बना, तैनात और प्रकाशित कर सकते हैं। फिर, इंडेक्सर्स सबग्राफ उपभोक्ताओं द्वारा पूछताछ के लिए उन्हें उपलब्ध कराने के लिए सबग्राफ को इंडेक्स करना शुरू कर सकते हैं। -- **होस्ट की गई सेवा**: ग्राफ़ के विकेन्द्रीकृत नेटवर्क के रूप में उप-अनुच्छेद बनाने और क्वेरी करने के लिए एक अस्थायी मचान सेवा सेवा की लागत, सेवा की गुणवत्ता और डेवलपर अनुभव को परिपक्व कर रही है। +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. 
- **इंडेक्सर्स**: नेटवर्क प्रतिभागी जो ब्लॉकचेन से डेटा को इंडेक्स करने के लिए इंडेक्सिंग नोड्स चलाते हैं और ग्राफक्यूएल क्वेरीज सर्व करते हैं। @@ -24,6 +24,8 @@ title: शब्दकोष - **इंडेक्सर का सेल्फ स्टेक**: GRT की वह राशि जो इंडेक्सर्स विकेंद्रीकृत नेटवर्क में भाग लेने के लिए दांव पर लगाते हैं। न्यूनतम 100,000 जीआरटी है, और कोई ऊपरी सीमा नहीं है। +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **डेलीगेटर्स**: नेटवर्क प्रतिभागी जो GRT के मालिक हैं और अपने GRT को इंडेक्सर्स को सौंपते हैं। यह इंडेक्सर्स को नेटवर्क पर सबग्राफ में अपनी हिस्सेदारी बढ़ाने की अनुमति देता है। बदले में, डेलिगेटर्स को इंडेक्सिंग रिवॉर्ड्स का एक हिस्सा मिलता है जो इंडेक्सर्स को सबग्राफ प्रोसेसिंग के लिए मिलता है। - **प्रत्यायोजन कर**: प्रतिनिधि द्वारा 0.5% शुल्क का भुगतान किया जाता है, जब वे अनुक्रमणकों को GRT प्रत्यायोजित करते हैं. शुल्क का भुगतान करने के लिए प्रयुक्त GRT जल गया है। @@ -38,27 +40,21 @@ title: शब्दकोष - **सबग्राफ मेनिफेस्ट**: एक JSON फाइल जो सबग्राफ के ग्राफक्यूएल स्कीमा, डेटा स्रोत और अन्य मेटाडेटा का वर्णन करती है। [यहां](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) एक उदाहरण है। -- **रिबेट पूल**: एक आर्थिक सुरक्षा उपाय जो सबग्राफ उपभोक्ताओं द्वारा भुगतान किए गए क्वेरी शुल्क को रखता है जब तक कि इंडेक्सर्स द्वारा क्वेरी शुल्क छूट के रूप में उनका दावा नहीं किया जा सकता। अवशिष्ट जीआरटी जल गया है। - -- **युग**: नेटवर्क में समय की एक इकाई। एक युग वर्तमान में 6,646 ब्लॉक या लगभग 1 दिन है। +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **आवंटन**: एक इंडेक्सर अपनी कुल जीआरटी हिस्सेदारी (प्रतिनिधियों की हिस्सेदारी सहित) सबग्राफ के लिए आवंटित कर सकता है जो कि ग्राफ के विकेंद्रीकृत नेटवर्क पर प्रकाशित किया गया है। आवंटन चार चरणों में से एक में मौजूद हैं। 1. **सक्रिय**: एक आवंटन को तब सक्रिय माना जाता है जब इसे ऑन-चेन बनाया जाता है। इसे ओपनिंग आबंटन कहा जाता है, और यह नेटवर्क को इंगित करता है कि इंडेक्सर सक्रिय रूप से अनुक्रमण कर रहा है और किसी विशेष सबग्राफ के लिए प्रश्नों की सेवा कर रहा है। सक्रिय आबंटन उप-अनुच्छेद पर संकेत के अनुपात में अनुक्रमित पुरस्कार अर्जित करते हैं, और आवंटित जीआरटी की राशि। - 2. **बंद**: एक इंडेक्सर एक हालिया, और मान्य, इंडेक्सिंग का प्रमाण (POI) जमा करके किसी दिए गए सबग्राफ पर अर्जित इंडेक्सिंग पुरस्कारों का दावा कर सकता है। इसे आवंटन बंद करने के रूप में जाना जाता है। आवंटन बंद होने से पहले कम से कम एक युग के लिए खुला होना चाहिए। अधिकतम आवंटन अवधि 28 युग है। यदि एक अनुक्रमणिका आवंटन को 28 युगों से अधिक खुला छोड़ देता है, तो इसे बासी आवंटन के रूप में जाना जाता है। जब आवंटन **बंद** स्थिति में होता है, तब भी एक मछुआरा झूठे डेटा की सेवा के लिए एक इंडेक्सर को चुनौती देने के लिए एक विवाद खोल सकता है। - - 3. **अंतिम रूप दिया गया**: विवाद की अवधि समाप्त हो गई है, और इंडेक्सर्स द्वारा दावा किए जाने के लिए क्वेरी शुल्क छूट उपलब्ध हैं। - - 4. **दावा किया गया**: आवंटन का अंतिम चरण, सभी पात्र पुरस्कार वितरित किए जा चुके हैं और इसके प्रश्न शुल्क छूट का दावा किया जा चुका है। + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. 
An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **सबग्राफ स्टूडियो**: सबग्राफ बनाने, लगाने और प्रकाशित करने के लिए एक शक्तिशाली डैप। -- **मछुआरे**: नेटवर्क प्रतिभागी इंडेक्सर्स की क्वेरी प्रतिक्रियाओं और पीओआई पर विवाद कर सकते हैं। इसे मछुआरा होना कहा जाता है। मछुआरे के पक्ष में सुलझाए गए विवाद का परिणाम मछुआरे को एक पुरस्कार के साथ-साथ अनुक्रमणक के लिए वित्तीय जुर्माना होता है, इस प्रकार अनुक्रमणक की अखंडता और नेटवर्क में अनुक्रमणकों द्वारा निष्पादित क्वेरी कार्य को प्रोत्साहन मिलता है। जुर्माना (स्लैशिंग) वर्तमान में एक इंडेक्सर की स्वयं की हिस्सेदारी के 2.5% पर निर्धारित है, जिसमें 50% घटा हुआ GRT मछुआरे को जाता है, और अन्य 50% जला दिया जाता है। +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **मध्यस्थ**: मध्यस्थ सरकार द्वारा निर्धारित नेटवर्क भागीदार होते हैं। मध्यस्थ की भूमिका इंडेक्सिंग और क्वेरी विवादों के परिणाम तय करना है। उनका लक्ष्य द ग्राफ नेटवर्क की उपयोगिता और विश्वसनीयता को अधिकतम करना है। +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **स्लैशिंग**: इंडेक्सिंग का गलत प्रमाण (POI) प्रदान करने या गलत डेटा प्रस्तुत करने के लिए इंडेक्सर्स अपने स्टेक वाले GRT को घटा सकते हैं। कटौती प्रतिशत एक प्रोटोकॉल पैरामीटर है जो वर्तमान में इंडेक्सर की स्वयं हिस्सेदारी के 2.5% पर सेट है। घटाया गया GRT का 50% उस मछुआरे को जाता है जिसने गलत डेटा या गलत POI पर विवाद किया था। बाकी 50 फीसदी जल गया है। +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self-stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.
- **इंडेक्सिंग रिवार्ड्स**: वे पुरस्कार जो इंडेक्सर्स को सबग्राफ इंडेक्स करने के लिए मिलते हैं। इंडेक्सिंग पुरस्कार जीआरटी में वितरित किए जाते हैं। @@ -66,7 +62,7 @@ title: शब्दकोष - **GRT**: ग्राफ़ का कार्य उपयोगिता टोकन। जीआरटी नेटवर्क प्रतिभागियों को नेटवर्क में योगदान करने के लिए आर्थिक प्रोत्साहन प्रदान करता है। -- **POI या इंडेक्सिंग का प्रमाण**: जब एक इंडेक्सर अपने आवंटन को बंद कर देता है और किसी दिए गए सबग्राफ पर अपने अर्जित इंडेक्सर रिवार्ड्स का दावा करना चाहता है, तो उन्हें इंडेक्सिंग का एक वैध और हालिया प्रूफ (POI) प्रदान करना होगा। मछुआरे इंडेक्सर द्वारा प्रदान किए गए पीओआई पर विवाद कर सकते हैं। मछुआरे के पक्ष में सुलझाए गए विवाद का परिणाम अनुक्रमणिका में कमी के रूप में होगा। +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **ग्राफ़ नोड**: ग्राफ़ नोड वह घटक है जो सबग्राफ़ को अनुक्रमित करता है, और परिणामी डेटा को ग्राफ़क्यूएल एपीआई के माध्यम से क्वेरी के लिए उपलब्ध कराता है। इस तरह यह इंडेक्सर स्टैक के लिए केंद्रीय है, और एक सफल इंडेक्सर चलाने के लिए ग्राफ नोड का सही संचालन महत्वपूर्ण है। @@ -80,10 +76,10 @@ title: शब्दकोष - **कूलडाउन अवधि**: वह समय जब तक कोई अनुक्रमणिका अपने प्रतिनिधिमंडल पैरामीटर को बदल नहीं सकता, तब तक वह फिर से ऐसा कर सकता है। -- **एल2 ट्रांसफर टूल्स**: स्मार्ट कॉन्ट्रैक्ट और यूआई जो नेटवर्क प्रतिभागियों को एथेरियम मेननेट से आर्बिट्रम वन में ट्रांसफर करने में सक्षम बनाते हैं। नेटवर्क प्रतिभागी प्रत्यायोजित जीआरटी, सबग्राफ, क्यूरेशन शेयर और इंडेक्सर की स्वयं हिस्सेदारी स्थानांतरित कर सकते हैं। +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network-related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and an Indexer's self-stake. -- **_एक सबग्राफ को ग्राफ़ नेटवर्क में अपग्रेड करना_**: होस्ट की गई सेवा से एक सबग्राफ को ग्राफ़ नेटवर्क में ले जाने की प्रक्रिया। +- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. -- **_एक सबग्राफ को अपडेट करना_**: सबग्राफ के मेनिफेस्ट, स्कीमा या अपडेट के साथ एक नया सबग्राफ संस्करण जारी करने की प्रक्रिया मानचित्रण। +- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings (a minimal CLI sketch follows this list). -- **माइग्रेट करना**: क्यूरेशन शेयरों की प्रक्रिया सबग्राफ के पुराने संस्करण से सबग्राफ के नए संस्करण में स्थानांतरित हो जाती है (यानी, v0.0.1 होने पर क्यूरेशन शेयर नवीनतम संस्करण में चले जाते हैं v0.0.2 पर अद्यतन किया गया है)। +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2).
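A minimal sketch of the _Updating a subgraph_ flow defined above, assuming the Graph CLI and a Subgraph Studio deployment; the slug `my-subgraph` and the version label are placeholders:

```sh
# Rebuild after changing the manifest, schema, or mappings
graph codegen && graph build

# Deploy the new version to Subgraph Studio under a new version label
graph deploy --studio my-subgraph --version-label v0.0.2
```

Publishing the updated version to the network, and migrating any curation signal to it, remains a separate on-chain step.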
diff --git a/website/pages/hi/graphcast.mdx b/website/pages/hi/graphcast.mdx index 41447da41c72..60466f7812bb 100644 --- a/website/pages/hi/graphcast.mdx +++ b/website/pages/hi/graphcast.mdx @@ -10,7 +10,7 @@ title: ग्राफकास्ट ग्राफकास्ट एसडीके (सॉफ्टवेयर डेवलपमेंट किट) डेवलपर्स को रेडियो बनाने की अनुमति देता है, जो गपशप-संचालित अनुप्रयोग हैं जो इंडेक्सर्स किसी दिए गए उद्देश्य को पूरा करने के लिए चला सकते हैं। हम निम्नलिखित उपयोग के मामलों के लिए कुछ रेडियो बनाने का भी इरादा रखते हैं (या अन्य डेवलपर्स/टीमों को सहायता प्रदान करते हैं जो रेडियो बनाना चाहते हैं): -- सबग्राफ डेटा अखंडता की रीयल-टाइम क्रॉस-चेकिंग ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - अन्य इंडेक्सर्स से ताना सिंकिंग सबग्राफ, सबस्ट्रीम और फायरहोज डेटा के लिए नीलामी और समन्वय आयोजित करना। - सक्रिय क्वेरी एनालिटिक्स पर स्व-रिपोर्टिंग, जिसमें सबग्राफ अनुरोध मात्रा, शुल्क मात्रा आदि शामिल हैं। - इंडेक्सिंग एनालिटिक्स पर सेल्फ-रिपोर्टिंग, जिसमें सबग्राफ इंडेक्सिंग टाइम, हैंडलर गैस कॉस्ट, इंडेक्सिंग एरर, आदि शामिल हैं। diff --git a/website/pages/hi/index.json b/website/pages/hi/index.json index 61c5d7db57b1..26fd1d5d6804 100644 --- a/website/pages/hi/index.json +++ b/website/pages/hi/index.json @@ -23,8 +23,8 @@ "description": "सबग्राफ बनाने के लिए स्टूडियो का प्रयोग करें" }, "migrateFromHostedService": { - "title": "होस्ट की गई सेवा से माइग्रेट करें", - "description": "सबग्राफ को ग्राफ़ नेटवर्क में माइग्रेट करना" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "होस्टेड सेवा", - "description": "होस्टेड सर्विस पर सबग्राफ बनाएं और एक्सप्लोर करें" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "समर्थित नेटवर्क्स", - "description": "The Graph Network और Hosted सेवा पर The Graph निम्नलिखित नेटवर्क का समर्थन करता है।", - "graphNetworkAndHostedService": "ग्राफ नेटवर्क और होस्टेड सेवा", - "hostedService": "होस्टेड सेवा", - "betaWarning": "बीटा में है।" + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/hi/mips-faqs.mdx b/website/pages/hi/mips-faqs.mdx index fce107fd2317..de45376c7e5c 100644 --- a/website/pages/hi/mips-faqs.mdx +++ b/website/pages/hi/mips-faqs.mdx @@ -4,122 +4,124 @@ title: एमआईपी अक्सर पूछे जाने वाले ## परिचय -द ग्राफ इकोसिस्टम में भाग लेने के लिए यह एक रोमांचक समय है! [ग्राफ़ डे 2022](https://thegraph.com/graph-day/2022/) के दौरान यानिव ताल ने [होस्ट की गई सेवा को बंद करने](https://thegraph.com/blog/sunsetting-hosted-service/) की घोषणा की, एक पल के लिए ग्राफ पारिस्थितिकी तंत्र कई वर्षों से काम कर रहा है। +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! -होस्ट की गई सेवा के समाप्त होने और इसकी सभी गतिविधियों को विकेंद्रीकृत नेटवर्क में स्थानांतरित करने में सहायता के लिए, द ग्राफ़ फ़ाउंडेशन ने [माइग्रेशन इंफ्रास्ट्रक्चर प्रोवाइडर्स (एमआईपी) प्रोग्राम](https://thegraph.com/blog/mips-multi) की घोषणा की है -चेन-इंडेक्सिंग-प्रोत्साहन-कार्यक्रम। +It's an exciting time to be participating in The Graph ecosystem! 
During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. -एमआईपी प्रोग्राम इंडेक्सर्स के लिए एथेरियम मेननेट से परे इंडेक्स चेन के संसाधनों के साथ उन्हें समर्थन देने के लिए एक प्रोत्साहन कार्यक्रम है और ग्राफ़ प्रोटोकॉल विकेंद्रीकृत नेटवर्क को मल्टी-चेन इंफ्रास्ट्रक्चर परत में विस्तारित करने में मदद करता है। +To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -MIPs (Mission, Incentives, and Policy) कार्यक्रम ने 0.75% यानी 75M GRT के अलौकिक कुल सप्लाई का आवंटन किया है, जिसमें से 0.5% Indexers को इनाम देने के लिए है जो नेटवर्क के बूटस्ट्रैपिंग में योगदान देते हैं और 0.25% नेटवर्क ग्रांट्स के लिए आवंटित है जो मल्टी-चेन सबग्राफ उपयोग करने वाले सबग्राफ डेवलपर्स के लिए है। +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. -### उपयोगी संसाधन +The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. -- [विन्सेंट (विक्टर) टैगेलिया से इंडेक्सर 2ूल](https://indexer-2ools.vincenttaglia.com/#/) -- [ग्राफ नेटवर्क पर प्रभावी इंडेक्सर कैसे बनें](https://thegraph.com/blog/how-to-become-indexer/) -- [इंडेक्सर नॉलेज हब](https://thegraph.academy/indexers/) -- [आवंटन अनुकूलक](https://github.com/graphprotocol/allocationopt.jl) -- [आवंटन अनुकूलन टूलिंग](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +### Useful Resources -### 1. क्या सबग्राफ विफल होने पर भी इंडेक्सिंग (पीओआई) का वैध प्रमाण उत्पन्न करना संभव है? +- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) +- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) +- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -हाँ, यह वास्तव में है। +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -संदर्भ के लिए, मध्यस्थता चार्टर, [यहां चार्टर के बारे में अधिक जानें](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), विफल सबग्राफ के लिए POI उत्पन्न करने की पद्धति को निर्दिष्ट करता है। +Yes, it is indeed. -समुदाय के एक सदस्य [SunTzu](https://github.com/suntzu93) ने मध्यस्थता चार्टर की कार्यप्रणाली के अनुपालन में इस प्रक्रिया को स्वचालित करने के लिए एक स्क्रिप्ट बनाई है। रेपो देखें [here] \(https://github.com/suntzu93/get_valid_poi_subgraph)। +For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -### 2. एमआईपी कार्यक्रम किस श्रृंखला को सबसे पहले प्रोत्साहित करेगा? 
+A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). -विकेंद्रीकृत नेटवर्क पर समर्थित होने वाली पहली श्रृंखला ग्नोसिस चेन है! ग्नोसिस चेन, जिसे पहले एक्सडीएआई के नाम से जाना जाता था, एक ईवीएम-आधारित श्रृंखला है। ग्नोसिस चेन को पहली बार चुना गया था क्योंकि इसके चलने वाले नोड्स की उपयोगकर्ता-मित्रता, इंडेक्सर की तत्परता, द ग्राफ के साथ संरेखण और वेब 3 के भीतर गोद लेना। +### 2. Which chain will the MIPs program incentivise first? -### 3. एमआईपी कार्यक्रम में नई शृंखला कैसे जोड़ी जाएगी? +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. -इंडेक्सर की तत्परता, मांग और सामुदायिक भावना के आधार पर, पूरे एमआईपी कार्यक्रम में नई श्रृंखलाओं की घोषणा की जाएगी। चेन को पहले टेस्टनेट पर सपोर्ट किया जाएगा और बाद में मेननेट पर उस चेन को सपोर्ट करने के लिए एक GIP पास किया जाएगा। एमआईपी प्रोग्राम में भाग लेने वाले इंडेक्सर्स चुनेंगे कि वे किस चेन का समर्थन करने में रुचि रखते हैं और क्वेरी फीस अर्जित करने और सबग्राफ की सेवा के लिए नेटवर्क पर इंडेक्सिंग पुरस्कार अर्जित करने के अलावा प्रति श्रृंखला पुरस्कार अर्जित करेंगे। एमआईपी प्रतिभागियों को उनके प्रदर्शन, नेटवर्क की जरूरतों को पूरा करने की क्षमता और सामुदायिक समर्थन के आधार पर अंक दिए जाएंगे। +### 3. How will new chains be added to the MIPs program? -### 4. हमें कैसे पता चलेगा कि नेटवर्क नई श्रृंखला के लिए तैयार है? +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. -ग्राफ़ फाउंडेशन तत्परता का सर्वोत्तम आकलन करने के लिए क्यूओएस प्रदर्शन मेट्रिक्स, नेटवर्क प्रदर्शन और सामुदायिक चैनलों की निगरानी करेगा। प्राथमिकता यह सुनिश्चित कर रही है कि नेटवर्क उन मल्टी-चेन डैप के प्रदर्शन की जरूरतों को पूरा करे जो उनके सबग्राफ को माइग्रेट करने में सक्षम हों। +### 4. How will we know when the network is ready for a new chain? -### 5. पुरस्कार प्रति श्रृंखला कैसे विभाजित होते हैं? +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. -यह देखते हुए कि चेन नोड्स को सिंक करने के लिए अपनी आवश्यकताओं में भिन्न होती हैं, और वे क्वेरी वॉल्यूम और गोद लेने में भिन्न होती हैं, उस श्रृंखला के चक्र के अंत में प्रति श्रृंखला पुरस्कार तय किए जाएंगे ताकि यह सुनिश्चित किया जा सके कि सभी फीडबैक और सीखने पर कब्जा कर लिया गया है। हालाँकि, हर समय इंडेक्सर्स नेटवर्क पर चेन को सपोर्ट करने के बाद क्वेरी फीस और इंडेक्सिंग रिवार्ड्स अर्जित करने में सक्षम होंगे। +### 5. How are rewards divided per chain? -### 6. 
क्या हमें एमआईपी कार्यक्रम में सभी श्रृंखलाओं को अनुक्रमित करने की आवश्यकता है या क्या हम केवल एक श्रृंखला चुन सकते हैं और उसे अनुक्रमित कर सकते हैं? +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. -आप जो भी श्रृंखला चाहते हैं उसे अनुक्रमित करने के लिए आपका स्वागत है! एमआईपी कार्यक्रम का लक्ष्य इंडेक्सर्स को उन उपकरणों और ज्ञान से लैस करना है जो वे चाहते हैं कि चेन को इंडेक्स करें और उन वेब 3 पारिस्थितिक तंत्रों का समर्थन करें जिनमें वे रुचि रखते हैं। हालांकि, प्रत्येक श्रृंखला के लिए टेस्टनेट से मेननेट तक चरण होते हैं। आपके द्वारा अनुक्रमित की जा रही श्रृंखलाओं के लिए सभी चरणों को पूरा करना सुनिश्चित करें। चरणों के बारे में अधिक जानने के लिए [एमआईपी धारणा पृष्ठ](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) देखें। +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? -### 7. इनाम कब बांटे जाएंगे? +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. -एक बार प्रदर्शन मेट्रिक्स मिलने और माइग्रेट किए गए सबग्राफ उन इंडेक्सर्स द्वारा समर्थित होने के बाद एमआईपी पुरस्कार प्रति श्रृंखला वितरित किए जाएंगे। उस श्रृंखला के चक्र के बीच में प्रति श्रृंखला कुल पुरस्कारों के बारे में जानकारी देखें। +### 7. When will rewards be distributed? -### 8. अंकन कैसे काम करता है? +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. -इंडेक्सर्स लीडरबोर्ड पर पूरे कार्यक्रम में स्कोरिंग के आधार पर पुरस्कारों के लिए प्रतिस्पर्धा करेंगे। कार्यक्रम स्कोरिंग पर आधारित होगा: +### 8. How does scoring work? -**सबग्राफ कवरेज** +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: -- क्या आप प्रति श्रृंखला सबग्राफ के लिए अधिकतम समर्थन प्रदान कर रहे हैं? +**Subgraph Coverage** -- एमआईपी के दौरान, बड़े इंडेक्सर्स से उम्मीद की जाती है कि वे प्रति श्रृंखला सबग्राफ का 50%+ दांव लगाएंगे जिसका वे समर्थन करते हैं। +- Are you providing maximal support for subgraphs per chain? -**सेवा की गुणवत्ता** +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. -- क्या इंडेक्सर सेवा की अच्छी गुणवत्ता (विलंबता, ताजा डेटा, अपटाइम, आदि) के साथ श्रृंखला की सेवा कर रहा है? +**Quality Of Service** -- क्या डैप डेवलपर्स का समर्थन करने वाला इंडेक्सर उनकी जरूरतों के प्रति प्रतिक्रियाशील है? +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? -क्या इंडेक्सर कुशलता से आवंटन कर रहा है, नेटवर्क के समग्र स्वास्थ्य में योगदान दे रहा है? +- Is the Indexer supporting dapp developers being reactive to their needs? 
-**समुदाय का समर्थन** +Is Indexer allocating efficiently, contributing to the overall health of the network? -- क्या इंडेक्सर साथी इंडेक्सर्स के साथ सहयोग कर रहा है ताकि उन्हें मल्टी-चेन स्थापित करने में मदद मिल सके? +**Community Support** -- क्या इंडेक्सर पूरे कार्यक्रम के दौरान कोर देवों को फीडबैक प्रदान कर रहा है या फोरम में इंडेक्सर्स के साथ जानकारी साझा कर रहा है? +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? -### 9. डिस्कॉर्ड की भूमिका कैसे सौंपी जाएगी? +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? -मॉडरेटर अगले कुछ दिनों में भूमिकाएं असाइन करेंगे. +### 9. How will the Discord role be assigned? -### 10. क्या प्रोग्राम को टेस्टनेट पर शुरू करना और फिर मेननेट पर स्विच करना ठीक है? क्या आप पुरस्कार वितरित करते समय मेरे नोड की पहचान कर पाएंगे और इसे ध्यान में रख पाएंगे? +Moderators will assign the roles in the next few days. -हां, वास्तव में आपसे ऐसा करने की अपेक्षा की जाती है। गोरली पर कई चरण हैं और एक मेननेट पर है। +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? -### 11. आप किस बिंदु पर प्रतिभागियों से मेननेट परिनियोजन जोड़ने की उम्मीद करते हैं? +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. -चरण 3 के दौरान मेननेट इंडेक्सर की आवश्यकता होगी। इस पर अधिक जानकारी [जल्द ही इस धारणा पृष्ठ में साझा की जाएगी।](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +### 11. At what point do you expect participants to add a mainnet deployment? -### 12. क्या पुरस्कार निहित होने के अधीन होंगे? +There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -कार्यक्रम के अंत में वितरित किया जाने वाला प्रतिशत निहित होने के अधीन होगा। इस पर और अधिक इंडेक्सर समझौते में साझा किया जाएगा। +### 12. Will rewards be subject to vesting? -### 13. एक से अधिक सदस्यों वाली टीमों के लिए, क्या टीम के सभी सदस्यों को MIPs Discord भूमिका दी जाएगी? +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. -हाँ +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -### 14. क्या एमआईपी टेस्टनेट में भाग लेने के लिए ग्राफ क्यूरेटर प्रोग्राम से लॉक किए गए टोकन का उपयोग करना संभव है? +Yes -हाँ +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -### 15. एमआईपी कार्यक्रम के दौरान, क्या अमान्य पीओआई का विवाद करने की अवधि होगी? +Yes -तय किया जाएगा। कृपया इस पर अधिक जानकारी के लिए समय-समय पर इस पृष्ठ पर लौटें या यदि आपका अनुरोध अत्यावश्यक है, तो कृपया info@thegraph.foundation पर ईमेल करें +### 15. During the MIPs program, will there be a period to dispute invalid POI? -### 17. क्या हम दो निहित अनुबंधों को जोड़ सकते हैं? +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation -नहीं। विकल्प हैं: आप एक को दूसरे को सौंप सकते हैं या दो अलग-अलग इंडेक्सर्स चला सकते हैं। +### 17. Can we combine two vesting contracts? -### 18. केवाईसी प्रश्न? +No. The options are: you can delegate one to the other one or run two separate indexers. 
-कृपया info@thegraph.foundation पर ईमेल करें +### 18. KYC Questions? -### 19. मैं ग्नोसिस चेन को इंडेक्स करने के लिए तैयार नहीं हूं, क्या मैं तैयार होने पर दूसरी चेन से इंडेक्स करना शुरू कर सकता हूं? +Please email info@thegraph.foundation -हाँ +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -### 20. क्या सर्वर चलाने के लिए अनुशंसित क्षेत्र हैं? +Yes -हम क्षेत्रों पर सिफारिशें नहीं देते हैं। स्थानों को चुनते समय आप यह सोचना चाहेंगे कि क्रिप्टोकरेंसी के लिए प्रमुख बाज़ार कहाँ हैं। +### 20. Are there recommended regions to run the servers? -### 21. "हैंडलर गैस लागत" क्या है? +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. -यह एक हैंडलर को निष्पादित करने की लागत का निर्धारक उपाय है। नाम के सुझाव के विपरीत, यह ब्लॉकचेन पर गैस की लागत से संबंधित नहीं है। +### 21. What is “handler gas cost”? + +It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/hi/network/benefits.mdx b/website/pages/hi/network/benefits.mdx index 17b646dd0914..b145895e7853 100644 --- a/website/pages/hi/network/benefits.mdx +++ b/website/pages/hi/network/benefits.mdx @@ -14,7 +14,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg - 60-98% कम मासिक लागत - $0 इंफ्रास्ट्रक्चर सेटअप लागत - सुपीरियर अपटाइम -- 438 इंडेक्सर्स तक पहुंच (और गिनती जारी है) +- Access to hundreds of independent Indexers around the world - वैश्विक समुदाय द्वारा 24/7 तकनीकी सहायता ## लाभ समझाया @@ -79,9 +79,9 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg एक सबग्राफ पर क्यूरेटिंग सिग्नल एक वैकल्पिक वन-टाइम, नेट-जीरो कॉस्ट है (उदाहरण के लिए, सिग्नल में $1k को सबग्राफ पर क्यूरेट किया जा सकता है, और बाद में वापस ले लिया जाता है - प्रक्रिया में रिटर्न अर्जित करने की क्षमता के साथ)। -कुछ उपयोगकर्ताओं को अपने सबग्राफ को नए संस्करण में अपडेट करने की आवश्यकता हो सकती है। एथेरियम गैस शुल्क के कारण, लेखन के समय अपडेट की लागत ~$50 होती है। +Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. -ध्यान दें कि [Arbitrum](/arbitrum/arbitrum-faq) पर गैस शुल्क एथेरियम मेननेट से काफी कम है। +Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. ## कोई सेटअप लागत नहीं & ग्रेटर ऑपरेशनल एफिशिएंसी @@ -89,8 +89,8 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## Reliability & Resiliency -ग्राफ़ का विकेन्द्रीकृत नेटवर्क उपयोगकर्ताओं को भौगोलिक रिडंडेंसी तक पहुंच प्रदान करता है जो `ग्राफ़-नोड` को स्व-होस्ट करते समय मौजूद नहीं होता है। प्रश्नों को 99.9%+ अपटाइम के लिए भरोसेमंद तरीके से परोसा जाता है, जो 168 इंडेक्सर्स (और गिनती) द्वारा वैश्विक स्तर पर नेटवर्क को सुरक्षित करने के लिए हासिल किया गया है। +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. 
निचला रेखा: ग्राफ़ नेटवर्क कम खर्चीला है, उपयोग में आसान है, और `ग्राफ़-नोड` को स्थानीय रूप से चलाने की तुलना में बेहतर परिणाम देता है। -आज ही द ग्राफ़ नेटवर्क का उपयोग शुरू करें, और जानें कि कैसे [अपने सबग्राफ़ को द ग्राफ़ के विकेन्द्रीकृत नेटवर्क में अपग्रेड करें](/cookbook/upgrading-a-subgraph)। +Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/hi/network/curating.mdx b/website/pages/hi/network/curating.mdx index 84c375b73563..14a099594b19 100644 --- a/website/pages/hi/network/curating.mdx +++ b/website/pages/hi/network/curating.mdx @@ -4,7 +4,7 @@ title: क्यूरेटिंग ग्राफ़ विकेंद्रीकृत अर्थव्यवस्था के लिए क्यूरेटर महत्वपूर्ण हैं। वे वेब 3 पारिस्थितिकी तंत्र के अपने ज्ञान का उपयोग सबग्राफ पर मूल्यांकन और संकेत करने के लिए करते हैं जिसे ग्राफ़ नेटवर्क द्वारा अनुक्रमित किया जाना चाहिए। एक्सप्लोरर के माध्यम से, क्यूरेटर सिग्नलिंग निर्णय लेने के लिए नेटवर्क डेटा देखने में सक्षम होते हैं। ग्राफ़ नेटवर्क उन क्यूरेटर को पुरस्कृत करता है जो सबग्राफ उत्पन्न करने वाली क्वेरी फीस के एक हिस्से के साथ अच्छी गुणवत्ता वाले सबग्राफ पर संकेत देते हैं। क्यूरेटरों को जल्दी संकेत देने के लिए आर्थिक रूप से प्रोत्साहित किया जाता है। क्यूरेटर के ये संकेत इंडेक्सर्स के लिए महत्वपूर्ण हैं, जो तब इन सिग्नल किए गए सबग्राफ से डेटा को प्रोसेस या इंडेक्स कर सकते हैं। -संकेत करते समय, क्यूरेटर्स निर्धारित संस्करण पर संकेत करने या ऑटो-माइग्रेट का उपयोग करने का फैसला कर सकते हैं। ऑटो-माइग्रेट का उपयोग करते समय, क्यूरेटर्स के शेयर्स हमेशा डेवलपर द्वारा प्रकाशित नवीनतम संस्करण में माइग्रेट किए जाएंगे। यदि आप इसके बजाय एक विशिष्ट संस्करण पर संकेत करने का फैसला करते हैं, तो शेयर्स हमेशा इस विशिष्ट संस्करण पर ही रहेंगे। +When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. याद रखें कि क्यूरेशन जोखिम भरा है। कृपया यह सुनिश्चित करने के लिए अपना परिश्रम करें कि आप अपने भरोसे के सबग्राफ पर क्यूरेट करते हैं। एक सबग्राफ बनाना अनुमति रहित है, इसलिए लोग सबग्राफ बना सकते हैं और उन्हें किसी भी नाम से पुकार सकते हैं। क्यूरेशन जोखिमों पर अधिक मार्गदर्शन के लिए, [द ग्राफ़ अकादमी की क्यूरेशन गाइड](https://thegraph.academy/curators/) देखें। @@ -60,7 +60,7 @@ title: क्यूरेटिंग ## जोखिम 1. क्वेरी बाजार द ग्राफ में स्वाभाविक रूप से युवा है और इसमें जोखिम है कि नवजात बाजार की गतिशीलता के कारण आपका %APY आपकी अपेक्षा से कम हो सकता है। -2. क्यूरेशन शुल्क - जब एक क्यूरेटर सबग्राफ पर GRT संकेत करता है, तो उन्हें 1% क्यूरेशन टैक्स का सामना करना पड़ता है। यह शुल्क दहन किया जाता है और बाकी राशि बॉन्डिंग कर्व की रिजर्व सप्लाई में जमा की जाती है। +2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. 3. 
जब क्यूरेटर जीआरटी निकालने के लिए अपने शेयरों को जलाते हैं, तो शेष शेयरों का जीआरटी मूल्यांकन कम हो जाएगा। सावधान रहें कि कुछ मामलों में, क्यूरेटर उनके शेयरों **सभी को एक साथ** नष्ट करने का निर्णय ले सकते हैं। यह स्थिति सामान्य हो सकती है यदि कोई dApp डेवलपर अपने सबग्राफ का वर्जनिंग/सुधार और क्वेरी करना बंद कर देता है या यदि कोई सबग्राफ विफल हो जाता है। नतीजतन, शेष क्यूरेटर अपने शुरुआती जीआरटी का केवल एक अंश ही निकाल पाएंगे। कम जोखिम वाली प्रोफ़ाइल वाली नेटवर्क भूमिका के लिए, [प्रतिनिधि](/network/delegating) देखें। 4. बग के कारण सबग्राफ विफल हो सकता है। एक विफल सबग्राफ क्वेरी शुल्क अर्जित नहीं करता है। नतीजतन, आपको तब तक इंतजार करना होगा जब तक कि डेवलपर बग को ठीक नहीं करता है और एक नया संस्करण तैनात करता है। - यदि आपने सबग्राफ के नवीनतम संस्करण की सदस्यता ली है, तो आपके शेयर उस नए संस्करण में स्वत: माइग्रेट हो जाएंगे। इस पर 0.5% क्यूरेशन टैक्स लगेगा। @@ -79,13 +79,13 @@ title: क्यूरेटिंग - क्यूरेटर नेटवर्क की अपनी समझ का उपयोग करके यह अनुमान लगाने की कोशिश कर सकते हैं कि कैसे एक व्यक्तिगत सबग्राफ भविष्य में उच्च या निम्न क्वेरी मात्रा उत्पन्न कर सकता है। - क्यूरेटर को द ग्राफ एक्सप्लोरर के माध्यम से उपलब्ध मेट्रिक्स को भी समझना चाहिए। पिछली क्वेरी मात्रा और सबग्राफ डेवलपर कौन है जैसे मेट्रिक्स यह निर्धारित करने में मदद कर सकते हैं कि सबग्राफ संकेत देने लायक है या नहीं। -### 3. सबग्राफ को अपडेट करने की लागत क्या है? +### 3. What’s the cost of updating a subgraph? -नए सबग्राफ के लिए अपने क्यूरेशन शेयर्स को माइग्रेट करने पर 1% क्यूरेशन टैक्स लगता है। क्यूरेटर्स एक सबग्राफ के नवीनतम संस्करण की सदस्यता लेने का विकल्प चुन सकते हैं। जब क्यूरेशन शेयर्स एक नए संस्करण में ऑटो-माइग्रेट होते हैं, तो क्यूरेटर्स को भी आधी क्यूरेशन टैक्स, यानी 0.5%, भुगतान करना पड़ता है, क्योंकि सबग्राफ को अपडेट करना एक ऑन-चेन क्रिया है जिसमें गैस की खर्च होती है। +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. -### 4. मैं अपने सबग्राफ को कितनी बार अपडेट कर सकता हूं? +### 4. How often can I update my subgraph? -सुझाव दिया जाता है कि आप अपने सबग्राफ्स को बहुत अक्सर अपडेट न करें। अधिक जानकारी के लिए ऊपर के प्रश्न को देखें। +It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. ### 5. क्या मैं अपने क्यूरेशन शेयर बेच सकता हूँ? 
diff --git a/website/pages/hi/network/developing.mdx b/website/pages/hi/network/developing.mdx index ec6a89bc7885..3f43f162a9cf 100644 --- a/website/pages/hi/network/developing.mdx +++ b/website/pages/hi/network/developing.mdx @@ -34,11 +34,11 @@ title: विकसित होना विकासकर्ता गेटवे के लिए एक इंडेक्सर वरीयता व्यक्त करने में भी सक्षम हैं, उदाहरण के लिए उन इंडेक्सर्स को प्राथमिकता देना जिनकी क्वेरी प्रतिक्रिया तेज है, या जिनका डेटा सबसे अद्यतित है। ये नियंत्रण सबग्राफ स्टूडियो में सेट किए गए हैं। -### सबग्राफ़ अपडेट करना +### Updating Subgraphs एक समय के बाद एक सबग्राफ डेवलपर अपने सबग्राफ को अपडेट करना चाह सकता है, शायद बग को ठीक करना या नई कार्यक्षमता जोड़ना। सबग्राफ डेवलपर दर-सीमित विकास और परीक्षण के लिए सबग्राफ स्टूडियो में अपने सबग्राफ के नए संस्करण (ओं) को तैनात कर सकता है। -सबग्राफ डेवलपर तैयार होते हैं तो वे एक लेनदेन आरंभ कर सकते हैं जिससे वे अपने सबग्राफ को नए संस्करण पर पॉइंट कर सकते हैं। सबग्राफ को अपडेट करने से किसी भी संकेत को नए संस्करण में माइग्रेट किया जाता है (यहां तक ​​कि उपयोगकर्ता ने "ऑटो-माइग्रेट" का चयन किया हो), जिससे माइग्रेशन टैक्स भी लगता है। इस संकेत माइग्रेशन से इंडेक्सर्स को नए संस्करण के इंडेक्सिंग करने के लिए प्रेरित किया जाता है, इसलिए यह जल्दी ही क्वेरी के लिए उपलब्ध हो जाना चाहिए। +Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. ### सबग्राफ का बहिष्कार करना @@ -50,4 +50,4 @@ title: विकसित होना ### डेवलपर्स और नेटवर्क अर्थशास्त्र -डेवलपर्स नेटवर्क में एक मुख्य आर्थिक कारक हैं, जो इंडेक्सिंग को प्रोत्साहित करने और महत्वपूर्ण रूप से सबग्राफ क्वेरी करने के लिए GRT को लॉक करते हैं, जो नेटवर्क के मुख्य मूल्य विनिमय का एक हिस्सा है। सबग्राफ डेवलपर्स एक सबग्राफ को अपडेट करने के समय भी GRT को दहन करते हैं। +Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. diff --git a/website/pages/hi/network/explorer.mdx b/website/pages/hi/network/explorer.mdx index b0b3b8b39937..b78434937d08 100644 --- a/website/pages/hi/network/explorer.mdx +++ b/website/pages/hi/network/explorer.mdx @@ -74,7 +74,7 @@ title: ग्राफ एक्सप्लोरर द ग्राफ नेटवर्क की सुरक्षा और विकेंद्रीकरण को बनाए रखने में प्रतिनिधि महत्वपूर्ण भूमिका निभाते हैं। वे एक या एक से अधिक इंडेक्सर्स को GRT टोकन सौंपकर (यानी, "स्टेकिंग") नेटवर्क में भाग लेते हैं। डेलीगेटर्स के बिना, इंडेक्सर्स के महत्वपूर्ण पुरस्कार और शुल्क अर्जित करने की संभावना कम होती है। इसलिए, इंडेक्सर्स डेलिगेटर्स को इंडेक्सिंग रिवार्ड्स और उनके द्वारा अर्जित क्वेरी फीस के एक हिस्से की पेशकश करके आकर्षित करना चाहते हैं। -प्रतिनिधि, बदले में, कई अलग-अलग चरों के आधार पर इंडेक्सर्स का चयन करते हैं, जैसे कि पिछला प्रदर्शन, इंडेक्सिंग रिवॉर्ड रेट और क्वेरी शुल्क में कटौती। समुदाय के भीतर प्रतिष्ठा भी इसमें एक कारक हो सकती है! [द ग्राफ़्स डिस्कॉर्ड](https://discord.gg/graphprotocol) या [ के ज़रिए चुने गए इंडेक्सर्स से जुड़ने की सलाह दी जाती है ग्राफ फोरम](https://forum.thegraph.com/)! +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. 
Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! ![एक्सप्लोरर छवि 7](/img/Delegation-Overview.png) diff --git a/website/pages/hi/network/indexing.mdx b/website/pages/hi/network/indexing.mdx index 4936fb8d016e..4af09b4d7fd8 100644 --- a/website/pages/hi/network/indexing.mdx +++ b/website/pages/hi/network/indexing.mdx @@ -2,7 +2,7 @@ title: इंडेक्सिंग --- -इंडेक्सर्स द ग्राफ नेटवर्क में नोड ऑपरेटर हैं जो इंडेक्सिंग और क्वेरी प्रोसेसिंग सेवाएं प्रदान करने के लिए ग्राफ टोकन (जीआरटी) को दांव पर लगाते हैं। इंडेक्सर्स अपनी सेवाओं के लिए क्वेरी फीस और इंडेक्सिंग पुरस्कार अर्जित करते हैं। वे एक रिबेट पूल से भी कमाते हैं जो कॉब-डगलस रिबेट फंक्शन के बाद उनके काम के अनुपात में सभी नेटवर्क योगदानकर्ताओं के साथ साझा किया जाता है। +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. जीआरटी जो प्रोटोकॉल में दांव पर लगा है, विगलन अवधि के अधीन है और यदि अनुक्रमणिका दुर्भावनापूर्ण हैं और अनुप्रयोगों को गलत डेटा प्रदान करते हैं या यदि वे गलत तरीके से अनुक्रमणित करते हैं तो इसे घटाया जा सकता है। इंडेक्सर्स नेटवर्क में योगदान करने के लिए डेलीगेटर्स से प्रत्यायोजित हिस्सेदारी के लिए पुरस्कार भी अर्जित करते हैं। @@ -26,7 +26,7 @@ title: इंडेक्सिंग अनुक्रमण पुरस्कार प्रोटोकॉल मुद्रास्फीति से आते हैं जो 3% वार्षिक जारी करने के लिए निर्धारित है। उन्हें प्रत्येक पर सभी क्यूरेशन सिग्नल के अनुपात के आधार पर सबग्राफ में वितरित किया जाता है, फिर उस सबग्राफ पर उनकी आवंटित हिस्सेदारी के आधार पर इंडेक्सर्स को आनुपातिक रूप से वितरित किया जाता है। **अनुक्रमण के एक वैध प्रमाण (POI) के साथ एक आवंटन बंद होना चाहिए जो पुरस्कारों के योग्य होने के लिए मध्यस्थता चार्टर द्वारा निर्धारित मानकों को पूरा करता है।** -पुरस्कारों की गणना के लिए समुदाय द्वारा कई उपकरण बनाए गए हैं; आपको उनका एक संग्रह [समुदाय गाइड संग्रह](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c) में व्यवस्थित मिलेगा। आप [डिस्कॉर्ड सर्वर](https://discord.gg/graphprotocol) पर #Delegators और #Indexers चैनल में टूल की अप टू डेट सूची भी पा सकते हैं। यहां हम अनुक्रमणिका सॉफ़्टवेयर स्टैक के साथ एकीकृत [अनुशंसित आवंटन अनुकूलक](https://github.com/graphprotocol/AllocationOpt.jl) को लिंक करते हैं। +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. ### इंडेक्सिंग (पीओआई) का सबूत क्या है? @@ -81,17 +81,17 @@ query indexerAllocations { ### प्रश्न शुल्क छूट क्या हैं और वे कब वितरित की जाती हैं? 
-जब भी आवंटन बंद हो जाता है और सबग्राफ के क्वेरी शुल्क रिबेट पूल में जमा हो जाता है तो क्वेरी शुल्क गेटवे द्वारा एकत्र किया जाता है। रिबेट पूल को इंडेक्सर्स को प्रोत्साहित करने के लिए डिज़ाइन किया गया है कि वे नेटवर्क के लिए कमाए जाने वाले क्वेरी शुल्क की राशि के मोटे अनुपात में हिस्सेदारी आवंटित करें। पूल में क्वेरी फीस का हिस्सा जो एक विशेष इंडेक्सर को आवंटित किया जाता है, कोब-डगलस प्रोडक्शन फंक्शन का उपयोग करके गणना की जाती है; प्रति इंडेक्सर वितरित राशि पूल में उनके योगदान और सबग्राफ पर हिस्सेदारी के आवंटन का एक कार्य है। +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -एक बार जब आवंटन बंद हो जाता है और विवाद की अवधि बीत जाती है तो इंडेक्सर द्वारा दावा किए जाने के लिए छूट उपलब्ध होती है। दावा करने पर, क्वेरी शुल्क कटौती क्वेरी शुल्क कटौती और प्रतिनिधिमंडल पूल अनुपात के आधार पर इंडेक्सर और उनके प्रतिनिधियों को क्वेरी शुल्क छूट वितरित की जाती है। +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### क्वेरी फी कट और इंडेक्सिंग रिवॉर्ड कट क्या है? `queryFeeCut` और `indexingRewardCut` मान डेलिगेशन पैरामीटर हैं जिन्हें इंडेक्सर और उनके डेलीगेटर्स के बीच GRT के वितरण को नियंत्रित करने के लिए इंडेक्सर cooldownBlocks के साथ सेट कर सकता है। प्रतिनिधिमंडल पैरामीटर सेट करने के निर्देशों के लिए [प्रोटोकॉल में स्टेकिंग](/network/indexing#stake-in-the-protocol) में अंतिम चरण देखें। -- **queryFeeCut** - क्वेरी शुल्क का % सबग्राफ पर संचित छूट का % होता है जिसे इंडेक्सर को वितरित किया जाएगा। यदि इसे 95% पर सेट किया जाता है, तो इंडेक्सर को क्वेरी शुल्क रिबेट पूल का 95% प्राप्त होगा जब एक आवंटन का दावा किया जाता है और अन्य 5% प्रतिनिधियों के पास जाता है। +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - एक सबग्राफ पर संचित इंडेक्सिंग पुरस्कारों का % जो इंडेक्सर को वितरित किया जाएगा। यदि इसे 95% पर सेट किया जाता है, तो आवंटन बंद होने पर इंडेक्सर को इंडेक्सिंग पुरस्कार पूल का 95% प्राप्त होगा और प्रतिनिधि अन्य 5% को विभाजित कर देंगे। +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### इंडेक्सर्स कैसे जानते हैं कि इंडेक्स करने के लिए कौन से सबग्राफ हैं? 
@@ -375,7 +375,7 @@ docker-compose up #### शुरू करना -इंडेक्सर एजेंट और इंडेक्सर सेवा को आपके ग्राफ नोड इंफ्रास्ट्रक्चर के साथ सह-स्थित होना चाहिए। आपके इंडेक्सर घटकों के लिए आभासी निष्पादन वातावरण स्थापित करने के कई तरीके हैं; यहां हम बताएंगे कि एनपीएम पैकेज या स्रोत का उपयोग करके या Google क्लाउड कुबेरनेट इंजन पर कुबेरनेट्स और डॉकटर के माध्यम से उन्हें नंगे धातु पर कैसे चलाना है। यदि ये सेटअप उदाहरण आपके बुनियादी ढांचे में अच्छी तरह से अनुवाद नहीं करते हैं, तो संभवतः संदर्भ के लिए एक सामुदायिक मार्गदर्शिका होगी, आइए [Discord](https://discord.gg/graphprotocol) पर नमस्ते कहें! अपने इंडेक्सर घटकों को शुरू करने से पहले [प्रोटोकॉल में हिस्सेदारी](/network/indexing#stake-in-the-protocol) याद रखें! +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! #### From NPM packages @@ -662,21 +662,21 @@ ActionType { स्रोत से उदाहरण उपयोग: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` ध्यान दें कि आवंटन प्रबंधन के लिए समर्थित क्रिया प्रकारों की अलग-अलग इनपुट आवश्यकताएं होती हैं: @@ -798,8 +798,4 @@ setDelegationParameters(950000, 600000, 500) - **बंद** - 1 युग बीत जाने के बाद एक अनुक्रमणिका आवंटन को बंद करने के लिए स्वतंत्र है ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master /contracts/stracting/Shaking.sol#L873)) या उनका इंडेक्सर एजेंट **maxAllocationEpochs** (वर्तमान में 28 दिन) के बाद आवंटन को स्वचालित रूप से बंद कर देगा). जब एक आबंटन इंडेक्सिंग (पीओआई) के वैध प्रमाण के साथ बंद हो जाता है तो उनके इंडेक्सिंग पुरस्कार इंडेक्सर और उसके प्रतिनिधियों को वितरित किए जाते हैं (अधिक जानने के लिए नीचे "पुरस्कार कैसे वितरित किए जाते हैं?" 
देखें)। -- **अंतिम रूप दिया गया** - एक बार आवंटन बंद हो जाने के बाद विवाद की अवधि होती है जिसके बाद आवंटन को **अंतिम रूप दिया गया** माना जाता है और यह क्वेरी शुल्क छूट दावा किए जाने के लिए उपलब्ध हैं (दावा ())। इंडेक्सर एजेंट **अंतिम रूप दिए गए** आवंटनों का पता लगाने के लिए नेटवर्क की निगरानी करता है और अगर वे कॉन्फ़िगर करने योग्य (और वैकल्पिक) थ्रेशोल्ड, **— से ऊपर हैं, तो उनका दावा करता है— -आवंटन-दावा-दहलीज**। - -- **दावा किया गया** - आवंटन की अंतिम स्थिति; इसने एक सक्रिय आवंटन के रूप में अपना पाठ्यक्रम चलाया है, सभी पात्र पुरस्कार वितरित किए गए हैं और इसकी पूछताछ शुल्क छूट का दावा किया गया है। - इंडेक्सर्स को ऑन-चेन आवंटन बनाने से पहले चेनहेड में सबग्राफ परिनियोजन को सिंक करने के लिए ऑफ-चेन सिंकिंग कार्यक्षमता का उपयोग करने की सिफारिश की जाती है। यह सुविधा सबग्राफ के लिए विशेष रूप से उपयोगी है जो सिंक करने के लिए 28 से अधिक समय ले सकती है या अनिश्चित रूप से विफल होने की कुछ संभावनाएं हैं। diff --git a/website/pages/hi/new-chain-integration.mdx b/website/pages/hi/new-chain-integration.mdx index fea6babc2cb3..b5492d5061af 100644 --- a/website/pages/hi/new-chain-integration.mdx +++ b/website/pages/hi/new-chain-integration.mdx @@ -1,33 +1,33 @@ --- -title: नए नेटवर्कों का एकीकरण +title: Integrating New Networks --- -वर्तमान में ग्राफ नोड निम्नलिखित श्रृंखला प्रकारों से डेटा को सूचीबद्ध कर सकता है: +Graph Node can currently index data from the following chain types: -- ईथरियम, EVM JSON-RPC और [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) के माध्यम से -- NEAR, [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) के माध्यम से -- Cosmos [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) के माध्यम से -- Arweave, [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) के माध्यम से +- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) +- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -अगर आप किसी अलग श्रृंखला प्रकार में रुचि रखते हैं, तो आपको ग्राफ नोड के साथ एक नई एकीकरण बनाने की आवश्यकता होगी। हमारी सिफारिशित दिशा-निर्देशिका यह है कि श्रृंखला के लिए एक फायरहोस का विकास करें, और फिर उस फायरहोस को ग्राफ नोड के साथ एकीकृत करें। +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is to develop a new Firehose for the chain in question and then integrate that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** -अगर ब्लॉकचेन EVM समरूपी है और क्लाइंट/नोड मानक EVM JSON-RPC API को उदाहरणीय रूप से प्रकट करता है, तो ग्राफ नोड को नए श्रृंखला को सूचीबद्ध करने की क्षमता होनी चाहिए। अधिक जानकारी के लिए, [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc) की परीक्षण के लिए देखें। +If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2.
Firehose** -गैर-ईवीएम-आधारित श्रृंखलाओं के लिए, ग्राफ़ नोड को जीआरपीसी और ज्ञात प्रकार परिभाषाओं के माध्यम से ब्लॉकचेन डेटा को अंतर्ग्रहण करने की आवश्यकता होगी। यह [Firehose] \(firehose/) के माध्यम से किया जा सकता है, जो [StreamingFast](https://www.streamingfast.io/) द्वारा विकसित एक नई तकनीक है जो फ़ाइल-आधारित का उपयोग करके एक उच्च-स्केलेबल इंडेक्सिंग ब्लॉकचेन समाधान प्रदान करती है। स्ट्रीमिंग-प्रथम दृष्टिकोण। +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose -जबकि दोनों सबग्राफ के लिए उपयुक्त हैं, एक फ़ायरहोज़ की हमेशा उन डेवलपर्स के लिए आवश्यकता होती है जो [Substreams](substreams/), के साथ निर्माण करना चाहते हैं, जैसे कि [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). का निर्माण करना चाहते हैं। इसके अलावा, JSON-RPC की तुलना में फ़ायरहोज़ बेहतर अनुक्रमण गति की अनुमति देता है। +While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. -नए EVM श्रृंखला एकीकरणकर्ताओं को सब-स्ट्रीमों के लाभ और उसकी विशाल पैरालेल इंडेक्सिंग क्षमताओं को ध्यान में रखकर फायरहोस पर आधारित दिशा-निर्देशित दृष्टिकोण को भी विचार सकते हैं। दोनों का समर्थन करने से विकासकर्ताओं को नए श्रृंखला के लिए सब-स्ट्रीमों या सबग्राफ बनाने में चयन करने की स्वतंत्रता मिलती है। +New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. -> **ध्यान दें**: ईवीएम श्रृंखलाओं के लिए फ़ायरहोज़-आधारित एकीकरण के लिए अभी भी इंडेक्सर्स को श्रृंखला के संग्रह आरपीसी नोड को सबग्राफ को ठीक से अनुक्रमित करने के लिए चलाने की आवश्यकता होगी। यह `eth_call` RPC विधि द्वारा आम तौर पर पहुंच योग्य स्मार्ट अनुबंध स्थिति प्रदान करने में फ़ायरहोज़ की असमर्थता के कारण है। (यह याद दिलाने लायक है कि eth_calls [डेवलपर्स के लिए अच्छा अभ्यास नहीं है](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) --- @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. 
Some options are below: diff --git a/website/pages/hi/operating-graph-node.mdx b/website/pages/hi/operating-graph-node.mdx index f83a7efb4f6c..5d53c0fe05a7 100644 --- a/website/pages/hi/operating-graph-node.mdx +++ b/website/pages/hi/operating-graph-node.mdx @@ -2,19 +2,19 @@ title: ग्राफ नोड का परिचालन --- -ग्राफ़ नोड वह घटक है जो सबग्राफ को अनुक्रमित करता है, और परिणामी डेटा को ग्राफक्यूएल एपीआई के माध्यम से क्वेरी के लिए उपलब्ध कराता है। इस प्रकार यह इंडेक्सर स्टैक के लिए केंद्रीय है, और एक सफल इंडेक्सर को चलाने के लिए ग्राफ़ नोड का सही संचालन महत्वपूर्ण है। +ग्राफ़ नोड वह घटक है जो उप-अनुच्छेदों को अनुक्रमित करता है, और परिणामी डेटा को ग्राफ़िकल एपीआई के माध्यम से क्वेरी के लिए उपलब्ध कराता है। इस तरह यह इंडेक्सर स्टैक के लिए केंद्रीय है, और एक सफल इंडेक्सर चलाने के लिए ग्राफ नोड का सही संचालन महत्वपूर्ण है। -यह ग्राफ़ नोड का एक प्रासंगिक अवलोकन और इंडेक्सर्स के लिए उपलब्ध कुछ अधिक उन्नत विकल्प प्रदान करता है। विस्तृत दस्तावेज़ और निर्देश [ग्राफ़ नोड रिपॉजिटरी](https://github.com/graphprotocol/graph-node) में पाए जा सकते हैं। +यह ग्राफ़ नोड का एक प्रासंगिक अवलोकन प्रदान करता है, और इंडेक्सर्स के लिए उपलब्ध कुछ और उन्नत विकल्प प्रदान करता है। विस्तृत दस्तावेज़ और निर्देश [ग्राफ़ नोड रिपॉजिटरी](https://github.com/graphprotocol/graph-node) में देखे जा सकते हैं। ## ग्राफ नोड -[ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) ग्राफ़ नेटवर्क पर सबग्राफ़ को अनुक्रमित करने, ब्लॉकचेन क्लाइंट से जुड़ने, सबग्राफ़ को अनुक्रमित करने और अनुक्रमित डेटा उपलब्ध कराने के लिए संदर्भ कार्यान्वयन है पूछताछ करने के लिए। +[ग्राफ़ नोड](https://github.com/graphprotocol/graph-node) ग्राफ़ नेटवर्क पर सबग्राफ़ को इंडेक्स करने, ब्लॉकचेन क्लाइंट से कनेक्ट करने, सबग्राफ़ को इंडेक्स करने और इंडेक्स किए गए डेटा को उपलब्ध कराने के लिए संदर्भ कार्यान्वयन है पूछताछ करने के लिए। -ग्राफ़ नोड (और संपूर्ण इंडेक्सर स्टैक) को नंगे धातु, या क्लाउड वातावरण में चलाया जा सकता है। केंद्रीय अनुक्रमण घटक का यह लचीलापन ग्राफ़ प्रोटोकॉल की मजबूती के लिए महत्वपूर्ण है। इसी तरह, ग्राफ़ नोड [स्रोत से बनाया जा सकता है](https://github.com/graphprotocol/graph-node), या इंडेक्सर्स [डॉकर छवियाँ प्रदान की गईं](https:// में से किसी एक का उपयोग कर सकते हैं hub.docker.com/r/graphprotocol/graph-node)। +ग्राफ़ नोड (और संपूर्ण इंडेक्सर स्टैक) को नंगे धातु, या क्लाउड वातावरण में चलाया जा सकता है। सेंट्रल इंडेक्सिंग कंपोनेंट का यह लचीलापन द ग्राफ प्रोटोकॉल की मजबूती के लिए महत्वपूर्ण है। इसी तरह, ग्राफ़ नोड [स्रोत से निर्मित](https://github.com/graphprotocol/graph-node) हो सकता है, या इंडेक्सर [डॉकर छवियां प्रदान करता है](https:// में से किसी एक का उपयोग कर सकते हैं। hub.docker.com/r/graphprotocol/graph-node)। ### पोस्टग्रेएसक्यूएल डेटाबेस -ग्राफ़ नोड के लिए मुख्य स्टोर, यह वह जगह है जहां सबग्राफ डेटा संग्रहीत किया जाता है, साथ ही सबग्राफ के बारे में मेटाडेटा, और सबग्राफ-अज्ञेयवादी नेटवर्क डेटा जैसे ब्लॉक कैश और एथ_कॉल कैश। +ग्राफ नोड के लिए मुख्य स्टोर, यह वह जगह है जहां सबग्राफ डेटा संग्रहीत किया जाता है, साथ ही सबग्राफ के बारे में मेटाडेटा, और सबग्राफ-एग्नोस्टिक नेटवर्क डेटा जैसे ब्लॉक कैश, और eth_call कैश। ### नेटवर्क क्लाइंट @@ -22,11 +22,11 @@ title: ग्राफ नोड का परिचालन जबकि कुछ सबग्राफ को केवल एक पूर्ण एथेरियम नोड की आवश्यकता हो सकती है, कुछ में इंडेक्सिंग सुविधाएं हो सकती हैं जिनके लिए अतिरिक्त आरपीसी कार्यक्षमता की आवश्यकता होती है। विशेष रूप से सबग्राफ जो इंडेक्सिंग के हिस्से के रूप में `eth_calls` बनाते हैं, उन्हें एक आर्काइव नोड की आवश्यकता होगी जो [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) को सपोर्ट करता हो।, और `callHandlers` वाले सबग्राफ, या `call` फ़िल्टर वाले `blockHandlers`, `trace_filter` समर्थन की आवश्यकता होती है 
([ट्रेस मॉड्यूल दस्तावेज़ यहां देखें](https://openethereum.github.io/JSONRPC-trace-module))। -**आगामी: नेटवर्क फ़ायरहोज़** - फ़ायरहोज़ एक जीआरपीसी सेवा है जो ऑर्डर किए गए, फिर भी फोर्क-अवेयर, ब्लॉकों की स्ट्रीम प्रदान करती है, जिसे ग्राफ़ के मुख्य डेवलपर्स द्वारा परफॉर्मेंट इंडेक्सिंग को बेहतर समर्थन देने के लिए विकसित किया गया है। पैमाना। यह वर्तमान में इंडेक्सर की आवश्यकता नहीं है, लेकिन इंडेक्सर्स को पूर्ण नेटवर्क समर्थन से पहले, प्रौद्योगिकी से परिचित होने के लिए प्रोत्साहित किया जाता है। फ़ायरहोज़ के बारे में अधिक जानें [यहां](https://firehose.streamingfast.io/)। +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### आईपीएफएस नोड्स -सबग्राफ़ परिनियोजन मेटाडेटा IPFS नेटवर्क पर संग्रहीत किया जाता है। ग्राफ़ नोड मुख्य रूप से सबग्राफ़ परिनियोजन के दौरान सबग्राफ़ मेनिफेस्ट और सभी लिंक की गई फ़ाइलों को लाने के लिए आईपीएफएस नोड तक पहुंचता है। नेटवर्क इंडेक्सर्स को अपने स्वयं के आईपीएफएस नोड को होस्ट करने की आवश्यकता नहीं है। नेटवर्क के लिए एक IPFS नोड https://ipfs.network.thegraph.com पर होस्ट किया गया है। +सबग्राफ परिनियोजन मेटाडेटा IPFS नेटवर्क पर संग्रहीत है। सबग्राफ मैनिफ़ेस्ट और सभी लिंक की गई फ़ाइलों को लाने के लिए सबग्राफ़ परिनियोजन के दौरान ग्राफ़ नोड मुख्य रूप से IPFS नोड तक पहुँचता है। नेटवर्क इंडेक्सर्स को अपने स्वयं के IPFS नोड को होस्ट करने की आवश्यकता नहीं है। नेटवर्क के लिए IPFS नोड https://ipfs.network.thegraph.com पर होस्ट किया गया है। ### प्रोमेथियस मेट्रिक्स सर्वर @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Setup -1. PostgreSQL डेटाबेस सर्वर प्रारंभ करें +1. 
एक PostgreSQL डेटाबेस सर्वर प्रारंभ करें ```sh initdb -D .postgres @@ -71,9 +71,9 @@ cargo run -p graph-node --release -- \ ### कुबेरनेट्स के साथ शुरुआत करना -संपूर्ण Kubernetes उदाहरण कॉन्फ़िगरेशन [इंडेक्सर रिपॉजिटरी](https://github.com/graphprotocol/indexer/tree/main/k8s) में पाया जा सकता है। +एक पूर्ण कुबेरनेट्स उदाहरण विन्यास [इंडेक्सर रिपॉजिटरी](https://github.com/graphprotocol/indexer/tree/main/k8s) में पाया जा सकता है। -### पोर्ट +### Ports जब यह चल रहा होता है तो ग्राफ़ नोड निम्नलिखित पोर्ट को उजागर करता है: @@ -89,13 +89,13 @@ cargo run -p graph-node --release -- \ ## उन्नत ग्राफ़ नोड कॉन्फ़िगरेशन -अपने सबसे सरल रूप में, ग्राफ़ नोड को ग्राफ़ नोड के एक उदाहरण, एक एकल पोस्टग्रेएसक्यूएल डेटाबेस, एक आईपीएफएस नोड और अनुक्रमित किए जाने वाले सबग्राफ द्वारा आवश्यक नेटवर्क क्लाइंट के साथ संचालित किया जा सकता है। +अपने सरलतम रूप में, ग्राफ़ नोड को ग्राफ़ नोड, एक एकल PostgreSQL डेटाबेस, एक IPFS नोड, और नेटवर्क क्लाइंट के रूप में संचालित किया जा सकता है, जैसा कि उप-अनुच्छेदों द्वारा अनुक्रमित किया जाना आवश्यक है। -उन ग्राफ़ नोड्स का समर्थन करने के लिए कई ग्राफ़ नोड्स और कई डेटाबेस जोड़कर इस सेटअप को क्षैतिज रूप से बढ़ाया जा सकता है। उन्नत उपयोगकर्ता ग्राफ़ नोड की कुछ क्षैतिज स्केलिंग क्षमताओं के साथ-साथ `config.toml` फ़ाइल और ग्राफ़ नोड के पर्यावरण चर के माध्यम से कुछ अधिक उन्नत कॉन्फ़िगरेशन विकल्पों का लाभ उठाना चाह सकते हैं। +उन ग्राफ़ नोड्स का समर्थन करने के लिए कई ग्राफ़ नोड्स और कई डेटाबेस जोड़कर इस सेटअप को क्षैतिज रूप से बढ़ाया जा सकता है। उन्नत उपयोगकर्ता `config.toml` फ़ाइल और ग्राफ़ नोड के पर्यावरण चर के माध्यम से ग्राफ़ नोड की कुछ क्षैतिज स्केलिंग क्षमताओं के साथ-साथ कुछ अधिक उन्नत कॉन्फ़िगरेशन विकल्पों का लाभ उठाना चाह सकते हैं। ### `config.toml` -एक [TOML](https://toml.io/en/) कॉन्फ़िगरेशन फ़ाइल का उपयोग CLI में प्रदर्शित कॉन्फ़िगरेशन की तुलना में अधिक जटिल कॉन्फ़िगरेशन सेट करने के लिए किया जा सकता है। फ़ाइल का स्थान --config कमांड लाइन स्विच के साथ पास किया जाता है। +एक [TOML](https://toml.io/en/) कॉन्फ़िगरेशन फ़ाइल का उपयोग CLI में प्रदर्शित कॉन्फ़िगरेशन की तुलना में अधिक जटिल कॉन्फ़िगरेशन सेट करने के लिए किया जा सकता है। फ़ाइल का स्थान --config कमांड लाइन स्विच के साथ दिया जाता है। > कॉन्फ़िगरेशन फ़ाइल का उपयोग करते समय, --postgres-url, --postgres-secondary-hosts, और --postgres-host-weights विकल्पों का उपयोग करना संभव नहीं है। @@ -110,7 +110,7 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -`config.toml` का पूरा दस्तावेज़ [ग्राफ़ नोड में पाया जा सकता है दस्तावेज़](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +`config.toml` का पूरा दस्तावेज़ [ग्राफ़ नोड में पाया जा सकता है डॉक्स](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md)। #### एकाधिक ग्राफ नोड्स @@ -120,7 +120,7 @@ indexers = [ "<.. 
list of all indexing nodes ..>" ] #### परिनियोजन नियम -एकाधिक ग्राफ नोड्स को देखते हुए, नए सबग्राफ की तैनाती का प्रबंधन करना आवश्यक है ताकि एक ही सबग्राफ को दो अलग-अलग नोड्स द्वारा अनुक्रमित न किया जा सके, जिससे टकराव हो सकता है। यह परिनियोजन नियमों का उपयोग करके किया जा सकता है, जो यह भी निर्दिष्ट कर सकता है कि यदि डेटाबेस शार्डिंग का उपयोग किया जा रहा है, तो सबग्राफ का डेटा किस `shard` में संग्रहीत किया जाना चाहिए। परिनियोजन नियम सबग्राफ नाम और उस नेटवर्क पर मेल खा सकते हैं जिसे निर्णय लेने के लिए परिनियोजन अनुक्रमित किया जा रहा है। +कई ग्राफ़ नोड्स को देखते हुए, नए सबग्राफ की तैनाती का प्रबंधन करना आवश्यक है ताकि एक ही सबग्राफ को दो अलग-अलग नोड्स द्वारा अनुक्रमित नहीं किया जा सके, जिससे टकराव हो। यह परिनियोजन नियमों का उपयोग करके किया जा सकता है, जो यह भी निर्दिष्ट कर सकता है कि यदि डेटाबेस शार्डिंग का उपयोग किया जा रहा है, तो `shard` को सबग्राफ के डेटा में संग्रहीत किया जाना चाहिए। डिप्लॉयमेंट नियम सबग्राफ नाम और उस नेटवर्क से मेल खा सकते हैं जिसे निर्णय लेने के लिए डिप्लॉयमेंट इंडेक्स कर रहा है। उदाहरण परिनियोजन नियम कॉन्फ़िगरेशन: @@ -154,32 +154,32 @@ indexers = [ #### समर्पित क्वेरी नोड्स -कॉन्फ़िगरेशन फ़ाइल में निम्नलिखित को शामिल करके नोड्स को स्पष्ट रूप से क्वेरी नोड्स के रूप में कॉन्फ़िगर किया जा सकता है: +कॉन्फ़िगरेशन फ़ाइल में निम्नलिखित को शामिल करके नोड्स को स्पष्ट रूप से क्वेरी नोड होने के लिए कॉन्फ़िगर किया जा सकता है: ```toml [general] query = "" ``` -कोई भी नोड जिसका --नोड-आईडी रेगुलर एक्सप्रेशन से मेल खाता है, केवल प्रश्नों का उत्तर देने के लिए सेट किया जाएगा। +कोई भी नोड जिसका --node-id रेगुलर एक्सप्रेशन से मेल खाता है, केवल प्रश्नों का जवाब देने के लिए सेट किया जाएगा। -#### शार्डिंग के माध्यम से डेटाबेस स्केलिंग +#### शार्डिंग के माध्यम से डाटाबेस स्केलिंग -अधिकांश उपयोग के मामलों के लिए, एक एकल पोस्टग्रेज़ डेटाबेस ग्राफ़-नोड उदाहरण का समर्थन करने के लिए पर्याप्त है। जब एक ग्राफ-नोड उदाहरण एकल पोस्टग्रेज डेटाबेस से आगे निकल जाता है, तो ग्राफ-नोड के डेटा के भंडारण को कई पोस्टग्रेज डेटाबेस में विभाजित करना संभव है। सभी डेटाबेस मिलकर ग्राफ़-नोड इंस्टेंस का स्टोर बनाते हैं। प्रत्येक व्यक्तिगत डेटाबेस को शार्ड कहा जाता है। +अधिकांश उपयोग के मामलों के लिए, एक एकल पोस्टग्रेज डेटाबेस ग्राफ-नोड उदाहरण का समर्थन करने के लिए पर्याप्त है। जब एक ग्राफ-नोड उदाहरण एकल पोस्टग्रेज डेटाबेस से आगे निकल जाता है, तो ग्राफ-नोड के डेटा के भंडारण को कई पोस्टग्रेज डेटाबेस में विभाजित करना संभव है। सभी डेटाबेस मिलकर ग्राफ़-नोड इंस्टेंस का स्टोर बनाते हैं। प्रत्येक व्यक्तिगत डेटाबेस को शार्ड कहा जाता है। कई डेटाबेस में सबग्राफ परिनियोजन को विभाजित करने के लिए शार्ड्स का उपयोग किया जा सकता है, और डेटाबेस में क्वेरी लोड को फैलाने के लिए प्रतिकृतियों का उपयोग करने के लिए भी उपयोग किया जा सकता है। इसमें उपलब्ध डेटाबेस कनेक्शनों की संख्या को कॉन्फ़िगर करना शामिल है, प्रत्येक ग्राफ-नोड को प्रत्येक डेटाबेस के लिए अपने कनेक्शन पूल में रखना चाहिए, जो तेजी से महत्वपूर्ण हो जाता है क्योंकि अधिक उप-अनुच्छेदों को अनुक्रमित किया जा रहा है। शेयरिंग तब उपयोगी हो जाती है जब आपका मौजूदा डेटाबेस ग्राफ़ नोड द्वारा डाले गए भार के साथ नहीं रह सकता है, और जब डेटाबेस का आकार बढ़ाना संभव नहीं होता है। -> आमतौर पर शार्ड से शुरुआत करने से पहले एक डेटाबेस को जितना संभव हो उतना बड़ा बनाना बेहतर होता है। एक अपवाद वह है जहां क्वेरी ट्रैफ़िक सबग्राफ के बीच बहुत असमान रूप से विभाजित होता है; उन स्थितियों में यह नाटकीय रूप से मदद कर सकता है यदि उच्च-वॉल्यूम सबग्राफ को एक शार्ड में रखा जाता है और बाकी सभी चीज़ों को दूसरे में रखा जाता है क्योंकि उस सेटअप से यह अधिक संभावना हो जाती है कि उच्च-वॉल्यूम सबग्राफ के लिए डेटा डीबी-आंतरिक कैश में रहता है और नहीं उस डेटा से प्रतिस्थापित करें जिसकी कम-वॉल्यूम सबग्राफ से उतनी आवश्यकता नहीं 
है। +> शार्क से शुरू करने से पहले, आम तौर पर जितना संभव हो उतना बड़ा डेटाबेस बनाना बेहतर होता है। एक अपवाद वह है जहां क्वेरी ट्रैफ़िक सबग्राफ के बीच असमान रूप से विभाजित होता है; उन परिस्थितियों में यह नाटकीय रूप से मदद कर सकता है यदि उच्च मात्रा वाले सबग्राफ को एक शार्ड में रखा जाता है और बाकी सब कुछ दूसरे में रखा जाता है क्योंकि यह सेटअप अधिक संभावना बनाता है कि उच्च मात्रा वाले सबग्राफ के लिए डेटा डीबी-आंतरिक कैश में रहता है और नहीं करता है उन डेटा से प्रतिस्थापित हो जाएं जिनकी कम मात्रा वाले सबग्राफ से उतनी आवश्यकता नहीं है। -कनेक्शन कॉन्फ़िगर करने के संदर्भ में, postgresql.conf में max*connections से शुरू करें और इसे 400 (या शायद 200) पर सेट करें और स्टोर*कनेक्शन*वाइट*टाइम*एमएस और स्टोर*कनेक्शन*चेकआउट*काउंट प्रोमेथियस मेट्रिक्स देखें। ध्यान देने योग्य प्रतीक्षा समय (5 एमएस से ऊपर कुछ भी) एक संकेत है कि बहुत कम कनेक्शन उपलब्ध हैं; डेटाबेस के बहुत व्यस्त होने (जैसे उच्च सीपीयू लोड) के कारण उच्च प्रतीक्षा समय भी होगा। हालाँकि यदि डेटाबेस अन्यथा स्थिर लगता है, तो उच्च प्रतीक्षा समय कनेक्शन की संख्या बढ़ाने की आवश्यकता का संकेत देता है। कॉन्फ़िगरेशन में, प्रत्येक ग्राफ़-नोड इंस्टेंस कितने कनेक्शन का उपयोग कर सकता है यह एक ऊपरी सीमा है, और ग्राफ़ नोड कनेक्शन को खुला नहीं रखेगा यदि उसे उनकी आवश्यकता नहीं है। +कनेक्शन कॉन्फ़िगर करने के मामले में, postgresql.conf में max_connections से 400 (या शायद 200) पर सेट करें और store_connection_wait_time_ms और store_connection_checkout_count प्रोमेथियस मेट्रिक्स देखें। ध्यान देने योग्य प्रतीक्षा समय (5ms से ऊपर कुछ भी) एक संकेत है कि बहुत कम कनेक्शन उपलब्ध हैं; उच्च प्रतीक्षा समय डेटाबेस के बहुत व्यस्त होने (जैसे उच्च CPU लोड) के कारण भी होगा। हालाँकि यदि डेटाबेस अन्यथा स्थिर लगता है, तो उच्च प्रतीक्षा समय कनेक्शन की संख्या बढ़ाने की आवश्यकता का संकेत देता है। कॉन्फ़िगरेशन में, प्रत्येक ग्राफ़-नोड उदाहरण कितने कनेक्शन का उपयोग कर सकता है, यह एक ऊपरी सीमा है, और ग्राफ़ नोड कनेक्शन को खुला नहीं रखेगा यदि इसकी आवश्यकता नहीं है। स्टोर कॉन्फ़िगरेशन के बारे में [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) अधिक पढ़ें। #### समर्पित ब्लॉक अंतर्ग्रहण -यदि कई नोड्स कॉन्फ़िगर किए गए हैं, तो एक नोड को निर्दिष्ट करना आवश्यक होगा जो नए ब्लॉकों के अंतर्ग्रहण के लिए जिम्मेदार है, ताकि सभी कॉन्फ़िगर किए गए इंडेक्स नोड्स चेन हेड पर मतदान न करें। यह `चेन` नेमस्पेस के भाग के रूप में किया जाता है, जो ब्लॉक अंतर्ग्रहण के लिए उपयोग किए जाने वाले `node_id` को निर्दिष्ट करता है: +यदि कई नोड्स कॉन्फ़िगर किए गए हैं, तो एक नोड को निर्दिष्ट करना आवश्यक होगा जो नए ब्लॉकों के अंतर्ग्रहण के लिए जिम्मेदार है, ताकि सभी कॉन्फ़िगर किए गए इंडेक्स नोड्स चेन हेड को पोल न करें। यह `चेन्स` नामस्थान के हिस्से के रूप में किया जाता है, ब्लॉक अंतर्ग्रहण के लिए उपयोग किए जाने वाले `node_id` को निर्दिष्ट करता है: ```toml [chains] @@ -188,13 +188,13 @@ ingestor = "block_ingestor_node" #### कई नेटवर्क का समर्थन करना -ग्राफ़ प्रोटोकॉल अनुक्रमण पुरस्कारों के लिए समर्थित नेटवर्कों की संख्या बढ़ा रहा है, और असमर्थित नेटवर्कों को अनुक्रमित करने वाले कई सबग्राफ मौजूद हैं जिन्हें एक अनुक्रमणिका संसाधित करना चाहेगी। `config.toml` फ़ाइल निम्न के अभिव्यंजक और लचीले कॉन्फ़िगरेशन की अनुमति देती है: +ग्राफ़ प्रोटोकॉल इंडेक्सिंग पुरस्कारों के लिए समर्थित नेटवर्क की संख्या बढ़ा रहा है, और ऐसे कई सबग्राफ मौजूद हैं जो असमर्थित नेटवर्क को इंडेक्स करते हैं जिन्हें एक इंडेक्सर प्रोसेस करना चाहता है। `config.toml` फ़ाइल अभिव्यक्तिपूर्ण और लचीले कॉन्फ़िगरेशन की अनुमति देती है: - एकाधिक नेटवर्क - प्रति नेटवर्क एकाधिक प्रदाता (यह प्रदाताओं में लोड को विभाजित करने की अनुमति दे सकता है, और पूर्ण नोड्स के साथ-साथ आर्काइव नोड्स के कॉन्फ़िगरेशन की अनुमति भी दे सकता 
है, यदि कोई वर्कलोड अनुमति देता है तो ग्राफ नोड सस्ता प्रदाताओं को प्राथमिकता देता है)। - अतिरिक्त प्रदाता विवरण, जैसे सुविधाएँ, प्रमाणीकरण और प्रदाता का प्रकार (प्रायोगिक फ़ायरहोज़ समर्थन के लिए) -`[chains]` अनुभाग एथेरियम प्रदाताओं को नियंत्रित करता है जिनसे ग्राफ-नोड जुड़ता है, और जहां प्रत्येक श्रृंखला के लिए ब्लॉक और अन्य मेटाडेटा संग्रहीत होते हैं। निम्नलिखित उदाहरण दो श्रृंखलाओं, मेननेट और कोवन को कॉन्फ़िगर करता है, जहां मेननेट के लिए ब्लॉक वीआईपी शार्ड में संग्रहीत होते हैं और कोवन के लिए ब्लॉक प्राथमिक शार्ड में संग्रहीत होते हैं। मेननेट श्रृंखला दो अलग-अलग प्रदाताओं का उपयोग कर सकती है, जबकि कोवन में केवल एक प्रदाता है। +`[chains]` अनुभाग एथेरियम प्रदाताओं को नियंत्रित करता है जो ग्राफ़-नोड से कनेक्ट होते हैं, और जहां प्रत्येक श्रृंखला के लिए ब्लॉक और अन्य मेटाडेटा संग्रहीत होते हैं। निम्न उदाहरण दो श्रृंखलाओं, मेननेट और कोवन को कॉन्फ़िगर करता है, जहां मेननेट के ब्लॉक वीआईपी शार्ड में संग्रहीत होते हैं और कोवन के लिए ब्लॉक प्राथमिक शार्ड में संग्रहीत होते हैं। मेननेट श्रृंखला दो अलग-अलग प्रदाताओं का उपयोग कर सकती है, जबकि कोवन में केवल एक प्रदाता होता है। ```toml [chains] @@ -210,65 +210,65 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -प्रदाता कॉन्फ़िगरेशन के बारे में अधिक पढ़ें [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers)। +प्रदाता कॉन्फ़िगरेशन के बारे में [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) अधिक पढ़ें। ### Environment variables -ग्राफ़ नोड पर्यावरण चर की एक श्रृंखला का समर्थन करता है जो सुविधाओं को सक्षम कर सकता है, या ग्राफ़ नोड व्यवहार को बदल सकता है। इन्हें [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) प्रलेखित किया गया है। +ग्राफ़ नोड पर्यावरण चर की एक श्रृंखला का समर्थन करता है जो सुविधाओं को सक्षम कर सकता है, या ग्राफ़ नोड व्यवहार को बदल सकता है। ये [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) प्रलेखित हैं। -### निरंतर तैनाती +### Continuous deployment जो उपयोगकर्ता उन्नत कॉन्फ़िगरेशन के साथ एक स्केल्ड इंडेक्सिंग सेटअप का संचालन कर रहे हैं, वे कुबेरनेट्स के साथ अपने ग्राफ़ नोड्स को प्रबंधित करने से लाभान्वित हो सकते हैं। -- इंडेक्सर रिपॉजिटरी में एक [उदाहरण Kubernetes संदर्भ](https://github.com/graphprotocol/indexer/tree/main/k8s) है +- इंडेक्सर रिपॉजिटरी में [उदाहरण Kubernetes संदर्भ](https://github.com/graphprotocol/indexer/tree/main/k8s) है - [लॉन्चपैड](https://docs.graphops.xyz/launchpad/intro) ग्राफऑप्स द्वारा संचालित कुबेरनेट्स पर ग्राफ प्रोटोकॉल इंडेक्सर चलाने के लिए एक टूलकिट है। यह ग्राफ नोड परिनियोजन का प्रबंधन करने के लिए हेल्म चार्ट और सीएलआई का एक सेट प्रदान करता है। -### ग्राफ़ नोड का प्रबंधन +### ग्राफ नोड का प्रबंधन -एक चालू ग्राफ़ नोड (या ग्राफ़ नोड्स!) को देखते हुए, चुनौती उन नोड्स में तैनात सबग्राफ़ को प्रबंधित करने की है। ग्राफ़ नोड सबग्राफ़ को प्रबंधित करने में सहायता के लिए कई प्रकार के टूल पेश करता है। +चल रहे ग्राफ़ नोड (या ग्राफ़ नोड्स!) 
को देखते हुए, चुनौती उन नोड्स में तैनात सबग्राफ को प्रबंधित करने की है। ग्राफ़ नोड उप-अनुच्छेदों को प्रबंधित करने में मदद करने के लिए उपकरणों की एक श्रृंखला पेश करता है। #### लॉगिंग -ग्राफ़ नोड के लॉग ग्राफ़ नोड और विशिष्ट सबग्राफ के डिबगिंग और अनुकूलन के लिए उपयोगी जानकारी प्रदान कर सकते हैं। ग्राफ़ नोड निम्नलिखित स्तरों के साथ `GRAPH_LOG` पर्यावरण चर के माध्यम से विभिन्न लॉग स्तरों का समर्थन करता है: त्रुटि, चेतावनी, जानकारी, डीबग या ट्रेस। +ग्राफ़ नोड के लॉग ग्राफ़ नोड और विशिष्ट सबग्राफ के डिबगिंग और अनुकूलन के लिए उपयोगी जानकारी प्रदान कर सकते हैं। ग्राफ़ नोड निम्न स्तरों के साथ `GRAPH_LOG` पर्यावरण चर के माध्यम से विभिन्न लॉग स्तरों का समर्थन करता है: त्रुटि, चेतावनी, सूचना, डीबग या ट्रेस। इसके अलावा `GRAPH_LOG_QUERY_TIMING` को `gql` पर सेट करना इस बारे में अधिक विवरण प्रदान करता है कि ग्राफ़क्यूएल क्वेरीज़ कैसे चल रही हैं (हालांकि यह बड़ी मात्रा में लॉग उत्पन्न करेगा)। -#### Monitoring & alerting +#### निगरानी & चेतावनी ग्राफ़ नोड डिफ़ॉल्ट रूप से 8040 पोर्ट पर प्रोमेथियस एंडपॉइंट के माध्यम से मेट्रिक्स प्रदान करता है। इन मेट्रिक्स की कल्पना करने के लिए ग्राफाना का उपयोग किया जा सकता है। -इंडेक्सर रिपॉजिटरी एक [उदाहरण Grafana कॉन्फ़िगरेशन](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) प्रदान करता है। +इंडेक्सर रिपॉजिटरी [ग्राफाना कॉन्फ़िगरेशन का उदाहरण](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) प्रदान करता है। -#### ग्राफ़मैन +#### Graphman `ग्राफ़मैन` ग्राफ़ नोड के लिए एक रखरखाव टूल है, जो विभिन्न दैनिक और असाधारण कार्यों के निदान और समाधान में मदद करता है। ग्राफ़मैन कमांड आधिकारिक कंटेनरों में शामिल है, और आप इसे चलाने के लिए अपने ग्राफ़-नोड कंटेनर में docker exec कर सकते हैं। इसके लिए `config.toml` फ़ाइल की आवश्यकता होती है। -`ग्राफमैन` कमांड का पूरा दस्तावेज़ ग्राफ़ नोड रिपॉजिटरी में उपलब्ध है। ग्राफ़ नोड `/docs` में \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) देखें +`ग्राफ़मैन` कमांड का पूरा दस्तावेज़ ग्राफ़ नोड रिपॉजिटरी में उपलब्ध है। देखें \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) ग्राफ़ नोड `/docs` में -### सबग्राफ के साथ कार्य करना +### सबग्राफ के साथ काम करना #### अनुक्रमण स्थिति एपीआई -डिफ़ॉल्ट रूप से पोर्ट 8030/ग्राफक्यूएल पर उपलब्ध, इंडेक्सिंग स्टेटस एपीआई विभिन्न सबग्राफ के लिए इंडेक्सिंग स्थिति की जांच करने, इंडेक्सिंग के प्रमाणों की जांच करने, सबग्राफ सुविधाओं का निरीक्षण करने और बहुत कुछ के लिए तरीकों की एक श्रृंखला को उजागर करता है। +डिफ़ॉल्ट रूप से पोर्ट 8030/ग्राफ़िकल पर उपलब्ध, इंडेक्सिंग स्टेटस एपीआई विभिन्न सबग्राफ के लिए इंडेक्सिंग स्टेटस की जाँच करने, इंडेक्सिंग के प्रूफ़ की जाँच करने, सबग्राफ़ सुविधाओं का निरीक्षण करने आदि के लिए कई तरीकों को उजागर करता है। -पूर्ण स्कीमा [यहां](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध है। +पूरा स्कीमा [यहां](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध है। #### अनुक्रमण प्रदर्शन अनुक्रमण प्रक्रिया के तीन अलग-अलग भाग हैं: -- प्रदाता से रुचि की घटनाएँ प्राप्त की जा रही हैं +- प्रदाता से रुचि के इवेंट लाए जा रहे हैं - उपयुक्त संचालकों के साथ घटनाओं को संसाधित करना (इसमें राज्य के लिए श्रृंखला को कॉल करना और स्टोर से डेटा प्राप्त करना शामिल हो सकता है) - परिणामी डेटा को स्टोर पर लिखना -ये चरण पाइपलाइनयुक्त हैं (अर्थात इन्हें समानांतर में निष्पादित किया जा सकता है), लेकिन ये एक-दूसरे पर निर्भर हैं। जहां सबग्राफ इंडेक्स में धीमे होते हैं, अंतर्निहित कारण विशिष्ट सबग्राफ पर निर्भर करेगा। +इन चरणों को पाइपलाइन किया गया है (अर्थात इन्हें समानांतर में 
निष्पादित किया जा सकता है), लेकिन वे एक दूसरे पर निर्भर हैं। जहां सबग्राफ इंडेक्स के लिए धीमे होते हैं, अंतर्निहित कारण विशिष्ट सबग्राफ पर निर्भर करेगा। -अनुक्रमण धीमेपन के सामान्य कारण: +अनुक्रमण धीमा होने के सामान्य कारण: -- श्रृंखला से प्रासंगिक घटनाओं को खोजने में लगने वाला समय (`trace_filter` पर निर्भरता को देखते हुए, विशेष रूप से कॉल हैंडलर धीमे हो सकते हैं) -- हैंडलर के हिस्से के रूप में बड़ी संख्या में `eth_calls` बनाना +- श्रृंखला से प्रासंगिक घटनाओं को खोजने में लगने वाला समय (विशेष रूप से कॉल हैंडलर धीमा हो सकता है, `trace_filter` पर निर्भरता को देखते हुए) +- हैंडलर्स के हिस्से के रूप में बड़ी संख्या में `eth_calls` बनाना - निष्पादन के दौरान बड़ी मात्रा में स्टोर इंटरैक्शन - स्टोर में सहेजने के लिए बड़ी मात्रा में डेटा - संसाधित करने के लिए बड़ी संख्या में ईवेंट @@ -276,26 +276,26 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] - प्रदाता स्वयं चेन हेड के पीछे पड़ रहा है - प्रदाता से चेन हेड पर नई रसीदें प्राप्त करने में धीमापन -सबग्राफ इंडेक्सिंग मेट्रिक्स इंडेक्सिंग धीमेपन के मूल कारण का निदान करने में मदद कर सकता है। कुछ मामलों में, समस्या सबग्राफ के साथ ही है, लेकिन अन्य में, बेहतर नेटवर्क प्रदाता, कम डेटाबेस विवाद और अन्य कॉन्फ़िगरेशन सुधार अनुक्रमण प्रदर्शन में उल्लेखनीय सुधार कर सकते हैं। +सबग्राफ इंडेक्सिंग मेट्रिक्स इंडेक्सिंग धीमेपन के मूल कारण का निदान करने में मदद कर सकते हैं। कुछ मामलों में, समस्या सबग्राफ में ही निहित है, लेकिन अन्य में, बेहतर नेटवर्क प्रदाता, कम डेटाबेस विवाद और अन्य कॉन्फ़िगरेशन सुधार इंडेक्सिंग प्रदर्शन में स्पष्ट रूप से सुधार कर सकते हैं। #### विफल सबग्राफ -अनुक्रमण के दौरान सबग्राफ विफल हो सकते हैं, यदि उन्हें ऐसे डेटा का सामना करना पड़ता है जो अप्रत्याशित है, कुछ घटक अपेक्षा के अनुरूप काम नहीं कर रहे हैं, या यदि ईवेंट हैंडलर या कॉन्फ़िगरेशन में कुछ बग है। विफलता के दो सामान्य प्रकार हैं: +इंडेक्सिंग सबग्राफ के दौरान विफल हो सकता है, यदि वे अप्रत्याशित डेटा का सामना करते हैं, कुछ घटक अपेक्षित रूप से काम नहीं कर रहे हैं, या यदि ईवेंट हैंडलर या कॉन्फ़िगरेशन में कुछ बग है। विफलता के दो सामान्य प्रकार हैं: -- नियतात्मक विफलताएँ: ये ऐसी विफलताएँ हैं जिनका समाधान पुनः प्रयास से नहीं किया जा सकेगा +- नियतात्मक विफलताएँ: ये ऐसी विफलताएँ हैं जिन्हें पुनर्प्रयास से हल नहीं किया जा सकता है - गैर-नियतात्मक विफलताएँ: ये प्रदाता के साथ समस्याओं या कुछ अप्रत्याशित ग्राफ़ नोड त्रुटि के कारण हो सकती हैं। जब एक गैर-नियतात्मक विफलता होती है, तो ग्राफ़ नोड समय के साथ पीछे हटते हुए विफल हैंडलर को फिर से प्रयास करेगा। -कुछ मामलों में विफलता को अनुक्रमणिका द्वारा हल किया जा सकता है (उदाहरण के लिए यदि त्रुटि सही प्रकार के प्रदाता नहीं होने का परिणाम है, तो आवश्यक प्रदाता जोड़ने से अनुक्रमणिका जारी रहेगी)। हालाँकि अन्य में, सबग्राफ कोड में बदलाव की आवश्यकता होती है। +कुछ मामलों में इंडेक्सर द्वारा विफलता को हल किया जा सकता है (उदाहरण के लिए यदि त्रुटि सही प्रकार का प्रदाता नहीं होने का परिणाम है, तो आवश्यक प्रदाता जोड़ने से अनुक्रमण जारी रहेगा)। हालाँकि अन्य में, सबग्राफ कोड में बदलाव की आवश्यकता होती है। -> नियतात्मक विफलताओं को "अंतिम" माना जाता है, जिसमें विफल ब्लॉक के लिए अनुक्रमण का प्रमाण उत्पन्न होता है, जबकि गैर-नियतात्मक विफलताएं नहीं होती हैं, क्योंकि सबग्राफ "अनफेल" हो सकता है और अनुक्रमण जारी रख सकता है। कुछ मामलों में, गैर-नियतात्मक लेबल गलत है, और सबग्राफ कभी भी त्रुटि को दूर नहीं करेगा; ऐसी विफलताओं को ग्राफ़ नोड रिपॉजिटरी पर मुद्दों के रूप में रिपोर्ट किया जाना चाहिए। +> नियतात्मक विफलताओं को "अंतिम" माना जाता है, असफल ब्लॉक के लिए उत्पन्न अनुक्रमण के प्रमाण के साथ, जबकि गैर-निर्धारक विफलताओं को नहीं माना जाता है, क्योंकि सबग्राफ "असफल" और अनुक्रमण जारी रखने का प्रबंधन कर सकता है। कुछ मामलों में, गैर-नियतात्मक लेबल गलत है, और 
सबग्राफ कभी भी त्रुटि को दूर नहीं करेगा; ऐसी विफलताओं को ग्राफ़ नोड रिपॉजिटरी पर मुद्दों के रूप में रिपोर्ट किया जाना चाहिए। #### कैश को ब्लॉक और कॉल करें -प्रदाता से रीफ़ेचिंग को बचाने के लिए ग्राफ़ नोड स्टोर में कुछ डेटा को कैश करता है। ब्लॉक कैश किए जाते हैं, जैसे `eth_calls` के परिणाम होते हैं (बाद वाले को एक विशिष्ट ब्लॉक के रूप में कैश किया जाता है)। यह कैशिंग थोड़े बदले हुए सबग्राफ के "पुन: समन्वयन" के दौरान अनुक्रमण गति को नाटकीय रूप से बढ़ा सकती है। +प्रदाता से रीफ़ेचिंग को बचाने के लिए ग्राफ़ नोड स्टोर में कुछ डेटा को कैश करता है। ब्लॉक को कैश किया जाता है, जैसा कि `eth_calls` के परिणाम होते हैं (बाद वाले को एक विशिष्ट ब्लॉक के रूप में कैश किया जाता है)। यह कैशिंग थोड़े बदले हुए सबग्राफ के "रीसिंकिंग" के दौरान अनुक्रमण गति को नाटकीय रूप से बढ़ा सकती है। हालाँकि, कुछ उदाहरणों में, यदि एथेरियम नोड ने कुछ अवधि के लिए गलत डेटा प्रदान किया है, तो यह कैश में अपना रास्ता बना सकता है, जिससे गलत डेटा या विफल सबग्राफ हो सकते हैं। इस मामले में इंडेक्सर जहरीली कैश को साफ करने के लिए `ग्राफमैन` का उपयोग कर सकते हैं, और फिर प्रभावित सबग्राफ को रिवाइंड कर सकते हैं, जो तब (उम्मीद है) स्वस्थ प्रदाता से ताजा डेटा प्राप्त करेगा। -यदि ब्लॉक कैश असंगतता का संदेह है, जैसे कि टीएक्स रसीद गुम होने की घटना: +यदि एक ब्लॉक कैश असंगतता का संदेह है, जैसे कि tx रसीद गुम घटना: 1. `ग्राफमैन श्रृंखला सूची` श्रृंखला का नाम खोजने के लिए। 2. `ग्राफमैन चेन चेक-ब्लॉक बाई-नंबर ` यह जांच करेगा कि क्या कैश्ड ब्लॉक प्रदाता से मेल खाता है, और यदि ऐसा नहीं होता है तो ब्लॉक को कैश से हटा देता है। @@ -304,7 +304,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] #### समस्याओं और त्रुटियों को क्वेरी करना -एक बार सबग्राफ को अनुक्रमित कर दिए जाने के बाद, इंडेक्सर्स सबग्राफ के समर्पित क्वेरी एंडपॉइंट के माध्यम से प्रश्नों की सेवा करने की उम्मीद कर सकते हैं। यदि इंडेक्सर महत्वपूर्ण क्वेरी वॉल्यूम प्रदान करने की उम्मीद कर रहा है, तो एक समर्पित क्वेरी नोड की सिफारिश की जाती है, और बहुत अधिक क्वेरी वॉल्यूम के मामले में, इंडेक्सर प्रतिकृति शार्ड को कॉन्फ़िगर करना चाह सकते हैं ताकि क्वेरीज़ इंडेक्सिंग प्रक्रिया को प्रभावित न करें। +एक बार सबग्राफ अनुक्रमित हो जाने के बाद, इंडेक्सर्स सबग्राफ के समर्पित क्वेरी एंडपॉइंट के माध्यम से प्रश्नों की सेवा करने की उम्मीद कर सकते हैं। यदि इंडेक्सर महत्वपूर्ण क्वेरी वॉल्यूम की सेवा करने की उम्मीद कर रहा है, तो एक समर्पित क्वेरी नोड की सिफारिश की जाती है, और बहुत अधिक क्वेरी वॉल्यूम के मामले में, इंडेक्सर्स रेप्लिका शार्ड्स को कॉन्फ़िगर करना चाह सकते हैं ताकि क्वेरी इंडेक्सिंग प्रक्रिया को प्रभावित न करें। हालाँकि, एक समर्पित क्वेरी नोड और प्रतिकृतियों के साथ भी, कुछ प्रश्नों को निष्पादित करने में लंबा समय लग सकता है, और कुछ मामलों में मेमोरी उपयोग में वृद्धि होती है और अन्य उपयोगकर्ताओं के लिए क्वेरी समय को नकारात्मक रूप से प्रभावित करती है। @@ -312,34 +312,34 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] ##### क्वेरी कैशिंग -ग्राफ़ नोड डिफ़ॉल्ट रूप से ग्राफक्यूएल क्वेरीज़ को कैश करता है, जो डेटाबेस लोड को काफी कम कर सकता है। इसे आगे `GRAPH_QUERY_CACHE_BLOCKS` और `GRAPH_QUERY_CACHE_MAX_MEM` सेटिंग्स के साथ कॉन्फ़िगर किया जा सकता है - और पढ़ें [यहां](https://github.com/graphprotocol/graph-node/blob/master /docs/environment-variables.md#graphql-caching)। +ग्राफ़ नोड डिफ़ॉल्ट रूप से ग्राफ़क्यूएल प्रश्नों को कैश करता है, जो डेटाबेस लोड को काफी कम कर सकता है। इसे `GRAPH_QUERY_CACHE_BLOCKS` और `GRAPH_QUERY_CACHE_MAX_MEM` सेटिंग्स के साथ और अधिक कॉन्फ़िगर किया जा सकता है - अधिक [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) पढ़ें। ##### प्रश्नों का विश्लेषण -समस्याग्रस्त प्रश्न अक्सर दो तरीकों 
में से एक में सामने आते हैं। कुछ मामलों में, उपयोगकर्ता स्वयं रिपोर्ट करते हैं कि दी गई क्वेरी धीमी है। उस स्थिति में चुनौती धीमेपन के कारण का निदान करना है - चाहे वह एक सामान्य मुद्दा हो, या उस सबग्राफ या क्वेरी के लिए विशिष्ट हो। और फिर यदि संभव हो तो निश्चित रूप से इसका समाधान करें। +समस्याग्रस्त प्रश्न अक्सर दो तरीकों में से एक में सामने आते हैं। कुछ मामलों में, उपयोगकर्ता स्वयं रिपोर्ट करते हैं कि दी गई क्वेरी धीमी है। उस स्थिति में धीमी गति के कारण का निदान करना चुनौती है - चाहे वह एक सामान्य समस्या हो, या उस सबग्राफ या क्वेरी के लिए विशिष्ट हो। और यदि संभव हो तो निश्चित रूप से इसे हल करने के लिए। अन्य मामलों में, क्वेरी नोड पर ट्रिगर उच्च मेमोरी उपयोग हो सकता है, इस मामले में सबसे पहले समस्या उत्पन्न करने वाली क्वेरी की पहचान करना चुनौती है। इंडेक्सर [qlog](https://github.com/graphprotocol/qlog/) का इस्तेमाल ग्राफ़ नोड के क्वेरी लॉग को प्रोसेस करने और सारांशित करने के लिए कर सकते हैं। `GRAPH_LOG_QUERY_TIMING` को धीमी क्वेरी को पहचानने और डीबग करने में सहायता के लिए भी सक्षम किया जा सकता है। -धीमी क्वेरी को देखते हुए, इंडेक्सर्स के पास कुछ विकल्प होते हैं। निःसंदेह वे समस्याग्रस्त क्वेरी भेजने की लागत में उल्लेखनीय वृद्धि करने के लिए अपने लागत मॉडल को बदल सकते हैं। इसके परिणामस्वरूप उस क्वेरी की आवृत्ति में कमी आ सकती है. हालाँकि यह अक्सर समस्या के मूल कारण का समाधान नहीं करता है। +धीमी क्वेरी को देखते हुए, इंडेक्सर्स के पास कुछ विकल्प होते हैं। निस्संदेह वे अपने लागत मॉडल को बदल सकते हैं, समस्याग्रस्त क्वेरी भेजने की लागत में काफी वृद्धि कर सकते हैं। इसके परिणामस्वरूप उस क्वेरी की आवृत्ति में कमी हो सकती है। हालाँकि यह अक्सर समस्या के मूल कारण को हल नहीं करता है। -##### खाता-जैसा अनुकूलन +##### खाता जैसा अनुकूलन -डेटाबेस तालिकाएँ जो संस्थाओं को संग्रहीत करती हैं, आम तौर पर दो किस्मों में आती हैं: 'लेन-देन-जैसी', जहाँ इकाइयाँ, एक बार बनाई जाने के बाद, कभी अपडेट नहीं होती हैं, यानी, वे वित्तीय लेनदेन की सूची के समान कुछ संग्रहीत करती हैं, और 'खाता-जैसी' जहाँ इकाइयाँ होती हैं बहुत बार अपडेट किए जाते हैं, यानी, वे वित्तीय खातों की तरह कुछ संग्रहीत करते हैं जो हर बार लेनदेन दर्ज होने पर संशोधित हो जाते हैं। खाता-जैसी तालिकाओं की विशेषता यह है कि उनमें बड़ी संख्या में इकाई संस्करण होते हैं, लेकिन अपेक्षाकृत कुछ अलग इकाइयाँ होती हैं। अक्सर, ऐसी तालिकाओं में अलग-अलग इकाइयों की संख्या कुल पंक्तियों की संख्या (इकाई संस्करण) का 1% होती है +डेटाबेस तालिकाएँ जो संस्थाओं को स्टोर करती हैं, आम तौर पर दो किस्मों में आती हैं: 'लेन-देन-जैसी', जहाँ संस्थाएँ, एक बार बनने के बाद, कभी भी अपडेट नहीं होती हैं, यानी, वे वित्तीय लेनदेन की सूची के समान कुछ स्टोर करती हैं, और 'खाता-जैसा' जहाँ संस्थाएँ बहुत बार अपडेट किए जाते हैं, यानी, वे वित्तीय खातों की तरह कुछ स्टोर करते हैं जो हर बार लेनदेन रिकॉर्ड होने पर संशोधित हो जाते हैं। खाता-जैसी तालिकाओं की विशेषता इस तथ्य से होती है कि उनमें बड़ी संख्या में इकाई संस्करण होते हैं, लेकिन अपेक्षाकृत कुछ विशिष्ट इकाइयाँ होती हैं। अक्सर, ऐसी तालिकाओं में अलग-अलग संस्थाओं की संख्या पंक्तियों की कुल संख्या (इकाई संस्करण) का 1% होती है -खाता-जैसी तालिकाओं के लिए, `ग्राफ-नोड` क्वेरी उत्पन्न कर सकता है जो इस विवरण का लाभ उठाता है कि पोस्टग्रेज परिवर्तन की इतनी उच्च दर के साथ डेटा को कैसे संग्रहीत करता है, अर्थात् हाल के ब्लॉक के सभी संस्करण इसमें हैं ऐसी तालिका के लिए समग्र भंडारण का एक छोटा उपधारा। +खाता-जैसी तालिकाओं के लिए, `ग्राफ़-नोड` प्रश्नों को उत्पन्न कर सकता है जो इस बात का विवरण देता है कि कैसे पोस्टग्रेज डेटा को इतनी उच्च दर के परिवर्तन के साथ संग्रहीत करता है, अर्थात् हाल के ब्लॉक के सभी संस्करण अंदर हैं ऐसी तालिका के लिए समग्र संग्रहण का एक छोटा उपखंड। -कमांड `ग्राफमैन आँकड़े दिखाते हैं; दिखाता है, परिनियोजन में प्रत्येक इकाई प्रकार/तालिका के लिए, कितनी अलग-अलग 
इकाइयाँ हैं, और प्रत्येक तालिका में कितने इकाई संस्करण हैं। वह डेटा पोस्टग्रेज़-आंतरिक अनुमानों पर आधारित है, और इसलिए आवश्यक रूप से सटीक नहीं है, और परिमाण के क्रम से बंद हो सकता है। `इकाई` कॉलम में एक `-1` का मतलब है कि पोस्टग्रेज का मानना है कि सभी पंक्तियों में एक अलग इकाई होती है। +आदेश `ग्राफ़मैन आँकड़े दिखाता है, परिनियोजन में प्रत्येक इकाई प्रकार/तालिका के लिए, कितने अलग निकाय हैं, और प्रत्येक तालिका में कितने इकाई संस्करण हैं। वह डेटा पोस्टग्रेज-आंतरिक अनुमानों पर आधारित है, और इसलिए अनिवार्य रूप से सटीक है, और परिमाण के क्रम से बंद हो सकता है। `-1` `entities` कॉलम में इसका मतलब है कि Postgres का मानना है कि सभी पंक्तियों में एक अलग इकाई होती है। -सामान्य तौर पर, ऐसी तालिकाएँ जहाँ अलग-अलग संस्थाओं की संख्या पंक्तियों/इकाई संस्करणों की कुल संख्या के 1% से कम होती है, खाता-जैसे अनुकूलन के लिए अच्छे उम्मीदवार होते हैं। जब `ग्राफमैन आँकड़े दिखाते हैं` का आउटपुट इंगित करता है कि एक तालिका इस अनुकूलन से लाभान्वित हो सकती है, तो `ग्राफमैन आँकड़े चलाने से
` तालिका की पूरी गिनती करेगा - जो धीमी हो सकती है, लेकिन समग्र इकाई संस्करणों के लिए अलग-अलग इकाइयों के अनुपात का सटीक माप देती है। +सामान्य तौर पर, तालिकाएँ जहाँ अलग-अलग संस्थाओं की संख्या पंक्तियों / इकाई संस्करणों की कुल संख्या के 1% से कम होती है, वे खाता-जैसे अनुकूलन के लिए अच्छे उम्मीदवार होते हैं। जब `ग्राफ़मैन आँकड़े दिखाते हैं` का आउटपुट इंगित करता है कि एक तालिका इस अनुकूलन से लाभान्वित हो सकती है, चल रहे `ग्राफ़मैन आँकड़े <टेबल>` तालिका की पूरी गणना करेगा - जो धीमा हो सकता है, लेकिन अलग-अलग इकाइयों के समग्र इकाई संस्करणों के अनुपात का एक सटीक माप देता है। -एक बार एक तालिका को खाता-जैसी निर्धारित करने के बाद, `ग्राफमैन सांख्यिकी खाता-जैसे .
` चलाने से उस तालिका के विरुद्ध प्रश्नों के लिए खाता-जैसा अनुकूलन चालू हो जाएगा। ऑप्टिमाइज़ेशन को फिर से बंद किया जा सकता है `graphman stats account-like --clear .
` क्वेरी नोड्स को यह नोटिस करने में 5 मिनट तक का समय लगता है कि ऑप्टिमाइज़ेशन चालू कर दिया गया है या बंद. अनुकूलन चालू करने के बाद, यह सत्यापित करना आवश्यक है कि परिवर्तन वास्तव में उस तालिका के लिए प्रश्नों को धीमा नहीं करता है। यदि आपने पोस्टग्रेज़ की निगरानी के लिए ग्राफाना को कॉन्फ़िगर किया है, तो धीमी क्वेरीज़ बड़ी संख्या में `pg_stat_activity` में दिखाई देंगी, जिसमें कई सेकंड लगेंगे। उस स्थिति में, अनुकूलन को फिर से बंद करना होगा। +एक बार तालिका को खाता-समान निर्धारित कर लेने के बाद, `ग्राफ़मैन आँकड़े खाता-जैसा .
` चलाने से उस तालिका के विरुद्ध प्रश्नों के लिए खाता-जैसा अनुकूलन चालू हो जाएगा। ऑप्टिमाइज़ेशन को फिर से बंद किया जा सकता है `ग्राफ़मैन स्टैटिस्टिक्स अकाउंट-लाइक --clear .
` क्वेरी नोड्स को यह नोटिस करने में 5 मिनट तक का समय लगता है कि ऑप्टिमाइज़ेशन चालू कर दिया गया है या बंद। अनुकूलन चालू करने के बाद, यह सत्यापित करना आवश्यक है कि परिवर्तन वास्तव में उस तालिका के लिए प्रश्नों को धीमा नहीं करता है। यदि आपने Postgres की निगरानी के लिए Grafana को कॉन्फ़िगर किया है, तो धीमी क्वेरी बड़ी संख्या में `pg_stat_activity`में दिखाई देगी, जिसमें कई सेकंड लगेंगे। उस स्थिति में, अनुकूलन को फिर से बंद करने की आवश्यकता होती है। -Uniswap-जैसे सबग्राफ के लिए, `जोड़ी` और `टोकन` टेबल इस अनुकूलन के लिए प्रमुख उम्मीदवार हैं, और डेटाबेस लोड पर नाटकीय प्रभाव डाल सकते हैं। +Uniswap- जैसे सबग्राफ के लिए `pair` और `token` टेबल इस ऑप्टिमाइज़ेशन के लिए प्रमुख उम्मीदवार हैं, और डेटाबेस लोड पर नाटकीय प्रभाव डाल सकते हैं। #### सबग्राफ हटाना -> यह नई कार्यक्षमता है, जो ग्राफ़ नोड 0.29.x में उपलब्ध होगी +> यह नई कार्यक्षमता है, जो ग्राफ नोड 0.29.x में उपलब्ध होगी -किसी बिंदु पर एक अनुक्रमणिका किसी दिए गए सबग्राफ को हटाना चाह सकती है। इसे `ग्राफमैन ड्रॉप` के माध्यम से आसानी से किया जा सकता है, जो एक परिनियोजन और उसके सभी अनुक्रमित डेटा को हटा देता है। परिनियोजन को या तो एक सबग्राफ नाम, एक IPFS हैश `Qm..`, या डेटाबेस नेमस्पेस `sgdNNN` के रूप में निर्दिष्ट किया जा सकता है। आगे के दस्तावेज़ [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) उपलब्ध हैं। +किसी बिंदु पर एक अनुक्रमणिका किसी दिए गए सबग्राफ को हटाना चाह सकती है। यह `ग्राफमैन ड्रॉप` के माध्यम से आसानी से किया जा सकता है, जो एक परिनियोजन और उसके सभी अनुक्रमित डेटा को हटा देता है। परिनियोजन को या तो सबग्राफ नाम, IPFS हैश `Qm..`, या डेटाबेस नाम स्थान `sgdNNN` के रूप में निर्दिष्ट किया जा सकता है। आगे के दस्तावेज़ [यहां](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) उपलब्ध हैं। diff --git a/website/pages/hi/publishing/publishing-a-subgraph.mdx b/website/pages/hi/publishing/publishing-a-subgraph.mdx index deef3378e8d8..bc936ae91146 100644 --- a/website/pages/hi/publishing/publishing-a-subgraph.mdx +++ b/website/pages/hi/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ title: विकेंद्रीकृत नेटवर्क के लि विकेन्द्रीकृत नेटवर्क पर एक सबग्राफ प्रकाशित करने से यह [क्यूरेटर्स](/network/curating) के लिए उपलब्ध हो जाता है ताकि वे उस पर क्यूरेट करना शुरू कर सकें, और [इंडेक्सर्स](/network/indexing) इसे अनुक्रमणित करना शुरू करने के लिए। -विकेंद्रीकृत नेटवर्क पर सबग्राफ प्रकाशित करने के तरीके के बारे में जानने के लिए [यह वीडियो](https://youtu.be/HfDgC2oNnwo?t=580) देखें। + आप समर्थित नेटवर्क की सूची [यहां](/Developing/supported-networks) प्राप्त कर सकते हैं। diff --git a/website/pages/hi/querying/graphql-api.mdx b/website/pages/hi/querying/graphql-api.mdx index f8954a39cdea..353a62ef15a7 100644 --- a/website/pages/hi/querying/graphql-api.mdx +++ b/website/pages/hi/querying/graphql-api.mdx @@ -34,9 +34,9 @@ title: ग्राफक्यूएल एपीआई } ``` -### छँटाई +### Sorting -किसी संग्रह की क्वेरी करते समय, `orderBy` पैरामीटर का उपयोग किसी विशिष्ट विशेषता के आधार पर क्रमित करने के लिए किया जा सकता है। इसके अतिरिक्त, `orderDirection` का उपयोग सॉर्ट दिशा, `asc` आरोही के लिए या `desc` अवरोही के लिए निर्दिष्ट करने के लिए किया जा सकता है। +When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. 
#### उदाहरण @@ -66,11 +66,11 @@ title: ग्राफक्यूएल एपीआई } ``` -> वर्तमान में, आप `@entity` और `@derivedFrom` फ़ील्ड पर एक-स्तर गहन `String` या `ID` प्रकार से क्रमबद्ध कर सकते हैं। दुःखद है कि [इंटरफेस पर क्रमबद्ध करना और एक स्तर-गहन एंटिटीज़ पर फ़ील्ड के द्वारा क्रमबद्ध करना जो एरे होते हैं और घनिष्ठ एंटिटीज़ की अनुमति नहीं है।](https://github.com/graphprotocol/graph-node/pull/4058) +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. ### पृष्ठ पर अंक लगाना -किसी संग्रह की क्वेरी करते समय, `पहले` पैरामीटर का उपयोग संग्रह की शुरुआत से पेजिनेट करने के लिए किया जा सकता है। यह ध्यान देने योग्य है कि डिफ़ॉल्ट सॉर्ट क्रम आईडी द्वारा आरोही अल्फ़ान्यूमेरिक क्रम में है, निर्माण समय के अनुसार नहीं। +When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. इसके अलावा, `छोड़ें` पैरामीटर का उपयोग इकाइयों को छोड़ने और पेजिनेट करने के लिए किया जा सकता है। उदा. `first:100` पहले 100 इकाइयां दिखाता है और `first:100, skip:100` अगली 100 इकाइयां दिखाता है। @@ -78,7 +78,7 @@ title: ग्राफक्यूएल एपीआई #### उदाहरण का उपयोग करना `पहले` -पहले 10 टोकन को क्वेरी करें।: +पहले 10 टोकन पूछें: ```graphql { @@ -117,7 +117,7 @@ query manyTokens($lastID: String) { } ``` -पहली बार, यह `lastID = ""` के साथ क्वेरी भेजेगा, और अगली अनुरोधों के लिए `lastID` को पिछले अनुरोध में पिछली एंटिटी के `id` एट्रिब्यूट पर सेट करेगा। इस तरीके से, बढ़ते `skip` मूल्यों का उपयोग करने से बेहतर प्रदर्शन होगा। +The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. ### छनन @@ -157,7 +157,7 @@ query manyTokens($lastID: String) { आप `_change_block(number_gte: Int)` द्वारा संस्थाओं को भी फ़िल्टर कर सकते हैं - यह उन संस्थाओं को फ़िल्टर करता है जिन्हें निर्दिष्ट ब्लॉक में या उसके बाद अपडेट किया गया था। -यदि आप केवल उन एंटिटीज़ को फ़ेच करने की तलाश में हैं जो बदल गए हैं, तो यह उपयोगी हो सकता है, उदाहरण के लिए, अंतिम बार आपने कब जांच की थी। या फिर यह जांचने या डीबग करने में उपयोगी हो सकता है कि आपके सबग्राफ़ में एंटिटीज़ कैसे बदल रहे हैं (यदि ब्लॉक फ़िल्टर के साथ मिलाकर उपयोग किया जाए, तो आप केवल विशिष्ट ब्लॉक में बदली हुई एंटिटीज़ को अलग कर सकते हैं)। +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). 
```graphql { @@ -237,7 +237,7 @@ query manyTokens($lastID: String) { } ``` -> **नोट**: क्वेरी तैयार करते समय, `या` ऑपरेटर का उपयोग करने का प्रभाव ध्यान में रखना महत्वपूर्ण है। जबकि `या` खोज परिणाम को बढ़ाने के लिए एक उपयोगी टूल हो सकता है, यह भी बहुत महंगा हो सकता है। `या` के मुख्य मुद्दों में से एक यह है कि यह क्वेरी को धीमी कर सकता है। इसका कारण है कि `या` डेटाबेस को कई इंडेक्स के माध्यम से स्कैन करने की आवश्यकता होती है, जो एक समय लेने वाली प्रक्रिया हो सकती है। इन मुद्दों से बचने के लिए, सलाह दी जाती है कि डेवलपर्स हमेशा संभव होते हुए और ऑपरेटर का उपयोग करें जबकि या। इससे अधिक ठोस फिल्टरिंग संभव होती है और तेज, अधिक सटीक क्वेरी के लिए ले जाता है। +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. #### सभी फ़िल्टर @@ -274,13 +274,13 @@ _not_ends_with_nocase _change_block(number_gte: Int) ``` -### समय-यात्रा क्वेरी +### Time-travel queries -आप अपनी संस्थाओं की स्थिति को न केवल नवीनतम ब्लॉक के लिए पूछ सकते हैं, जो कि डिफ़ॉल्ट है, बल्कि अतीत में मनमाने ब्लॉक के लिए भी है। जिस ब्लॉक पर एक क्वेरी होनी चाहिए, उसे या तो उसके ब्लॉक नंबर या उसके ब्लॉक हैश द्वारा `ब्लॉक` तर्क को क्वेरी के टॉपलेवल फील्ड में शामिल करके निर्दिष्ट किया जा सकता है। +You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. -इस प्रकार के क्वेरी का परिणाम समय के साथ नहीं बदलेगा, अर्थात एक निश्चित पिछले ब्लॉक पर क्वेरी करने पर वही परिणाम होगा जो उसे कभी भी निष्पादित किया जाएगा, इस अपवाद के साथ कि यदि आप चेन के सिरे के बहुत करीब के एक ब्लॉक पर क्वेरी करते हैं तो यदि वह ब्लॉक मुख्य चेन पर नहीं होता है और चेन को संगठित किया जाता है तो परिणाम बदल सकता है। एक ब्लॉक अंतिम माना जाने के बाद, क्वेरी का परिणाम बदलने से बचेगा। +The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. 
-ध्यान दें कि वर्तमान अमल में अभी भी कुछ सीमाओं का सामना करना पड़ सकता है जो इन गारंटियों का उल्लंघन कर सकती हैं। अंतिम रूप से, यह प्रयोजन के लिए विशेषज्ञों द्वारा निर्मित हुआ है और इसमें कुछ सीमाएं हो सकती हैं। इम्प्लीमेंटेशन कभी-कभी नहीं बता सकता कि क्या एक दिए गए ब्लॉक हैश मुख्य श्रृंखला पर नहीं है, या यह भी नहीं बता सकता है कि अंतिम रूप से मान्य नहीं माना जा सकने वाले ब्लॉक के लिए ब्लॉक हैश द्वारा क्वेरी का परिणाम कितना दुविधापूर्ण हो सकता है जो क्वेरी के साथ समयानुसार चल रही एक ब्लॉक रीआर्गेनाइज़ेशन द्वारा प्रभावित हो सकता है। वे अंतिम रूप से मुख्य श्रृंखला पर होने वाले ब्लॉक हैश द्वारा क्वेरी के परिणामों पर कोई प्रभाव नहीं डालते हैं। इस मुद्दे में [इस मुद्दे](https://github.com/graphprotocol/graph-node/issues/1405) में विस्तार से समझाया गया है।
+Note that the current implementation is still subject to certain limitations that might violate these guarantees. The implementation cannot always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that cannot be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail.

#### उदाहरण

@@ -316,7 +316,7 @@ _change_block(number_gte: Int)

### पूर्ण पाठ खोज प्रश्न

-पूर्ण पाठ खोज क्वेरी फ़ील्ड, एक व्यक्तिगत रूप से जोड़ा जा सकता है और सबग्राफ़ स्कीमा में अनुकूलित किया जा सकता है, एक व्यक्तिगत पाठ खोज API प्रदान करते हैं। अपने सबग्राफ़ में पूर्ण पाठ खोज जोड़ने के लिए [पूर्णपाठ खोज फ़ील्ड का परिभाषित करना](/developing/creating-a-subgraph#defining-fulltext-search-fields) देखें।
+Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph.

शब्दों की आपूर्ति के लिए पूर्ण पाठ्य अन्वेषण में एक आवश्यक क्षेत्र `पाठ` है। इस `पाठ` खोज क्षेत्र में उपयोग करने के लिए कई विशेष पूर्ण-पाठ पत्र उपलब्ध हैं।

@@ -357,7 +357,7 @@ _change_block(number_gte: Int)
}
```

-अधिक जटिल फिल्टर बनाने के लिए फुलटेक्स्ट ऑपरेटरों को मिलाएं। इस उदाहरण क्वेरी के अनुसरण के साथ एक बहाना खोज ऑपरेटर संयुक्त रूप से सभी ब्लॉग संस्थाओं को उन शब्दों से मिलाएगा जो "लू" से शुरू होते हैं और उसके बाद "संगीत"।
+Combine fulltext operators to make more complex filters. With a prefix search term combined with a follow-by operator, this example query will match all blog entities with words that start with "lou" followed by "music".

```graphql
{
@@ -376,15 +376,15 @@ _change_block(number_gte: Int)

## योजना

-आपके डेटा स्रोत की स्कीमा - यानी, इकाई प्रकार, मान और संबंध जो क्वेरी के लिए उपलब्ध हैं - को [ग्राफक्यूएल इंटरफ़ेस डेफिनिशन लैंग्वेज (आईडीएल)](https://facebook.github.io/graphql/draft/#sec-Type-System) के माध्यम से परिभाषित किया गया है।
+The schema of your data source--that is, the entity types, values, and relationships that are available to query--is defined through the [GraphQL Interface Definition Language (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System).
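As a minimal sketch of what such a definition looks like (the `Token` entity and its fields are hypothetical), an entity type is declared in the GraphQL IDL with the `@entity` directive and an `id` field:

```graphql
type Token @entity {
  id: ID!
  owner: Bytes!
  tokenURI: String
}
```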
ग्राफक्यूएल स्कीमा आम तौर पर `क्वेरी`, `सदस्यता` और `म्यूटेशन` के रूट प्रकारों को परिभाषित करते हैं। ग्राफ़ केवल `क्वेरी` का समर्थन करता है। आपके सबग्राफ के लिए रूट `क्वेरी` प्रकार स्वचालित रूप से आपके सबग्राफ मेनिफ़ेस्ट में शामिल ग्राफ़क्यूएल स्कीमा से उत्पन्न होता है। > **ध्यान दें:** हमारा एपीआई म्यूटेशन को उजागर नहीं करता है क्योंकि डेवलपर्स से उम्मीद की जाती है कि वे अपने एप्लिकेशन से अंतर्निहित ब्लॉकचेन के खिलाफ सीधे लेनदेन जारी करेंगे। -### इकाइयां +### Entities -स्कीमा में `@entity` निर्देशक के साथ सभी GraphQL प्रकार को सत्तायें माना जाएगा और इनमें एक `ID` फ़ील्ड होना आवश्यक होगा। +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. > **ध्यान दें:** वर्तमान में, आपके स्कीमा के सभी प्रकारों में एक `@entity` निर्देश होना चाहिए। भविष्य में, हम किसी `@entity` निर्देश के बिना प्रकारों को मान ऑब्जेक्ट के रूप में मानेंगे, लेकिन यह अभी तक समर्थित नहीं है। @@ -406,7 +406,7 @@ _change_block(number_gte: Int) } ``` -यदि कोई ब्लॉक प्रदान किया गया है, तो मेटाडेटा उस ब्लॉक का है, यदि नवीनतम अनुक्रमित ब्लॉक का उपयोग नहीं किया गया है। यदि प्रदान किया गया है, तो ब्लॉक सबग्राफ के प्रारंभ ब्लॉक के बाद होना चाहिए, और हाल ही में अनुक्रमित ब्लॉक से कम या उसके बराबर होना चाहिए। +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. `deployment` एक अद्वितीय आईडी है, जो `subgraph.yaml` फ़ाइल के IPFS CID के अनुरूप है। diff --git a/website/pages/hi/querying/querying-from-an-application.mdx b/website/pages/hi/querying/querying-from-an-application.mdx index e430a5cbf544..f31dc55bce68 100644 --- a/website/pages/hi/querying/querying-from-an-application.mdx +++ b/website/pages/hi/querying/querying-from-an-application.mdx @@ -33,11 +33,11 @@ https://gateway.thegraph.com/api//subgraphs/id/ - [स्वचालित पृष्ठांकन](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - पूरी तरह से टाइप किया गया परिणाम -`graph-client` भी पॉपुलर GraphQL क्लाइंट्स जैसे Apollo और URQL के साथ एकीकृत है और सभी वातानुकूलनों (React, Angular, Node.js, React Native) के साथ संगत है, इसका उपयोग The Graph के साथ इंटरैक्ट करने के लिए आपको सर्वोत्तम अनुभव प्रदान करेगा। +Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. -आइए देखें कि `graphql-client` वाले सबग्राफ से डेटा कैसे प्राप्त करें। +Let's look at how to fetch data from a subgraph with `graphql-client`. 
-आरंभ करने के लिए, अपने प्रोजेक्ट में ग्राफ़ क्लाइंट सीएलआई स्थापित करना सुनिश्चित करें: +To get started, make sure to install The Graph Client CLI in your project: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -अपनी क्वेरी को `.graphql` फ़ाइल में परिभाषित करें (या आपकी `.js` या `.ts` फ़ाइल में रेखांकित करें): +Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -फिर, एक कॉन्फ़िगरेशन फ़ाइल बनाएं (जिसे `.graphclientrc.yml` कहा जाता है) और ग्राफ़ द्वारा प्रदान किए गए अपने ग्राफक्यूएल एंडपॉइंट्स को इंगित करें, उदाहरण के लिए: +Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -निम्नलिखित ग्राफ़ क्लाइंट सीएलआई कमांड चलाने से टाइप किया हुआ और उपयोग के लिए तैयार जावास्क्रिप्ट कोड उत्पन्न होगा: +Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: ```sh graphclient build ``` -अंत में, जेनरेट किए गए टाइप किए गए GraphQL दस्तावेज़ों का उपयोग करने के लिए अपनी `.ts` फ़ाइल को अपडेट करें: +Finally, update your `.ts` file to use the generated typed GraphQL documents: ```tsx import React, { useEffect } from 'react' @@ -134,17 +134,17 @@ function App() { export default App ``` -**⚠️महत्वपूर्ण सूचना** +**⚠️ Important notice** -`graph-client` अन्य GraphQL क्लाइंट्स जैसे Apollo client, URQL, या React Query के साथ पूरी तरह से एकीकृत है; आपको [आधिकारिक डेपो में उदाहरण मिलेंगे](https://github.com/graphprotocol/graph-client/tree/main/examples)। +`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). -हालांकि, अगर आप किसी अन्य क्लाइंट के साथ जाना चुनते हैं, तो ध्यान दें कि **क्रॉस-श्रृंखला सबग्राफ हैंडलिंग या स्वत: यामी पृष्ठांकन का उपयोग करने का आपका योग्यता नहीं होगा, जो The Graph क्वेरी के लिए मूल विशेषताएँ हैं**। +However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. ### अपोलो ग्राहक -[अपोलो क्लाइंट](https://www.apollographql.com/docs/) फ्रंट-एंड इकोसिस्टम पर सर्वव्यापी ग्राफक्यूएल क्लाइंट है। +[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. -React, Angular, Vue, Ember, iOS और Android के लिए उपलब्ध, Apollo Client, हालांकि सबसे भारी क्लाइंट होता है, लेकिन GraphQL के ऊपर उन्नत UI बनाने के लिए कई सुविधाएँ प्रदान करता है: +Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: - advanced error handling - pagination @@ -152,9 +152,9 @@ React, Angular, Vue, Ember, iOS और Android के लिए उपलब् - optimistic UI - local state management -आइए देखते हैं कि कैसे एक वेब प्रोजेक्ट में Apollo client के साथ सबग्राफ से डेटा प्राप्त किया जाता है। +Let's look at how to fetch data from a subgraph with Apollo client in a web project. 
-सबसे पहले, `@apollo/client` और `graphql` इंस्टॉल करें: +First, install `@apollo/client` and `graphql`: ```sh npm install @apollo/client graphql @@ -193,7 +193,7 @@ client }) ``` -वेरिएबल का उपयोग करने के लिए, आप क्वेरी में `वेरिएबल` तर्क पास कर सकते हैं: +To use variables, you can pass in a `variables` argument to the query: ```javascript const tokensQuery = ` @@ -226,16 +226,16 @@ client ### URQL -एक और विकल्प है [URQL](https://formidable.com/open-source/urql/) जो Node.js, React/Preact, Vue, और Svelte परिवार में उपलब्ध है, और उसमें उन्नत सुविधाएँ हैं: +Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: - Flexible cache system - एक्स्टेंसिबल डिज़ाइन (इसके शीर्ष पर नई क्षमताओं को जोड़ना आसान) - लाइटवेट बंडल (अपोलो क्लाइंट की तुलना में ~ 5x हल्का) - फ़ाइल अपलोड और ऑफ़लाइन मोड के लिए समर्थन -आइए देखते हैं कि कैसे एक वेब प्रोजेक्ट में URQL के साथ सबग्राफ से डेटा प्राप्त किया जाता है। +Let's look at how to fetch data from a subgraph with URQL in a web project. -सबसे पहले, `urql` और `graphql` इंस्टॉल करें: +First, install `urql` and `graphql`: ```sh npm install urql graphql diff --git a/website/pages/hi/querying/querying-the-hosted-service.mdx b/website/pages/hi/querying/querying-the-hosted-service.mdx index e7a5fbbc4728..7732498539b8 100644 --- a/website/pages/hi/querying/querying-the-hosted-service.mdx +++ b/website/pages/hi/querying/querying-the-hosted-service.mdx @@ -2,13 +2,13 @@ title: होस्ट की गई सेवा को क्वेरी करना --- -उपग्राफ तैनात करने के साथ, उपग्राफ तैनात करने के साथ, [होस्ट की गई सेवा](https://thegraph.com/hosted-service/) पर जाएं [GraphiQL](https://github.com/graphql/graphiql) इंटरफ़ेस जहां आप प्रश्न जारी करके और स्कीमा देखकर सबग्राफ के लिए तैनात ग्राफक्यूएल एपीआई का पता लगा सकते हैं। +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. -एक उदाहरण नीचे दिया गया है, लेकिन सबग्राफ की इकाइयों को क्वेरी करने के तरीके के संपूर्ण संदर्भ के लिए कृपया [क्वेरी एपीआई](/querying/graphql-api) देखें। +एक उदाहरण नीचे दिया गया है, लेकिन कृपया [क्वेरी एपीआई](/querying/graphql-api) देखें, ताकि सबग्राफ की संस्थाओं से क्वेरी कैसे की जाए, इस पर एक पूर्ण संदर्भ प्राप्त हो सके। ## उदाहरण -यह क्वेरी हमारे मैपिंग द्वारा बनाए गए सभी काउंटरों को सूचीबद्ध करती है। चूँकि हम केवल एक बनाते हैं, परिणाम में केवल हमारा एक `डिफ़ॉल्ट-काउंटर` शामिल होगा: +यह क्वेरी हमारे मैपिंग द्वारा बनाए गए सभी काउंटरों को सूचीबद्ध करती है। चूंकि हम केवल एक बनाते हैं, परिणाम में केवल हमारा `डिफ़ॉल्ट-काउंटर` होगा: ```graphql { @@ -19,9 +19,9 @@ title: होस्ट की गई सेवा को क्वेरी क } ``` -## होस्ट की गई सेवा का उपयोग करना +## Using the hosted service -ग्राफ़ एक्सप्लोरर और इसका ग्राफ़कलाइन खेल का मैदान होस्ट की गई सेवा पर तैनात सबग्राफ का पता लगाने और क्वेरी करने का एक उपयोगी तरीका है। +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 
कुछ मुख्य विशेषताओं का विवरण नीचे दिया गया है:

diff --git a/website/pages/hi/querying/querying-with-python.mdx b/website/pages/hi/querying/querying-with-python.mdx
new file mode 100644
index 000000000000..ac7b135cf2b4
--- /dev/null
+++ b/website/pages/hi/querying/querying-with-python.mdx
@@ -0,0 +1,56 @@
+---
+title: Subgrounds
+---
+
+Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis!
+
+Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations.
+
+## शुरू करना
+
+Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/).
+
+```bash
+pip install --upgrade subgrounds
+# or
+python -m pip install --upgrade subgrounds
+```
+
+Once installed, you can test out Subgrounds with the following query. The example below grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame).
+
+```python
+from subgrounds import Subgrounds
+
+sg = Subgrounds()
+
+# Load the subgraph
+aave_v2 = sg.load_subgraph(
+    "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum")
+
+# Construct the query
+latest_markets = aave_v2.Query.markets(
+    orderBy=aave_v2.Market.totalValueLockedUSD,
+    orderDirection='desc',
+    first=5,
+)
+# Return query to a dataframe
+sg.query_df([
+    latest_markets.name,
+    latest_markets.totalValueLockedUSD,
+])
+```
+
+## Documentation
+
+Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds).
+
+Since subgrounds has a large feature set to explore, here are some helpful starting places:
+
+- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/)
+  - A good first step for how to build queries with subgrounds.
+- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/)
+  - A gentle introduction to defining synthetic fields that transform data defined from the schema.
+- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/)
+  - Learn how to level up your queries by parallelizing them.
+- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/)
+  - A quick article on how to seamlessly save your data as CSVs for further analysis.
diff --git a/website/pages/hi/quick-start.mdx b/website/pages/hi/quick-start.mdx
new file mode 100644
index 000000000000..d2d71f6767ee
--- /dev/null
+++ b/website/pages/hi/quick-start.mdx
@@ -0,0 +1,168 @@
+---
+title: जल्दी शुरू
+---
+
+This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service).
+
+Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks).
+ +यह मार्गदर्शिका यह मानते हुए लिखी गई है कि आपके पास: + +- आपकी पसंद के नेटवर्क पर एक स्मार्ट अनुबंध पता +- आपके सबग्राफ को क्यूरेट करने के लिए GRT +- एक क्रिप्टो वॉलेट + +## 1. सबग्राफ स्टूडियो पर एक सबग्राफ बनाएं + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +एक बार कनेक्ट होने के बाद, आप "एक सबग्राफ बनाएं" पर क्लिक करके शुरू कर सकते हैं। अपनी पसंद का नेटवर्क चुनें और जारी रखें पर क्लिक करें। + +## 2. ग्राफ़ सीएलआई स्थापित करें + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +अपनी स्थानीय मशीन पर, निम्न आदेशों में से कोई एक चलाएँ: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. अपना सबग्राफ इनिशियलाइज़ करें + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +जब आप अपना सबग्राफ इनिशियलाइज़ करते हैं, तो सीएलआई टूल आपसे निम्नलिखित जानकारी मांगेगा: + +- प्रोटोकॉल: वह प्रोटोकॉल चुनें जिससे आपका सबग्राफ डेटा को अनुक्रमित करेगा +- सबग्राफ स्लग: अपने सबग्राफ के लिए एक नाम बनाएं। आपका सबग्राफ स्लग आपके सबग्राफ के लिए एक पहचानकर्ता है। +- सबग्राफ बनाने के लिए निर्देशिका: अपनी स्थानीय निर्देशिका चुनें +- एथेरियम नेटवर्क (वैकल्पिक): आपको यह निर्दिष्ट करने की आवश्यकता हो सकती है कि आपका सबग्राफ किस ईवीएम-संगत नेटवर्क से डेटा को अनुक्रमित करेगा +- अनुबंध का पता: उस स्मार्ट अनुबंध के पते का पता लगाएं, जिससे आप डेटा की क्वेरी करना चाहते हैं +- ABI: यदि ABI ऑटोपॉप्युलेटेड नहीं है, तो आपको इसे JSON फ़ाइल के रूप में मैन्युअल रूप से इनपुट करना होगा +- स्टार्ट ब्लॉक: यह सुझाव दिया जाता है कि आप समय बचाने के लिए स्टार्ट ब्लॉक इनपुट करें जबकि आपका सबग्राफ ब्लॉकचैन डेटा को अनुक्रमित करता है। आप उस ब्लॉक को ढूंढकर स्टार्ट ब्लॉक का पता लगा सकते हैं जहां आपका अनुबंध तैनात किया गया था। +- अनुबंध का नाम: अपने अनुबंध का नाम इनपुट करें +- इकाइयों के रूप में अनुक्रमणिका अनुबंध ईवेंट: यह सुझाव दिया जाता है कि आप इसे सही पर सेट करें क्योंकि यह प्रत्येक उत्सर्जित ईवेंट के लिए स्वचालित रूप से आपके सबग्राफ में मैपिंग जोड़ देगा +- दूसरा अनुबंध जोड़ें (वैकल्पिक): आप एक और अनुबंध जोड़ सकते हैं + +निम्न आदेश चलाकर मौजूदा अनुबंध से अपना सबग्राफ प्रारंभ करें: + +```sh +graph init --studio +``` + +अपने सबग्राफ को इनिशियलाइज़ करते समय क्या अपेक्षा की जाए, इसके उदाहरण के लिए निम्न स्क्रीनशॉट देखें: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. अपना सबग्राफ लिखें + +पिछले आदेश एक मचान सबग्राफ बनाते हैं जिसका उपयोग आप अपने सबग्राफ के निर्माण के लिए शुरुआती बिंदु के रूप में कर सकते हैं। सबग्राफ में बदलाव करते समय, आप मुख्य रूप से तीन फाइलों के साथ काम करेंगे: + +- मेनिफेस्ट (subgraph.yaml) - मेनिफेस्ट परिभाषित करता है कि आपके सबग्राफ को कौन से डेटा सोर्स इंडेक्स करेंगे। +- स्कीमा (schema.graphql) - ग्राफक्यूएल स्कीमा परिभाषित करता है कि आप सबग्राफ से कौन सा डेटा प्राप्त करना चाहते हैं। +- असेंबलीस्क्रिप्ट मैपिंग (mapping.ts) - यह वह कोड है जो स्कीमा में परिभाषित इकाई के लिए आपके डेटा सोर्स से डेटा का अनुवाद करता है। + +अपना सबग्राफ कैसे लिखना है, इस बारे में अधिक जानकारी के लिए, [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. 
सबग्राफ स्टूडियो में तैनात करें + +एक बार आपका सबग्राफ लिखे जाने के बाद, निम्नलिखित कमांड चलाएँ: + +```sh +$ graph codegen +$ graph build +``` + +- अपने सबग्राफ को प्रमाणित और तैनात करें। तैनाती key सबग्राफ स्टूडियो में सबग्राफ पेज पर पाई जा सकती है। + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. अपने सबग्राफ का परीक्षण करें + +आप खेल के मैदान अनुभाग में एक नमूना क्वेरी बनाकर अपने सबग्राफ का परीक्षण कर सकते हैं। + +लॉग आपको बताएंगे कि क्या आपके सबग्राफ में कोई त्रुटि है। एक ऑपरेशनल सबग्राफ के लॉग इस तरह दिखेंगे: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. अपने सबग्राफ को ग्राफ़ के विकेंद्रीकृत नेटवर्क पर प्रकाशित करें + +एक बार जब आपका सबग्राफ सबग्राफ स्टूडियो में तैनात हो जाता है, तो आपने इसका परीक्षण कर लिया है, और इसे उत्पादन में लगाने के लिए तैयार हैं, फिर आप इसे विकेंद्रीकृत नेटवर्क पर प्रकाशित कर सकते हैं। + +सबग्राफ स्टूडियो में, अपने सबग्राफ पर क्लिक करें। सबग्राफ के पृष्ठ पर, आप शीर्ष दाईं ओर प्रकाशित बटन पर क्लिक कर सकेंगे। + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +इससे पहले कि आप अपने सबग्राफ को क्वेरी कर सकें, इंडेक्सर्स को इस पर क्वेरीज़ परोसना शुरू करना होगा। इस प्रक्रिया को सुव्यवस्थित करने के लिए, आप GRT का उपयोग करके अपने स्वयं के सबग्राफ को क्यूरेट कर सकते हैं। + +यह लिखते समय, यह अनुशंसा की जाती है कि आप 10,000 GRT के साथ अपने स्वयं के सबग्राफ को क्यूरेट करें ताकि यह सुनिश्चित हो सके कि यह अनुक्रमित है और जितनी जल्दी हो सके पूछताछ के लिए उपलब्ध है। + +गैस की लागत बचाने के लिए, जब आप ग्राफ़ के विकेंद्रीकृत नेटवर्क पर अपना सबग्राफ प्रकाशित करते हैं, तो आप अपने सबग्राफ को उसी लेन-देन में क्यूरेट कर सकते हैं, जिसे आपने इस बटन का चयन करके प्रकाशित किया था: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. अपना सबग्राफ पूछें + +अब, आप अपने सबग्राफ को अपने सबग्राफ के क्वेरी URL पर ग्राफ़क्यूएल क्वेरी भेजकर क्वेरी कर सकते हैं, जिसे आप क्वेरी बटन पर क्लिक करके पा सकते हैं। + +यदि आपके पास अपनी एपीआई कुंजी नहीं है, तो आप अपने डैप से मुफ्त, दर-सीमित अस्थायी क्वेरी URL के माध्यम से पूछ सकते हैं, जिसका उपयोग विकास और मंचन के लिए किया जा सकता है। + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). 
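As a small illustrative sketch (the `tokens` entity is hypothetical; substitute an entity from your own schema), a query like the one below can be pasted into the playground or sent as the body of a POST request to your subgraph's query URL:

```graphql
{
  tokens(first: 5) {
    id
  }
}
```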
diff --git a/website/pages/hi/substreams.mdx b/website/pages/hi/substreams.mdx
index 15c8547e4e95..b43741f8a66a 100644
--- a/website/pages/hi/substreams.mdx
+++ b/website/pages/hi/substreams.mdx
@@ -2,8 +2,43 @@
title: सबस्ट्रीम
---

-सबस्ट्रीम एक नई तकनीक है जिसे द ग्राफ प्रोटोकॉल कोर डेवलपर्स द्वारा विकसित किया गया है, जो अनुक्रमित ब्लॉकचैन डेटा की अत्यधिक तेजी से खपत और प्रसंस्करण को सक्षम करने के लिए बनाया गया है। सबस्ट्रीम वर्तमान में खुले बीटा में हैं, जो कई ब्लॉकचेन में परीक्षण और विकास के लिए उपलब्ध हैं।
+![Substreams Logo](/img/substreams-logo.png)

-अधिक जानने के लिए और सबस्ट्रीम बनाना शुरू करने के लिए [सबस्ट्रीम दस्तावेज़ीकरण](https://substreams.streamingfast.io/) पर जाएं।
+Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion.

-
+With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph).
+
+## How Substreams Works in 4 Steps
+
+1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash).
+
+```rust
+fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> {
+    let header = blk.header.as_ref().unwrap();
+
+    Ok(MyBlock {
+        number: blk.number,
+        hash: Hex::encode(&blk.hash),
+        parent_hash: Hex::encode(&header.parent_hash),
+    })
+}
+```
+
+2. **You wrap up your Rust program into a WASM module just by running a single CLI command.**
+
+3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied.
+
+4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example).
+
+## Substreams Documentation
+
+The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/).
+
+### शुरू करना
+
+- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli).
+- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart).
+
+### Expand Your Knowledge
+
+- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams.
diff --git a/website/pages/hi/sunrise.mdx b/website/pages/hi/sunrise.mdx
new file mode 100644
index 000000000000..20e06d584e4b
--- /dev/null
+++ b/website/pages/hi/sunrise.mdx
@@ -0,0 +1,113 @@
+---
+title: Sunrise of decentralized data FAQ
+---
+
+> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP).
+
+## What is the sunrise of decentralized data?
+ +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. 
This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. 
This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/hi/tokenomics.mdx b/website/pages/hi/tokenomics.mdx index 8a427a6f8e26..0e7711aa0ba0 100644 --- a/website/pages/hi/tokenomics.mdx +++ b/website/pages/hi/tokenomics.mdx @@ -11,7 +11,7 @@ description: ग्राफ़ नेटवर्क को शक्तिश ग्राफ़ एक विकेन्द्रीकृत प्रोटोकॉल है जो ब्लॉकचैन डेटा तक आसान पहुंच को सक्षम बनाता है। यह B2B2C मॉडल के समान है, सिवाय इसके कि यह प्रतिभागियों के विकेंद्रीकृत नेटवर्क द्वारा संचालित है। जीआरटी पुरस्कारों के बदले अंतिम उपयोगकर्ताओं को डेटा प्रदान करने के लिए नेटवर्क प्रतिभागी एक साथ काम करते हैं। जीआरटी कार्य उपयोगिता टोकन है जो डेटा प्रदाताओं और उपभोक्ताओं का समन्वय करता है। GRT नेटवर्क के भीतर डेटा प्रदाताओं और उपभोक्ताओं के समन्वय के लिए एक उपयोगिता के रूप में कार्य करता है और डेटा को प्रभावी ढंग से व्यवस्थित करने के लिए प्रोटोकॉल प्रतिभागियों को प्रोत्साहित करता है। -ग्राफ़ का उपयोग करके, उपयोगकर्ता आसानी से ब्लॉकचैन से डेटा का उपयोग कर सकते हैं, केवल उन्हें आवश्यक विशिष्ट जानकारी के लिए भुगतान कर सकते हैं। ग्राफ़ का उपयोग आज वेब3 पारिस्थितिकी तंत्र में कई लोकप्रिय अनुप्रयोगों द्वारा किया जाता है। +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. 
The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. ग्राफ़ ब्लॉकचेन डेटा को उसी तरह अनुक्रमित करता है जैसे Google वेब को अनुक्रमित करता है। वास्तव में, हो सकता है कि आप पहले से ही ग्राफ़ का उपयोग बिना इसे जाने कर रहे हों। यदि आपने एक डैप के सामने के छोर को देखा है जो एक सबग्राफ से अपना डेटा प्राप्त करता है, तो आपने एक सबग्राफ से डेटा की क्वेरी की है! @@ -33,7 +33,7 @@ description: ग्राफ़ नेटवर्क को शक्तिश ![टोकनोमिक्स आरेख](/img/updated-tokenomics-image.png) -## डिलीगेटर्स (निष्क्रिय रूप से जीआरटी अर्जित करें) +## Delegators (Passively earn GRT) नेटवर्क पर सबग्राफ में इंडेक्सर की हिस्सेदारी बढ़ाने वाले डेलिगेटर्स द्वारा इंडेक्सर्स को GRT प्रत्यायोजित किया जाता है। बदले में, डेलिगेटर्स इंडेक्सर से सभी क्वेरी फीस और इंडेक्सिंग रिवार्ड्स का एक प्रतिशत अर्जित करते हैं। प्रत्येक इंडेक्सर कट सेट करता है जिसे डेलीगेटर्स को स्वतंत्र रूप से पुरस्कृत किया जाएगा, जिससे डेलिगेटरों को आकर्षित करने के लिए इंडेक्सर्स के बीच प्रतिस्पर्धा पैदा होगी। अधिकांश इंडेक्सर्स सालाना 9-12% के बीच की पेशकश करते हैं। @@ -75,7 +75,7 @@ description: ग्राफ़ नेटवर्क को शक्तिश अनुक्रमणकर्ता दो तरह से GRT पुरस्कार अर्जित कर सकते हैं: -1. प्रश्न शुल्क: सबग्राफ डेटा प्रश्नों के लिए डेवलपर्स या उपयोगकर्ताओं द्वारा भुगतान किया गया जीआरटी। प्रश्न शुल्क एक रिबेट पूल में जमा किया जाता है और इंडेक्सर्स को वितरित किया जाता है। +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. इंडेक्सिंग रिवार्ड्स: 3% वार्षिक निर्गमन इंडेक्सर्स को उनके द्वारा इंडेक्स किए जाने वाले सबग्राफ की संख्या के आधार पर वितरित किया जाता है। ये पुरस्कार अनुक्रमणकों को उप-अनुच्छेदों को अनुक्रमणित करने के लिए प्रोत्साहित करते हैं, कभी-कभी क्वेरी शुल्क शुरू होने से पहले, यह सत्यापित करने के लिए कि उन्होंने डेटा को सटीक रूप से अनुक्रमित किया है, अनुक्रमणिका (POIs) के प्रमाण अर्जित करने और जमा करने के लिए। diff --git a/website/pages/it/about.mdx b/website/pages/it/about.mdx index c1f7c886900f..d7ae599345a1 100644 --- a/website/pages/it/about.mdx +++ b/website/pages/it/about.mdx @@ -1,5 +1,5 @@ --- -title: About The Graph +title: Informazioni su The Graph --- This page will explain what The Graph is and how you can get started. diff --git a/website/pages/it/arbitrum/arbitrum-faq.mdx b/website/pages/it/arbitrum/arbitrum-faq.mdx index 849d08c92b93..32629b6f1be4 100644 --- a/website/pages/it/arbitrum/arbitrum-faq.mdx +++ b/website/pages/it/arbitrum/arbitrum-faq.mdx @@ -2,77 +2,77 @@ title: Arbitrum FAQ --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Clicca [qui](#billing-on-arbitrum-faqs) se desideri saltare alle domande frequenti sulla fatturazione su Arbitrum. -## Why is The Graph implementing an L2 Solution? +## Perché The Graph sta implementando una soluzione L2? -By scaling The Graph on L2, network participants can expect: +Scalando su L2, i partecipanti alla rete The Graph possono aspettarsi: -- Upwards of 26x savings on gas fees +- Risparmi fino a 26 volte sulle commissioni di gas -- Faster transaction speed +- Velocità di transazione più veloce -- Security inherited from Ethereum +- Sicurezza ereditata da Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. 
For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +La scalabilità degli smart contract del protocollo su L2 consente ai partecipanti della rete di interagire più frequentemente a un costo ridotto delle commissioni di gas. Ad esempio, gli Indexer potrebbero aprire e chiudere allocazioni per indicizzare un maggior numero di subgraph con maggiore frequenza, gli sviluppatori potrebbero distribuire e aggiornare i subgraph con maggiore facilità, i Delegator potrebbero delegare GRT con maggiore frequenza e i Curator potrebbero aggiungere o rimuovere segnali a un maggior numero di subgraph - azioni che in passato erano considerate troppo costose da eseguire frequentemente a causa delle commissioni di gas. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +La comunità di The Graph ha deciso di procedere con Arbitrum l'anno scorso dopo l'esito della discussione [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). -## What do I need to do to use The Graph on L2? +## Cosa devo fare per utilizzare The Graph su L2? -Users bridge their GRT and ETH  using one of the following methods: +Gli utenti possono fare un bridge dei loro GRT ed ETH utilizzando uno dei seguenti metodi: -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [The Graph Bridge su Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +Per sfruttare l'utilizzo di The Graph su L2, utilizza il selettore a discesa per passare tra le chain. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Selettore a discesa per cambiare a Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## In quanto sviluppatore di subgraph, consumatore di dati, Indexer, Curator o Delegator, cosa devo fare ora? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Non è richiesta alcuna azione immediata, tuttavia si incoraggiano i partecipanti della rete a iniziare a passare ad Arbitrum per beneficiare dei vantaggi di L2. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +I team di sviluppatori principali stanno lavorando per creare strumenti di trasferimento a L2 che faciliteranno notevolmente il passaggio di deleghe, cure e subgraph su Arbitrum. Ci si aspetta che gli strumenti di trasferimento a L2 siano disponibili entro l'estate del 2023. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. 
As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +I principali team di sviluppo stanno lavorando per creare strumenti di trasferimento a L2 che faciliteranno notevolmente il passaggio dei GRT delegati, curati, e dei subgraph su Arbitrum. Ci si aspetta che gli strumenti di trasferimento a L2 siano disponibili entro l'estate del 2023. -## If I would like to participate in the network on L2, what should I do? +## Se desiderassi partecipare alla rete su L2, cosa devo fare? Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Ci sono rischi associati alla scalabilità della rete su L2? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Tutti gli smart contract sono stati attentamente sottoposti a [audit](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Tutto è stato testato accuratamente e un piano di contingenza è in atto per garantire una transizione sicura e senza intoppi. I dettagli possono essere trovati [qui](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## I subgraph esistenti su Ethereum continueranno a funzionare? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Sì, gli smart contract di The Graph Network opereranno parallelamente su entrambe le reti Ethereum e Arbitrum fino a quando non si sposteranno completamente su Arbitrum in una data successiva. -## Will GRT have a new smart contract deployed on Arbitrum? +## Verrà implementato un nuovo smart contract per i GRT su Arbitrum? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Sì, GRT avrà un nuovo [smart contract su Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). Tuttavia, il contratto principale [GRT](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) su Ethereum continuerà a essere operativo. -## Billing on Arbitrum FAQs +## FAQ sulla fatturazione su Arbitrum -## What do I need to do about the GRT in my billing balance? +## Cosa devo fare riguardo ai miei GRT nel mio saldo di fatturazione? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Nulla! I tuoi GRT sono stati migrati in modo sicuro su Arbitrum e vengono utilizzati per pagare le query in questo istante. -## How do I know my funds have migrated securely to Arbitrum? 
+## Come posso essere certo che i miei fondi siano stati migrati in modo sicuro su Arbitrum? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Tutti i saldi di fatturazione GRT sono stati già migrati con successo su Arbitrum. Puoi visualizzare il contratto di fatturazione su Arbitrum [qui](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure? +## Come posso essere sicuro che il bridge di Arbitrum sia sicuro? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Il bridge è stato rigorosamente sottoposto ad [audit](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) per garantire la sicurezza per tutti gli utenti. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Cosa devo fare se sto aggiungendo nuovi GRT dal mio wallet di Ethereum mainnet? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Aggiungere GRT al tuo saldo di pagamento su Arbitrum può essere fatto con un solo click in [Subgraph Studio](https://thegraph.com/studio/). Sarai in grado di trasferire facilmente i tuoi GRT su Arbitrum e compilare le tue chiavi API in una sola transazione. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Visita la [pagina di fatturazione](https://thegraph.com/docs/en/billing/) per istruzioni più dettagliate su come aggiungere, prelevare, o acquisire GRT. diff --git a/website/pages/it/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/it/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..1c4edce5199b 100644 --- a/website/pages/it/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/it/arbitrum/l2-transfer-tools-faq.mdx @@ -2,83 +2,199 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### Cosa sono gli Strumenti di Trasferimento L2? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. 
-If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. -## Subgraph Transfer +### Posso usare lo stesso wallet utilizzato su Ethereum mainnet? -## How do I transfer my subgraph? +Se stai usando un wallet [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), puoi utilizzare lo stesso address. Se il tuo wallet su Ethereum mainnet è un contratto (ad esempio un multisig), allora dovrai specificare un [wallet address Arbitrum](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) a cui verrà inviato il trasferimento. Controlla attentamente l'address poiché qualsiasi trasferimento a un indirizzo errato può comportare una perdita permanente. Se desideri utilizzare un multisig su L2, assicurati di deployare un contratto multisig su Arbitrum One. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Cosa succede se non completo il trasferimento in 7 giorni? -3. Confirm subgraph transfer on Arbitrum\* +Gli Strumenti di Trasferimento L2 utilizzano il meccanismo nativo di Arbitrum per inviare messaggi da L1 a L2. Questo meccanismo è chiamato "retryable ticket" e viene utilizzato da tutti i bridge di token nativi, incluso il bridge GRT di Arbitrum. Puoi leggere ulteriori dettagli sui retryable tickets nella [documentazione di Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +Quando trasferisci i tuoi asset (subgraph, stake, delegation o curation) su L2, un messaggio viene inviato tramite il bridge GRT di Arbitrum, che crea un "retryable ticket" su L2. Lo strumento di trasferimento include un valore in ETH nella transazione, che viene utilizzato per 1) pagare la creazione del ticket e 2) coprire il costo del gas per eseguire il ticket su L2. Tuttavia, poiché i prezzi del gas potrebbero variare nel tempo fino a quando il ticket non è pronto per l'esecuzione su L2, è possibile che questo tentativo di auto-esecuzione fallisca. Quando ciò accade, il bridge Arbitrum manterrà il "retryable ticket" attivo per un massimo di 7 giorni, e chiunque può riprovare a "riscattare" il ticket (il che richiede un wallet con un po' di ETH trasferiti su Arbitrum). -5. 
Update Query URL (recommended) +Questo è ciò che chiamiamo il passaggio "Conferma" in tutti gli strumenti di trasferimento: in molti casi verrà eseguito automaticamente, poiché l'auto-esecuzione ha spesso successo, ma è importante che tu verifichi che sia andato a buon fine. Se non è andato a buon fine e nessuna riprova ha successo entro 7 giorni, il bridge Arbitrum scarterà il "retryable ticket" e i tuoi asset (subgraph, stake, delegation o curation) andranno persi e non potranno essere recuperati. I core devs di The Graph hanno un sistema di monitoraggio per rilevare queste situazioni e cercare di riscattare i ticket prima che sia troppo tardi, ma alla fine è tua responsabilità assicurarti che il trasferimento venga completato in tempo. Se hai difficoltà a confermare la tua transazione, ti preghiamo di contattarci utilizzando [questo modulo](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) e i core devs saranno pronti ad aiutarti. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## Traserimento del Subgraph + +### Come faccio a trasferire un mio subgraph? + + + +Per fare un trasferimento del tuo subgraph, dovrai completare i seguenti passaggi: + +1. Inizializza il trasferimento su Ethereum mainnet + +2. Aspetta 20 minuti per la conferma + +3. Conferma il trasferimento del subgraph su Arbitrum\* + +4. Termina la pubblicazione del subgraph su Arbitrum + +5. Aggiorna l'URL della Query (raccomandato) \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Da dove devo inizializzare il mio trasferimento? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Puoi inizializzare il tuo trasferimento da [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) o dalla pagina di dettaglio di qualsiasi subgraph. Clicca sul bottone "Trasferisci Subgraph" sulla pagina di dettaglio del subgraph e inizia il trasferimento. 
-## How long do I need to wait until my subgraph is transferred +### Quanto devo aspettare per il completamento del trasferimento del mio subgraph -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +Il tempo di trasferimento richiede circa 20 minuti. Il bridge Arbitrum sta lavorando in background per completare automaticamente il trasferimento. In alcuni casi, i costi del gas potrebbero aumentare e dovrai confermare nuovamente la transazione. -## Will my subgraph still be discoverable after I transfer it to L2? +### Il mio subgraph sarà ancora rintracciabile dopo averlo trasferito su L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network. After the transfer, the L1 subgraph will appear as deprecated. +Il tuo subgraph sarà rintracciabile solo sulla rete su cui è stato pubblicato. Ad esempio, se il tuo subgraph è su Arbitrum One, potrai trovarlo solo su Explorer su Arbitrum One e non sarai in grado di trovarlo su Ethereum. Assicurati di avere selezionato Arbitrum One nel selettore di rete in alto nella pagina per essere sicuro di essere sulla rete corretta. Dopo il trasferimento, il subgraph su L1 apparirà come deprecato. -## Does my subgraph need to be published to transfer it? +### Il mio subgraph deve essere pubblicato per poterlo trasferire? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +Per usufruire dello strumento di trasferimento del subgraph, il tuo subgraph deve già essere pubblicato sulla mainnet di Ethereum e deve possedere alcuni segnali di curation di proprietà del wallet che possiede il subgraph. Se il tuo subgraph non è stato pubblicato, è consigliabile pubblicarlo direttamente su Arbitrum One: le commissioni di gas associate saranno considerevolmente più basse. Se desideri trasferire un subgraph pubblicato ma l'account proprietario non ha inserito alcun segnale di curation su di esso, puoi segnalare una piccola quantità (ad esempio 1 GRT) da quell'account; assicurati di selezionare il segnale "auto-migrante". -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Cosa succede alla versione del mio subgraph sulla mainnet di Ethereum dopo il trasferimento su Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +Dopo aver trasferito il tuo subgraph su Arbitrum, la versione sulla mainnet di Ethereum sarà deprecata. 
Ti consigliamo di aggiornare l'URL della query entro 48 ore. Tuttavia, è previsto un periodo di tolleranza che mantiene funzionante l'URL sulla mainnet in modo che il supporto per eventuali dApp di terze parti possa essere aggiornato. -## After I transfer, do I also need to re-publish on Arbitrum? +### Dopo il trasferimento, devo anche pubblicare di nuovo su Arbitrum? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Dopo la finestra di trasferimento di 20 minuti, dovrai confermare il trasferimento con una transazione nell'interfaccia utente per completare il trasferimento, ma lo strumento di trasferimento ti guiderà attraverso questo processo. Il tuo endpoint L1 continuerà a essere supportato durante la finestra di trasferimento e un periodo di grazia successivo. Ti incoraggiamo ad aggiornare il tuo endpoint quando ti risulta più comodo. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Is publishing and versioning the same on L2 as on Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Will my subgraph's curation move with my subgraph? If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my subgraph back to Ethereum mainnet after I transfer? Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Why do I need bridged ETH to complete my transfer? Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). 
However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Aspetta 20 minuti per la conferma +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. 
If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. 
+ +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curation Signal -## How do I transfer my curation? +### How do I transfer my curation? To transfer your curation, you will need to complete the following steps: @@ -86,41 +202,45 @@ To transfer your curation, you will need to complete the following steps: 2. Specify an L2 Curator address\* -3. Wait 20 minutes for confirmation +3. Aspetta 20 minuti per la conferma \*If necessary - i.e. you are using a contract address. -## How will I know if the subgraph I curated has moved to L2? +### How will I know if the subgraph I curated has moved to L2? When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. -## What if I do not wish to move my curation to L2? +### What if I do not wish to move my curation to L2? When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. -## How do I know my curation successfully transferred? +### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -## Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one subgraph at a time? There is no bulk transfer option at this time. ## Indexer Stake -## How do I transfer my stake to Arbitrum? +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + To transfer your stake, you will need to complete the following steps: 1. Initiate stake transfer on Ethereum mainnet -2. Wait 20 minutes for confirmation +2. Aspetta 20 minuti per la conferma 3. Confirm stake transfer on Arbitrum \*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Will all of my stake transfer? You can choose how much of your stake to transfer. 
If you choose to transfer all of your stake at once, you will need to close any open allocations first. @@ -128,101 +248,57 @@ If you plan on transferring parts of your stake over multiple transactions, you Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. -## How much time do I have to confirm my stake transfer to Arbitrum? +### How much time do I have to confirm my stake transfer to Arbitrum? \*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. -## What if I have open allocations? +### What if I have open allocations? If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. -## How long will it take to transfer my stake? +### How long will it take to transfer my stake? It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. -## Do I have to index on Arbitrum before I transfer my stake? +### Do I have to index on Arbitrum before I transfer my stake? You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. -## Can Delegators move their delegation before I move my indexing stake? +### Can Delegators move their delegation before I move my indexing stake? No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -## Delegation - -## How do I transfer my delegation? - -To transfer your delegation, you will need to complete the following steps: - -1. Initiate delegation transfer on Ethereum mainnet - -2. Wait 20 minutes for confirmation - -3. Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. 
This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? - -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. - -## What if I can't find the Indexer I'm delegating to on L2? - -The L2 transfer tool will automatically detect the Indexer you previously delegated to. - -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? - -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. - -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? - -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? - -It is anticipated that all network participation will move to Arbitrum One in the future. - -## How long does it take to complete the transfer of my delegation to L2? - -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). 
- -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? - -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -## Is there any delegation tax? - -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## Vesting Contract Transfer -## How do I transfer my vesting contract? +### How do I transfer my vesting contract? To transfer your vesting, you will need to complete the following steps: 1. Initiate the vesting transfer on Ethereum mainnet -2. Wait 20 minutes for confirmation +2. Aspetta 20 minuti per la conferma 3. Confirm vesting transfer on Arbitrum -## How do I transfer my vesting contract if I am only partially vested? +### How do I transfer my vesting contract if I am only partially vested? + + 1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) @@ -232,7 +308,9 @@ To transfer your vesting, you will need to complete the following steps: 4. Withdraw any remaining ETH from the transfer tool contract -## How do I transfer my vesting contract if I am fully vested? +### How do I transfer my vesting contract if I am fully vested? + + For those that are fully vested, the process is similar: @@ -244,7 +322,7 @@ For those that are fully vested, the process is similar: 4. Withdraw any remaining ETH from the transfer tool contract -## Can I transfer my vesting contract to Arbitrum? +### Can I transfer my vesting contract to Arbitrum? You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). @@ -256,27 +334,27 @@ Please note that you will not be able to release/withdraw GRT from the L2 vestin If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## I'm using my vesting contract to delegate on mainnet. 
Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. @@ -290,7 +368,7 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi 4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet -5. Wait 20 minutes for confirmation +5. Aspetta 20 minuti per la conferma 6. Confirm the balance transfer on L2 @@ -298,18 +376,36 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. 
+ +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/it/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/it/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/it/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/it/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. 
These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/it/billing.mdx b/website/pages/it/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/it/billing.mdx +++ b/website/pages/it/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). 
+ - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/it/chain-integration-overview.mdx b/website/pages/it/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/it/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. 
Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. 
For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/it/cookbook/arweave.mdx b/website/pages/it/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/it/cookbook/arweave.mdx +++ b/website/pages/it/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/it/cookbook/cosmos.mdx b/website/pages/it/cookbook/cosmos.mdx index ef21e4bc0855..d2c71b409493 100644 --- a/website/pages/it/cookbook/cosmos.mdx +++ b/website/pages/it/cookbook/cosmos.mdx @@ -198,7 +198,7 @@ $ graph build Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: -**Hosted Service** +**Servizio di Hosting** ```bash graph create account/subgraph-name --product hosted-service diff --git a/website/pages/it/cookbook/grafting.mdx b/website/pages/it/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/it/cookbook/grafting.mdx +++ b/website/pages/it/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. 
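For orientation before the tutorial steps below, grafting is declared with a top-level `graft` block (together with the `grafting` feature flag) in the new subgraph's manifest. The following is only a minimal excerpt; the deployment ID and block number are placeholders for illustration, not values taken from this repository.

```yaml
# subgraph.yaml (excerpt) - placeholder values for illustration only
features:
  - grafting
graft:
  base: Qm... # deployment ID of the existing ("base") subgraph to graft onto
  block: 5956000 # block up to which data from the base subgraph is reused
```

The new subgraph then indexes from that block onward with its own mappings, while history before it comes from the base deployment.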
+## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/it/cookbook/near.mdx b/website/pages/it/cookbook/near.mdx index 879e8e5c15aa..395c5f4dae12 100644 --- a/website/pages/it/cookbook/near.mdx +++ b/website/pages/it/cookbook/near.mdx @@ -193,7 +193,7 @@ $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # u The node configuration will depend on where the subgraph is being deployed. -### Hosted Service +### Servizio di Hosting ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/it/cookbook/upgrading-a-subgraph.mdx b/website/pages/it/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/it/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/it/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. 
- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/it/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/it/deploying/hosted-service.mdx b/website/pages/it/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/it/deploying/hosted-service.mdx +++ b/website/pages/it/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/it/deploying/subgraph-studio.mdx b/website/pages/it/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/it/deploying/subgraph-studio.mdx +++ b/website/pages/it/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. 
`DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/it/developing/creating-a-subgraph.mdx b/website/pages/it/developing/creating-a-subgraph.mdx index 1fc288833c35..ef17c8f98d49 100644 --- a/website/pages/it/developing/creating-a-subgraph.mdx +++ b/website/pages/it/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. 
| @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,11 +1019,11 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. -### Overview +### Panoramica Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. 
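To make this flow concrete, here is a minimal hedged sketch. It assumes a file data source template named `TokenMetadata` (the same name used in the fuller example later on this page), uses `SomeEvent` as a stand-in for any generated event type, and uses a purely illustrative CID; in practice the file handler typically lives in its own mapping file.

```typescript
import { Bytes } from '@graphprotocol/graph-ts'
import { TokenMetadata } from '../generated/templates'

// Chain-based handler: spawn a file data source for a given IPFS CID
export function handleSomeEvent(event: SomeEvent): void {
  let cid = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' // illustrative CID
  TokenMetadata.create(cid)
}

// Dedicated handler of the TokenMetadata file data source:
// Graph Node runs this once the file is found, passing the file contents
export function handleMetadata(content: Bytes): void {
  // parse `content` (e.g. with the json API) and save entities
}
```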
@@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. @@ -1169,7 +1227,7 @@ File data sources currently require ABIs, even though ABIs are not used ([issue] Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. -#### Examples +#### Esempi [Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) diff --git a/website/pages/it/developing/developer-faqs.mdx b/website/pages/it/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/it/developing/developer-faqs.mdx +++ b/website/pages/it/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... 
} Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/it/developing/graph-ts/api.mdx b/website/pages/it/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/it/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. 
+- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
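For illustration, a minimal hedged sketch of constructing and converting `Bytes` values, using only the construction and conversion methods listed below and in the `ByteArray` section above:

```typescript
import { Bytes } from '@graphprotocol/graph-ts'

// Build Bytes from a hex string (the 0x prefix is optional) and convert back
let data = Bytes.fromHexString('0x68656c6c6f') // the UTF-8 bytes of "hello"
let hex = data.toHex()     // '0x68656c6c6f'
let text = data.toString() // 'hello'
```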
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
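// Per the two update patterns above, persisting the changes is done with `.save()`
transfer.save()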
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
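Before moving on, a minimal hedged sketch showing how the block and transaction fields listed earlier in this section are typically read inside a handler (`SomeEvent` stands in for any generated event type):

```typescript
export function handleSomeEvent(event: SomeEvent): void {
  // A common pattern: derive a unique entity ID from the transaction hash and log index
  let id = event.transaction.hash.concatI32(event.logIndex.toI32())

  // Block and transaction data are available on every event
  let timestamp = event.block.timestamp // BigInt
  let sender = event.transaction.from   // Address
}
```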
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/it/developing/graph-ts/common-issues.mdx b/website/pages/it/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/it/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/it/developing/supported-networks.json b/website/pages/it/developing/supported-networks.json index 5e12392b8c7d..fa21cfc48534 100644 --- a/website/pages/it/developing/supported-networks.json +++ b/website/pages/it/developing/supported-networks.json @@ -1,5 +1,5 @@ { - "network": "Network", + "network": "La rete", "cliName": "CLI Name", "chainId": "Chain ID", "studioAndHostedService": "Studio and Hosted Service", diff --git a/website/pages/it/developing/supported-networks.mdx b/website/pages/it/developing/supported-networks.mdx index 3e0779b8a476..08c9df030c11 100644 --- a/website/pages/it/developing/supported-networks.mdx +++ b/website/pages/it/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Reti supportate --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/it/docsearch.json b/website/pages/it/docsearch.json index 8cfff967936d..e52341030c26 100644 --- a/website/pages/it/docsearch.json +++ b/website/pages/it/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Cerca", + "buttonAriaLabel": "Cerca" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", - "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "resetButtonTitle": "Cancella la query", + "resetButtonAriaLabel": "Cancella la query", + "cancelButtonText": "Annulla", + "cancelButtonAriaLabel": "Annulla" }, "startScreen": { - "recentSearchesTitle": "Recent", - "noRecentSearchesText": "No recent searches", - "saveRecentSearchButtonTitle": "Save this search", - "removeRecentSearchButtonTitle": "Remove this search from history", - "favoriteSearchesTitle": "Favorite", - "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + "recentSearchesTitle": "Recenti", + "noRecentSearchesText": "Nessuna ricerca recente", + "saveRecentSearchButtonTitle": "Salva questa ricerca", + "removeRecentSearchButtonTitle": "Rimuovi questa ricerca dalla cronologia", + "favoriteSearchesTitle": "Preferiti", + "removeFavoriteSearchButtonTitle": "Rimuovi questa ricerca dai preferiti" }, "errorScreen": { - "titleText": "Unable to fetch results", - "helpText": "You might want to check your network connection." + "titleText": "Impossibile recuperare i risultati", + "helpText": "Controlla la tua connessione di rete." 
}, "footer": { - "selectText": "to select", - "selectKeyAriaLabel": "Enter key", - "navigateText": "to navigate", - "navigateUpKeyAriaLabel": "Arrow up", - "navigateDownKeyAriaLabel": "Arrow down", - "closeText": "to close", - "closeKeyAriaLabel": "Escape key", - "searchByText": "Search by" + "selectText": "selezionare", + "selectKeyAriaLabel": "Tasto invio", + "navigateText": "navigare", + "navigateUpKeyAriaLabel": "Freccia su", + "navigateDownKeyAriaLabel": "Freccia giù", + "closeText": "chiudere", + "closeKeyAriaLabel": "Tasto Esci", + "searchByText": "Ricerca per" }, "noResultsScreen": { - "noResultsText": "No results for", - "suggestedQueryText": "Try searching for", - "reportMissingResultsText": "Believe this query should return results?", - "reportMissingResultsLinkText": "Let us know." + "noResultsText": "Nessun risultato per", + "suggestedQueryText": "Prova a cercare per", + "reportMissingResultsText": "Credi che questa query dovrebbe restituire risultati?", + "reportMissingResultsLinkText": "Facci sapere." } } } diff --git a/website/pages/it/firehose.mdx b/website/pages/it/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/it/firehose.mdx +++ b/website/pages/it/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/it/global.json b/website/pages/it/global.json index 6a3eb234bfce..07831b3b64a8 100644 --- a/website/pages/it/global.json +++ b/website/pages/it/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! 
This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "collapse": "Riduci", + "expand": "Espandi", + "previous": "Precedente", + "next": "Successivo", + "editPage": "Modifica pagina", + "pageSections": "Sezioni della pagina", + "linkToThisSection": "Collegamento a questa sezione", + "technicalLevelRequired": "Livello tecnico richiesto", + "notFoundTitle": "Oops! Questa pagina è andata persa nello spazio...", + "notFoundSubtitle": "Verifica se stai utilizzando l'indirizzo corretto o esplora il nostro sito web cliccando sul link qui sotto.", + "goHome": "Torna alla pagina iniziale", "video": "Video" } diff --git a/website/pages/it/glossary.mdx b/website/pages/it/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/it/glossary.mdx +++ b/website/pages/it/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. 
- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. 
+- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2).
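The slashing parameters in the glossary entries above (a 2.5% slash of an Indexer's self-stake, split 50/50 between the disputing Fisherman and burning) can be made concrete with a small, purely illustrative sketch; the self-stake figure below is just the 100,000 GRT minimum mentioned earlier, not a real case from the network.

```ts
// Illustrative only: mirrors the slashing split described in the glossary above.
const SLASH_RATE = 0.025; // 2.5% of the Indexer's self-stake
const FISHERMAN_SHARE = 0.5; // half of the slashed amount is awarded to the Fisherman

function slashingBreakdown(indexerSelfStakeGRT: number) {
  const slashedGRT = indexerSelfStakeGRT * SLASH_RATE;
  return {
    slashedGRT,
    toFishermanGRT: slashedGRT * FISHERMAN_SHARE, // bounty for the successful dispute
    burnedGRT: slashedGRT * (1 - FISHERMAN_SHARE), // removed from circulation
  };
}

// Example with the 100,000 GRT minimum self-stake:
console.log(slashingBreakdown(100_000));
// -> { slashedGRT: 2500, toFishermanGRT: 1250, burnedGRT: 1250 }
```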
diff --git a/website/pages/it/graphcast.mdx b/website/pages/it/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/it/graphcast.mdx +++ b/website/pages/it/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/it/index.json b/website/pages/it/index.json index 9e28e13d5001..02af4531bba5 100644 --- a/website/pages/it/index.json +++ b/website/pages/it/index.json @@ -1,10 +1,10 @@ { - "title": "Get Started", + "title": "Iniziare", "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Informazioni su The Graph", + "description": "Scopri di più su The Graph" }, "quickStart": { "title": "Quick Start", @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -32,26 +32,26 @@ "description": "Learn about The Graph’s network roles.", "roles": { "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "title": "Sviluppatore", + "description": "Crea un subgraph o utilizza subgraph esistenti in una dapp" }, "indexer": { "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "description": "Gestisci un nodo per indicizzare dati e fornire interrogazioni" }, "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "title": "Curatore", + "description": "Organizza i dati segnalando i subgraph" }, "delegator": { "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "description": "Proteggi la rete delegando GRT agli Indexer" } } }, - "readMore": "Read more", + "readMore": "Leggi di più", "products": { - "title": "Products", + "title": "Prodotti", "products": { "subgraphStudio": { "title": "Subgraph Studio", @@ -59,19 +59,18 @@ }, "graphExplorer": { "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" + "description": "Esplora subgraph e interagisci con il protocollo" }, "hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "Servizio di Hosting", + "description": "Create and explore 
subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Reti supportate", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/it/mips-faqs.mdx b/website/pages/it/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/it/mips-faqs.mdx +++ b/website/pages/it/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). diff --git a/website/pages/it/network/benefits.mdx b/website/pages/it/network/benefits.mdx index 839a0a7b9cf7..482b5fbbcd0d 100644 --- a/website/pages/it/network/benefits.mdx +++ b/website/pages/it/network/benefits.mdx @@ -1,96 +1,97 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph Network contro Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +La rete decentralizzata di The Graph è stata progettata e perfezionata per creare una solida esperienza di indicizzazione e query, e migliora ogni giorno grazie a migliaia di collaboratori in tutto il mondo. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +I vantaggi di questo protocollo decentralizzato non possono essere replicati eseguendo un `graph-node` a livello locale. The Graph Network è più affidabile, più efficiente e meno costosa. -Here is an analysis: +Ecco l'analisi: -## Why You Should Use The Graph Network +## Perché si dovrebbe usare The Graph Network -- 60-98% lower monthly cost -- $0 infrastructure setup costs -- Superior uptime -- Access to 438 Indexers (and counting) -- 24/7 technical support by global community +- Costo mensile ridotto del 60-98% +- $0 di costi di installazione dell'infrastruttura +- Tempo di attività superiore +- Access to hundreds of independent Indexers around the world +- Assistenza tecnica 24/7 da parte della comunità globale -## The Benefits Explained +## I vantaggi spiegati -### Lower & more Flexible Cost Structure +### Struttura dei costi più bassa e flessibile -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. +Nessun contratto. 
Nessuna spesa mensile. Si paga solo per le query utilizzate, con un costo medio per query di $0,0002. Le query sono quotate in dollari e pagate in GRT. -Query costs may vary; the quoted cost is the average at time of publication (December 2022). +I costi di query possono variare; il costo indicato è quello medio al momento della pubblicazione (dicembre 2022). -## Low Volume User (less than 30,000 queries per month) +## Utente di basso volume (meno di 30.000 query al mese) -| Cost Comparison | Self Hosted | Graph Network | +| Confronto costi | Self Hosted | The Graph Network | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | ~$15 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | -| Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | ~$15 | - -## Medium Volume User (3,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $0+ | ~$15 al mese | +| Tempo di progettazione | $400 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | 30,000 (autoscaling) | +| Costo per query | $0 | $0.0005 | +| Infrastruttura | Centralizzato | Decentralizzato | +| Ridondanza geografica | $750+ per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $750+ | ~$15 | + +## Utente di medio volume (3,000,000+ di query al mese) + +| Confronto dei costi | Self Hosted | The Graph Network | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $750 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 3,000,000+ | -| Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $750 | - -## High Volume User (30,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Costo mensile del server\* | $350 al mese | $0 | +| Costi di query | $500 al mese | $750 al mese | +| Tempo di progettazione | $800 al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità dell'infrastruttura | 3,000,000+ | +| Costo per query | $0 | $0.00025 | +| Infrastruttura | Centralizzato | Decentralizzato | +| Costi di ingegneria | $200 all'ora | Incluso | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $1,650+ | $750 | + +## Utente di alto volume (30,000,000+ di query al mese) + +| Confronto costi | Self Hosted | The Graph Network | | :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $4,500 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed
Indexers | -| Queries per month | Limited to infra capabilities | 30,000,000+ | -| Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $4,500 | +| Costo mensile del server\* | $1100 al mese, per nodo | $0 | +| Costi di query | $4000 | $4,500 al mese | +| Numero di nodi necessari | 10 | Non applicabile | +| Tempo di progettazione | $6.000 o più al mese | Nessuno, integrato nella rete con indicizzatori distribuiti a livello globale | +| Query al mese | Limitato alle capacità di infra | 30,000,000+ | +| Costo per query | $0 | $0.00015 | +| Infrastruttura | Centralizzato | Decentralizzato | +| Ridondanza geografica | $1.200 di costi totali per nodo aggiuntivo | Incluso | +| Tempo di attività | Variabile | 99.9%+ | +| Costo totale mensile | $11,000+ | $4,500 | -\*including costs for backup: $50-$100 per month +\*inclusi i costi per il backup: $50-$100 al mese -Engineering time based on $200 per hour assumption + Tempo di progettazione basato su un'ipotesi di $200 all'ora -using the max query budget function in the budget billing tab, while maintaining high quality of service + utilizzando la funzione di budget massimo della query nella scheda di fatturazione del budget, mantenendo un'alta +qualità del servizio -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. +I costi previsti sono solo per i subgraph di Ethereum Mainnet - i costi sono ancora più alti quando si ospita un `graph-node` su altre reti. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +La curation del segnale su un subgraph è opzionale, una tantum, a costo zero (ad esempio, $1.000 in segnale possono essere curati su un subgraph e successivamente ritirati, con un potenziale di guadagno nel processo). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Alcuni utenti potrebbero dover aggiornare il proprio subgraph a una nuova versione. A causa delle tariffe del gas Ethereum, un aggiornamento costa circa $50 dollari al momento della scrittura. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +Si noti che le tariffe del gas su [Arbitrum](/arbitrum/arbitrum-faq) sono sostanzialmente inferiori a quelle di Ethereum mainnet. -## No Setup Costs & Greater Operational Efficiency +## Nessun costo di installazione e maggiore efficienza operativa -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. +Zero costi di configurazione. Iniziate subito senza costi di configurazione o spese generali. Nessun requisito hardware. Nessuna interruzione a causa dell'infrastruttura centralizzata, e più tempo per concentrarsi sul prodotto principale. Non sono necessari i server di backup, risoluzione dei problemi o risorse ingegneristiche costose. 
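As a cross-check on the cost tables above, the monthly query-cost figures follow directly from the quoted cost per query multiplied by monthly volume. The sketch below simply reproduces that arithmetic; actual invoices depend on the prices Indexers charge at query time.

```ts
// Rough sketch: reproduces the per-tier query-cost rows from the tables above.
function monthlyQueryCostUSD(queriesPerMonth: number, costPerQueryUSD: number): number {
  return queriesPerMonth * costPerQueryUSD;
}

console.log(monthlyQueryCostUSD(30_000, 0.0005)); // ~$15 per month (low volume)
console.log(monthlyQueryCostUSD(3_000_000, 0.00025)); // ~$750 per month (medium volume)
console.log(monthlyQueryCostUSD(30_000_000, 0.00015)); // ~$4,500 per month (high volume)
```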
-## Reliability & Resiliency +## Affidabilità e resilienza -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. +In conclusione: The Graph Network è meno costosa, più facile da usare e produce risultati superiori rispetto alla gestione locale di `graph-node`. -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). +Iniziate a usare The Graph Network oggi e scoprite come [aggiornare il vostro subgraph alla rete decentralizzata di The Graph](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/it/network/curating.mdx b/website/pages/it/network/curating.mdx index 797d9b9dd896..4711c0475bfb 100644 --- a/website/pages/it/network/curating.mdx +++ b/website/pages/it/network/curating.mdx @@ -2,95 +2,95 @@ title: Curating --- -Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. +I Curator sono fondamentali per l'economia decentralizzata di The Graph. Usano la loro conoscenza dell'ecosistema web3 per valutare e segnalare i subgraph che dovrebbero essere indicizzati da The Graph Network. Attraverso l'Explorer, i curatori possono visualizzare i dati della rete per prendere decisioni di segnalazione. La rete di The Graph ricompensa i curatori che segnalano sui subgraph di buona qualità con una quota delle tariffe di query generate dai subgraph. I Curator sono economicamente incentivati a segnalare tempestivamente. Le indicazioni dei Curator sono importanti per gli Indexer, che possono elaborare o indicizzare i dati di questi subgraph segnalati. -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +Quando segnalano, i curator possono decidere di segnalare su una versione specifica del subgraph o di segnalare usando l'auto-migrazione. Quando si segnala usando l'auto-migrazione, la quota di un Curator sarà sempre migrata all'ultima versione pubblicata dallo sviluppatore. Se invece si decide di segnalare su una versione specifica, la quota rimarrà sempre su questa versione specifica. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust.
Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Ricordate che la curation è rischiosa. Si prega di fare attenzione ad assicurarsi di curare i subgraph di cui ci si fida. La creazione di un subgraph è priva di autorizzazioni, quindi le persone possono creare subgraph e chiamarli con il nome che preferiscono. Per maggiori informazioni sui rischi della curation, consultare [La Guida alla curation di The Graph Academy.](https://thegraph.academy/curators/) ## Bonding Curve 101 -First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. +Per prima cosa, facciamo un passo indietro. Ogni subgraph ha una curva di legame sulla quale vengono coniate le quote di curation quando un utente aggiunge un segnale **alla** curva. La curva di legame di ogni subgraph è unica. Le curve di legame sono strutturate in modo che il prezzo per coniare una quota di curation su un subgraph aumenti linearmente rispetto al numero di quote coniate. -![Price per shares](/img/price-per-share.png) +![Prezzo per quote di partecipazione](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +Di conseguenza, il prezzo aumenta linearmente, il che significa che l'acquisto di una quota diventerà più costoso nel tempo. Ecco un esempio di ciò che intendiamo, vedi la curva di legame qui sotto: -![Bonding curve](/img/bonding-curve.png) +![Curva di legame](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Si consideri che abbiamo due curator che coniano quote di partecipazione per un subgraph: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. -- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. +- Il curator A è il primo a segnalare il subgraph. Aggiungendo 120,000 GRT alla curva, riesce a coniare 2000 quote di partecipazione. +- Il segnale del Curator B si trova sul subgraph in un momento successivo. Per ricevere la stessa quantità di quote di partecipazione del Curator A, dovrebbe aggiungere 360.000 GRT nella curva.
+- Dal momento che entrambi i curator detengono la metà del totale delle quote di curation, riceveranno una quantità uguale di royalties di curation. +- Se uno qualsiasi dei curator bruciasse ora le sue 2000 quote di curation, riceverebbe 360.000 GRT. +- Il curator rimanente riceverebbe ora tutte le royalties di curation per quel subgraph. Se dovessero bruciare le loro quote per ritirare GRT, riceverebbero 120.000 GRT. +- **TLDR:** La valutazione del GRT delle quote di curation è determinata dalla curva di legame e può essere volatile. È possibile subire grosse perdite. Segnalare in anticipo significa investire meno GRT per ogni quota di partecipazione. Per estensione, ciò significa che si guadagnano più royalties di curation per GRT rispetto ai curator successivi per lo stesso subgraph. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +In generale, una curva di legame è una curva matematica che definisce la relazione tra l'offerta di token e il prezzo dell'asset. Nel caso specifico della subgraph curation, **il prezzo di ogni quota di partecipazione del subgraph aumenta con ogni token investito** e il **prezzo di ogni quota di partecipazione diminuisce con ogni token venduto.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +Nel caso di The Graph, [l'implementazione da parte di Bancor della formula della curva di legame](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) viene sfruttata. -## How to Signal +## Come segnalare -Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) +Dopo aver trattato le nozioni fondamentali sul funzionamento della curva di legame, ecco come procedere per segnalare su un subgraph. All'interno della scheda di Curator su the Graph Explorer, i curator saranno in grado di segnale e non segnale su certi subgraph in base alle statistiche di rete. Per una panoramica passo-dopo-passo su come effettuare questa operazione su Explorer, [cliccare qui.](/network/explorer) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Un curator può scegliere di segnalare su una versione specifica del subgraph, oppure può scegliere di far migrare automaticamente il proprio segnale alla versione di produzione più recente di quel subgraph. Entrambe le strategie sono valide e hanno i loro pro e contro. -Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. 
+La segnalazione di una versione specifica è particolarmente utile quando un subgraph è utilizzato da più dApp. Una dApp potrebbe aver bisogno di aggiornare regolarmente il subgraph con nuove funzionalità. Un'altra dApp potrebbe preferire l'uso di una versione più vecchia e ben testata del subgraph. Al momento della curation iniziale, viene applicata una tassa standard del 1%. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +La migrazione automatica del segnale alla più recente versione di produzione può essere utile per garantire l'accumulo di tariffe di query. Ogni volta che si effettua una curation, si paga una tassa di curation del 1%. Si pagherà anche una tassa di curation del 0,5% per ogni migrazione. Gli sviluppatori di subgraph sono scoraggiati dal pubblicare frequentemente nuove versioni: devono pagare una tassa di curation del 0,5% su tutte le quote di curation auto-migrate. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Nota**: Il primo indirizzo che segnala un particolare subgraph è considerato il primo curator e dovrà svolgere un lavoro molto più intenso di gas rispetto agli altri curator successivi, perché il primo curator inizializza i token della quota di curation, inizializza la bonding curve e trasferisce anche i token nel Graph proxy. -## What does Signaling mean for The Graph Network? +## Cosa significa segnalazione per The Graph Network? -For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +Affinché i consumatori finali possano interrogare un subgraph, questo deve prima essere indicizzato. L'indicizzazione è un processo in cui i file, i dati e i metadati vengono esaminati, catalogati e quindi indicizzati, in modo che i risultati possano essere trovati più rapidamente. Perché i dati di un subgraph siano ricercabili, devono essere organizzati. -And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. +Quindi, se gli indicizzatori dovessero indovinare quali subgraph indicizzare, ci sarebbe una bassa possibilità di guadagnare robuste tariffe per query, perché non avrebbero modo di convalidare quali subgraph sono di buona qualità. Entra in gioco la curation. -Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph.
Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! +I curator rendono efficiente The Graph Network e la segnalazione è il processo che i curator utilizzano per far sapere agli Indexer che un subgraph è buono da indicizzare, dove GRT viene aggiunto a una curva di legame per un subgraph. Gli Indexer possono intrinsecamente fidarsi del segnale di un curator perché al momento della segnalazione i curator coniano una quota di curation per il subgraph, che dà loro diritto a una parte delle future tariffe per query che il subgraph genera. Il segnale del curator è rappresentato come token ERC20 chiamati Graph Curation Shares (GCS). I curator che vogliono guadagnare più commissioni per query devono segnalare il loro GRT ai subgraph che prevedono genereranno un forte flusso di commissioni per la rete. I curator non possono essere tagliati fuori per il loro cattivo comportamento, ma c'è una tassa di deposito sui curator per disincentivare le decisioni sbagliate che potrebbero danneggiare l'integrità della rete. I curator guadagnano anche meno commissioni per query se scelgono di curare un subgraph di bassa qualità, poiché ci saranno meno query da elaborare o meno Indexer che le elaborano. Vedere il diagramma seguente! -![Signaling diagram](/img/curator-signaling.png) +![Diagramma di segnalazione](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +Gli Indexer possono trovare i subgraph da indicizzare in base ai segnali di curation che vedono in The Graph Explorer (screenshot di seguito). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Subgraph Explorer](/img/explorer-subgraphs.png) -## Risks +## Rischi -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. -3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. 
- - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. Il mercato delle query è intrinsecamente giovane per The Graph e c'è il rischio che la vostra %APY possa essere inferiore a quella prevista a causa delle dinamiche di mercato nascenti. +2. Costo di curation - quando un Curator segnala il GRT su un subgraph, incorre in una tassa di curation di 1%. Questa tassa viene bruciata e il resto viene depositato nella riserva della curva di legame. +3. Quando i curator bruciano le loro quote di partecipazione per ritirare il GRT, la valutazione del GRT delle quote rimanenti sarà ridotta. Si tenga presente che in alcuni casi i curator possono decidere di bruciare le loro quote di partecipazione **tutte in una volta**. Questa situazione può essere comune se lo sviluppatore di una dApp smette di modificare/migliorare e fare il query di proprio subgraph o se un subgraph fallisce. Di conseguenza, i curator rimanenti potrebbero essere in grado di ritirare solo una frazione del loro GRT iniziale. Per un ruolo di rete con un profilo di rischio inferiore, vedere [Delegator](/network/delegating). +4. Un subgraph può fallire a causa di un bug. Un subgraph fallito non matura commissioni della query. Di conseguenza, si dovrà attendere che lo sviluppatore risolva il bug e distribuisca una nuova versione. + - Se siete iscritti alla versione più recente di un subgraph, le vostre quote di partecipazione migreranno automaticamente a quella nuova versione. Questo comporta una tassa di curation di 0,5%. + - Se la segnalazione su una versione specifica del subgraph non va a buon fine, è necessario bruciare manualmente le quote di curation. Si noti che si possono ricevere più o meno GRT di quelli inizialmente depositati nella curva di curation, un rischio associato all'essere un curator. È quindi possibile segnalare su la nuova versione del subgraph, incorrendo così in una tassa di curation di 1%. -## Curation FAQs +## FAQ sulla curation -### 1. What % of query fees do Curators earn? +### 1. Quale % delle tariffe di query guadagnano i curator? -By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Segnalando un subgraph, si guadagnerà una quota di tutte le tariffe di query generate da questo subgraph. Il 10% di tutte le tariffe di query va ai curator in proporzione alle loro quote di curation. Questo 10% è soggetto a governance. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Come si fa a decidere quali subgraph sono di alta qualità da segnalare? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. 
It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Trovare subgraph di alta qualità è un compito complesso, ma può essere affrontato in molti modi diversi. Come Curator, si desidera cercare subgraph affidabili che generano un volume di query. Un subgraph affidabile può essere utile se è completo, accurato e supporta le esigenze di dati di una dApp. Un subgraph mal progettato potrebbe dover essere rivisto o ripubblicato e potrebbe anche finire per fallire. È fondamentale che i Curator rivedano l'architettura o il codice di un subgraph per valutarne il valore. Di conseguenza: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- I curator possono utilizzare la loro comprensione di una rete per cercare di prevedere come un singolo subgraph possa generare un volume di query più o meno elevato in futuro +- I Curator dovrebbero anche comprendere le metriche disponibili attraverso The Graph Explorer. Metriche come il volume delle query passate e l'identità dello sviluppatore del subgraph possono aiutare a determinare se vale la pena segnalare un subgraph. -### 3. What’s the cost of updating a subgraph? +### 3. Qual è il costo dell'aggiornamento di un subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +La migrazione delle quote di curation a una nuova versione del subgraph comporta una tassa di curation di 1%. I curator possono scegliere di abbonarsi alla versione più recente di un subgraph. Quando le quote di curation vengono auto-migrate a una nuova versione, i curator pagheranno anche metà della tassa di curation, cioè 0,5%, perché l'aggiornamento dei subgraph è un'azione sulla blockchain che costa gas. -### 4. How often can I update my subgraph? +### 4. Con quale frequenza posso aggiornare il mio subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Si suggerisce di non aggiornare i subgraph troppo frequentemente. Si veda la domanda precedente per maggiori dettagli. -### 5. Can I sell my curation shares? +### 5. Posso vendere le mie quote di curation? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. +Le quote di curation non possono essere "comprate" o "vendute" come gli altri token ERC20 che conoscete. Possono solo essere coniate (create) o bruciate (distrutte) lungo la bonding curve per un subgraph particolare. 
La quantità di GRT necessaria per coniare un nuovo segnale e la quantità di GRT che si riceve quando si brucia il segnale esistente sono determinate da quella bonding curve. Come Curator, dovete sapere che quando bruciate le vostre quote di curation per prelevare GRT, potete ritrovarvi con più o meno GRT di quelli depositati inizialmente. -Still confused? Check out our Curation video guide below: +Ancora confusi? Date un'occhiata alla nostra video-guida sulla Curation: diff --git a/website/pages/it/network/delegating.mdx b/website/pages/it/network/delegating.mdx index 4a6d6e00b73e..344420fccebd 100644 --- a/website/pages/it/network/delegating.mdx +++ b/website/pages/it/network/delegating.mdx @@ -1,98 +1,98 @@ --- -title: Delegating +title: Delegazione --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +I delegator sono partecipanti alla rete che delegano (cioè "fanno stake") i GRT a uno o più Indexer. I delegator contribuiscono alla sicurezza della rete senza gestire direttamente un Graph Node. -By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +Delegando a un Indexer, i delegator guadagnano una parte delle tariffe di query e dei premi dell'Indexer. La quantità di query che un Indexer può elaborare dipende dalla quota di partecipazione dell'Indexer stesso (e di quella delegata) e dal prezzo che l'Indexer applica per ogni query; pertanto, maggiore è la quota di partecipazione assegnata a un Indexer, maggiore è il numero di query potenziali che può elaborare. -## Delegator Guide +## Guida per i delegator -This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: +Questa guida spiega come essere un delegator efficace su The Graph Network. I delegator condividono i guadagni del protocollo con tutti gli Indexer in base alla loro quota di partecipazione delegata. Un delegator deve usare il proprio giudizio per scegliere gli Indexer in base a diversi fattori. Si prega di notare che la presente guida non illustra passi come la corretta impostazione di Metamask, in quanto queste informazioni sono ampiamente disponibili su Internet. Questa guida comprende tre sezioni: -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator -- A video guide showing the steps to delegate in the Graph Network UI +- I rischi della delegazione dei token in The Graph Network +- Come calcolare i rendimenti previsti come Delegator +- Una guida video che mostra i passi per delegare nel UI di The Graph Network -## Delegation Risks +## Rischi di delegazione -Listed below are the main risks of being a Delegator in the protocol. +Di seguito sono elencati i principali rischi dell'essere un Delegator nel protocollo.
-### The delegation tax +### La tassa di delegazione -Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. +I delegator non possono essere penalizzati per un comportamento scorretto, ma c'è una tassa sui delegator per disincentivare un processo decisionale insufficiente che potrebbero danneggiare l'integrità della rete. -It is important to understand that every time you delegate, you will be charged 0.5%. This means if you are delegating 1000 GRT, you will automatically burn 5 GRT. +È importante capire che ogni volta che delegate, vi verrà addebitato il 0,5%. Ciò significa che se delegate 1000 GRT, brucerete automaticamente 5 GRT. -This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation. +Ciò significa che, per sicurezza, un Delegator dovrebbe calcolare quale sarà il suo rendimento delegando a un Indexer. Ad esempio, un Delegator potrebbe calcolare quanti giorni ci vorranno prima di aver recuperato il 0,5% di tasse sulla sua delega. -### The delegation unbonding period +### Il periodo di sblocco di delegazione -Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. This means they cannot transfer their tokens, or earn any rewards for 28 days. +Ogni volta che un Delegator vuole riottenere i suoi token, questi sono soggetti a un periodo di sblocco di 28 giorni. Ciò significa che non può trasferire i token o guadagnare ricompense per 28 giorni. -One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT. +Una cosa da considerare è anche la scelta oculata di un Indexer. Se scegliete un Indexer che non è affidabile o che non sta facendo un buon lavoro, vorrete annullare la delega, il che significa che perderete molte opportunità di guadagnare ricompense, il che può essere altrettanto negativo che bruciare GRT.
- ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Sblocco di Delegator](/img/Delegation-Unbonding.png) _Nota la commissione del 0,5% nel UI della delegazione, così + come il periodo di sblocco di 28 giorni._
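The 0.5% delegation tax and 28-day unbonding period described above lend themselves to the kind of back-of-the-envelope check suggested earlier (how many days until rewards cover the tax). The annual return assumed below is purely hypothetical and only for illustration.

```ts
// Hypothetical sketch: days until delegation rewards cover the 0.5% delegation tax.
const DELEGATION_TAX = 0.005; // 0.5%, burned on every delegation

function daysToRecoupTax(delegatedGRT: number, assumedAnnualReturn: number): number {
  const taxBurnedGRT = delegatedGRT * DELEGATION_TAX; // e.g. 1,000 GRT -> 5 GRT burned
  const rewardsPerDayGRT = (delegatedGRT * assumedAnnualReturn) / 365;
  return taxBurnedGRT / rewardsPerDayGRT;
}

console.log(daysToRecoupTax(1_000, 0.1)); // ≈ 18 days at an assumed (not real) 10% annual return
```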
-### Choosing a trustworthy Indexer with a fair reward payout for Delegators +### Scegliere un Indexer affidabile con una giusta ricompensa per i Delegator -This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. +Questa è una parte importante da capire. Per prima cosa discutiamo di tre valori molto importanti, che sono i parametri di delegazione. -Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards. +Taglio della ricompensa di indicizzazione - Il taglio della ricompensa di indicizzazione è la parte delle ricompense che l'Indexer terrà per sé. Ciò significa che se è impostato al 100%, come delegator riceverete 0 ricompense di indicizzazione. Se nel UI vedete 80%, significa che come Delegator riceverete il 20%. Una nota importante: all'inizio della rete, le ricompense di indicizzazione rappresentano la maggior parte delle ricompense.
- ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Taglio delle ricompense dell'indicizzazione](/img/Indexing-Reward-Cut.png) *Il top Indexer sta dando ai Delegator il + 90% delle ricompense. Il centrale dà ai Delegator il 20%. Quello in basso dà ai Delegator ~83%.*
-- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Taglio delle tariffe di query - Funziona esattamente come il Taglio della ricompensa di indicizzazione. Tuttavia, riguarda specificamente i rendimenti delle tariffe di query raccolte dal Indexer. Va notato che all'inizio della rete, i rendimenti delle tariffe di query saranno molto ridotti rispetto alla ricompensa di indicizzazione. Si raccomanda di prestare attenzione alla rete per determinare quando le tariffe di query nella rete inizieranno a essere più significative. -As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. +Come si può notare, la scelta dell'Indexer giusto richiede una grande attenzione. Per questo motivo vi consigliamo vivamente di esplorare The Graph Discord per determinare quali sono gli Indexer con la migliore reputazione sociale e tecnica, in grado di premiare i delegator in modo coerente. Molti degli Indexer sono attivi su Discord e saranno felici di rispondere alle vostre domande. Molti di loro fanno indicizzazione da mesi nella testnet e fanno del loro meglio per aiutare i delegator a ottenere un buon ritorno, in quanto ciò migliora la salute e il successo della rete. -### Calculating Delegators expected return +### Calcolo del rendimento previsto dei Delegator -A Delegator has to consider a lot of factors when determining the return. These include: +Il Delegator deve considerare molti fattori per determinare il rendimento. Questi includono: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- Un Delegator tecnico può anche esaminare la capacità dell' Indexer di utilizzare i token delegati a sua disposizione. Se un Indexer non sta allocando tutti i token disponibili, non sta guadagnando il massimo profitto che potrebbe ottenere per sé o per i suoi Delegator. +- Attualmente nella rete un Indexer può scegliere di chiudere un'allocazione e raccogliere le ricompense in qualsiasi momento tra 1 e 28 giorni. È quindi possibile che un Indexer abbia molte ricompense che non ha ancora raccolto e che quindi le sue ricompense totali siano basse. Questo aspetto deve essere preso in considerazione nei primi giorni. 
-### Considering the query fee cut and indexing fee cut +### Considerando la riduzione delle tariffe di query e la riduzione delle tariffe di indicizzazione -As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: +Come descritto nelle sezioni precedenti, è necessario scegliere un Indexer che sia trasparente e onesto nell'impostare il taglio delle tariffe di query e tagli delle tariffe di indicizzazione. Il Delegator dovrebbe anche controllare il tempo di Cooldown dei Parametri per vedere quanto tempo di riserva ha a disposizione. Una volta fatto questo, è abbastanza semplice calcolare la quantità delle ricompense che i Delegator ricevono. La formula è: -![Delegation Image 3](/img/Delegation-Reward-Formula.png) +![Immagine delegator 3](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### Considerando il delegation pool del Indexer -Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: +Un altro aspetto che un Delegator deve considerare è il proporzione del Delegation Pool che possiede. Tutte le ricompense della delega sono condivise in modo uniforme, con un semplice ribilanciamento del pool determinato dall'importo che il Delegator ha depositato nel pool. In questo modo il Delegator ha una quota del pool: -![Share formula](/img/Share-Forumla.png) +![Formula di condivisione](/img/Share-Forumla.png) -Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. +Utilizzando questa formula, possiamo vedere che è possibile che un Indexer che offre solo il 20% ai Delegator, in realtà stia dando ai Delegator una ricompensa ancora migliore di un Indexer che offre il 90% ai Delegator. -A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. +Un Delegator può quindi fare i conti per determinare che l'Indexer che offre il 20% ai Delegatori offre un rendimento migliore. -### Considering the delegation capacity +### Considerando la capacità di delegazione -Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. +Un altro aspetto da considerare è la capacità di Delegator. Attualmente, il Delegation Ratio è impostato su 16. Ciò significa che se un Indexer ha fatto un stake di 1,000,000 GRT, la sua Delegation Capacity è di 16,000,000 di GRT di token delegati che può utilizzare nel protocollo. Tutti i token delegati che superano questa quantità diluiranno tutte le ricompense dei Delegator. -Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. 
This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. +Immaginiamo che un Indexer abbia 100,000,000 di GRT delegati e che la sua capacità sia solo di 16,000,000 di GRT. Ciò significa che 84,000,000 di token GRT non vengono utilizzati per guadagnare token. E tutti i Delegator e l'Indexer stanno guadagnando meno ricompense di quanto potrebbero. -Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. +Perciò un delegator deve sempre considerare la Delegation Capacity di un Indexer e tenerla in considerazione nel processo decisionale. -## Delegator FAQs and Bugs +## FAQ e bug dei Delegator -### MetaMask "Pending Transaction" Bug +### MetaMask "Transazione in sospeso" Bug -**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. What should I do?** +**Quando provo a delegare la mia transazione in MetaMask appare come "In attesa" o "In coda" per un periodo più lungo del previsto. Cosa devo fare?** -At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transactions attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browsesr (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users that have encountered this issue and have reported successful transactions after restarting their browser and attempting to delegate. +A volte, i tentativi di delegare agli Indexer tramite MetaMask possono fallire e risultare in prolungati periodi di tentativi di transazione "In attesa" o "In coda". Ad esempio, un utente può tentare di delegare con una tariffa del gas insufficiente rispetto ai prezzi correnti, con il risultato che il tentativo di transazione viene visualizzato come "In sospeso" nel suo portafoglio MetaMask per oltre 15 minuti. Quando si verifica questa situazione, l'utente può tentare di effettuare transazioni successive, ma queste non saranno elaborate finché la transazione iniziale non sarà stata estratta, poiché le transazioni per un indirizzo devono essere elaborate in ordine. In questi casi, le transazioni possono essere annullate in MetaMask, ma i tentativi di transazione accumuleranno tariffe del gas senza alcuna garanzia che i tentativi successivi vadano a buon fine. Una soluzione più semplice a questo bug è il riavvio del browser (ad esempio, utilizzando "abort:restart" nel address bar), che annullerà tutti i tentativi precedenti senza che il gas venga sottratto dal portafoglio. Diversi utenti che hanno riscontrato questo problema hanno riportato transazioni riuscite dopo aver riavviato il browser e aver tentato di delegare. 
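Tying together the reward cut, pool share, and 16x delegation ratio discussed in the sections above, a simplified estimate of a Delegator's position might look like the sketch below. This is an intuition aid only; the exact calculation is the on-chain formula shown in the formula images above.

```ts
// Simplified intuition only — not the exact protocol formula.
const DELEGATION_RATIO = 16; // delegation capacity = 16x the Indexer's self-stake

function estimateDelegatorRewardsGRT(
  myDelegationGRT: number,
  totalDelegatedGRT: number, // the Indexer's whole delegation pool
  indexerRewardsGRT: number, // rewards earned by the Indexer over the period
  rewardCut: number // fraction kept by the Indexer, e.g. 0.8 if the UI shows 80%
): number {
  const myPoolShare = myDelegationGRT / totalDelegatedGRT;
  const delegatorPortion = indexerRewardsGRT * (1 - rewardCut);
  return delegatorPortion * myPoolShare;
}

// Over-delegation dilutes rewards: delegation above selfStake * DELEGATION_RATIO earns nothing extra.
function isOverDelegated(selfStakeGRT: number, totalDelegatedGRT: number): boolean {
  return totalDelegatedGRT > selfStakeGRT * DELEGATION_RATIO;
}

console.log(isOverDelegated(1_000_000, 100_000_000)); // true — capacity is only 16,000,000 GRT
```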
-## Video guide for the network UI +## Guida video per la UI della rete -This guide provides a full review of this document, and how to consider everything in this document while interacting with the UI. +Questa guida fornisce una revisione completa del documento e spiega come tenere conto di tutti i suoi elementi durante l'interazione con la UI. diff --git a/website/pages/it/network/developing.mdx b/website/pages/it/network/developing.mdx index 9c543348259d..1db9ab61061d 100644 --- a/website/pages/it/network/developing.mdx +++ b/website/pages/it/network/developing.mdx @@ -1,53 +1,53 @@ --- -title: Developing +title: Sviluppo --- -Developers are the demand side of The Graph ecosystem. Developers build subgraphs and publish them to The Graph Network. Then, they query live subgraphs with GraphQL in order to power their applications. +Gli sviluppatori sono il lato della domanda dell'ecosistema di The Graph. Gli sviluppatori costruiscono subgraph e li pubblicano su The Graph Network. Quindi, fanno query sui subgraph in tempo reale con GraphQL per alimentare le loro applicazioni. -## Subgraph Lifecycle +## Ciclo di vita dei subgraph -Subgraphs deployed to the network have a defined lifecycle. +I subgraph distribuiti nella rete hanno un ciclo di vita definito. -### Build locally +### Costruire a livello locale -As with all subgraph development, it starts with local development and testing. Developers can use the same local setup whether they are building for The Graph Network, the hosted service or a local Graph Node, leveraging `graph-cli` and `graph-ts` to build their subgraph. Developers are encouraged to use tools such as [Matchstick](https://github.com/LimeChain/matchstick) for unit testing to improve the robustness of their subgraphs. +Come per lo sviluppo di tutti i subgraph, si inizia con lo sviluppo e il test in locale. Gli sviluppatori possono usare la stessa configurazione locale sia che stiano costruendo per The Graph Network, per l'hosted service o per un Graph Node locale, sfruttando `graph-cli` e `graph-ts` per costruire il loro subgraph. Gli sviluppatori sono incoraggiati a usare strumenti come [Matchstick](https://github.com/LimeChain/matchstick) per i test unitari, per migliorare la solidità dei loro subgraph. -> There are certain constraints on The Graph Network, in terms of feature and network support. Only subgraphs on [supported networks](/developing/supported-networks) will earn indexing rewards, and subgraphs which fetch data from IPFS are also not eligible. +> Ci sono alcuni vincoli su The Graph Network, in termini di funzionalità e supporto di rete. Solo i subgraph su [reti supportate](/developing/supported-networks) otterranno ricompense per l'indicizzazione e i subgraph che recuperano dati da IPFS non sono ammissibili. -### Deploy to the Subgraph Studio +### Distribuzione nel Subgraph Studio -Once defined, the subgraph can be built and deployed to the [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). The Subgraph Studio is a sandbox environment which will index the deployed subgraph and make it available for rate-limited development and testing. This gives developers an opportunity to verify that their subgraph does not encounter any indexing errors, and works as expected. +Una volta definito, il subgraph può essere costruito e distribuito nel [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/).
Subgraph Studio è un ambiente sandbox che indicizza il subgraph distribuito e lo rende disponibile per lo sviluppo e il test a velocità limitata. In questo modo gli sviluppatori hanno la possibilità di verificare che il loro subgraph non presenti errori di indicizzazione e che funzioni come previsto. -### Publish to the Network +### Pubblicare nella rete -When the developer is happy with their subgraph, they can publish it to The Graph Network. This is an on-chain action, which registers the subgraph so that it is discoverable by Indexers. Published subgraphs have a corresponding NFT, which is then easily transferable. The published subgraph has associated metadata, which provides other network participants with useful context and information. +Quando lo sviluppatore è soddisfatto del suo subgraph, può pubblicarlo su The Graph Network. Si tratta di un'azione on-chain, che registra il subgraph in modo che possa essere scoperto dagli Indexer. I subgraph pubblicati hanno un NFT corrispondente, che è poi facilmente trasferibile. Il subgraph pubblicato ha metadati associati, che forniscono agli altri partecipanti alla rete un contesto e informazioni utili. -### Signal to Encourage Indexing +### Segnale per incoraggiare l'indicizzazione -Published subgraphs are unlikely to be picked up by Indexers without the addition of signal. Signal is locked GRT associated with a given subgraph, which indicates to Indexers that a given subgraph will receive query volume, and also contributes to the indexing rewards available for processing it. Subgraph developers will generally add signal to their subgraph, in order to encourage indexing. Third party Curators may also signal on a given subgraph, if they deem the subgraph likely to drive query volume. +È improbabile che i subgraph pubblicati vengano raccolti dagli Indexer senza l'aggiunta di un segnale. Il segnale è un GRT bloccato associato a un determinato subgraph, che indica agli Indexer che un dato subgraph riceverà un volume di query, inoltre contribuisce anche ai premi di indicizzazione disponibili per la sua elaborazione. Gli sviluppatori di subgraph aggiungono generalmente un segnale al loro subgraph, per incoraggiarne l'indicizzazione. Anche i Curator di terze parti possono aggiungere un segnale a un determinato subgraph, se ritengono che il subgraph possa generare un volume di query. -### Querying & Application Development +### Query e sviluppo di applicazioni -Once a subgraph has been processed by Indexers and is available for querying, developers can start to use the subgraph in their applications. Developers query subgraphs via a gateway, which forwards their queries to an Indexer who has processed the subgraph, paying query fees in GRT. +Una volta che un subgraph è stato elaborato dagli Indexer ed è disponibile per fare query, gli sviluppatori possono iniziare a utilizzare il subgraph nelle loro applicazioni. Gli sviluppatori fanno query di subgraph tramite un gateway, che inoltra le loro queries a un Indexer che ha elaborato il subgraph, pagando le tariffe di query in GRT. -In order to make queries, developers must generate an API key, which can be done in the Subgraph Studio. This API key must be funded with GRT, in order to pay query fees. Developers can set a maximum query fee, in order to control their costs, and limit their API key to a given subgraph or origin domain. The Subgraph Studio provides developers with data on their API key usage over time. 
+Per effettuare le query, gli sviluppatori devono generare una chiave API, che può essere creata nel Subgraph Studio. Questa chiave API deve essere finanziata con GRT, per pagare le tariffe di query. Gli sviluppatori possono impostare una tariffa massima di query, per controllare i costi, e limitare la loro chiave API a un determinato subgraph o dominio di origine. Subgraph Studio fornisce agli sviluppatori dati sull'utilizzo della chiave API nel tempo. -Developers are also able to express an Indexer preference to the gateway, for example preferring Indexers whose query response is faster, or whose data is most up to date. These controls are set in the Subgraph Studio. +Gli sviluppatori possono anche esprimere una preferenza per gli Indexer al gateway, ad esempio preferendo gli Indexer la cui risposta alle query è più veloce o i cui dati sono più aggiornati. Questi controlli vengono impostati nel Subgraph Studio. -### Updating Subgraphs +### Aggiornare i subgraph -After a time a subgraph developer may want to update their subgraph, perhaps fixing a bug or adding new functionality. The subgraph developer may deploy new version(s) of their subgraph to the Subgraph Studio for rate-limited development and testing. +Dopo un certo periodo di tempo, uno sviluppatore di subgraph potrebbe voler aggiornare il proprio subgraph, magari correggendo un bug o aggiungendo nuove funzionalità. Lo sviluppatore del subgraph può distribuire le nuove versioni del suo subgraph nel Subgraph Studio per lo sviluppo e il test a velocità limitata. -Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +Una volta che lo sviluppatore del sottografo è pronto per l'aggiornamento, può avviare una transazione per puntare il suo subgraph alla nuova versione. L'aggiornamento del subgraph migra qualsiasi segnale alla nuova versione (supponendo che l'utente che ha applicato il segnale abbia selezionato "auto-migrate"), il che comporta anche una tassa di migrazione. La migrazione del segnale dovrebbe indurre gli Indexer a iniziare l'indicizzazione della nuova versione del subgraph, che dovrebbe quindi diventare presto disponibile per le query. -### Deprecating Subgraphs +### Deprecazione dei Subgraph -At some point a developer may decide that they no longer need a published subgraph. At that point they may deprecate the subgraph, which returns any signalled GRT to the Curators. +A un certo punto uno sviluppatore può decidere di non aver più bisogno di un subgraph pubblicato. A quel punto può deprecare il subgraph, restituendo ai Curator ogni GRT segnalato. -### Diverse Developer Roles +### Diversi Ruoli dello Sviluppatore -Some developers will engage with the full subgraph lifecycle on the network, publishing, querying and iterating on their own subgraphs. Some may be focused on subgraph development, building open APIs which others can build on. Some may be application focused, querying subgraphs deployed by others. +Alcuni sviluppatori si occuperanno dell'intero ciclo di vita dei subgraph sulla rete, pubblicando, facendo query e iterando i propri subgraph. 
Alcuni si concentreranno sullo sviluppo di subgraph, costruendo API aperte su cui altri potranno basarsi. Alcuni possono concentrarsi sulle applicazioni, interrogando i subgraph distribuiti da altri. -### Developers and Network Economics +### Sviluppatori ed economia di rete -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. +Gli sviluppatori sono un attore economico fondamentale nella rete, in quanto bloccano i GRT per incoraggiare l'indicizzazione e, soprattutto, le query dei subgraph, che rappresenta il principale scambio di valore della rete. Anche gli sviluppatori di subgraph bruciano GRT ogni volta che un subgraph viene aggiornato. diff --git a/website/pages/it/network/explorer.mdx b/website/pages/it/network/explorer.mdx index b3a549900b83..991f2e4836ef 100644 --- a/website/pages/it/network/explorer.mdx +++ b/website/pages/it/network/explorer.mdx @@ -2,202 +2,202 @@ title: Graph Explorer --- -Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below): +Benvenuti nel Graph Explorer, o come ci piace chiamarlo, il vostro portale decentralizzato nel mondo dei subgraph e dei dati di rete. 👩🏽‍🚀 Il Graph Explorer è composto da più parti in cui è possibile interagire con altri sviluppatori di subgraph, sviluppatori di dapp, Curator, Indexer e Delegator. Per una panoramica generale del Graph Explorer, guardate il video qui sotto (o continuate a leggere): -## Subgraphs +## Subgraph -First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name. +Prima di tutto, se avete appena finito di distribuire e pubblicare il vostro subgraph in Subgraph Studio, il tab Subgraph in cima alla barra di navigazione è il posto giusto per visualizzare i vostri subgraph finiti (e i subgraph di altri) sulla rete decentralizzata. Qui è possibile trovare il subgraph esatto che si sta cercando in base alla data di creazione, alla quantità di segnale o al nome. ![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) -When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries. +Quando si fa clic su un subgraph, è possibile testare le query nel playground e sfruttare i dettagli della rete per prendere decisioni informate. Sarà inoltre possibile segnalare il GRT sul proprio subgraph o su quello di altri per far capire agli Indexer la sua importanza e qualità. 
Questo è fondamentale perché la segnalazione di un subgraph ne incentiva l'indicizzazione, il che significa che emergerà sulla rete per arrivare infine a servire le query. ![Explorer Image 2](/img/Subgraph-Details.png) -On each subgraph’s dedicated page, several details are surfaced. These include: +Nella pagina dedicata a ciascun subgraph vengono visualizzati diversi dettagli. Questi includono: -- Signal/Un-signal on subgraphs -- View more details such as charts, current deployment ID, and other metadata -- Switch versions to explore past iterations of the subgraph -- Query subgraphs via GraphQL -- Test subgraphs in the playground -- View the Indexers that are indexing on a certain subgraph -- Subgraph stats (allocations, Curators, etc) -- View the entity who published the subgraph +- Segnala/non segnala i subgraph +- Visualizza ulteriori dettagli, come grafici, ID di distribuzione corrente e altri metadati +- Cambia versione per esplorare le iterazioni passate del subgraph +- Consulta i subgraph tramite GraphQL +- Test dei subgraph nel playground +- Visualizza gli Indexer che stanno indicizzando su un determinato subgraph +- Statistiche del subgraph (allocazioni, Curator, ecc.) +- Visualizza l'entità che ha pubblicato il subgraph ![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) -## Participants +## Partecipanti -Within this tab, you’ll get a bird’s eye view of all the people that are participating in the network activities, such as Indexers, Delegators, and Curators. Below, we’ll go into an in-depth review of what each tab means for you. +In questo tab è possibile avere una vista dall'alto di tutte le persone che partecipano alle attività della rete, come Indexer, Delegator e Curator. Di seguito, esamineremo in modo approfondito il significato di ogni tab per voi. -### 1. Indexers +### 1. Indexer ![Explorer Image 4](/img/Indexer-Pane.png) -Let’s start with the Indexers. Indexers are the backbone of the protocol, being the ones that stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. In the Indexers table, you’ll be able to see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made off of query fees and indexing rewards. Deep dives below: +Cominciamo con gli Indexer. Gli Indexer sono la spina dorsale del protocollo, in quanto sono quelli che fanno staking sui subgraph, li indicizzano e servono le query a chiunque consumi i subgraph. Nella tabella degli Indexer, è possibile vedere i parametri di delega di un Indexer, il suo stake, quanto ha messo in staking su ogni subgraph e quanto ha guadagnato con le tariffe di query e le ricompense per l'indicizzazione. Approfondimenti di seguito: -- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators -- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. If it’s positive, it means that the Indexer is keeping some of their rewards -- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters.
Cooldown periods are set up by Indexers when they update their delegation parameters -- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior -- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed -- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing -- Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated -- Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. An excess delegated stake cannot be used for allocations or rewards calculations. -- Query Fees - this is the total fees that end users have paid for queries from an Indexer over all time -- Indexer Rewards - this is the total indexer rewards earned by the Indexer and their Delegators over all time. Indexer rewards are paid through GRT issuance. +- Query Fee Cut - la percentuale dei rimborsi delle tariffe di query che l'Indexer trattiene quando divide con i Delegator +- Effective Reward Cut - il taglio della ricompensa di indicizzazione applicato al pool di delega. Se è negativo, significa che l'Indexer sta cedendo parte delle sue ricompense. Se è positivo, significa che l'Indexer sta conservando una parte delle sue ricompense +- Cooldown Remaining - il tempo rimanente prima che l'Indexer possa modificare i parametri di delega di cui sopra. I periodi di Cooldown sono impostati dagli Indexer quando aggiornano i loro parametri di delega +- Owned - Si tratta dello stake depositato dall'Indexer, che può essere ridotto in caso di comportamento dannoso o scorretto +- Delegated - Stake dei Delegator che può essere allocato dall'Indexer, ma non può essere tagliato +- Allocated - Lo stake che gli Indexer stanno attivamente allocando verso i subgraph che stanno indicizzando +- Available Delegation Capacity - la quantità di stake delegato che gli Indexer possono ancora ricevere prima di diventare sovra-delegati +- Max Delegation Capacity - l'importo massimo di stake delegato che l'Indexer può accettare in modo produttivo. Uno stake delegato in eccesso non può essere utilizzato per le allocazioni o per il calcolo delle ricompense. +- Query Fees - è il totale delle tariffe che gli utenti finali hanno pagato per le query da un Indexer nel corso del tempo +- Indexer Rewards - è il totale delle ricompense guadagnate dall'Indexer e dai suoi Delegator nel corso del tempo. Le ricompense degli Indexer vengono pagate tramite l'emissione di GRT. -Indexers can earn both query fees and indexing rewards. Functionally, this happens when network participants delegate GRT to an Indexer. This enables Indexers to receive query fees and rewards depending on their Indexer parameters. Indexing parameters are set by clicking on the right-hand side of the table, or by going into an Indexer’s profile and clicking the “Delegate” button. +Gli Indexer possono guadagnare sia tariffe di query che ricompense per l'indicizzazione. Funzionalmente, ciò avviene quando i partecipanti alla rete delegano il GRT a un Indexer. Ciò consente agli Indexer di ricevere tariffe di query e ricompense in base ai loro parametri di indicizzazione. I parametri di indicizzazione si impostano facendo clic sul lato destro della tabella o accedendo al profilo dell'Indexer e facendo clic sul pulsante "Delegate".
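As a rough illustration of how the cut parameters in the Indexers table translate into a payout split between an Indexer and its Delegators, here is a small TypeScript sketch. The cut values and GRT amounts are invented for the example, and it deliberately ignores the effective-reward-cut nuance around the Indexer's own share of the delegation pool.

```typescript
// Hypothetical cut values; the Indexer keeps `cutPercent` of a payout and the
// remainder is shared with its Delegators.
interface IndexerCuts {
  queryFeeCut: number;        // % of query fee rebates kept by the Indexer
  indexingRewardCut: number;  // % of indexing rewards kept by the Indexer
}

function splitPayout(totalGRT: number, cutPercent: number) {
  const indexerShare = totalGRT * (cutPercent / 100);
  return { indexer: indexerShare, delegators: totalGRT - indexerShare };
}

const cuts: IndexerCuts = { queryFeeCut: 75, indexingRewardCut: 80 };

console.log(splitPayout(1_000, cuts.queryFeeCut));       // { indexer: 750, delegators: 250 }
console.log(splitPayout(5_000, cuts.indexingRewardCut)); // { indexer: 4000, delegators: 1000 }
```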
-To learn more about how to become an Indexer, you can take a look at the [official documentation](/network/indexing) or [The Graph Academy Indexer guides.](https://thegraph.academy/delegators/choosing-indexers/) +Per saperne di più su come diventare un Indexer, è possibile consultare la [documentazione ufficiale](/network/indexing) oppure [The Graph Academy Indexer guides.](https://thegraph.academy/delegators/choosing-indexers/) -![Indexing details pane](/img/Indexing-Details-Pane.png) +![Pannello dei dettagli di indicizzazione](/img/Indexing-Details-Pane.png) -### 2. Curators +### 2. Curator -Curators analyze subgraphs to identify which subgraphs are of the highest quality. Once a Curator has found a potentially attractive subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. +I Curator analizzano i subgraph per identificare quelli di maggiore qualità. Una volta che un Curator ha trovato un subgraph potenzialmente interessante, può curarlo segnalando la sua bonding curve. In questo modo, i Curator fanno sapere agli Indexer quali subgraph sono di alta qualità e dovrebbero essere indicizzati. -Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. By depositing GRT, Curators mint curation shares of a subgraph. As a result, Curators are eligible to earn a portion of the query fees that the subgraph they have signaled on generates. The bonding curve incentivizes Curators to curate the highest quality data sources. The Curator table in this section will allow you to see: +I Curator possono essere membri della comunità, consumatori di dati o anche sviluppatori di subgraph che segnalano i propri subgraph depositando token GRT in una bonding curve. Depositando GRT, i Curator coniano quote di curation di un subgraph. Di conseguenza, i Curator hanno diritto a guadagnare una parte delle tariffe di query generate dal subgraph che hanno segnalato. La bonding curve incentiva i Curator a curare le fonti di dati di maggiore qualità. La tabella dei Curator in questa sezione consente di vedere: -- The date the Curator started curating -- The number of GRT that was deposited -- The number of shares a Curator owns +- La data in cui il Curator ha iniziato a curare +- Il numero di GRT depositato +- Il numero di azioni possedute da un Curator ![Explorer Image 6](/img/Curation-Overview.png) -If you want to learn more about the Curator role, you can do so by visiting the following links of [The Graph Academy](https://thegraph.academy/curators/) or [official documentation.](/network/curating) +Per saperne di più sul ruolo del Curator, è possibile visitare i seguenti link [The Graph Academy](https://thegraph.academy/curators/) oppure la [documentazione ufficiale.](/network/curating) -### 3. Delegators +### 3. Delegator -Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers seek to attract Delegators by offering them a portion of the indexing rewards and query fees that they earn. +I Delegator svolgono un ruolo chiave nel mantenere la sicurezza e la decentralizzazione di The Graph Network. 
Partecipano alla rete delegando (cioè "staking") i token GRT a uno o più Indexer. Senza Delegator, gli Indexer hanno meno probabilità di guadagnare ricompense e commissioni significative. Pertanto, gli Indexer cercano di attrarre i Delegator offrendo loro una parte delle ricompense per l'indicizzazione e delle tariffe di query che guadagnano. -Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! +I Delegator, a loro volta, selezionano gli Indexer in base a una serie di variabili diverse, come le prestazioni passate, i tassi di ricompensa per l'indicizzazione e le tariffe di query. Anche la reputazione all'interno della comunità può giocare un ruolo importante! Si consiglia di entrare in contatto con gli Indexer selezionati tramite [The Graph’s Discord](https://discord.gg/graphprotocol) oppure [The Graph Forum](https://forum.thegraph.com/)! ![Explorer Image 7](/img/Delegation-Overview.png) -The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: +La tabella dei Delegator consente di visualizzare i Delegator attivi nella comunità, oltre a metriche quali: -- The number of Indexers a Delegator is delegating towards -- A Delegator’s original delegation -- The rewards they have accumulated but have not withdrawn from the protocol -- The realized rewards they withdrew from the protocol -- Total amount of GRT they have currently in the protocol -- The date they last delegated at +- Il numero di Indexer verso cui un Delegator sta delegando +- La delega originale di un Delegator +- Le ricompense che hanno accumulato ma non ritirato dal protocollo +- Le ricompense realizzate ritirate dal protocollo +- Quantità totale di GRT che hanno attualmente nel protocollo +- La data dell'ultima delegazione -If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). +Se volete saperne di più su come diventare Delegator, non cercate oltre! Tutto ciò che dovete fare è andare alla [documentazione ufficiale](/network/delegating) oppure su [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). -## Network +## La rete -In the Network section, you will see global KPIs as well as the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. +Nella sezione Rete, oltre a trovare i KPI globali, è possibile passare a una base per epoche e analizzare le metriche di rete in modo più dettagliato. Questi dettagli danno un'idea dell'andamento della rete nel tempo. -### Activity +### Attività -The activity section has all the current network metrics as well as some cumulative metrics over time. Here you can see things like: +La sezione attività contiene tutte le metriche di rete attuali e alcune metriche cumulative nel tempo. 
Qui si possono vedere cose come: -- The current total network stake -- The stake split between the Indexers and their Delegators -- Total supply, minted, and burned GRT since the network inception -- Total Indexing rewards since the inception of the protocol -- Protocol parameters such as curation reward, inflation rate, and more -- Current epoch rewards and fees +- L'attuale stake totale della rete +- La ripartizione dello stake tra gli Indexer e i loro Delegator +- Fornitura totale, coniata e bruciata di GRT dall'inizio della rete +- Ricompense totali dell'indicizzazione dall'inizio del protocollo +- Parametri di protocollo come la ricompensa per la curation, il tasso di inflazione e altro ancora +- Premi e commissioni dell'epoca attuale -A few key details that are worth mentioning: +Alcuni dettagli chiave che meritano di essere notati: -- **Query fees represent the fees generated by the consumers**, and they can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. -- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once the Indexers close their allocations towards the subgraphs they’ve been indexing. Thus the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). +- **Le tariffe di query rappresentano le commissioni generate dai consumatori**, e possono essere reclamati (oppure no) dagli Indexer dopo un periodo di almeno 7 epoche (vedi sotto) dopo che le loro allocation verso i subgraph sono state chiuse e i dati che hanno servito sono stati convalidati dai consumatori. +- **Le ricompense dell'indicizzazione rappresentano la quantità di ricompense che gli Indexer hanno richiesto all'emissione della rete durante l'epoca.** Sebbene l'emissione del protocollo sia fissa, le ricompense vengono coniate solo quando gli Indexer chiudono le loro allocation verso i subgraph che hanno indicizzato. Pertanto, il numero di ricompense per ogni epoca varia (ad esempio, durante alcune epoche, gli Indexer potrebbero aver chiuso collettivamente allocation aperte da molti giorni). ![Explorer Image 8](/img/Network-Stats.png) -### Epochs +### Epoche -In the Epochs section, you can analyze on a per-epoch basis, metrics such as: +Nella sezione Epoche è possibile analizzare su base epocale metriche come: -- Epoch start or end block -- Query fees generated and indexing rewards collected during a specific epoch -- Epoch status, which refers to the query fee collection and distribution and can have different states: - - The active epoch is the one in which Indexers are currently allocating stake and collecting query fees - - The settling epochs are the ones in which the state channels are being settled. This means that the Indexers are subject to slashing if the consumers open disputes against them. - - The distributing epochs are the epochs in which the state channels for the epochs are being settled and Indexers can claim their query fee rebates. - - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers, thus being finalized. 
+- Blocco di inizio o fine epoca +- Tariffe di query generate e ricompense di indicizzazione raccolte durante un'epoca specifica +- Stato dell'epoca, che si riferisce alla raccolta e alla distribuzione delle tariffe di query e può avere diversi stati: + - L'epoca attiva è quella in cui gli Indexer stanno allocando le stake e riscuotendo le tariffe di query + - Le epoche di assestamento sono quelle in cui i canali di stato sono in fase di definizione. Ciò significa che gli Indexer sono soggetti a taglio se i consumatori aprono controversie contro di loro. + - Le epoche di distribuzione sono le epoche in cui i canali di stato per le epoche vengono regolati e gli Indexer possono richiedere gli sconti sulle tariffe di query. + - Le epoche finalizzate sono le epoche in cui gli Indexer non hanno più sconti sulle tariffe di query da richiedere e sono quindi finalizzate. ![Explorer Image 9](/img/Epoch-Stats.png) -## Your User Profile +## Il profilo utente -Now that we’ve talked about the network stats, let’s move on to your personal profile. Your personal profile is the place for you to see your network activity, no matter how you’re participating on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see: +Dopo aver parlato delle statistiche di rete, passiamo al profilo personale. Il vostro profilo personale è il luogo in cui potete vedere la vostra attività, indipendentemente da come state partecipando alla rete. Il vostro wallet fungerà da profilo utente e, grazie alla User Dashboard, sarete in grado di vedere: -### Profile Overview +### Panoramica del profilo -This is where you can see any current actions you took. This is also where you can find your profile information, description, and website (if you added one). +Qui si possono vedere le azioni in corso. Qui si trovano anche le informazioni sul profilo, la descrizione e il sito web (se ne avete aggiunto uno). ![Explorer Image 10](/img/Profile-Overview.png) -### Subgraphs Tab +### Scheda di subgraph -If you click into the Subgraphs tab, you’ll see your published subgraphs. This will not include any subgraphs deployed with the CLI for testing purposes – subgraphs will only show up when they are published to the decentralized network. +Se si fa clic sulla scheda Subgraph, si vedranno i subgraph pubblicati. Questi non includono i subgraph distribuiti con la CLI a scopo di test: i subgraph vengono visualizzati solo quando sono pubblicati sulla rete decentralizzata. ![Explorer Image 11](/img/Subgraphs-Overview.png) -### Indexing Tab +### Scheda di indicizzazione -If you click into the Indexing tab, you’ll find a table with all the active and historical allocations towards the subgraphs, as well as charts that you can analyze and see your past performance as an Indexer. +Se si fa clic sulla scheda Indicizzazione, si troverà una tabella con tutte le allocation attive e storiche verso i subgraph, oltre a grafici che consentono di analizzare le performance passate come Indexer. -This section will also include details about your net Indexer rewards and net query fees. You’ll see the following metrics: +Questa sezione include anche i dettagli sui compensi netti degli Indexer e sulle tariffe nette di query. 
Verranno visualizzate le seguenti metriche: -- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed -- Total Query Fees - the total fees that users have paid for queries served by you over time -- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT -- Fee Cut - the % of query fee rebates that you will keep when you split with Delegators -- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators -- Owned - your deposited stake, which could be slashed for malicious or incorrect behavior +- Delegated Stake (stake delegato) - lo stake dei Delegator che può essere assegnato dall'utente, ma che non può essere tagliato +- Total Query Fees (totale delle tariffe di query) - il totale delle tariffe che gli utenti hanno pagato per le query servite da voi nel tempo +- Indexer Rewards (ricompense dell'Indexer) - l'importo totale delle ricompense dell'Indexer ricevute, in GRT +- Fee Cut (percentuale delle tariffe) - la percentuale dei rimborsi delle tariffe di query che si trattiene quando si divide con i Delegator +- Rewards Cut (percentuale delle ricompense) - la percentuale delle ricompense dell'Indexer che verrà trattenuta quando si divide con i Delegator +- Owned (di proprietà) - lo stake depositato, che potrebbe essere tagliato in caso di comportamento dannoso o scorretto ![Explorer Image 12](/img/Indexer-Stats.png) -### Delegating Tab +### Scheda delle deleghe -Delegators are important to the Graph Network. A Delegator must use their knowledge to choose an Indexer that will provide a healthy return on rewards. Here you can find details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. +I Delegator sono importanti per The Graph Network. Un Delegator deve utilizzare le proprie conoscenze per scegliere un Indexer che fornisca un buon ritorno sulle ricompense. Qui potete trovare i dettagli delle vostre delegazioni attive e storiche, insieme alle metriche degli Indexer verso cui avete delegato. -In the first half of the page, you can see your delegation chart, as well as the rewards-only chart. To the left, you can see the KPIs that reflect your current delegation metrics. +Nella prima metà della pagina è possibile vedere il grafico delle deleghe e quello delle sole ricompense. A sinistra, si possono vedere i KPI che riflettono le metriche attuali della delega. -The Delegator metrics you’ll see here in this tab include: +Le metriche del Delegator visualizzate in questa scheda includono: -- Total delegation rewards -- Total unrealized rewards -- Total realized rewards +- Ricompense di delega totali +- Ricompense totali non realizzate +- Ricompense totali realizzate -In the second half of the page, you have the delegations table. Here you can see the Indexers that you delegated towards, as well as their details (such as rewards cuts, cooldown, etc). +Nella seconda metà della pagina si trova la tabella delle deleghe. Qui si possono vedere gli Indexer verso i quali si è delegato, nonché i loro dettagli (come le percentuali delle ricompense, il cooldown, ecc.). -With the buttons on the right side of the table, you can manage your delegation - delegate more, undelegate, or withdraw your delegation after the thawing period. +Con i pulsanti sul lato destro della tabella è possibile gestire la delega: delegare di più, annullare la delega o ritirarla dopo il periodo di scongelamento.
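For readers who prefer to see the Delegating tab's numbers as data, here is a hedged sketch of one row of the delegations table. The field and status names are illustrative rather than the Explorer's actual schema, and it assumes the total-rewards KPI is simply the sum of unrealized and realized rewards.

```typescript
// Illustrative shape for one entry in the delegations table described above.
type DelegationStatus = "delegating" | "undelegating" | "withdrawable";

interface DelegationRow {
  indexer: string;               // address of the Indexer delegated to
  delegatedGRT: number;          // the original delegation
  unrealizedRewardsGRT: number;  // accrued but not yet withdrawn
  realizedRewardsGRT: number;    // already withdrawn from the protocol
  status: DelegationStatus;
}

// "Total delegation rewards" under the assumption stated above.
const totalRewards = (row: DelegationRow): number =>
  row.unrealizedRewardsGRT + row.realizedRewardsGRT;

const example: DelegationRow = {
  indexer: "0x0000000000000000000000000000000000000000",
  delegatedGRT: 10_000,
  unrealizedRewardsGRT: 120,
  realizedRewardsGRT: 80,
  status: "delegating",
};

console.log(totalRewards(example)); // 200 GRT
```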
-Keep in mind that this chart is horizontally scrollable, so if you scroll all the way to the right, you can also see the status of your delegation (delegating, undelegating, withdrawable). +Tenete presente che questo grafico è scorrevole orizzontalmente, quindi se scorrete fino a destra, potete anche vedere lo stato della vostra delegazione (delegante, non delegante, revocabile). ![Explorer Image 13](/img/Delegation-Stats.png) -### Curating Tab +### Scheda di curation -In the Curation tab, you’ll find all the subgraphs you’re signaling on (thus enabling you to receive query fees). Signaling allows Curators to highlight to Indexers which subgraphs are valuable and trustworthy, thus signaling that they need to be indexed on. +Nella scheda di Curation si trovano tutti i subgraph sui quali si sta effettuando una segnalazione (che consente di ricevere commissioni della query). La segnalazione consente ai curator di evidenziare agli Indexer quali subgraph sono di valore e affidabili, segnalando così la necessità di indicizzarli. -Within this tab, you’ll find an overview of: +All'interno di questa scheda è presente una panoramica di: -- All the subgraphs you're curating on with signal details -- Share totals per subgraph -- Query rewards per subgraph -- Updated at date details +- Tutti i subgraph su cui si effettua la curation con i dettagli del segnale +- Totali delle quote per subgraph +- Ricompense della query per subgraph +- Aggiornamento attuale dei dettagli ![Explorer Image 14](/img/Curation-Stats.png) -## Your Profile Settings +## Impostazioni del profilo -Within your user profile, you’ll be able to manage your personal profile details (like setting up an ENS name). If you’re an Indexer, you have even more access to settings at your fingertips. In your user profile, you’ll be able to set up your delegation parameters and operators. +All'interno del vostro profilo utente, potrete gestire i dettagli del vostro profilo personale (come impostare un nome ENS). Se siete un Indexer, avrete ancora più accesso alle impostazioni a portata di mano. Nel vostro profilo utente, potrete impostare i parametri di delegazione e gli operatori. -- Operators take limited actions in the protocol on the Indexer's behalf, such as opening and closing allocations. Operators are typically other Ethereum addresses, separate from their staking wallet, with gated access to the network that Indexers can personally set -- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. +- Gli operatori compiono azioni limitate nel protocollo per conto dell'Indexer, come l'apertura e la chiusura delle allocation. Gli operatori sono in genere altri indirizzi Ethereum, separati dal loro wallet di staking, con un accesso limitato alla rete che gli Indexer possono impostare personalmente +- I parametri di delega vi permettono di controllare la distribuzione di GRT tra voi e i vostri Delegator. ![Explorer Image 15](/img/Profile-Settings.png) -As your official portal into the world of decentralized data, The Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. +Come portale ufficiale nel mondo dei dati decentralizzati, The Graph Explorer vi permette di intraprendere una serie di azioni, indipendentemente dal vostro ruolo nella rete. 
Per accedere alle impostazioni del vostro profilo, aprite il menu a tendina accanto al vostro indirizzo e fate clic sul pulsante Impostazioni.
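For context on what the operator and delegation-parameter settings above correspond to on-chain, below is a hedged ethers.js sketch. The function names and the parts-per-million encoding of the cuts are assumptions based on The Graph's Staking contract as described in the indexing documentation, so verify the deployed ABI (and the correct contract address) before relying on it.

```typescript
import { ethers } from "ethers";

// Minimal human-readable ABI for the two calls sketched here; the signatures
// are assumptions, check them against the actual Staking contract ABI.
const STAKING_ABI = [
  "function setDelegationParameters(uint32 indexingRewardCut, uint32 queryFeeCut, uint32 cooldownBlocks)",
  "function setOperator(address operator, bool allowed)",
];

async function configureIndexerProfile(
  stakingAddress: string,   // address of the Staking contract (placeholder)
  operatorAddress: string,  // separate wallet allowed to act for the Indexer
  signer: ethers.Signer
) {
  const staking = new ethers.Contract(stakingAddress, STAKING_ABI, signer);

  // Example: keep 80% of indexing rewards and 95% of query fee rebates
  // (values in parts per million), with a 500-block parameter cooldown.
  await (await staking.setDelegationParameters(800_000, 950_000, 500)).wait();

  // Authorize an operator for day-to-day actions such as opening and closing
  // allocations, without exposing the staking wallet's keys.
  await (await staking.setOperator(operatorAddress, true)).wait();
}
```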
![Wallet details](/img/Wallet-Details.png)
diff --git a/website/pages/it/network/indexing.mdx b/website/pages/it/network/indexing.mdx index c40fd87a22fe..33a42fc96831 100644 --- a/website/pages/it/network/indexing.mdx +++ b/website/pages/it/network/indexing.mdx @@ -1,48 +1,48 @@ --- -title: Indexing +title: Indicizzazione --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Gli Indexer sono operatori di nodi di The Graph Network che fanno staking di Graph Token (GRT) per fornire servizi di indicizzazione e di elaborazione delle query. Gli Indexer guadagnano tariffe di query e ricompense di indicizzazione per i loro servizi. Guadagnano anche tariffe di query che vengono rimborsate in base a una funzione di rimborso esponenziale. -GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. +Il GRT che viene fatto staking nel protocollo è soggetto a un periodo di scongelamento e può essere ridotto se gli Indexer sono malintenzionati e servono dati errati alle applicazioni o se indicizzano in modo errato. Gli Indexer guadagnano anche ricompense per le stake delegate dai Delegator, per contribuire alla rete. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Gli Indexer selezionano i subgraph da indicizzare in base al segnale di curation del subgraph, dove i Curator fanno staking di GRT per indicare quali subgraph sono di alta qualità e dovrebbero essere prioritari. I consumatori (ad esempio, le applicazioni) possono anche impostare i parametri per cui gli Indexer elaborano le query per i loro subgraph e stabilire le preferenze per le tariffe di query. ## FAQ -### What is the minimum stake required to be an Indexer on the network? +### Qual è lo stake minimo richiesto per essere un Indexer sulla rete? -The minimum stake for an Indexer is currently set to 100K GRT. +Lo stake minimo per un Indexer è attualmente fissato a 100K GRT. -### What are the revenue streams for an Indexer? +### Quali sono le fonti di guadagno di un Indexer? -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. +**Sconti sulle tariffe di query** - Pagamenti per servire le query sulla rete. Questi pagamenti sono mediati da canali di stato tra un Indexer e un gateway. Ogni richiesta di query da parte di un gateway contiene un pagamento e la risposta corrispondente una prova della validità del risultato della query. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. 
+**Ricompense di indicizzazione** - Generate tramite un'inflazione annuale del 3% a livello di protocollo, le ricompense di indicizzazione sono distribuite agli Indexer che indicizzano le distribuzioni di subgraph per la rete. -### How are indexing rewards distributed? +### Come vengono distribuite le ricompense di indicizzazione? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Le ricompense di indicizzazione provengono dall'inflazione del protocollo, impostata al 3% di emissione annuale. Vengono distribuite tra i subgraph in base alla proporzione di tutti i segnali di curation su ciascuno di essi, quindi distribuite proporzionalmente agli Indexer in base allo stake di partecipazione assegnato a quel subgraph. **Un'allocation deve essere chiusa con una prova valida di indicizzazione (POI) che soddisfi gli standard stabiliti dalla carta dell'arbitrato per poter beneficiare delle ricompense.** -Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. +Numerosi strumenti sono stati creati dalla comunità per il calcolo delle ricompense; potete trovarne una collezione organizzata nella [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). È inoltre possibile trovare un elenco aggiornato di strumenti nei canali #Delegator e #Indexers sul [server Discord](https://discord.gg/graphprotocol). Qui un link ad un [ottimizzatore dell'allocazione consigliata](https://github.com/graphprotocol/AllocationOpt.jl) integrato nello stack software dell' Indexer. -### What is a proof of indexing (POI)? +### Che cos'è una prova di indicizzazione (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +I POI sono utilizzati nella rete per verificare che un Indexer stia indicizzando i subgraph su cui ha effettuato l'allocazione. Un POI per il primo blocco dell'epoca corrente deve essere presentato alla chiusura di un'allocazione affinché questa possa beneficiare delle ricompense di indicizzazione. Un POI per un blocco è un insieme di tutte le transazioni dell'entity store per una specifica distribuzione di subgraph fino a quel blocco incluso. -### When are indexing rewards distributed? +### Quando vengono distribuite le ricompense di indicizzazione? -Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. 
Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). +Le allocazioni accumulano continuamente ricompense mentre sono attive e vengono allocate entro 28 epoche. Le ricompense vengono raccolte dagli Indexer e distribuite quando le loro allocazioni vengono chiuse. Ciò avviene manualmente, ogni volta che l'Indexer vuole chiuderle forzatamente, oppure dopo 28 epoche un Delegator può chiudere l'allocazione per l'Indexer, ma questo non comporta ricompense. 28 epoche è la durata massima dell'allocazione (al momento, un'epoca dura circa 24 ore). -### Can pending indexing rewards be monitored? +### È possibile monitorare le ricompense di indicizzazione in sospeso? -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +Il contratto RewardsManager ha una funzione di sola lettura [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) che può essere utilizzata per controllare le ricompense in sospeso per una specifica allocazione. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +Molte delle dashboard create dalla comunità includono i valori delle ricompense in sospeso, che possono essere facilmente controllate manualmente seguendo questi passaggi: -1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: +1. Fare query su [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) per ottenere gli ID di tutte le allocazioni attive: ```graphql query indexerAllocations { @@ -58,139 +58,139 @@ query indexerAllocations { } ``` -Use Etherscan to call `getRewards()`: +Utilizzare Etherscan per chiamare `getRewards()`: -- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Andare su [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) -* To call `getRewards()`: - - Expand the **10. getRewards** dropdown. - - Enter the **allocationID** in the input. - - Click the **Query** button. +* Per chiamare `getRewards()`: + - Espandere il **10. getRewards** a tendina. + - Inserire l'**allocationID** nell'input. + - Fare clic sul pulsante **Query**. -### What are disputes and where can I view them? +### Cosa sono le controversie e dove posso vederle? -Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. 
Fisherman are any network participants that open disputes. +Le query e le allocazioni dell'Indexer possono essere contestate su The Graph durante il periodo di contestazione. Il periodo di contestazione varia a seconda del tipo di contestazione. Le query/attestazioni hanno una finestra di contestazione di 7 epoche, mentre le allocazioni 56 epoche. Una volta trascorsi questi periodi, non è più possibile aprire controversie né contro le allocation né contro le query. Quando viene aperta una controversia, i Fisherman devono versare un deposito minimo di 10.000 GRT, che rimarrà bloccato fino a quando la controversia non sarà conclusa e sarà stata data una risoluzione. I Fisherman sono tutti i partecipanti alla rete che aprono controversie. -Disputes have **three** possible outcomes, so does the deposit of the Fishermen. +Le controversie hanno **tre** esiti possibili, così come il deposito dei Fishermen. -- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. -- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. -- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. +- Se la controversia viene respinta, il GRT depositato dai Fishermen verrà bruciato e l'Indexer contestato non verrà tagliato. +- Se la controversia viene risolta con un pareggio, il deposito dei Fishermen verrà restituito e l'Indexer contestato non verrà tagliato. +- Se la controversia viene accettata, il GRT depositato dai Fishermen verrà restituito, l'Indexer contestato verrà tagliato e i Fishermen guadagneranno il 50% dei GRT tagliati. -Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. +Le controversie possono essere visualizzate nell'interfaccia utente nella pagina del profilo di un Indexer, sotto la scheda `Disputes`. -### What are query fee rebates and when are they distributed? +### Cosa sono gli sconti sulle tariffe di query e quando vengono distribuiti? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Le tariffe di query sono raccolte dal gateway e distribuite agli Indexer in base alla funzione di sconto esponenziale (vedi GIP [qui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). La funzione di sconto esponenziale è proposta come un modo per garantire che gli Indexer ottengano il miglior risultato servendo fedelmente le query. Funziona incentivando gli Indexer ad allocare una grande quantità di stake (che può essere tagliato in caso di errore quando si serve una query) rispetto all'ammontare delle tariffe di query che possono raccogliere. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. 
Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Una volta chiusa un'allocation, gli sconti possono essere richiesti dall'Indexer. Al momento della richiesta, gli sconti sulle tariffe di query vengono distribuiti all'Indexer e ai suoi Delegator in base alla riduzione delle tariffe di query e alla funzione di sconto esponenziale. -### What is query fee cut and indexing reward cut? +### Che cos'è la query fee cut e la indexing reward cut? -The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. +I valori di `queryFeeCut` e `indexingRewardCut` sono parametri di delega che l'Indexer può impostare insieme ai cooldownBlocks per controllare la distribuzione dei GRT tra l'Indexer i suoi Delegator. Per le istruzioni sull'impostazione dei parametri di delega, si vedano gli ultimi passi di [Staking nel Protocollo](/network/indexing#stake-in-the-protocol). -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - la % delle riduzioni delle tariffe di query che verrà distribuita all'Indexer. Se questa opzione è impostata al 95%, l'Indexer riceverà il 95% delle tariffe di query guadagnate alla chiusura di un'allocazione, mentre il restante 5% andrà ai Delegator. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - la % delle ricompense di indicizzazione che verrà distribuita all'Indexer. Se è impostata al 95%, l'Indexer riceverà il 95% delle ricompense di indicizzazione quando viene chiusa un'allocazione e i Delegator si divideranno il restante 5%. -### How do Indexers know which subgraphs to index? +### Come fanno gli Indexer a sapere quali subgraph indicizzare? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Gli Indexer possono differenziarsi applicando tecniche avanzate per prendere decisioni sull'indicizzazione dei subgraph, ma per dare un'idea generale discuteremo diverse metriche chiave utilizzate per valutare i subgraph nella rete: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Segnale di curation** - La percentuale del segnale di curation della rete applicato a un particolare subgraph è un buon indicatore dell'interesse per quel subgraph, soprattutto durante la fase di bootstrap, quando il volume delle query è in aumento. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. 
+- **Tariffe di query raccolte** - I dati storici relativi al volume delle tariffe di query raccolte per uno specifico subgraph sono un buon indicatore della domanda futura. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Importo sullo staking** - Il monitoraggio del comportamento degli altri Indexer o l'esame delle proporzioni dello stake totale allocato a specifici subgraph può consentire a un Indexer di monitorare il lato dell'offerta per le query sui subgraph, per identificare i subgraph in cui la rete mostra fiducia o i subgraph che potrebbero avere bisogno di maggiore offerta. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraph senza ricompense di indicizzazione** - Alcuni subgraph non generano ricompense per l'indicizzazione principalmente perché utilizzano funzioni non supportate come IPFS o perché stanno facendo query su un'altra rete al di fuori della mainnet. Se un subgraph non genera ricompense di indicizzazione, viene visualizzato un messaggio. -### What are the hardware requirements? +### Quali sono i requisiti hardware? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Piccolo** - Sufficiente per iniziare a indicizzare diversi subgraph, probabilmente dovrà essere ampliato. +- **Standard** - Impostazione predefinita, è quella usata nei manifesti di distribuzione di esempio di k8s/terraform. +- **Medio** - Indexer di produzione che supporta 100 subgraph e 200-500 richieste al secondo. +- **Grande** - È pronto a indicizzare tutti i subgraph attualmente utilizzati e a servire le richieste per il relativo traffico. -| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | +| Setup | Postgres
(CPUs) | Postgres
(memoria in GBs) | Postgres
(disco in TBs) | VMs
(CPUs) | VMs
(memoria in GBs) | | --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | +| Piccolo | 4 | 8 | 1 | 4 | 16 | | Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Medio | 16 | 64 | 2 | 32 | 64 | +| Grande | 72 | 468 | 3.5 | 48 | 184 | -### What are some basic security precautions an Indexer should take? +### Quali sono le precauzioni di base per la sicurezza che un Indexer dovrebbe adottare? -- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. +- **Operator wallet** - La creazione di un operator wallet è una precauzione importante perché consente all'Indexer di mantenere una separazione tra le chiavi che controllano il stake e quelle che controllano le operazioni quotidiane. Vedere[ Stake al protocollo](/network/indexing#stake-in-the-protocol) per le istruzioni. -- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. +- **Firewall** - Solo l'Indexer service deve essere esposto pubblicamente e occorre prestare particolare attenzione a bloccare le porte di amministrazione e l'accesso al database: l'endpoint JSON-RPC di the Graph Node (porta predefinita: 8030), l'endpoint API di gestione dell'Indexer (porta predefinita: 18000) e l'endpoint del database Postgres (porta predefinita: 5432) non devono essere esposti. -## Infrastructure +## Infrastruttura -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +Al centro dell'infrastruttura di un Indexer c'è the Graph Node, che monitora le reti indicizzate, estrae e carica i dati secondo una definizione di subgraph e li serve come [GraphQL API](/about/#how-the-graph-works). The Graph Node deve essere collegato a un endpoint che espone i dati di ciascuna rete indicizzata, a un nodo IPFS per l'approvvigionamento dei dati, a un database PostgreSQL per il suo archivio e a componenti dell'Indexer che facilitano le interazioni con la rete. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **Database PostgreSQL** - È l'archivio principale del The Graph Node, dove vengono memorizzati i dati dei subgraph. Anche l'Indexer Service e l'Indexer Agent utilizzano il database per memorizzare i dati del canale di stato, i modelli di costo, le regole di indicizzazione e le azioni di allocation. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. 
This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Endpoint dei dati** - Per le reti compatibili con EVM, il Graph Node deve essere collegato a un endpoint che esponga un'API JSON-RPC compatibile con EVM. Questo può assumere la forma di un singolo client o può essere una configurazione più complessa che bilancia il carico su più client. È importante sapere che alcuni subgraph richiedono particolari funzionalità del client, come la modalità di archiviazione e/o l'API di tracciamento della parità. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **Nodo IPFS (versione inferiore a 5)** - I metadati di distribuzione del subgraph sono memorizzati sulla rete IPFS. Il Graph Node accede principalmente al nodo IPFS durante la distribuzione del subgraph per recuperare il manifest del subgraph e tutti i file collegati. Gli Indexer di rete non hanno bisogno di ospitare il proprio nodo IPFS; un nodo IPFS per la rete è ospitato all'indirizzo https://ipfs.network.thegraph.com. -- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. +- **Indexer service** - Gestisce tutte le comunicazioni esterne necessarie con la rete. Condivide i modelli di costo e gli stati di indicizzazione, passa le richieste di query dai gateway a un Graph Node e gestisce i pagamenti delle query tramite canali di stato con il gateway. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - Facilita le interazioni degli Indexer sulla chain, compresa la registrazione sulla rete, la gestione delle distribuzioni di subgraph ai Graph Node e la gestione delle allocazioni. -- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. +- **Server di metriche Prometheus** - I componenti Graph Node e Indexer registrano le loro metriche sul server delle metriche. -Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. +Nota: Per supportare una scalabilità agile, si consiglia di separare le attività di query e indicizzazione tra diversi gruppi di nodi: nodi di query e nodi di indicizzazione. -### Ports overview +### Panoramica delle porte -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. +> **Importante**: Fate attenzione a esporre pubblicamente le porte - **le porte di amministrazione** devono essere sempre bloccate. Questo include gli endpoint JSON-RPC del Graph Node e quelli di gestione dell'Indexer, descritti di seguito. 
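A titolo puramente indicativo (non parte della guida originale), uno schizzo minimo di come bloccare le porte di amministrazione con `ufw` su un host Ubuntu, ipotizzando le porte predefinite descritte in questa guida: solo la porta 7600 dell'Indexer service viene esposta, mentre 8030, 18000 e 5432 restano chiuse. Adattare le regole alla propria infrastruttura (ad esempio ai security group del proprio cloud provider).

```sh
# Schizzo ipotetico: esporre solo l'Indexer service e negare le porte di amministrazione
sudo ufw default deny incoming
sudo ufw allow 22/tcp     # accesso SSH per l'amministrazione
sudo ufw allow 7600/tcp   # Indexer service (query a pagamento)
sudo ufw deny 8030/tcp    # JSON-RPC del Graph Node
sudo ufw deny 18000/tcp   # Indexer management API
sudo ufw deny 5432/tcp    # database PostgreSQL
sudo ufw enable
```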
#### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | +| 8000 | GraphQL HTTP server
(per le query di subgraph) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
(per le sottoscrizioni ai subgraph) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
(per la gestione dei deployment) | / | --admin-port | - | | 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8040 | Metriche di Prometheus | /metrics | --metrics-port | - | -#### Indexer Service +#### Servizio Indexer -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | | --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| 7600 | GraphQL HTTP server
(per le query di subgraph a pagamento) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Metriche di Prometheus | /metrics | --metrics-port | - | #### Indexer Agent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | +| ----- | ----------------------------- | ------ | ------------------------- | --------------------------------------- | +| 8000 | API di gestione degli Indexer | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Setup server infrastructure using Terraform on Google Cloud +### Configurare l'infrastruttura server utilizzando Terraform su Google Cloud -> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. +> Nota: gli Indexer possono utilizzare in alternativa AWS, Microsoft Azure o Alibaba. -#### Install prerequisites +#### Installare i prerequisiti - Google Cloud SDK - Kubectl command line tool - Terraform -#### Create a Google Cloud Project +#### Creare un progetto Google Cloud -- Clone or navigate to the Indexer repository. +- Clonare o navigare nella repository dell'Indexer. -- Navigate to the ./terraform directory, this is where all commands should be executed. +- Navigare nella directory ./terraform, dove devono essere eseguiti tutti i comandi. ```sh cd terraform ``` -- Authenticate with Google Cloud and create a new project. +- Autenticare con Google Cloud e creare un nuovo progetto. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use the Google Cloud Console's billing page to enable billing for the new project. +- Utilizzare la pagina di fatturazione di Google Cloud Console per abilitare la fatturazione del nuovo progetto. -- Create a Google Cloud configuration. +- Creare una configurazione di Google Cloud. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Enable required Google Cloud APIs. +- Abilitare le API di Google Cloud necessarie. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Create a service account. +- Creare un account di servizio. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Enable peering between database and Kubernetes cluster that will be created in the next step. +- Abilitare il peering tra il database e il cluster Kubernetes che verrà creato nella fase successiva. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Create minimal terraform configuration file (update as needed). +- Creare un file di configurazione minima di terraform (aggiornare secondo necessità). 
```sh indexer= @@ -262,11 +262,11 @@ database_password = "" EOF ``` -#### Use Terraform to create infrastructure +#### Utilizzare Terraform per creare l'infrastruttura -Before running any commands, read through [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) and create a file `terraform.tfvars` in this directory (or modify the one we created in the last step). For each variable where you want to override the default, or where you need to set a value, enter a setting into `terraform.tfvars`. +Prima di eseguire qualsiasi comando, leggi [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) e crea un file `terraform.tfvars` in questa cartella (o modificare quella creata nell'ultimo passaggio). Per ogni variabile in cui si vuole sovrascrivere il valore predefinito o in cui è necessario impostare un valore, inserire un'impostazione in`terraform.tfvars`. -- Run the following commands to create the infrastructure. +- Eseguire i seguenti comandi per creare l'infrastruttura. ```sh # Install required plugins @@ -279,7 +279,7 @@ terraform plan terraform apply ``` -Download credentials for the new cluster into `~/.kube/config` and set it as your default context. +Scaricare le credenziali del nuovo cluster in `~/.kube/config` e impostarli come contesto predefinito. ```sh gcloud container clusters get-credentials $indexer @@ -287,21 +287,21 @@ kubectl config use-context $(kubectl config get-contexts --output='name' | grep $indexer) ``` -#### Creating the Kubernetes components for the Indexer +#### Creazione dei componenti Kubernetes per l'Indexer -- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. +- Copiare la directory `k8s/overlays` in una nuova directory `$dir,` e regola il `bases` d'ingresso in `$dir/kustomization.yaml` in modo da puntare alla directory `k8s/base`. -- Read through all the files in `$dir` and adjust any values as indicated in the comments. +- Leggi tutti i file in `$dir` e modificare i valori come indicato nei commenti. -Deploy all resources with `kubectl apply -k $dir`. +Distribuire tutte le risorse con `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) è un'implementazione open source di Rust che esegue l'event source della blockchain di Ethereum per aggiornare in modo deterministico un archivio di dati che può essere fatto query tramite l'endpoint GraphQL. Gli sviluppatori usano i subgraph per definire il loro schema e una serie di mappature per trasformare i dati provenienti dalla catena di blocchi e il Graph Node gestisce la sincronizzazione dell'intera catena, il monitoraggio dei nuovi blocchi e il servizio tramite un endpoint GraphQL. -#### Getting started from source +#### Iniziare dalla sorgente -#### Install prerequisites +#### Installare i prerequisiti - **Rust** @@ -309,7 +309,7 @@ Deploy all resources with `kubectl apply -k $dir`. 
- **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Requisiti aggiuntivi per gli utenti di Ubuntu** - Per eseguire un Graph Node su Ubuntu potrebbero essere necessari alcuni pacchetti aggiuntivi. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -317,7 +317,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Setup -1. Start a PostgreSQL database server +1. Avviare un server di database PostgreSQL ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Clonare la repository di [Graph Node](https://github.com/graphprotocol/graph-node) e costruire il sorgente eseguendo `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Ora che tutte le dipendenze sono state configurate, avviare il Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -336,48 +336,48 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Come iniziare a usare Docker -#### Prerequisites +#### Prerequisiti -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. +- **Ethereum node** - Per impostazione predefinita, la configurazione di docker compose utilizzerà mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) per connettersi al nodo Ethereum sulla macchina host. È possibile sostituire il nome e l'url della rete aggiornando `docker-compose.yaml`. #### Setup -1. Clone Graph Node and navigate to the Docker directory: +1. Clonare il Graph Node e navigare nella directory Docker: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml`using the included script: +2. Solo per gli utenti di Linux - Utilizzare l'indirizzo IP dell'host invece di `host.docker.internal` nel `docker-compose.yaml` utilizzando lo script incluso: ```sh ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Avviare un Graph Node locale che si connetta all'endpoint di Ethereum: ```sh docker-compose up ``` -### Indexer components +### Componenti dell'Indexer -To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: +Per partecipare con successo alla rete è necessario un monitoraggio e un'interazione quasi costante, quindi abbiamo costruito una suite di applicazioni Typescript per facilitare la partecipazione alla rete degli Indexer. I componenti di Indexer sono tre: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards on chain and how much is allocated towards each. 
+- **Indexer agent** - L'agente monitora la rete e l'infrastruttura dell'Indexer e gestisce quali distribuzioni di subgraph vengono indicizzate e allocate sulla chain e quanto viene allocato su ciascuna di esse. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - L'unico componente che deve essere esposto all'esterno, il servizio passa le query del subgraph al The Graph Node, gestisce i canali di stato per i pagamenti delle query, condivide importanti informazioni decisionali ai client come i gateway. -- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. +- **Indexer CLI** - L'interfaccia a riga di comando per la gestione del Indexer Agent. Consente agli Indexer di gestire i modelli di costo, le allocazioni manuali, la coda delle azioni e le regole di indicizzazione. -#### Getting started +#### Per cominciare -The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! +L'Indexer Agent ed l'Indexer service devono essere collocati insieme all'infrastruttura di The Graph Node. Ci sono molti modi per impostare ambienti di esecuzione virtuali per i componenti di Indexer; qui spiegheremo come eseguirli su baremetal usando pacchetti NPM o sorgenti, oppure tramite kubernetes e docker su Google Cloud Kubernetes Engine. Se questi esempi di configurazione non si adattano bene alla vostra infrastruttura, è probabile che ci sia una community guide a cui fare riferimento; fate un salto su [Discord](https://discord.gg/graphprotocol)! Ricordatevi di fare [stake nel protocollo](/network/indexing#stake-in-the-protocol) prima di avviare i componenti dell'Indexer! -#### From NPM packages +#### Dai pacchetti NPM ```sh npm install -g @graphprotocol/indexer-service @@ -400,7 +400,7 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### From source +#### Dalla fonte ```sh # From Repo root directory @@ -420,16 +420,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Using docker +#### Utilizzo di docker -- Pull images from the registry +- Estrarre le immagini dal registro ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Or build images locally from source +Oppure costruire le immagini localmente dal sorgente ```sh # Indexer service @@ -444,22 +444,22 @@ docker build \ -t indexer-agent:latest \ ``` -- Run the components +- Eseguire i componenti ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... 
``` -**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). +**NOTA**: Dopo l'avvio dei contenitori, l'Indexer service dovrebbe essere accessibile all'indirizzo [http://localhost:7600](http://localhost:7600) e l'agente dell'Indexer dovrebbe esporre l'API di gestione dell'Indexer all'indirizzo [http://localhost:18000/](http://localhost:18000/). -#### Using K8s and Terraform +#### Utilizzo di K8s e Terraform -See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section +Vedere la sezione [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) -#### Usage +#### Utilizzo -> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **NOTA**: Tutte le variabili di configurazione di runtime possono essere applicate come parametri al comando all'avvio o utilizzando variabili d'ambiente del formato `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). #### Indexer agent @@ -518,56 +518,56 @@ graph-indexer-service start \ #### Indexer CLI -The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. +L'Indexer CLI è un plugin per [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessibile nel terminale a `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Indexer management using Indexer CLI +#### Gestione dell'Indexer tramite Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. +Lo strumento suggerito per interagire con l'**Indexer Management API** è il **Indexer CLI**, un'estensione del **Graph CLI**. L'Indexer agent ha bisogno di input da un Indexer per interagire autonomamente con la rete per conto dell'Indexer. I meccanismi per definire il comportamento dell' Indexer agent sono modalità di **gestione dell'allocazione** e **regole di indicizzazione**. In modalità automatica, un Indexer può utilizzare le **regole di indicizzazione** per applicare la propria strategia specifica di selezione dei subgraph da indicizzare e per i quali servire le query. Le regole sono gestite tramite un' GraphQL API servito dall' agent e noto come Indexer Management API. 
In modalità manuale, un Indexer può creare azioni di allocation usando **actions queue** e approvarli esplicitamente prima che vengano eseguiti. In modalità di supervisione, le **regole di indicizzazione** sono utilizzate per popolare le **actions queue** e richiedono anche un'approvazione esplicita per l'esecuzione. -#### Usage +#### Utilizzo -The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. +Il **Indexer CLI** si connette all'Indexer Agent, in genere tramite port-forwarding, quindi non è necessario che la CLI venga eseguita sullo stesso server o cluster. Per aiutarvi a iniziare e per fornire un contesto, la CLI verrà descritta brevemente qui. -- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Connettersi all' Indexer management API. In genere la connessione al server viene aperta tramite il port forwarding, in modo che la CLI possa essere facilmente utilizzata in remoto. (Esempio: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. +- `graph indexer rules get [options] [ ...]` - Ottenere una o più regole di indicizzazione utilizzando `all` come `` per ottenere tutte le regole, oppure `global` per ottenere i valori predefiniti globali. Un argomento aggiuntivo `--merged` può essere usato per specificare che le regole specifiche dell'implementazione vengono unite alla regola globale. Questo è il modo in cui vengono applicate nell' Indexer agent. -- `graph indexer rules set [options] ...` - Set one or more indexing rules. +- `graph indexer rules set [options] ...` - Impostare una o più regole di indicizzazione. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Avviare l'indicizzazione di una distribuzione di subgraph, se disponibile, e impostare il suo valore di `decisionBasis` per `always`, quindi l' Indexer agent sceglierà sempre di indicizzarlo. Se la regola globale è impostata su sempre, tutti i subgraph disponibili sulla rete saranno indicizzati. -- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. +- `graph indexer rules stop [options] ` - Interrompere l'indicizzazione di un'installazione e impostare il suo `decisionBasis` a mai, quindi salterà questa distribuzione quando deciderà le distribuzioni da indicizzare. -- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. 
+- `graph indexer rules maybe [options] ` — Impostare il `decisionBasis` per una distribuzione a `rules`, in modo che l'Indexer agent utilizzi le regole di indicizzazione per decidere se indicizzare questa distribuzione. -- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additonal argument `--status` can be used to print out all actions of a certain status. +- `graph indexer actions get [options] ` - Recuperare una o più azioni utilizzando `all` oppure lasciare `action-id` vuoto per ottenere tutte le azioni. Un argomento aggiuntivo `--status` può essere usato per stampare tutte le azioni di un certo stato. -- `graph indexer action queue allocate ` - Queue allocation action +- `graph indexer action queue allocate ` - Mettere in coda un'azione di allocation -- `graph indexer action queue reallocate ` - Queue reallocate action +- `graph indexer action queue reallocate ` - Mettere in coda un'azione di riallocazione -- `graph indexer action queue unallocate ` - Queue unallocate action +- `graph indexer action queue unallocate ` - Mettere in coda un'azione di deallocazione -- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator +- `graph indexer actions cancel [ ...]` - Annullare tutte le azioni nella coda se l'id non è specificato, altrimenti annullare l'array di id con lo spazio come separatore -- `graph indexer actions approve [ ...]` - Approve multiple actions for execution +- `graph indexer actions approve [ ...]` - Approvare più azioni da eseguire -- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately +- `graph indexer actions execute approve` - Forzare l'operaio a eseguire immediatamente le azioni approvate -All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. +Tutti i comandi che visualizzano le regole nell'output possono scegliere tra i formati di output supportati (`table`, `yaml` e `json`) utilizzando l'argomento `-output`. -#### Indexing rules +#### Regole di indicizzazione -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Le regole di indicizzazione possono essere applicate come valori predefiniti globali o per specifiche distribuzioni di subgraph utilizzando i loro ID. I campi `deployment` e `decisionBasis` sono obbligatori, mentre tutti gli altri campi sono facoltativi. Quando una regola di indicizzazione ha `rules` come `decisionBasis`, allora l'Indexer agent confronterà i valori di soglia non nulli di quella regola con i valori recuperati dalla rete per la distribuzione corrispondente. Se la distribuzione del subgraph ha valori superiori (o inferiori) a una qualsiasi delle soglie, verrà scelta per l'indicizzazione. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed.
Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +Ad esempio, se la regola globale ha `minStake` di **5** (GRT) qualsiasi schieramento di subgraph che abbia più di 5 (GRT) di stake assegnati ad esso sarà indicizzato. Le regole di soglia includono `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, e `minAverageQueryFees`. -Data model: +Modello di dati: ```graphql type IndexingRule { @@ -601,7 +601,7 @@ IndexingDecisionBasis { } ``` -Example usage of indexing rule: +Esempio di utilizzo della regola di indicizzazione: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,20 +613,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Actions queue CLI +#### CLI della coda di azioni -The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. +L'indexer-cli fornisce un modulo di `actions` per lavorare manualmente con la coda di azioni. Utilizza il **Graphql API** ospitato dal server di gestione dell'Indexer per interagire con la coda delle azioni. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +L'operaio per l'esecuzione dell'azione prenderà gli elementi dalla coda per eseguirli solo se hanno `ActionStatus = approved`. Nel percorso consigliato le azioni vengono aggiunte alla coda con ActionStatus = queued, quindi devono essere approvate per essere eseguite sulla catena. Il flusso generale sarà simile a: -- Action added to the queue by the 3rd party optimizer tool or indexer-cli user -- Indexer can use the `indexer-cli` to view all queued actions -- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. -- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. -- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. -- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. +- Azione aggiunta alla coda dallo strumento ottimizzatore di terze parti o dall'utente di indexer-cli +- L' Indexer può utilizzare l'`indexer-cli` per visualizzare tutte le azioni in coda +- L'Indexer (o un altro software) può approvare o annullare le azioni nella coda utilizzando l'`indexer-cli`. I comandi approva e annulla prendono in input un array di id di azioni. +- L'operaio di esecuzione controlla regolarmente la coda per le azioni approvate. 
Prenderà le azioni `approved` dalla coda, tenterà di eseguirle e aggiornerà i valori nel db a seconda dello stato di esecuzione con `success` oppure `failed`. +- Se un'azione ha successo, l'operaio si assicurerà che sia presente una regola di indicizzazione che indichi all'agente come gestire l'allocazione in futuro, utile quando si intraprendono azioni manuali mentre l'agente è in modalità `auto` oppure `oversight`. +- L'Indexer può monitorare la coda delle azioni per vedere la cronologia dell'esecuzione delle azioni e, se necessario, riapprovare e aggiornare le voci di azione se non sono state eseguite. La coda di azioni fornisce una cronologia di tutte le azioni accodate ed eseguite. -Data model: +Modello di dati: ```graphql Type ActionInput { @@ -659,64 +659,64 @@ ActionType { } ``` -Example usage from source: +Esempio di utilizzo dalla sorgente: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` -Note that supported action types for allocation management have different input requirements: +Si noti che i tipi di azione supportati per la gestione dell'allocazione hanno requisiti di input diversi: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - allocare lo stake ad uno specifico deploy di subgraph - - required action params: + - parametri d'azione richiesti: - deploymentID - amount -- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere +- `Unallocate` - chiudere l'allocazione, liberando lo stake da riallocare altrove - - required action params: + - parametri d'azione richiesti: - allocationID - deploymentID - - optional action params: + - parametri dell'azione facoltativi: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (forza l'uso del POI fornito anche se non corrisponde a quello fornito dal graph-node) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - chiudere atomicamente l'allocazione e aprire una nuova allocazione per lo stesso deploy del subgraph - - required action params: + - parametri d'azione richiesti: - allocationID - deploymentID - amount - - optional action params: + - parametri dell'azione facoltativi: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (forza l'uso del POI fornito anche se non corrisponde a quello fornito dal graph-node) -#### Cost models +#### Modelli di costo
-Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +I modelli di costo forniscono prezzi dinamici per le query in base al mercato e agli attributi della query. L'Indexer service condivide con i gateway un modello di costo per ogni subgraph per il quale intende rispondere alle query. I gateway, a loro volta, utilizzano il modello di costo per prendere decisioni sulla selezione degli Indexer per ogni query e per negoziare il pagamento con gli Indexer scelti. #### Agora -The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. +Il linguaggio Agora fornisce un formato flessibile per dichiarare i modelli di costo delle query. Un modello di prezzo Agora è una sequenza di istruzioni che vengono eseguite in ordine per ogni query di primo livello in una query GraphQL. Per ogni query di primo livello, la prima istruzione che vi corrisponde determina il prezzo per quella query. -A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. +Una dichiarazione è composta da un predicato, che viene usato per abbinare le query GraphQL, e da un'espressione di costo che, una volta valutata, produce un costo in GRT decimali. I valori nella posizione dell'argomento nominato di una query possono essere catturati nel predicato e usati nell'espressione. Si possono anche impostare dei globali e sostituirli ai segnaposto in un'espressione. -Example cost model: +Esempio di modello di costo: ``` # This statement captures the skip value, @@ -729,77 +729,73 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Example query costing using the above model: +Esempio di query di costo utilizzando il modello di cui sopra: -| Query | Price | +| Query | Prezzo | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### Applying the cost model +#### Applicazione del modello di costo -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. +I modelli di costo vengono applicati tramite l'Indexer CLI, che li passa all'Indexer Management API dell' Indexer agent per la memorizzazione nel database. L'Indexer Service li preleva e serve i modelli di costo ai gateway ogni volta che li richiedono. 
```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interacting with the network +## Interazione con la rete -### Stake in the protocol +### Staking al protocollo -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ +I primi passi per partecipare alla rete come Indexer consistono nell'approvare il protocollo, nel puntare i fondi e (facoltativamente) nel creare un indirizzo operatore per le interazioni quotidiane con il protocollo. _ **Nota**: Per le finalità di queste istruzioni verrà utilizzato Remix per l'interazione con il contratto, ma sentitevi liberi di utilizzare lo strumento che preferite. ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) sono alcuni altri strumenti conosciuti)._ -Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. +Una volta che l'Indexer ha messo in staking i GRT nel protocollo, gli [Indexer components](/network/indexing#indexer-components) possono essere avviati e iniziare le loro interazioni con la rete. -#### Approve tokens +#### Approvare i token -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Aprire il [Remix app](https://remix.ethereum.org/) nel browser -2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. Nel `File Explorer` creare un file chiamato **GraphToken.abi** con il [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. +3. Con `GraphToken.abi` selezionato e aperto nell'editor, passare alla sezione Deploy e sezione `Run Transactions` nell'interfaccia Remix. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. In Ambiente selezionare `Injected Web3` e nel `Account` selezionare l'indirizzo dell'Indexer. -5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. +5. Impostare l'indirizzo del contratto GraphToken - Incollare l'indirizzo del contratto GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) vicino a `At Address` e fare clic sul pulsante `At address` per applicare. -6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). +6. Chiamare la funzione `approve(spender, amount)` per approvare il contratto di staking. Inserire in `spender` l'indirizzo del contratto di Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) e `amount` con i token da fare staking (in wei). 
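In alternativa a Remix (opzione non indicata nella guida originale, quindi solo come schizzo ipotetico), la stessa chiamata `approve(spender, amount)` può essere eseguita da riga di comando, ad esempio con `cast` di Foundry, ipotizzando un endpoint RPC e la chiave privata dell'Indexer disponibili come variabili d'ambiente; l'importo è espresso in wei (18 decimali).

```sh
# Schizzo ipotetico: approvare 100000 GRT (espressi in wei) verso il contratto di Staking
GRT=0xc944E90C64B2c07662A292be6244BDf05Cda44a7
STAKING=0xF55041E37E12cD407ad00CE2910B8269B01263b9
cast send $GRT "approve(address,uint256)" $STAKING 100000000000000000000000 \
  --rpc-url "$ETH_RPC_URL" --private-key "$INDEXER_PRIVATE_KEY"
```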
-#### Stake tokens +#### Fare staking dei token -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Aprire il [Remix app](https://remix.ethereum.org/) nel browser -2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. +2. Nel `File Explorer` creare un file chiamato **Staking.abi** con lo staking ABI. -3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. +3. Con `Staking.abi` selezionato e aperto nell'editor, passare alla sezione `Deploy` e `Run Transactions` nell'interfaccia di Remix. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. In Ambiente selezionare `Injected Web3` e nel `Account` selezionare l'indirizzo dell'Indexer. -5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. +5. Impostare l'indirizzo del contratto di Staking - Incollare l'indirizzo del contratto di Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) vicino a `At Address` e fare clic sul pulsante `At address` per applicare. -6. Call `stake()` to stake GRT in the protocol. +6. Chiamare `stake()` per fare staking di GRT sul protocollo. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Opzionale) Gli Indexer possono approvare un altro indirizzo come operatore per la loro infrastruttura di indicizzazione, al fine di separare le chiavi che controllano i fondi da quelle che eseguono le azioni quotidiane, come l'allocazione sui subgraph e il servizio di query (a pagamento). Per impostare l'operatore chiamare `setOperator()` con l'indirizzo dell'operatore. -8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. +8. (Opzionale) Per controllare la distribuzione delle ricompense e attirare strategicamente i Delegator, gli Indexer possono aggiornare i loro parametri di delega aggiornando i loro indexingRewardCut (parti per milione), queryFeeCut (parti per milione) e cooldownBlocks (numero di blocchi). Per farlo, chiamare `setDelegationParameters()`. L'esempio seguente imposta il queryFeeCut per distribuire il 95% degli sconti sulle query all'Indexer e il 5% ai Delegator, imposta l'indexingRewardCut per distribuire il 60% delle ricompense per l'indicizzazione all'Indexer e il 40% ai Delegator, e imposta il periodo di `cooldownBlocks` a 500 blocchi. ``` setDelegationParameters(950000, 600000, 500) ``` -### The life of an allocation +### La vita di un'allocazione -After being created by an Indexer a healthy allocation goes through four states.
+Dopo essere stata creata da un Indexer, un'allocazione sana passa attraverso quattro stati. -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Attiva** - Una volta creata un'allocazione sulla chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) è considerata **attiva**. Una parte dello stake proprio e/o delegato dall'Indexer viene assegnato a una distribuzione di subgraph, il che consente di richiedere ricompense per l'indicizzazione e di servire query per quella distribuzione di subgraph. L'Indexer agent gestisce la creazione di allocazioni basate sulle regole dell'Indexer. -- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). +- **Chiusa** - Un Indexer è libero di chiudere un'allocazione una volta che 1 epoca è passata ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) o il loro Indexer agent chiuderà automaticamente l'allocazione dopo il **maxAllocationEpochs** (attualmente 28 giorni). Quando un'allocazione viene chiusa con una prova di indicizzazione (POI) valida, le ricompense per l'indicizzazione vengono distribuite all'Indexer e ai suoi Delegator (per saperne di più, si veda la sezione "Come vengono distribuite le ricompense?). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Si raccomanda agli Indexer di utilizzare la funzionalità di sincronizzazione offchain per sincronizzare le distribuzioni dei subgraph con chainhead prima di creare l'allocazione on-chain. Questa funzione è particolarmente utile per i subgraph che possono richiedere più di 28 epoche per la sincronizzazione o che hanno qualche possibilità di fallire in modo indeterminato. 
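A scopo illustrativo, uno schizzo del flusso consigliato usando i comandi dell'Indexer CLI già mostrati in questa guida: prima si sincronizza il deployment offchain, poi si mette in coda e si approva l'allocazione; l'ID del deployment, l'importo e l'id dell'azione sono puramente esemplificativi.

```sh
# 1. Sincronizzare il deployment offchain fino a chainhead
graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK

# 2. Una volta sincronizzato, mettere in coda l'allocazione on-chain e approvarla
graph indexer actions queue allocate QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK 5000
graph indexer actions approve 1
```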
diff --git a/website/pages/it/network/overview.mdx b/website/pages/it/network/overview.mdx index bee546908372..46d07c4060ba 100644 --- a/website/pages/it/network/overview.mdx +++ b/website/pages/it/network/overview.mdx @@ -1,15 +1,15 @@ --- -title: Network Overview +title: Panoramica della rete --- -The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. +The Graph Network è un protocollo di indicizzazione decentralizzato per organizzare i dati della blockchain. Le applicazioni utilizzano GraphQL per fare query su API aperte chiamate subgraph, per recuperare i dati indicizzati sulla rete. Con The Graph, gli sviluppatori possono creare applicazioni serverless che vengono eseguite interamente su infrastrutture pubbliche. -## Overview +## Panoramica -The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. +The Graph Network è composta dagli Indexer, dai Curator e dai Delegator che forniscono servizi alla rete e servono i dati alle applicazioni Web3. I consumatori utilizzano le applicazioni e consumano i dati. -![Token Economics](/img/Network-roles@2x.png) +![Economia del token](/img/Network-roles@2x.png) -To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20 used to allocate resources in the network. +Per garantire la sicurezza economica di The Graph Network e l'integrità dei dati interrogati, i partecipanti mettono in staking e utilizzano i Graph Token ([GRT](/tokenomics)). Il GRT è un work utility token ERC-20 utilizzato per allocare le risorse nella rete. -Active Indexers, Curators and Delegators can provide services and earn income from the network, proportional to the amount of work they perform and their GRT stake. +Gli Indexer, i Curator e i Delegator attivi possono fornire servizi e guadagnare dalla rete, proporzionalmente alla quantità di lavoro svolto e alla quantità di GRT che hanno in staking. diff --git a/website/pages/it/new-chain-integration.mdx b/website/pages/it/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/it/new-chain-integration.mdx +++ b/website/pages/it/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then integrating that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain.
For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/it/operating-graph-node.mdx b/website/pages/it/operating-graph-node.mdx index 832b6cccf347..7ce2a0076965 100644 --- a/website/pages/it/operating-graph-node.mdx +++ b/website/pages/it/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. 
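As a quick illustration of that metrics endpoint (assuming a locally running Graph Node with the default metrics port documented in the table below), the Prometheus-format metrics can be fetched directly:

```sh
# Graph Node serves Prometheus-format metrics on port 8040 at /metrics by default
curl http://localhost:8040/metrics
```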
-### Getting started from source +### Iniziare dalla sorgente -#### Install prerequisites +#### Installare i prerequisiti - **Rust** @@ -42,7 +42,7 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Requisiti aggiuntivi per gli utenti di Ubuntu** - Per eseguire un Graph Node su Ubuntu potrebbero essere necessari alcuni pacchetti aggiuntivi. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config @@ -50,7 +50,7 @@ sudo apt-get install -y clang libpg-dev libssl-dev pkg-config #### Setup -1. Start a PostgreSQL database server +1. Avviare un server di database PostgreSQL ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Clonare la repository di [Graph Node](https://github.com/graphprotocol/graph-node) e costruire il sorgente eseguendo `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Ora che tutte le dipendenze sono state configurate, avviare il Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Porta | Obiettivo | Routes | Argomento CLI | Variabile d'ambiente | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC<br />(for managing deployments) | / | --admin-port | - | +| 8000 | GraphQL HTTP server<br />(per le query di subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS<br />(per le sottoscrizioni ai subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC<br />(per la gestione dei deployment) | / | --admin-port | - | | 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8040 | Metriche di Prometheus | /metrics | --metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the Graph Node JSON-RPC endpoint. diff --git a/website/pages/it/publishing/publishing-a-subgraph.mdx b/website/pages/it/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..9fef5cdce1a3 100644 --- a/website/pages/it/publishing/publishing-a-subgraph.mdx +++ b/website/pages/it/publishing/publishing-a-subgraph.mdx @@ -1,33 +1,33 @@ --- -title: Publishing a Subgraph to the Decentralized Network +title: Pubblicare un subgraph nella rete decentralizzata --- -Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. +Una volta che il subgraph è stato [implementato nel Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), è stato testato ed è pronto per essere messo in produzione, è possibile pubblicarlo nella rete decentralizzata. -Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. +La pubblicazione di un Subgraph nella rete decentralizzata lo rende disponibile per [i Curator](/network/curating) per iniziare a curarlo e [gli Indexer](/network/indexing) per iniziare a indicizzarlo. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + -You can find the list of the supported networks [Here](/developing/supported-networks). +La lista delle reti supportate è disponibile [qui](/developing/supported-networks). -## Publishing a subgraph +## Pubblicazione di un subgraph -Subgraphs can be published to the decentralized network directly from the Subgraph Studio dashboard by clicking on the **Publish** button. Once a subgraph is published, it will be available to view in the [Graph Explorer](https://thegraph.com/explorer/). +I subgraph possono essere pubblicati sulla rete decentralizzata direttamente dalla dashboard di Subgraph Studio facendo clic sul pulsante **Pubblica**. Una volta pubblicato, il subgraph sarà disponibile per la visualizzazione nel [Graph Explorer](https://thegraph.com/explorer/). -- Subgraphs can be published to Goerli, Arbitrum goerli, Arbitrum One, or Ethereum mainnet. +- I subgraph possono essere pubblicati su Goerli, Arbitrum goerli, Arbitrum One o su Ethereum mainnet. -- Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). +- Indipendentemente dalla rete su cui è stato pubblicato il subgraph, può indicizzare i dati su qualsiasi [rete supportata](/developing/supported-networks). -- When publishing a new version for an existing subgraph the same rules apply as above. +- Quando si pubblica una nuova versione per un subgraph esistente, si applicano le stesse regole sopra indicate. -## Curating your subgraph +## Curation del subgraph -> It is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible.
+> Si consiglia di curare il proprio subgraph con 10.000 GRT per assicurarsi che sia indicizzato e disponibile per le query il prima possibile. -Subgraph Studio enables you to be the first to curate your subgraph by adding GRT to your subgraph's curation pool in the same transaction. When publishing your subgraph, make sure to check the button that says, "Be the first to signal on this subgraph." +Subgraph Studio vi permette di essere i primi a curare il vostro subgraph aggiungendo GRT al pool di curation del vostro subgraph nella stessa transazione. Quando pubblicate il vostro subgraph, assicuratevi di selezionare il pulsante "Sii il primo a segnalare questo subgraph" -![Curation Pool](/img/curate-own-subgraph-tx.png) +![Pool di curation](/img/curate-own-subgraph-tx.png) -## Updating metadata for a published subgraph +## Aggiornamento dei metadati per un subgraph pubblicato -Once your subgraph has been published to the decentralized network, you can modify the metadata at any time by making the update in the Subgraph Studio dashboard of the subgraph. After saving the changes and publishing your updates to the network, they will be reflected in The Graph Explorer. This won’t create a new version, as your deployment hasn’t changed. +Una volta che il subgraph è stato pubblicato sulla rete decentralizzata, è possibile modificare i metadati in qualsiasi momento effettuando l'aggiornamento nella dashboard di Subgraph Studio del subgraph. Dopo aver salvato le modifiche e pubblicato gli aggiornamenti sulla rete, questi si rifletteranno nel Graph Explorer. Non verrà creata una nuova versione, poiché la distribuzione non è cambiata. diff --git a/website/pages/it/querying/distributed-systems.mdx b/website/pages/it/querying/distributed-systems.mdx index 85337206bfd3..5d8dcf020fbe 100644 --- a/website/pages/it/querying/distributed-systems.mdx +++ b/website/pages/it/querying/distributed-systems.mdx @@ -1,37 +1,37 @@ --- -title: Distributed Systems +title: Sistemi distribuiti --- -The Graph is a protocol implemented as a distributed system. +The Graph è un protocollo implementato come sistema distribuito. -Connections fail. Requests arrive out of order. Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests. These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. +Le connessioni falliscono. Le richieste arrivano in ordine sparso. Computer diversi con orologi e stati non sincronizzati elaborano richieste correlate. I server si riavviano. Tra una richiesta e l'altra si verificano delle riorganizzazioni. Questi problemi sono inerenti a tutti i sistemi distribuiti, ma sono esacerbati nei sistemi che operano su scala globale. -Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. +Si consideri questo esempio di ciò che può accadere se un cliente interroga un Indexer per ottenere i dati più recenti durante una riorganizzazione. -1. Indexer ingests block 8 -2. Request served to the client for block 8 -3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +1. L'Indexer inserisce il blocco 8 +2. Richiesta servita al cliente per il blocco 8 +3. L'Indexer inserisce il blocco 9 +4. 
L'Indexer inserisce il blocco 10A +5. Richiesta servita al cliente per il blocco 10A +6. L'Indexer rileva la riorganizzazione a 10B e fa rollback di 10A +7. Richiesta servita al cliente per il blocco 9 +8. L'Indexer inserisce il blocco 10B +9. L'Indexer inserisce il blocco 11 +10. Richiesta servita al cliente per il blocco 11 -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. Along the way, the Indexer serves requests using the latest state it knows about at that time. +Dal punto di vista dell'Indexer, le cose stanno procedendo in modo logico. Il tempo sta avanzando, anche se abbiamo dovuto fare un rollback di un blocco sconosciuto e riprodurre il blocco sotto consenso in avanti sopra di esso. Lungo il percorso, l'Indexer serve le richieste utilizzando lo stato più recente che conosce in quel momento. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +Dal punto di vista del cliente, tuttavia, la situazione appare caotica. Il cliente osserva che le risposte sono state date per i blocchi 8, 10, 9 e 11 in questo ordine. Questo è il cosiddetto problema del "block wobble". Quando un cliente sperimenta il block wobble, i dati possono sembrare contraddirsi nel tempo. La situazione peggiora se si considera che gli Indexer non ingeriscono tutti i blocchi più recenti contemporaneamente e le richieste possono essere indirizzate a più Indexer. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +È responsabilità del cliente e del server lavorare insieme per fornire dati coerenti all'utente. È necessario utilizzare approcci diversi a seconda della coerenza desiderata, poiché non esiste un unico programma giusto per ogni problema. -Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Ragionare sulle implicazioni dei sistemi distribuiti è difficile, ma la soluzione potrebbe non esserlo! Abbiamo creato delle API e dei modelli per aiutarvi a navigare in alcuni casi d'uso comuni. Gli esempi che seguono illustrano questi modelli, ma eludono i dettagli richiesti dal codice di produzione (come la gestione degli errori e la cancellazione) per non offuscare le idee principali. -## Polling for updated data +## Polling per i dati aggiornati -The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. 
If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. +The Graph fornisce l'API `block: { number_gte: $minBlock }` che assicura che la risposta sia per un singolo blocco uguale o superiore a `$minBlock`. Se la richiesta è fatta a un'istanza di `graph-node` e il blocco min non è ancora sincronizzato, `graph-node` restituirà un errore. Se `graph-node` ha sincronizzato il blocco min, eseguirà la risposta per il blocco più recente. Se la richiesta viene fatta a un Edge & Node Gateway, il Gateway filtrerà tutti gli Indexer che non hanno ancora sincronizzato il blocco min e farà la richiesta per l'ultimo blocco che l'Indexer ha sincronizzato. -We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: +Possiamo usare `number_gte` per assicurarci che il tempo non viaggi mai all'indietro durante il polling dei dati in un ciclo. Ecco un esempio: ```javascript /// Updates the protocol.paused variable to the latest @@ -74,11 +74,11 @@ async function updateProtocolPaused() { } ``` -## Fetching a set of related items +## Recuperare una serie di elementi correlati -Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. +Un altro caso d'uso è il recupero di una grande serie oppure, più in generale, il recupero di elementi correlati tra più richieste. A differenza del caso del polling (in cui la consistenza desiderata era quella di andare avanti nel tempo), la consistenza desiderata riguarda un singolo punto nel tempo. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Qui useremo l'argomento `block: { hash: $blockHash }` per fissare tutti i risultati allo stesso blocco. ```javascript /// Gets a list of domain names from a single block using pagination @@ -131,4 +131,4 @@ async function getDomainNames() { } ``` -Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. +Si noti che, in caso di riorganizzazione, il cliente dovrà riprovare dalla prima richiesta per aggiornare l'hash del blocco a un blocco non-uncle. diff --git a/website/pages/it/querying/graphql-api.mdx b/website/pages/it/querying/graphql-api.mdx index 89cda460d58f..d3dd3c57a549 100644 --- a/website/pages/it/querying/graphql-api.mdx +++ b/website/pages/it/querying/graphql-api.mdx @@ -1,16 +1,16 @@ --- -title: GraphQL API +title: API GraphQL --- -This guide explains the GraphQL Query API that is used for the Graph Protocol. +Questa guida spiega l'API GraphQL Query utilizzata per il protocollo The Graph. -## Queries +## Query -In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph. +Nello schema del subgraph si definiscono tipi chiamati `Entities`. Per ogni tipo di `Entity`, un'`entity` e un campo `entities` saranno generati sul tipo `Query` di livello superiore. Si noti che `query` non deve essere inclusa all'inizio della query `graphql` quando si usa The Graph.
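To make the generated query fields concrete, here is a minimal illustrative schema (the `Token` type below is hypothetical, not part of this PR) and the fields it produces:

```graphql
# A hypothetical entity type in schema.graphql
type Token @entity {
  id: ID!
  name: String!
}

# Graph Node then generates `token` and `tokens` fields on the top-level Query type,
# so clients can run queries such as `token(id: "...")` or `tokens(first: 10)`.
```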
-### Examples +### Esempi -Query for a single `Token` entity defined in your schema: +Eseguire query di una singola entità `Token` definita nello schema: ```graphql { @@ -21,9 +21,9 @@ Query for a single `Token` entity defined in your schema: } ``` -> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. +> **Nota:** Quando si esegue una query per una singola entità, il campo `id` è obbligatorio e deve essere una stringa. -Query all `Token` entities: +Eseguire query di tutte le entità `Token`: ```graphql { @@ -34,11 +34,11 @@ Query all `Token` entities: } ``` -### Sorting +### Ordinamento -When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. +Quando si esegue query di una collezione, il parametro `orderBy` può essere usato per ordinare in base a un attributo specifico. Inoltre, l'opzione `orderDirection` può essere usata per specificare la direzione dell'ordinamento, `asc` per l'ascendente oppure `desc` per il discendente. -#### Example +#### Esempio ```graphql { @@ -49,11 +49,11 @@ When querying a collection, the `orderBy` parameter may be used to sort by a spe } ``` -#### Example for nested entity sorting +#### Esempio di ordinamento di entità annidate -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. +A partire da Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) le entità possono essere ordinate sulla base delle entità annidate. -In the following example, we sort the tokens by the name of their owner: +Nell'esempio seguente, ordiniamo i token in base al nome del loro proprietario: ```graphql { @@ -66,19 +66,19 @@ In the following example, we sort the tokens by the name of their owner: } ``` -> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. +> Attualmente, è possibile ordinare per tipi di `String` oppure `ID` profondi un livello sui campi `@entity` e `@derivedFrom`. Purtroppo non è ancora supportato [l'ordinamento per interfacce su entità profonde un livello](https://github.com/graphprotocol/graph-node/pull/4058), l'ordinamento per campi che sono array e entità annidate. -### Pagination +### Impaginazione -When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. +Quando si esegue una query di una collezione, il parametro `first` può essere usato per impaginare dall'inizio della raccolta. Va notato che l'ordinamento predefinito è per ID in ordine alfanumerico crescente, non per ora di creazione. -Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +Inoltre, il parametro `skip` può essere usato per saltare le entità ed impaginare. Ad esempio, `first:100` mostra le prime 100 entità e `first:100, skip:100` mostra le 100 entità successive. 
-Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. +Le query dovrebbero evitare di usare valori `skip` molto grandi, perché in genere hanno un rendimento scarso. Per recuperare un gran numero di elementi, è molto meglio sfogliare le entità in base a un attributo, come mostrato nell'ultimo esempio. -#### Example using `first` +#### Esempio di utilizzo di `first` -Query the first 10 tokens: +Eseguire query di primi 10 token: ```graphql { @@ -89,11 +89,11 @@ Query the first 10 tokens: } ``` -To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. +Per eseguire query di gruppi di entità nel mezzo di una collezione, il parametro `skip` può essere usato insieme al parametro `first` per saltare un numero specifico di entità a partire dall'inizio della collezione. -#### Example using `first` and `skip` +#### Esempio di utilizzo di `first` e `skip` -Query 10 `Token` entities, offset by 10 places from the beginning of the collection: +Eseguire query di 10 entità `Token`, sfalsate di 10 posizioni rispetto all'inizio della collezione: ```graphql { @@ -104,9 +104,9 @@ Query 10 `Token` entities, offset by 10 places from the beginning of the collect } ``` -#### Example using `first` and `id_ge` +#### Esempio di utilizzo di `first` e `id_ge` -If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. For example, a client would retrieve a large number of tokens using this query: +Se un client deve recuperare un gran numero di entità, è molto più performante basare le query su un attributo e filtrare in base a tale attributo. Ad esempio, un client potrebbe recuperare un gran numero di token utilizzando questa query: ```graphql query manyTokens($lastID: String) { @@ -117,15 +117,15 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +La prima volta, si invierebbe la query con `lastID = ""` e per le richieste successive si imposterebbe `lastID` sull'attributo `id` dell'ultima entità della richiesta precedente. Questo approccio è nettamente migliore rispetto all'utilizzo di valori di `skip` crescenti. -### Filtering +### Filtraggio -You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. +È possibile utilizzare il parametro `where` nelle query per filtrare diverse proprietà. È possibile filtrare su più valori all'interno del parametro `where`. 
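As a sketch of the `lastID` cursoring pattern described above (entity names and the `querySubgraph` client call are illustrative stand-ins, not part of this PR — any GraphQL client works the same way):

```javascript
// Illustrative sketch: page through all Token entities using a `lastID` cursor
// instead of growing `skip` values. `querySubgraph` stands in for whatever
// GraphQL client is used to send the query and return the parsed `data` object.
async function fetchAllTokens(querySubgraph) {
  let lastID = ''
  let tokens = []
  while (true) {
    const page = await querySubgraph(
      `query manyTokens($lastID: String) {
        tokens(first: 1000, where: { id_gt: $lastID }) {
          id
          owner
        }
      }`,
      { lastID },
    )
    tokens = tokens.concat(page.tokens)
    if (page.tokens.length < 1000) break
    // Continue from the id of the last entity in the previous page
    lastID = page.tokens[page.tokens.length - 1].id
  }
  return tokens
}
```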
-#### Example using `where` +#### Esempio di utilizzo di `where` -Query challenges with `failed` outcome: +Query con esito `failed`: ```graphql { @@ -139,9 +139,9 @@ Query challenges with `failed` outcome: } ``` -You can use suffixes like `_gt`, `_lte` for value comparison: +È possibile utilizzare suffissi come `_gt`, `_lte` per confrontare i valori: -#### Example for range filtering +#### Esempio di filtraggio dell'intervallo ```graphql { @@ -153,11 +153,11 @@ You can use suffixes like `_gt`, `_lte` for value comparison: } ``` -#### Example for block filtering +#### Esempio di filtraggio dei blocchi -You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. +È anche possibile filtrare le entità in base al metodo `_change_block(number_gte: Int)` - questo filtra le entità che sono state aggiornate nel o dopo il blocco specificato. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +Questo può essere utile se si vuole recuperare solo le entità che sono cambiate, ad esempio dall'ultima volta che è stato effettuato il polling. In alternativa, può essere utile per indagare o fare il debug di come le entità stanno cambiando nel subgraph (se combinato con un filtro di blocco, è possibile isolare solo le entità che sono cambiate in un blocco specifico). ```graphql { @@ -169,11 +169,11 @@ This can be useful if you are looking to fetch only entities which have changed, } ``` -#### Example for nested entity filtering +#### Esempio di filtraggio di entità annidate -Filtering on the basis of nested entities is possible in the fields with the `_` suffix. +Il filtraggio sulla base di entità annidate è possibile nei campi con il suffisso `_`. -This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. +Questo può essere utile se si vuole recuperare solo le entità il cui livello di figlio soddisfa le condizioni fornite. ```graphql { @@ -187,13 +187,13 @@ This can be useful if you are looking to fetch only entities whose child-level e } ``` -#### Logical operators +#### Operatori logici -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. +A partire dalla versione Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) è possibile raggruppare più parametri nello stesso argomento `where` usando gli operatori `and` oppure `or` per filtrare i risultati in base a più di un criterio. -##### `AND` Operator +##### Operatore `AND` -In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. +Nell'esempio seguente, si filtrano le sfide con `outcome` `succeeded` e `number` maggiore o uguale a `100`. ```graphql { @@ -207,7 +207,7 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. 
+> **Syntactic sugar:** Si può semplificare la query precedente eliminando l'operatore `and` e passando una sottoespressione separata da virgole. > > ```graphql > { @@ -221,9 +221,9 @@ In the following example, we are filtering for challenges with `outcome` `succee > } > ``` -##### `OR` Operator +##### Operatore `OR` -In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +Nell'esempio seguente, si filtrano le sfide con `outcome` `succeeded` oppure `number` maggiore o uguale a `100`. ```graphql { @@ -237,11 +237,11 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. +> **Nota**: Quando si costruiscono le query, è importante considerare l'impatto sulle prestazioni dell'uso dell'operatore `or`. Sebbene `or` possa essere uno strumento utile per ampliare i risultati della ricerca, può anche avere costi significativi. Uno dei problemi principali di `or` è che può causare un rallentamento delle query. Questo perché `or` richiede al database di eseguire la scansione di più indici, un processo che può richiedere molto tempo. Per evitare questi problemi, si consiglia agli sviluppatori di utilizzare gli operatori `and` al posto di `or` quando possibile. Ciò consente di effettuare filtri più precisi e di ottenere query più rapide e precise. -#### All Filters +#### Tutti i filtri -Full list of parameter suffixes: +Elenco completo dei suffissi dei parametri: ``` _ @@ -266,23 +266,23 @@ _not_ends_with _not_ends_with_nocase ``` -> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. +> Si noti che alcuni suffissi sono supportati solo per tipi specifici. Ad esempio, `Boolean` supporta solo `_not`, `_in` e `_not_in`, mentre `_` è disponibile solo per i tipi oggetto e interfaccia. -In addition, the following global filters are available as part of `where` argument: +Inoltre, i seguenti filtri globali sono disponibili come parte dell'argomento `where`: ```gr _change_block(number_gte: Int) ``` -### Time-travel queries +### Query Time-travel -You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. +È possibile effettuare query dello stato delle entità non solo per l'ultimo blocco, che è quello predefinito, ma anche per un blocco nel passato. Il blocco in cui deve avvenire una query può essere specificato dal suo numero di blocco o dal suo hash, includendo un argomento `block` nei campi di livello superiore delle query.
-The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +Il risultato di una query di questo tipo non cambia nel tempo, cioè la query di un determinato blocco passato restituirà lo stesso risultato indipendentemente dal momento in cui viene eseguita, con l'eccezione che se si fa query di un blocco molto vicino alla testa della catena, il risultato potrebbe cambiare se quel blocco risulta non essere nella catena principale e la catena viene riorganizzata. Una volta che un blocco può essere considerato definitivo, il risultato della query non cambierà. -Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +Si noti che l'attuale implementazione è ancora soggetta ad alcune limitazioni che potrebbero violare queste garanzie. L'implementazione non è sempre in grado di dire che un determinato block hash non è affatto presente nella chain principale, o che il risultato di una query per il block hash per un blocco che non può ancora essere considerato definitivo potrebbe essere influenzato da una riorganizzazione di blocco in corso contemporaneamente alla query. Non influiscono sui risultati delle query in base all'block hash quando il blocco è definitivo e si sa che si trova nella chain principale. [Qui](https://github.com/graphprotocol/graph-node/issues/1405) è spiegato in dettaglio quali sono queste limitazioni. -#### Example +#### Esempio ```graphql { @@ -296,9 +296,9 @@ Note that the current implementation is still subject to certain limitations tha } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. +Questa query restituirà le entità `Challenge` e le entità `Application` ad esse associate, così come esistevano direttamente dopo l'elaborazione del blocco numero 8.000.000. -#### Example +#### Esempio ```graphql { @@ -312,26 +312,26 @@ This query will return `Challenge` entities, and their associated `Application` } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. +Questa query restituirà le entità `Challenge` e le entità `Application` ad esse associate, così come esistevano direttamente dopo l'elaborazione del blocco con l'hash indicato. -### Fulltext Search Queries +### Query di ricerca fulltext -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. 
Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. +I campi di ricerca fulltext forniscono un'API di ricerca testuale espressiva che può essere aggiunta allo schema del subgraph e personalizzata. Fare riferimento a [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) per aggiungere la ricerca fulltext al subgraph. -Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. +Le query di ricerca fulltext hanno un campo obbligatorio, `text`, per fornire i termini di ricerca. In questo campo di ricerca `text` sono disponibili diversi operatori speciali per il fulltext. -Fulltext search operators: +Operatori di ricerca fulltext: -| Symbol | Operator | Description | +| Simbolo | Operatore | Descrizione | | --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| `&` | `And` | Per combinare più termini di ricerca in un filtro per le entità che includono tutti i termini forniti | +| | | `Or` | Le query con più termini di ricerca separati dall'operatore Or restituiranno tutte le entità con una corrispondenza tra i termini forniti | +| `<->` | `Follow by` | Specifica la distanza tra due parole. | +| `:*` | `Prefisso` | Utilizzare il termine di ricerca del prefisso per trovare le parole il cui prefisso corrisponde (sono richiesti 2 caratteri.) | -#### Examples +#### Esempi -Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. +Utilizzando l'operatore `or`, questa query filtrerà le entità blog con variazioni di "anarchism" o "crumpet" nei loro campi fulltext. ```graphql { @@ -344,7 +344,7 @@ Using the `or` operator, this query will filter to blog entities with variations } ``` -The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" +L'operatore `follow by` specifica le parole a una distanza specifica nei documenti fulltext. La seguente query restituirà tutti i blog con variazioni di "decentralize" seguite da "philosophy" ```graphql { @@ -357,7 +357,7 @@ The `follow by` operator specifies a words a specific distance apart in the full } ``` -Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". +Combinare gli operatori fulltext per creare filtri più complessi. Con un operatore di ricerca pretext combinato con un follow by questa query di esempio corrisponderà a tutte le entità del blog con parole che iniziano con "lou" seguite da "music". ```graphql { @@ -370,27 +370,27 @@ Combine fulltext operators to make more complex filters. 
With a pretext search o } ``` -### Validation +### Validazione -Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. +Graph Node implementa la validazione [basata sulle specifiche](https://spec.graphql.org/October2021/#sec-Validation) delle query GraphQL che riceve utilizzando [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), che si basa sull'[implementazione di riferimento di graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). Le query che non rispettano una regola di validazione vengono segnalate con un errore standard - per saperne di più, visitare le [specifiche di GraphQL](https://spec.graphql.org/October2021/#sec-Validation). ## Schema -The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +Lo schema dell'origine di dati-- cioè i tipi di entità, i valori e le relazioni disponibili per le query -- sono definiti attraverso [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. +Gli schemi GraphQL in genere definiscono i tipi di radice per le `query`, le `sottoscrizioni` e le `mutazioni`. The Graph supporta solo le `query`. Il tipo di `Query` principale per il subgraph viene generato automaticamente dallo schema GraphQL incluso nel manifest del subgraph. -> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. +> **Nota:** La nostra API non espone mutazioni perché gli sviluppatori devono emettere transazioni direttamente contro la blockchain sottostante dalle loro applicazioni. -### Entities +### Entità -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. +Tutti i tipi GraphQL con direttive `@entity` nello schema saranno trattati come entità e devono avere un campo `ID`. -> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. +> **Nota:** Attualmente, tutti i tipi nello schema devono avere una direttiva `@entity`. In futuro, i tipi senza direttiva `@entity` saranno trattati come oggetti valore, ma questo non è ancora supportato. -### Subgraph Metadata +### Metadati del Subgraph -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +Tutti i subgraph hanno un oggetto `_Meta_` autogenerato, che fornisce accesso ai metadati del subgraph. 
Questo oggetto può essere interrogato come segue: ```graphQL { @@ -406,14 +406,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +Se viene fornito un blocco, i metadati si riferiscono a quel blocco, altrimenti viene utilizzato il blocco indicizzato più recente. Se fornito, il blocco deve essere successivo al blocco iniziale del subgraph e inferiore o uguale al blocco indicizzato più recente. -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` è un ID unico, corrispondente al CID IPFS del file `subgraph.yaml`. -`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` fornisce informazioni sull'ultimo blocco (tenendo conto di eventuali vincoli di blocco passati a `_meta`): -- hash: the hash of the block -- number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- hash: l'hash del blocco +- numero: il numero del blocco +- timestamp: il timestamp del blocco, se disponibile (attualmente è disponibile solo per i subgraph che indicizzano le reti EVM) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` è un booleano che identifica se il subgraph ha incontrato errori di indicizzazione in qualche blocco passato diff --git a/website/pages/it/querying/managing-api-keys.mdx b/website/pages/it/querying/managing-api-keys.mdx index ee7c274bca10..d7793e6071f7 100644 --- a/website/pages/it/querying/managing-api-keys.mdx +++ b/website/pages/it/querying/managing-api-keys.mdx @@ -1,26 +1,26 @@ --- -title: Managing your API keys +title: Gestione delle chiavi API --- -Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. +Indipendentemente dal fatto che siate sviluppatori di dapp o di subgraph, dovrete gestire le vostre chiavi API. Questo è importante per poter effetuare query dei subgraph, perché le chiavi API assicurano che le connessioni tra i servizi applicativi siano valide e autorizzate. Ciò include l'autenticazione dell'utente finale e del dispositivo che utilizza l'applicazione. -The Studio will list out existing API keys, which will give you the ability to manage or delete them. +Lo Studio elencherà le chiavi API esistenti, dando la possibilità di gestirle o eliminarle. -1. The **Overview** section will allow you to: - - Edit your key name - - Regenerate API keys - - View the current usage of the API key with stats: - - Number of queries - - Amount of GRT spent -2. Under **Security**, you’ll be able to opt into security settings depending on the level of control you’d like to have over your API keys. In this section, you can: - - View and manage the domain names authorized to use your API key - - Assign subgraphs that can be queried with your API key -3. 
Under **Indexer Preference**, you’ll be able to set different preferences for Indexers who are indexing subgraphs that your API key is used for. You can assign up to 5 points for each of these: - - **Fastest Speed**: Time between the query and the response from an indexer. If you mark this as important we will optimize for fast indexers. - - **Lowest Price**: The amount paid per query. If you mark this as important we will optimize for the less expensive indexers. - - **Data Freshness**: How recent the latest block an indexer has processed for the subgraph you are querying. If you mark this as important we will optimize to find the indexers with the freshest data. - - **Economic Security**: The amount of GRT an indexer can lose if they respond incorrectly to your query. If you mark this as important we will optimize for indexers with a large stake. -4. Under **Budget**, you’ll be able to update the maximum price per query. Note that we have a dynamic setting for that that's based on a volume discounting algorithm. **We strongly recommend using the default settings unless you are experiencing a specific problem.** Otherwise, you can update it under "Set a custom maximum budget". On this page you can also view different KPIs (in GRT and USD): - - Average cost per query - - Failed queries over max price - - Most expensive query +1. La sezione **Overview** vi permetterà di: + - Modificare il nome della chiave + - Rigenerare le chiavi API + - Visualizza l'utilizzo attuale della chiave API con le statistiche: + - Numero di query + - Importo di GRT speso +2. Sotto **Security**, potrete scegliere le impostazioni di sicurezza a seconda del livello di controllo che desiderate avere sulle vostre chiavi API. In questa sezione è possibile: + - Visualizzare e gestire i nomi di dominio autorizzati a utilizzare la chiave API + - Assegnare i subgraph che possono essere interrogati con la chiave API +3. Sotto **Indexer Preference**, potrete impostare diverse preferenze per gli Indexer che indicizzano i subgraph per i quali viene utilizzata la vostra chiave API. È possibile assegnare fino a 5 punti per ciascuno di essi: + - **Velocità massima**: Tempo tra la query e la risposta di un Indexer. Se si contrassegna questa voce come importante, verrà ottimizzata per Indexer veloci. + - **Prezzo più basso**: L'importo pagato per ogni query. Se lo si segna come importante, ottimizzeremo per gli Indexer meno costosi. + - **Freschezza dei dati**: Quanto è recente l'ultimo blocco elaborato da un Indexer per il subgraph che si sta effettuando query. Se si indica questo dato come importante, ottimizzeremo per trovare gli Indexer con i dati più freschi. + - **Sicurezza economica**: La quantità di GRT che un Indexer può perdere se risponde in modo errato alla vostra query. Se si indica questo valore come importante, ottimizzeremo gli Indexer con un stake elevato. +4. Sotto **Budget**, è possibile aggiornare il prezzo massimo per query. Si noti che abbiamo un'impostazione dinamica per questo, basata su un algoritmo di sconto per volume. **Si consiglia di utilizzare le impostazioni predefinite, a meno che non si verifichi un problema specifico.** Altrimenti, è possibile aggiornarle in "Imposta un budget massimo personalizzato". 
In questa pagina è possibile visualizzare diversi KPI (in GRT e in USD): + - Costo medio per query + - Query fallite oltre il prezzo massimo + - Query più costosa diff --git a/website/pages/it/querying/querying-the-hosted-service.mdx b/website/pages/it/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/it/querying/querying-the-hosted-service.mdx +++ b/website/pages/it/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/it/querying/querying-with-python.mdx b/website/pages/it/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/it/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/it/quick-start.mdx b/website/pages/it/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/it/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
+## 7. Publish Your Subgraph to The Graph’s Decentralized Network
+
+Once your subgraph has been deployed to the Subgraph Studio, tested, and is ready to be put into production, you can publish it to the decentralized network.
+
+In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right.
+
+Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq).
+
+Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT.
+
+At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible.
+
+To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network:
+
+![Subgraph publish](/img/publish-and-signal-tx.png)
+
+## 8. Query your Subgraph
+
+Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button.
+
+If you don't have your API key yet, you can query from your dapp via the free, rate-limited temporary query URL, which can be used for development and staging.
+
+For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/).
diff --git a/website/pages/it/substreams.mdx b/website/pages/it/substreams.mdx
index d0354f06bab1..2a06de8ac868 100644
--- a/website/pages/it/substreams.mdx
+++ b/website/pages/it/substreams.mdx
@@ -2,8 +2,43 @@
 title: Substreams
 ---
 
-Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains.
+![Substreams Logo](/img/substreams-logo.png)
 
-Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams.
+Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion.
 
-
+With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph).
+
+## How Substreams Works in 4 Steps
+
+1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash).
+
+```rust
+fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> {
+    let header = blk.header.as_ref().unwrap();
+
+    Ok(MyBlock {
+        number: blk.number,
+        hash: Hex::encode(&blk.hash),
+        parent_hash: Hex::encode(&header.parent_hash),
+    })
+}
+```
+
+2. **You wrap up your Rust program into a WASM module just by running a single CLI command.**
+
+3. 
**The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/it/sunrise.mdx b/website/pages/it/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/it/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. 
+ +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? 
+ +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. 
+ +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/it/tokenomics.mdx b/website/pages/it/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/it/tokenomics.mdx +++ b/website/pages/it/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/ja/arbitrum/arbitrum-faq.mdx b/website/pages/ja/arbitrum/arbitrum-faq.mdx index 6b4cafe5a7b1..f4504380dd8e 100644 --- a/website/pages/ja/arbitrum/arbitrum-faq.mdx +++ b/website/pages/ja/arbitrum/arbitrum-faq.mdx @@ -2,21 +2,21 @@ title: Arbitrum FAQ --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Arbitrum Billing FAQ にスキップしたい場合は[here](#billing-on-arbitrum-faqs) をクリックしてください。 ## The GraphがL2ソリューションを導入する理由は? The GraphをL2でスケールさせることで、ネットワーク参加者は以下を期待できます: -- Upwards of 26x savings on gas fees +- ガス料金を 26 倍以上節約 - 取引スピードの高速化 -- Security inherited from Ethereum +- イーサリアムから継承したセキュリティ -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. 
For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +プロトコル スマート コントラクトを L2 に拡張すると、ネットワーク参加者はガス料金を削減しながら、より頻繁に対話できるようになります。たとえば、インデクサーは割り当てを開いたり閉じたりして、より多くのサブグラフにインデックスをより頻繁に付けることができ、開発者はサブグラフのデプロイと更新をより簡単に行うことができ、委任者はより高い頻度で GRT を委任でき、キュレーターはより多くのサブグラフにシグナルを追加または削除できます。サブグラフ – 以前は、ガスのために頻繁に実行するにはコストが高すぎると考えられていたアクション。 -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +Graph コミュニティは、[GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) の議論の結果を受けて、昨年 Arbitrum を進めることを決定しました。 ## The Graph on L2を利用するために必要なことは何ですか? @@ -29,15 +29,15 @@ The Graph community decided to move forward with Arbitrum last year after the ou L2でのThe Graphの活用には、このドロップダウンスイッチャーを使用して、チェーンを切り替えてください。 -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Arbitrum を切り替えるドロップダウン スイッチャー](/img/arbitrum-screenshot-toggle.png) ## サブグラフ開発者、データ消費者、インデクサー、キュレーター、デリゲーターは何をする必要がありますか? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +直ちに対応する必要はありませんが、ネットワーク参加者は L2 の利点を活用するために Arbitrum への移行を開始することをお勧めします。 -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +コア開発者チームは、委任、キュレーション、およびサブグラフを Arbitrum に移行するのを大幅に容易にする L2 転送ツールの作成に取り組んでいます。ネットワーク参加者は、2023 年の夏までに L2 転送ツールが利用可能になることを期待できます。 -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +2023 年 4 月 10 日の時点で、すべてのインデックス作成報酬の 5% が Arbitrum で鋳造されています。ネットワークへの参加が増加し、評議会がそれを承認すると、インデックス作成の報酬はイーサリアムからアービトラムに徐々に移行し、最終的には完全にアービトラムに移行します。 ## L2でのネットワークに参加したい場合は、どうすればいいのでしょうか? @@ -45,9 +45,9 @@ Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and ## L2へのネットワーク拡張に伴うリスクはありますか? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +すべてのスマート コントラクトは徹底的に[audited]されています(https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf)。 -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +すべてが徹底的にテストされており、安全かつシームレスな移行を保証するための緊急時対応計画が整備されています。詳細は[here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20)をご覧ください。 ## イーサリアムの既存のサブグラフは引き続き使えるのでしょうか? @@ -55,7 +55,7 @@ Everything has been tested thoroughly, and a contingency plan is in place to ens ## GRTはArbitrumに新しいスマートコントラクトをデプロイするのでしょうか? 
-Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +はい、GRT には追加の [Arbitrum 上のスマート コントラクト](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) があります。ただし、イーサリアムのメインネット [GRT 契約](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) は引き続き運用されます。 ## Arbitrumでの課金に関するFAQ @@ -65,14 +65,14 @@ Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/addr ## 自分の資金がArbitrumに安全に移行されたことを確認するにはどうすればよいですか? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +すべての GRT 請求残高はすでに Arbitrum に正常に移行されています。請求契約は Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a) でご覧いただけます。 ## Arbitrumブリッジの安全性を確認するにはどうすればよいですか? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +すべてのユーザーの安全とセキュリティを確保するために、ブリッジは [厳重な監査](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) を受けています。 ## イーサリアムメインネットのウォレットからGRTを追加する場合、何をする必要があるのでしょうか? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Arbitrum 請求残高への GRT の追加は、[Subgraph Studio](https://thegraph.com/studio/) でワンクリックで行うことができます。 GRT を Arbitrum に簡単にブリッジし、1 回のトランザクションで API キーを入力できるようになります。 -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +GRT の追加、撤回、または取得に関する詳細な手順については、[請求ページ](https://thegraph.com/docs/en/billing/) を参照してください。 diff --git a/website/pages/ja/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ja/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..af2478e5ff99 100644 --- a/website/pages/ja/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ja/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: L2 転送ツールに関するよくある質問 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. 
+ +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. -## Subgraph Transfer +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -## How do I transfer my subgraph? +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -To transfer your subgraph, you will need to complete the following steps: +### 7日以内に転送を完了しないとどうなりますか? -1. Initiate the transfer on Ethereum mainnet +L2転送ツールは、アービトラムのネイティブメカニズムを使用してL1からL2にメッセージを送信します。このメカニズムは「再試行可能チケット」と呼ばれ、Arbitrum GRTブリッジを含むすべてのネイティブトークンブリッジで使用されます。再試行可能なチケットの詳細については、[アービトラムドキュメント]\(https://docs.arbitrum.io/arbos/l1 からl2へのメッセージング)を参照してください。 -2. Wait 20 minutes for confirmation +資産(サブグラフ、ステーク、委任、またはキュレーション)をL2に転送する際、Arbitrum GRTブリッジを介してメッセージが送信され、L2でretryable ticketが作成されます。転送ツールにはトランザクションに一部のETHが含まれており、これは1)チケットの作成に支払われ、2)L2でのチケットの実行に必要なガスに使用されます。ただし、チケットがL2で実行可能になるまでの時間でガス料金が変動する可能性があるため、この自動実行試行が失敗することがあります。その場合、Arbitrumブリッジはretryable ticketを最大7日間保持し、誰でもそのチケットを「償還」しようと再試行できます(これにはArbitrumにブリッジされた一部のETHを持つウォレットが必要です)。 -3. Confirm subgraph transfer on Arbitrum\* +これは、すべての転送ツールで「確認」ステップと呼んでいるものです。ほとんどの場合、自動実行は成功するため、自動的に実行されますが、確認が完了したことを確認するために戻ってチェックすることが重要です。成功せず、7日間で成功した再試行がない場合、Arbitrumブリッジはそのチケットを破棄し、あなたの資産(サブグラフ、ステーク、委任、またはキュレーション)は失われ、回復できません。The Graphのコア開発者は、これらの状況を検出し、遅すぎる前にチケットを償還しようとする監視システムを設置していますが、最終的には転送が時間内に完了することを確認する責任があなたにあります。トランザクションの確認に問題がある場合は、[this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) を使用して連絡し、コア開発者が助けてくれるでしょう。 -4. Finish publishing subgraph on Arbitrum +### 委任/ステーク/キュレーション転送を開始しましたが、L2 まで転送されたかどうかわかりません。正しく転送されたことを確認するにはどうすればよいですか? -5. Update Query URL (recommended) +プロフィールに転送を完了するように求めるバナーが表示されない場合、おそらくトランザクションは安全にL2に到達し、それ以上の操作は必要ありません。疑念がある場合、ExplorerがArbitrum Oneでのあなたの委任、ステーク、またはキュレーションを表示しているかどうかを確認できます。 -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). 
+L1トランザクションのハッシュを持っている場合(これはウォレット内の最近のトランザクションを見ることで見つけることができます)、メッセージをL2に運ぶ「retryable ticket」が償還されたかどうかをこちらで確認できます:https://retryable-dashboard.arbitrum.io/ - 自動償還が失敗した場合、そこでウォレットを接続して償還することもできます。コア開発者もメッセージが詰まった場合を監視し、期限切れになる前にそれらを償還しようと試みますので、安心してください。 -## Where should I initiate my transfer from? +## 部分グラフの転送 -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +### サブグラフを転送するにはどうすればよいですか? -## How long do I need to wait until my subgraph is transferred + -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +サブグラフを転送するには、次の手順を完了する必要があります。 -## Will my subgraph still be discoverable after I transfer it to L2? +1. イーサリアムメインネットで転送を開始する -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +2. 確認を待つために20分お待ちください。 -## Does my subgraph need to be published to transfer it? +3. Arbitrum でサブグラフ転送を確認します\* -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +4. Arbitrum でサブグラフの公開を完了する -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +5. クエリ URL を更新 (推奨) -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +\*注意:7日以内に転送を確認する必要があります。それ以外の場合、サブグラフが失われる可能性があります。ほとんどの場合、このステップは自動的に実行されますが、Arbitrumでガス価格が急上昇した場合には手動で確認する必要があるかもしれません。このプロセス中に問題が発生した場合、サポートを受けるためのリソースが用意されています:support@thegraph.com に連絡するか、[Discord](https://discord.gg/graphprotocol)でお問い合わせください\\。 -## After I transfer, do I also need to re-publish on Arbitrum? +### どこから転送を開始すればよいですか? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +トランスファーを開始するには、[Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer)またはサブグラフの詳細ページからトランスファーを開始できます。サブグラフの詳細ページで「サブグラフを転送」ボタンをクリックしてトランスファーを開始してください。 -## Will there be a down-time to my endpoint while re-publishing? 
+### サブグラフが転送されるまでどれくらい待つ必要がありますか -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +トランスファーには約20分かかります。Arbitrumブリッジはバックグラウンドでブリッジトランスファーを自動的に完了します。一部の場合、ガス料金が急上昇する可能性があり、トランザクションを再度確認する必要があるかもしれません。 -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### 私のサブグラフは L2 に転送した後も検出可能ですか? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +あなたのサブグラフは、それが公開されたネットワーク上でのみ発見できます。たとえば、あなたのサブグラフがArbitrum Oneにある場合、それはArbitrum OneのExplorerでのみ見つけることができ、Ethereum上では見つけることはできません。正しいネットワークにいることを確認するために、ページの上部にあるネットワーク切り替えツールでArbitrum Oneを選択していることを確認してください。トランスファー後、L1サブグラフは非推奨として表示されます。 -## Will my subgraph's curation move with my subgraph? +### 私のサブグラフを転送するには公開する必要がありますか? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +サブグラフ転送ツールを活用するには、サブグラフがすでにEthereumメインネットに公開され、そのサブグラフを所有するウォレットが所有するキュレーション信号を持っている必要があります。サブグラフが公開されていない場合、Arbitrum Oneに直接公開することをお勧めします。関連するガス料金はかなり低くなります。公開されたサブグラフを転送したいが、所有者のアカウントがそれに対してキュレーション信号を出していない場合、そのアカウントから少額(たとえば1 GRT)の信号を送ることができます。必ず「auto-migrating(自動移行)」信号を選択してください。 -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### Arbitrumへの転送後、Ethereumメインネットバージョンの私のサブグラフはどうなりますか? -## Can I move my subgraph back to Ethereum mainnet after I transfer? +サブグラフをArbitrumに転送した後、Ethereumメインネットワークのバージョンは非推奨とされます。おすすめでは、48時間以内にクエリURLを更新することをお勧めしています。ただし、サードパーティのDAppサポートが更新されるために、メインネットワークのURLが機能し続ける猶予期間も設けられています。 -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### 転送後、Arbitrum上で再公開する必要がありますか? -## Why do I need bridged ETH to complete my transfer? +20分のトランスファーウィンドウの後、トランスファーを完了するためにUI内でトランザクションを確認する必要がありますが、トランスファーツールがこれをガイドします。トランスファーウィンドウおよびその後の猶予期間中、L1エンドポイントはサポートされ続けます。都合が良いときにエンドポイントを更新することが奨励されています。 -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### 再公開中にエンドポイントでダウンタイムが発生しますか? -## Curation Signal +短期間のダウンタイムを経験する可能性は低いですが、L1でサブグラフをサポートしているインデクサーと、サブグラフが完全にL2でサポートされるまでインデクシングを続けるかどうかに依存することがあります。 -## How do I transfer my curation? +### L2上での公開とバージョニングは、Ethereumメインネットと同じですか? -To transfer your curation, you will need to complete the following steps: +はい、Subgraph Studioで公開する際には、公開ネットワークとしてArbitrum Oneを選択してください。Studioでは、最新のエンドポイントが利用可能で、最新の更新されたサブグラフバージョンを指します。 -1. Initiate signal transfer on Ethereum mainnet +### 私のサブグラフのキュレーションは、サブグラフと一緒に移動しますか? -2. Specify an L2 Curator address\* +自動移行信号を選択した場合、あなたのキュレーションの100%はサブグラフと一緒にArbitrum Oneに移行します。サブグラフのすべてのキュレーション信号は、転送時にGRTに変換され、あなたのキュレーション信号に対応するGRTがL2サブグラフ上で信号を発行するために使用されます。 -3. 
Wait 20 minutes for confirmation +他のキュレーターは、自分の一部のGRTを引き出すか、それをL2に転送して同じサブグラフで信号を発行するかを選択できます。 -\*If necessary - i.e. you are using a contract address. +### 転送後にサブグラフをEthereumメインネットに戻すことはできますか? -## How will I know if the subgraph I curated has moved to L2? +一度転送されると、Ethereumメインネットワークのサブグラフバージョンは非推奨とされます。メインネットワークに戻りたい場合、再デプロイしてメインネットワークに再度公開する必要があります。ただし、Ethereumメインネットワークに戻すことは強く勧められていません。なぜなら、将来的にはインデクシングリワードが完全にArbitrum Oneで分配されるためです。 -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### なぜ転送を完了するためにブリッジされたETHが必要なのですか? -## What if I do not wish to move my curation to L2? +Arbitrum One上のガス料金は、ブリッジを介してArbitrum OneにブリッジされたETHを使用して支払われます。ただし、Ethereumメインネットワークと比較して、ガス料金はかなり低いです。 -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +## Delegation -## How do I know my curation successfully transferred? +### 委任を転送するにはどうすればよいですか? -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. + -## Can I transfer my curation on more than one subgraph at a time? +委任を転送するには、次の手順を完了する必要があります。 -There is no bulk transfer option at this time. +1. イーサリアムメインネットで委任転送を開始する +2. 確認を待つために20分お待ちください。 +3. アービトラムでの委任転送の確認 -## Indexer Stake +\*\*\*\*Arbitrum上での委任トランスファーを完了するには、トランザクションを確認する必要があります。このステップは7日以内に完了する必要があり、それ以外の場合、委任が失われる可能性があります。ほとんどの場合、このステップは自動的に実行されますが、Arbitrumでガス価格が急上昇した場合、手動の確認が必要になることがあります。このプロセス中に問題が発生した場合、サポートを受けるためのリソースが用意されています:support@thegraph.com または[Discord](https://discord.gg/graphprotocol)でサポートに連絡してください。 -## How do I transfer my stake to Arbitrum? +### イーサリアムメインネットでオープンアロケーションで送金を開始した場合、報酬はどうなりますか? -To transfer your stake, you will need to complete the following steps: +もし委任先のインデクサーがまだL1上で運営されている場合、Arbitrumに移行するとEthereumメインネットワークのオープンアロケーションからの委任報酬が失われます。つまり、最大で過去28日間の報酬が失われる可能性があります。報酬の損失を最小限に抑えるために、インデクサーがアロケーションを終了した直後にトランスファーをタイミングすることができます。インデクサーとコミュニケーションチャネルがある場合、彼らと相談してトランスファーの最適なタイミングを見つけることを検討してください。 -1. Initiate stake transfer on Ethereum mainnet +### 現在委任しているインデクサーがアービトラム One にない場合はどうなりますか? -2. Wait 20 minutes for confirmation +L2トランスファーツールは、あなたが委任したインデクサーが自分のステークをArbitrumに転送した場合にのみ有効になります。 -3. Confirm stake transfer on Arbitrum +### デリゲーターは、別のインデクサーに委任する選択肢がありますか? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +別のインデクサーに委任したい場合、まずArbitrum上で同じインデクサーにトランスファーし、その後委任を解除して解凍期間を待ちます。その後、別のアクティブなインデクサーを選択して委任できます。 -## Will all of my stake transfer? +### L2 で委任先のインデクサーが見つからない場合はどうすればよいですか? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +L2 転送ツールは、以前に委任したインデクサーを自動的に検出します。 -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### 以前のインデクサーではなく、新しいインデクサーまたは複数のインデクサーに委任を組み合わせて一致させたり、"分散" したりできますか? 
-Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +L2トランスファーツールは常に、以前に委任した同じインデクサーにあなたの委任を移動させます。L2に移動した後、委任を解除し、解凍期間を待ってから、委任を分割するかどうかを決定できます。 -## How much time do I have to confirm my stake transfer to Arbitrum? +### クールダウン期間の対象になりますか、それともL2委任転送ツールを使用した直後に撤退できますか? -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +トランスファーツールを使用すると、即座にL2に移動できます。委任を解除したい場合は、解凍期間を待たなければなりません。ただし、インデクサーがステークをすべてL2に移動させた場合、Ethereumメインネットワークで即座に引き出すことができます。 -## What if I have open allocations? +### 委任を譲渡しない場合、報酬に悪影響が及ぶ可能性がありますか? -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +将来的には、すべてのネットワーク参加者がArbitrum Oneに移行すると予想されています。 -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### L2への委任の転送が完了するまでどのくらいかかりますか? -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +委任転送には 20 分間の確認が必要です。20 分後、7 日以内に転送プロセスのステップ 3 を戻って完了する必要があります。これを怠ると、委任が失われる可能性があります。ほとんどの場合、転送ツールはこのステップを自動的に完了することに注意してください。自動試行が失敗した場合は、手動で完了する必要があります。このプロセス中に問題が発生した場合は、心配しないでください。サポートのためにここにいます。support@thegraph.com または[Discord](https://discord.gg/graphprotocol). -## How long will it take to transfer my stake? +### GRT権利確定契約/トークンロックウォレットを使用している場合、委任を譲渡できますか? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +はい!プロセスは少し異なります。ベスティングコントラクトはL2ガスの支払いに必要なETHを転送できないため、事前にそれを預ける必要があります。ベスティングコントラクトが完全にベスティングされていない場合、まずL2上でカウンターパートのベスティングコントラクトを初期化する必要があり、委任をこのL2ベスティングコントラクトにのみ転送できます。Explorerを使用してベスティングロックウォレットに接続した場合、UIがこのプロセスを案内することができます。 -## Do I have to index on Arbitrum before I transfer my stake? +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## Can Delegators move their delegation before I move my indexing stake? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. 
-No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### 委任税はありますか? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +いいえ、L2で受け取ったトークンは、指定された委任者の代わりに指定されたインデクサーに委任され、委任税は課金されません。 -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### 委任を譲渡すると、未実現の報酬も譲渡されますか? -## Delegation +はい!転送できない報酬は、アロケーションがクローズされるまで存在しないため、オープンアロケーションの報酬だけです(通常、28日ごとにクローズされます)。長い間委任している場合、これはおそらく報酬のほんの一部です。 + +スマート コントラクト レベルでは、未実現報酬はすでに委任残高の一部となっているため、委任を L2 に転送するときに転送されます。 + +### 委任を L2 に移動することは必須ですか? 期限はありますか? + +L2への委任移行は強制ではありませんが、[GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193) で説明されているタイムラインに従ってL2でのインデクシング報酬が増加しています。最終的には、評議会が増加を承認し続ける限り、すべての報酬がL2で分配され、L1のインデクサーや委任者にはインデクシング報酬がなくなる可能性があります。 + +### すでにステークを L2 に移管しているインデクサーに委任している場合、L1 での報酬の受け取りは停止されますか? + +多くのインデクサーは段階的にステークを移行しており、したがってL1でのインデクサーは引き続きL1で報酬と手数料を稼ぎ、それらはその後委任者と共有されます。インデクサーがステークをすべて移行した場合、それからはL1での運用を停止し、委任者はL2に移行しない限り追加の報酬を受け取らなくなります。 + +最終的に、評議会がL2でのインデクシング報酬の増加を承認し続ける場合、すべての報酬はL2で分配され、L1のインデクサーや委任者にはインデクシング報酬が存在しなくなるでしょう。 + +### 委任を転送するボタンが表示されません。 何故ですか? + +あなたのインデクサーは、ステークを転送するために L2 転送ツールをまだ使用していない可能性があります。 + +Indexerに連絡できる場合、彼らにL2トランスファーツールを使用するように奨励し、委任者が委任を彼らのL2インデクサーアドレスに転送できるようにすることができます。 + +### 私のインデクサーも Arbitrum 上にありますが、プロファイルに委任を転送するボタンが表示されません。 何故ですか? + +インデクサーがL2での運用を設定しているが、ステークを転送するためにL2トランスファーツールを使用していない可能性があります。したがって、L1のスマートコントラクトはインデクサーのL2アドレスについては知らないかもしれません。インデクサーに連絡できる場合、彼らに転送ツールを使用するよう奨励し、委任者が委任を彼らのL2インデクサーアドレスに転送できるようにすることができます。 + +### 委任解除プロセスを開始していてまだ撤回していない場合、委任を L2 に転送できますか? + +いいえ。 代表団が解凍されつつある場合は、28 日間待って代表団を撤回する必要があります。 + +委任が解除されているトークンは「ロック」されているため、L2 に転送できません。 + +## キュレーションシグナル + +### キュレーションを転送するにはどうすればよいですか? + +キュレーションを転送するには、次の手順を完了する必要があります。 + +1. イーサリアムメインネットでシグナル転送を開始する + +2. イーサリアムメインネットでシグナル転送を開始する + +3. 確認を待つために20分お待ちください。 + +\*必要な場合 - つまり、契約アドレスを使用している場合。 + +### 私がキュレーションしたサブグラフが L2 に移動したかどうかはどうすればわかりますか? + +サブグラフの詳細ページを表示すると、このサブグラフが転送されたことを通知するバナーが表示されます。バナーに従ってキュレーションを転送できます。また、移動したサブグラフの詳細ページでもこの情報を見つけることができます。 + +### 自分のキュレーションを L2 に移動したくない場合はどうすればよいですか? + +サブグラフが非推奨になった場合、信号を引き出すオプションがあります。同様に、サブグラフがL2に移動した場合、Ethereumメインネットワークで信号を引き出すか、L2に送信することを選択できます。 + +### 私のキュレーションが正常に転送されたことを確認するにはどうすればよいですか? + +L2トランスファーツールを開始してから約20分後、Explorerを介して信号の詳細にアクセスできるようになります。 + +### 一度に複数のサブグラフへキュレーションを転送することはできますか? + +現時点では一括転送オプションは提供されていません。 + +## Indexer Stake + +### 自分の株式をArbitrumに移管するにはどうすればよいですか? + +> 免責事項: インデクサーからGRTの一部を現在アンステークしている場合、L2トランスファーツールを使用することはできません。 + + + +ステークを譲渡するには、次の手順を完了する必要があります。 + +1. イーサリアムメインネットでのステーク転送を開始してください。 + +2. 確認を待つために20分お待ちください。 + +3. Arbitrumでのステーク転送を確認してください。 + +\*注意:7日以内に転送を確認する必要があります。それ以外の場合、あなたのステークが失われる可能性があります。ほとんどの場合、このステップは自動的に実行されますが、Arbitrumでガス価格が急上昇した場合には手動で確認が必要な場合があります。このプロセス中に問題が発生した場合、サポートを受けるためのリソースが用意されています:support@thegraph.com に連絡するか、[Discord](https://discord.gg/graphprotocol)でお問い合わせください。 + +### 私のすべてのステークは譲渡されますか? 
+ +どれだけのステークを転送するかを選択できます。すべてのステークを一度に転送する場合、まずオープンなアロケーションを閉じる必要があります。 + +ステークの一部を複数のトランザクションで転送する予定の場合、常に同じ受益者アドレスを指定する必要があります。 + +注意: トランスファーツールを初めて使用する際には、L2での最小ステーク要件を満たす必要があります。インデクサーは、最初にこの関数を呼び出す際に最小100k GRTを送信する必要があります。L1にステークの一部を残す場合、それも100k GRT以上でなければならず、オープンなアロケーションをカバーするのに十分である必要があります(委任と合わせて)。 + +### アービトラムへのステーク譲渡の確認にはどのくらいの時間が必要ですか? + +\*\*\* Arbitrum上でのステーク転送を完了するには、トランザクションを確認する必要があります。このステップは7日以内に完了する必要があり、それ以外の場合、ステークが失われる可能性があります。 + +### 未処理の配賦がある場合はどうなりますか? -## How do I transfer my delegation? +すべてのステークを送信しない場合、L2トランスファーツールはEthereumメインネットで最低でも100k GRTが残っており、残りのステークと委任がオープンなアロケーションをカバーするのに十分であることを検証します。GRTの残高が最小要件とオープンなアロケーションをカバーしない場合、オープンなアロケーションを閉じる必要があるかもしれません。 -To transfer your delegation, you will need to complete the following steps: +### 転送ツールを使用して、転送する前にイーサリアムメインネットのステーキングを解除するために28日間待つ必要がありますか? -1. Initiate delegation transfer on Ethereum mainnet +いいえ、L2にステークをすぐに転送できます。トランスファーツールを使用する前にアンステークして待つ必要はありません。28日の待機期間は、ステークをEthereumメインネットまたはL2のウォレットに戻す場合にのみ適用されます。 -2. Wait 20 minutes for confirmation +### 賭け金の譲渡にはどのくらい時間がかかりますか? -3. Confirm delegation transfer on Arbitrum +L2トランスファーツールがステークの転送を完了するのに約20分かかります。 -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### 株式を譲渡する前に、Arbitrum でインデックスを作成する必要がありますか? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +インデクシングのセットアップよりも先にステークを効果的に転送できますが、L2でのサブグラフへの割り当て、それらのサブグラフのインデクシング、およびPOIの提出を行うまで、L2での報酬を請求することはできません。 -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +### 委任者は、インデックス作成の賭け金を移動する前に委任を移動できますか? -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +いいえ、委任者が委任されたGRTをArbitrumに転送するためには、委任しているインデクサーがL2でアクティブである必要があります。 -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +### GRT権利確定契約/トークンロックウォレットを使用している場合、株式を譲渡できますか? -## Do Delegators have the option to delegate to another Indexer? +はい!プロセスは少し異なります。ベスティングコントラクトはL2ガスの支払いに必要なETHを転送できないため、事前にそれを預ける必要があります。ベスティングコントラクトが完全にベスティングされていない場合、まずL2上でカウンターパートのベスティングコントラクトを初期化する必要があり、ステークはこのL2ベスティングコントラクトにのみ転送できます。Explorerを使用してベスティングロックウォレットを介してExplorerに接続した場合、UIがこのプロセスを案内することができます。 -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### 私はすでにL2の権益を持っています。 初めて転送ツールを使用するときも 100k GRT を送信する必要がありますか? -## What if I can't find the Indexer I'm delegating to on L2? 
+はい。L1のスマートコントラクトはあなたのL2のステークを認識しないため、最初に転送する際に少なくとも100k GRTを転送する必要があります。 -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### GRT のステーキングを解除している最中に、ステークを L2 に移すことはできますか? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +いいえ。ステークの一部が解凍中の場合、ステークを転送する前に28日間待つ必要があり、解凍中のトークンは「ロックされ」、転送やL2へのステークはできません。 -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +## 権利確定契約の譲渡 -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +### 権利確定契約を譲渡するにはどうすればよいですか? -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +権利確定を譲渡するには、次の手順を完了する必要があります: -## Can my rewards be negatively impacted if I do not transfer my delegation? +1. イーサリアムメインネットで権利確定転送を開始する -It is anticipated that all network participation will move to Arbitrum One in the future. +2. 確認を待つために20分お待ちください。 -## How long does it take to complete the transfer of my delegation to L2? +3. アービトラムでの権利確定転送の確認 -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### 部分的にしか権利が与えられていない場合、権利確定契約を譲渡するにはどうすればよいですか? -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +1. 転送ツール契約にETHを入金します(UIは妥当な金額を見積もるのに役立ちます) -## Is there any delegation tax? +2. ロックされたGRTを転送ツールコントラクトを介してL2に送信して、L2ベスティングロックを初期化します。これにより、L2の受益者アドレスも設定されます。 -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +3. ステーク/委任をL2に送信するには、L1Staking契約内の「ロックされた」トランスファーツール機能を使用します。 -## Vesting Contract Transfer +4. 転送ツール契約から残りの ETH を引き出します -## How do I transfer my vesting contract? +### 完全に権利が確定している場合、権利確定契約を譲渡するにはどうすればよいですか? -To transfer your vesting, you will need to complete the following steps: + -1. Initiate the vesting transfer on Ethereum mainnet +完全に権利が与えられているものの場合、プロセスは似ています: -2. Wait 20 minutes for confirmation +1. 転送ツール契約にETHを入金します(UIは妥当な金額を見積もるのに役立ちます) -3. Confirm vesting transfer on Arbitrum +2. 転送ツール契約への呼び出しでL2アドレスを設定する -## How do I transfer my vesting contract if I am only partially vested? +3. あなたのステーク/委任をL2に送信するには、L1ステーキング契約内の「ロック」されたトランスファーツール機能を使用してください。 -1. 
Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +4. 転送ツール契約から残りの ETH を引き出します -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +### 私のベスティング契約をアービトラムに転送できますか? -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +あなたはベスティングコントラクトのGRT残高をL2のベスティングコントラクトに転送できます。これは、ベスティングコントラクトからステークまたは委任をL2に転送するための前提条件です。ベスティングコントラクトはGRTのゼロでない残高を保持している必要があります(必要に応じて1 GRTのような少額を転送できます)。 -4. Withdraw any remaining ETH from the transfer tool contract +L1のベスティングコントラクトからL2にGRTを転送する際、送信する金額を選択でき、何度でもこれを行うことができます。L2のベスティングコントラクトは、最初にGRTを転送する際に初期化されます。 -## How do I transfer my vesting contract if I am fully vested? +これらの転送は、ベスティングコントラクトアカウントでExplorerプロフィールに接続した際に表示されるTransfer Toolを使用して行われます。 -For those that are fully vested, the process is similar: +L2のベスティングコントラクトからGRTを解放または引き出すことは、契約が完全にベスティングされる契約終了時までできません。それ以前にGRTを解放する必要がある場合、その目的で使用できる別のトランスファーツールを使用してGRTをL1のベスティングコントラクトに戻すことができます。 -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +もしベスティングコントラクトの残高をL2に転送していない場合、かつベスティングコントラクトが完全にベスティングされている場合、ベスティングコントラクトをL2に転送しないでください。代わりに、トランスファーツールを使用してL2ウォレットアドレスを設定し、ステークまたは委任をこの通常のL2ウォレットに直接転送できます。 -2. Set your L2 address with a call to the transfer tool contract +### 私は権利確定契約を利用してメインネットに賭けています。 自分の株式をArbitrumに譲渡できますか? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +はい、ただし、契約がまだベスティング中の場合、ステークをL2ベスティングコントラクトが所有するように転送できます。まず、Explorerのベスティングコントラクトの転送ツールを使用して、このL2コントラクトを初期化する必要があります。契約が完全にベスティングされている場合、ステークをL2内の任意のアドレスに転送できますが、事前に設定し、L2ガスの支払いに必要な一部のETHをデポジットする必要があります。 -4. Withdraw any remaining ETH from the transfer tool contract +### 私は権利確定契約を使用してメインネットを委任しています。 私の代表団をArbitrumに移管できますか? -## Can I transfer my vesting contract to Arbitrum? +はい、ただし、契約がまだベスティング中の場合、委任をL2ベスティングコントラクトが所有するように転送できます。まず、Explorerのベスティングコントラクトの転送ツールを使用して、このL2コントラクトを初期化する必要があります。契約が完全にベスティングされている場合、委任をL2内の任意のアドレスに転送できますが、事前に設定し、L2ガスの支払いに必要な一部のETHをデポジットする必要があります。 -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +### L2の権利確定契約に別の受益者を指定することはできますか? -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +はい、最初に残高を転送し、L2ベスティングコントラクトを設定する際、L2の受益者を指定できます。この受益者は、Arbitrum Oneでトランザクションを実行できるウォレットである必要があります。つまり、EOAまたはArbitrum Oneに展開されたマルチシグである必要があります。 -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +契約が完全にベスティングされている場合、L2でのベスティングコントラクトを設定しません。代わりに、L2ウォレットアドレスを設定し、これがArbitrum上でのステークまたは委任の受信ウォレットとなります。 -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +### 私の契約は完全に権利確定しています。 自分のステークまたは委任を、L2 権利確定契約ではない別のアドレスに移すことはできますか? 
-If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +はい。もしベスティング契約の残高をL2に転送していない場合、かつベスティング契約が完全にベスティングされている場合、ベスティング契約をL2に転送すべきではありません。代わりに、トランスファーツールを使用してL2ウォレットアドレスを設定し、ステークまたは委任をこの通常のL2ウォレットに直接転送できます。 -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +これにより, ステークまたは委任を任意のL2アドレスに移すことができます。 -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### 私の権利確定契約はまだ権利確定中です。 権利確定契約残高を L2 に転送するにはどうすればよいですか? -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +これらのステップは、契約がまだベスティング中の場合、または契約がまだベスティング中であったときにこのプロセスを使用した場合にのみ適用されます。 -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +権利確定契約をL2に転送するには、L2権利確定契約を初期化する転送ツールを使用してGRT残高をL2に送信します。 -## Can I specify a different beneficiary for my vesting contract on L2? +1. 転送ツール契約にETHを入金します(これはL2ガスの支払いに使用されます) -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +2. 権利確定契約へのプロトコルアクセスを取り消す(次のステップで必要) -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +3. 権利確定契約へのプロトコルアクセスを許可します(契約が転送ツールと対話できるようにします) -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +4. L2受取人アドレス\*を指定し、イーサリアムメインネットで残高転送を開始します -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +5. 確認を待つために20分お待ちください。 -This allows you to transfer your stake or delegation to any L2 address. +6. L2での残高転送の確認 -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +\*必要な場合 - つまり、契約アドレスを使用している場合。 -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. 
+\*\*\*\*Arbitrum での残高振替を完了するには、取引を確認する必要があります。このステップは 7 日以内に完了する必要があります。そうしないと、残高が失われる可能性があります。ほとんどの場合、このステップは自動的に実行されますが、Arbitrum にガス価格の急上昇がある場合は手動確認が必要になる場合があります。このプロセス中に問題がある場合は、サポートするためのリソースがあります: support@thegraph.com または [Discord](https://discord.gg/graphprotocol). -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +### 私の権利確定契約には 0 GRT と表示されているため、転送できません。これはなぜですか? どうすれば修正できますか? -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +L2ベスティングコントラクトを初期化するためには、L2にゼロでない額のGRTを転送する必要があります。これは、L2トランスファーツールで使用されるArbitrum GRTブリッジによって必要とされます。GRTはベスティングコントラクトの残高から提供される必要があり、ステークまたは委任されたGRTは含まれません。 -2. Revoke protocol access to the vesting contract (needed for the next step) +もしベスティング契約からステークまたは委任されたGRTをすべて行ってしまった場合、他のどこかから(別のウォレットや取引所からなど)ベスティング契約アドレスに1 GRTのような少額を手動で送信できます。 -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +### 権利確定契約を使用してステークまたは委任を行っていますが、ステークまたは委任を L2 に転送するボタンが表示されません。どうすればよいですか? -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +ベスティング契約のベスティングが完了していない場合、まずL2ベスティング契約を作成する必要があります。このL2ベスティング契約は、ベスティングタイムラインの終了までL2でトークンの解放を許可しませんが、L1ベスティング契約にトークンを転送してそちらで解放することができます。 -5. Wait 20 minutes for confirmation +Explorerでベスティング契約に接続した場合、L2ベスティング契約を初期化するボタンが表示されるはずです。まず、このプロセスに従って初期化を行い、その後、プロファイルでステークまたは委任を転送するためのボタンが表示されるはずです。 -6. Confirm the balance transfer on L2 +### L2 権利確定契約を初期化すると、委任も自動的に L2 に転送されますか? -\*If necessary - i.e. you are using a contract address. +いいえ、L2 権利確定契約の初期化は、権利確定契約からステークまたは委任を移管するための前提条件ですが、これらを個別に移管する必要があります。 -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +L2 権利確定契約を初期化した後、プロフィールにステークまたは委任の譲渡を促すバナーが表示されます。 -## Can I move my vesting contract back to L1? +### 権利確定契約をL1に戻すことはできますか? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +その必要はありません。なぜなら、あなたのベスティング契約はまだL1にあるからです。トランスファーツールを使用すると、単にL1ベスティング契約に接続された新しいL2契約を作成し、その間でGRTを送受信できるようになります。 -## Why do I need to move my vesting contract to begin with? +### そもそもなぜ権利確定契約を移動する必要があるのですか? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +L2ベスティング契約を設定する必要があります。これにより、このアカウントがL2上のステークまたは委任を所有できるようになります。それ以外の場合、ベスティング契約を"脱出"せずにステーク/委任をL2に転送する方法がありません。 -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### 部分的にしか権利が確定していないのに契約を現金化しようとするとどうなりますか? これは可能ですか? -This is not a possibility. You can move funds back to L1 and withdraw them there. +これは可能ではありません。資金をL1に戻し、そこで引き出すことはできます。 -## What if I don't want to move my vesting contract to L2? +### 権利確定契約をL2に移行したくない場合はどうすればよいですか? -You can keep staking/delegating on L1. 
Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +L1でのステーキング/委任を継続することができます。将来的に、プロトコルがArbitrumでスケーリングするにつれて、L2で報酬を有効にすることを検討するかもしれません。ただし、これらのトランスファーツールは、プロトコルでステーキングや委任が許可されているベスティング契約向けのものです。契約がステーキングや委任を許可していないか、取り消し可能な場合、トランスファーツールは利用できません。利用可能な場合、まだL1からGRTを引き出すことができます。 diff --git a/website/pages/ja/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ja/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..bee04117795e 100644 --- a/website/pages/ja/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ja/arbitrum/l2-transfer-tools-guide.mdx @@ -1,165 +1,165 @@ --- -title: L2 Transfer Tools Guide +title: L2 転送ツールガイド --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +グラフにより、アービトラムワンのL2に簡単に移動できるようになりました。プロトコル参加者ごとに、すべてのネットワーク参加者が L2 への転送をシームレスに行うための L2 転送ツールのセットがあります。これらのツールでは、転送する内容に応じて、特定の一連の手順に従う必要があります。 -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +これらのツールに関するよくある質問は、[L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq) で回答されています。FAQには、ツールの使用方法、機能、およびツールを使用する際の注意事項に関する詳細な説明が含まれています。 -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## サブグラフをアービトラムに転送する方法 (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## サブグラフを転送する利点 -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +グラフのコミュニティとコア開発者は、過去1年間、Arbitrumに移行する準備をしてきました(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)。レイヤー2または「L2」ブロックチェーンであるアービトラムは、イーサリアムからセキュリティを継承しますが、ガス料金を大幅に削減します。 -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. 
+サブグラフをThe Graph Networkに公開またはアップグレードする際には、プロトコル上のスマートコントラクトとやり取りするため、ETHを使用してガスを支払う必要があります。サブグラフをArbitrumに移動することで、将来のサブグラフのアップデートにかかるガス料金が大幅に削減されます。低い手数料と、L2のキュレーションボンディングカーブがフラットであるという点も、他のキュレーターがあなたのサブグラフをキュレーションしやすくし、サブグラフのインデクサーへの報酬を増加させます。この低コストな環境は、インデクサーがサブグラフをインデックス化して提供するコストも削減します。アービトラム上のインデックス報酬は今後数か月間で増加し、Ethereumメインネット上では減少する予定です。そのため、ますます多くのインデクサーがステークを転送し、L2での運用を設定していくことになるでしょう。 -## Understanding what happens with signal, your L1 subgraph and query URLs +## シグナル、L1サブグラフ、クエリURLで何が起こるかを理解する -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +サブグラフをアービトラムに転送するには、アービトラムGRTブリッジが使用され、アービトラムGRTブリッジはネイティブアービトラムブリッジを使用してサブグラフをL2に送信します。「転送」はメインネット上のサブグラフを非推奨にし、ブリッジを使用してL2上のサブグラフを再作成するための情報を送信します。また、サブグラフ所有者のシグナル GRT も含まれ、ブリッジが転送を受け入れるには 0 より大きくなければなりません。 -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +サブグラフの転送を選択すると、サブグラフのすべてのキュレーション信号がGRTに変換されます。これは、メインネットのサブグラフを「非推奨」にすることと同じです。キュレーションに対応するGRTはサブグラフとともにL2に送信され、そこであなたに代わってシグナルを作成するために使用されます。 -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +他のキュレーターは、GRTの分数を引き出すか、同じサブグラフでシグナルをミントするためにL2に転送するかを選択できます。サブグラフの所有者がサブグラフをL2に転送せず、コントラクトコールを介して手動で非推奨にした場合、キュレーターに通知され、キュレーションを取り消すことができます。 -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +サブグラフが転送されるとすぐに、すべてのキュレーションがGRTに変換されるため、インデクサーはサブグラフのインデックス作成に対する報酬を受け取らなくなります。ただし、1) 転送されたサブグラフを24時間提供し続け、2) L2でサブグラフのインデックス作成をすぐに開始するインデクサーがあります。これらのインデクサーには既にサブグラフのインデックスが作成されているため、サブグラフが同期するのを待つ必要はなく、ほぼ即座にL2サブグラフを照会できます。 -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +L2 サブグラフへのクエリは別の URL (「arbitrum-gateway.thegraph.com」) に対して実行する必要がありますが、L1 URL は少なくとも 48 時間は機能し続けます。その後、L1ゲートウェイはクエリをL2ゲートウェイに転送しますが(しばらくの間)、これにより遅延が増えるため、できるだけ早くすべてのクエリを新しいURLに切り替えることをお勧めします。 -## Choosing your L2 wallet +## L2ウォレットの選択 -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. 
+メインネットでサブグラフを公開したときに、接続されたウォレットを使用してサブグラフを作成し、このウォレットはこのサブグラフを表すNFTを所有し、更新を公開できます。 -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +サブグラフをアービトラムに転送する場合、L2でこのサブグラフNFTを所有する別のウォレットを選択できます。 -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +MetaMaskのような "通常の" ウォレット(外部所有アカウントまたはEOA、つまりスマートコントラクトではないウォレット)を使用している場合、これはオプションであり、L1と同じ所有者アドレスを保持することをお勧めします。 -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +マルチシグ(Safeなど)などのスマートコントラクトウォレットを使用している場合、このアカウントはメインネットにのみ存在し、このウォレットを使用してアービトラムで取引を行うことができない可能性が高いため、別のL2ウォレットアドレスを選択する必要があります。スマートコントラクトウォレットまたはマルチシグを使い続けたい場合は、Arbitrumで新しいウォレットを作成し、そのアドレスをサブグラフのL2所有者として使用します。 -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +\*\*あなたが管理し、アービトラムで取引を行うことができるウォレットアドレスを使用することは非常に重要です。そうしないと、サブグラフが失われ、復元できません。 -## Preparing for the transfer: bridging some ETH +## 転送の準備: 一部のETHのブリッジング -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +サブグラフを転送するには、ブリッジを介してトランザクションを送信し、その後アービトラム上で別のトランザクションを実行する必要があります。最初のトランザクションでは、メインネット上のETHを使用し、L2でメッセージが受信される際にガスを支払うためにいくらかのETHが含まれています。ただし、このガスが不足している場合、トランザクションを再試行し、L2で直接ガスを支払う必要があります(これが下記の「ステップ3:転送の確認」です)。このステップは、転送を開始してから7日以内に実行する必要があります。さらに、2つ目のトランザクション(「ステップ4:L2での転送の完了」)は、直接アービトラム上で行われます。これらの理由から、アービトラムウォレットに一定のETHが必要です。マルチシグまたはスマートコントラクトアカウントを使用している場合、ETHはトランザクションを実行するために使用している通常の個人のウォレット(EOAウォレット)にある必要があり、マルチシグウォレットそのものにはないことに注意してください -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. 
+一部の取引所でETHを購入してアービトラムに直接引き出すか、アービトラムブリッジを使用してメインネットウォレットからL2にETHを送信することができます:[bridge.arbitrum.io](http://bridge.arbitrum.io)。アービトラムのガス料金は安いので、必要なのは少量だけです。トランザクションが承認されるには、低いしきい値(0.01 ETHなど)から始めることをお勧めします。 -## Finding the subgraph Transfer Tool +## サブグラフ転送ツールの検索 -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +L2転送ツールは、サブグラフスタジオでサブグラフのページを見ているときに見つけることができます。 ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +サブグラフを所有するウォレットに接続している場合は、エクスプローラーとエクスプローラーのそのサブグラフのページでも入手できます。 ![Transferring to L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +[L2に転送] ボタンをクリックすると、転送ツールが開き、転送プロセスを開始できます。 -## Step 1: Starting the transfer +## ステップ1: 転送を開始する -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +転送を開始する前に、どのアドレスがL2のサブグラフを所有するかを決定する必要があり(上記の「L2ウォレットの選択」を参照)、ガス用のETHをアービトラムにすでにブリッジすることを強くお勧めします(上記の「転送の準備: ETHのブリッジング」を参照)。 -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +また、サブグラフを転送するには、サブグラフを所有するのと同じアカウントを持つサブグラフにゼロ以外の量のシグナルが必要であることに注意してください。サブグラフでシグナルを出していない場合は、少しキュレーションを追加する必要があります(1 GRTのような少量を追加するだけで十分です)。 -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +「Transfer Tool」を開いた後、L2ウォレットアドレスを「受信ウォレットアドレス」フィールドに入力できるようになります。ここで正しいアドレスを入力していることを確認してください。「Transfer Subgraph」をクリックすると、ウォレット上でトランザクションを実行するよう求められます(注意:L2ガスの支払いに十分なETHの価値が含まれています)。これにより、トランスファーが開始され、L1サブグラフが廃止されます(詳細については、「背後で何が起こるか:シグナル、L1サブグラフ、およびクエリURLの理解」を参照してください)。 -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +このステップを実行する場合は、\*\*7日以内にステップ3を完了するまで続行してください。そうしないと、サブグラフとシグナルGRTが失われます。 これは、L1-L2メッセージングがアービトラムでどのように機能するかによるものです: ブリッジを介して送信されるメッセージは、7日以内に実行する必要がある「再試行可能なチケット」であり、アービトラムのガス価格に急上昇がある場合は、最初の実行で再試行が必要になる場合があります。 ![Start the trnasfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## ステップ2: サブグラフがL2に到達するのを待つ -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). 
+転送を開始した後、L1サブグラフをL2に送信するメッセージは、アービトラムブリッジを介して伝播する必要があります。これには約20分かかります(ブリッジは、トランザクションを含むメインネットブロックが潜在的なチェーン再編成から「安全」になるまで待機します)。 -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +この待機時間が終了すると、アービトラムはL2契約の転送の自動実行を試みます。 ![Wait screen](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## ステップ3: 転送の確認 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +ほとんどの場合、ステップ1に含まれるL2ガスは、アービトラム契約のサブグラフを受け取るトランザクションを実行するのに十分であるため、このステップは自動実行されます。ただし、場合によっては、アービトラムのガス価格の急騰により、この自動実行が失敗する可能性があります。この場合、サブグラフをL2に送信する「チケット」は保留中であり、7日以内に再試行する必要があります。 -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +この場合、アービトラムにETHがあるL2ウォレットを使用して接続し、ウォレットネットワークをアービトラムに切り替え、[転送の確認] をクリックしてトランザクションを再試行する必要があります。 ![Confirm the transfer to L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## ステップ4: L2での転送の完了 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +この時点で、サブグラフとGRTはアービトラムで受信されましたが、サブグラフはまだ公開されていません。受信ウォレットとして選択したL2ウォレットを使用して接続し、ウォレットネットワークをArbitrumに切り替えて、[サブグラフの公開] をクリックする必要があります。 ![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) ![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +これにより、アービトラムで動作しているインデクサーがサブグラフの提供を開始できるように、サブグラフが公開されます。また、L1から転送されたGRTを使用してキュレーションシグナルをミントします。 -## Step 5: Updating the query URL +## ステップ 5: クエリ URL の更新 -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +サブグラフがアービトラムに正常に転送されました! サブグラフを照会するには、新しい URL は次のようになります: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +アービトラム上のサブグラフIDは、メインネット上でのものとは異なることに注意してください。ただし、エクスプローラやスタジオ上で常にそのIDを見つけることができます(詳細は「シグナル、L1サブグラフ、およびクエリURLの動作理解」を参照)。前述のように、古いL1 URLはしばらくの間サポートされますが、サブグラフがL2上で同期されたらすぐに新しいアドレスにクエリを切り替える必要があります。 -## How to transfer your curation to Arbitrum (L2) +## キュレーションをアービトラム(L2) に転送する方法 -## Understanding what happens to curation on subgraph transfers to L2 +## L2へのサブグラフ転送のキュレーションに何が起こるかを理解する -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. 
signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +サブグラフの所有者がサブグラフをアービトラムに転送すると、サブグラフのすべての信号が同時にGRTに変換されます。これは、「自動移行」シグナル、つまりサブグラフのバージョンまたはデプロイに固有ではないが、サブグラフの最新バージョンに従うシグナルに適用されます。 -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +このシグナルからGRTへの変換は、サブグラフのオーナーがL1でサブグラフを非推奨にした場合と同じです。サブグラフが非推奨化または移管されると、すべてのキュレーションシグナルは同時に(キュレーションボンディングカーブを使用して)「燃やされ」、その結果得られるGRTはGNSスマートコントラクトに保持されます(これはサブグラフのアップグレードと自動移行されるシグナルを処理するコントラクトです)。そのため、そのサブグラフの各キュレーターは、所持していたシェアの量に比例したGRTの請求権を持っています。 -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +サブグラフの所有者に対応するこれらの GRT の一部は、サブグラフとともに L2 に送信されます。 -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +この時点では、キュレートされたGRTはこれ以上のクエリ手数料を蓄積しません。したがって、キュレーターは自分のGRTを引き出すか、それをL2上の同じサブグラフに移動して新しいキュレーションシグナルを作成するために使用することができます。いつ行うかに関わらず、GRTは無期限に保持でき、すべての人が自分のシェアに比例した額を受け取ることができるため、急ぐ必要はありません。 -## Choosing your L2 wallet +## L2ウォレットの選択 -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +キュレーションされたGRTをL2に転送する場合は、L2のキュレーションシグナルを所有する別のウォレットを選択できます。 -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +もしMetamaskのような「通常の」ウォレット(Externally Owned AccountまたはEOA、つまりスマートコントラクトではないウォレット)を使用している場合、これはオプションです。そして、L1と同じキュレーターアドレスを保持することが推奨されます。 -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +スマートコントラクトウォレット(例:マルチシグ、Safeなど)を使用している場合、異なるL2ウォレットアドレスを選択することは必須です。おそらくこのアカウントはメインネット上のみ存在しており、このウォレットを使用してArbitrumでトランザクションを行うことはできません。スマートコントラクトウォレットやマルチシグを引き続き使用したい場合は、Arbitrum上で新しいウォレットを作成し、そのアドレスをL2受信ウォレットアドレスとして使用してください。 -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**重要なのは、あなたがコントロールし、Arbitrum上でトランザクションを行えるウォレットアドレスを使用することです。そうしないとキュレーションが失われ、回復することはできません。** -## Sending curation to L2: Step 1 +## キュレーションを L2 に送信する: ステップ 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. 
You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +転送を開始する前に、L2上でキュレーションを所有するアドレスを決定する必要があります(上記の「L2ウォレットの選択」を参照)。また、L2でメッセージの実行を再試行する必要がある場合に備えて、ガスのためにすでにArbitrumにブリッジされたいくらかのETHを持つことをお勧めします。ETHをいくつかの取引所で購入し、それを直接Arbitrumに引き出すことができます。または、Arbitrumブリッジを使用して、メインネットのウォレットからL2にETHを送信することもできます: [bridge.arbitrum.io](http://bridge.arbitrum.io)。Arbitrumのガス料金が非常に低いため、0.01 ETHなどの少額で十分です。 -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +もしキュレーションしているサブグラフがL2に移行された場合、エクスプローラ上でそのサブグラフが移行されたことを示すメッセージが表示されます。 -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +サブグラフのページを表示する際に、キュレーションを引き出すか、移行するかを選択できます。"Transfer Signal to Arbitrum" をクリックすると、移行ツールが開きます。 ![Transfer signal](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. +移行ツールを開いた後、ウォレットにETHがない場合はETHを追加するよう促されることがあります。その後、L2ウォレットアドレスを「受信ウォレットアドレス」フィールドに入力できます。ここに正しいアドレスを入力していることを確認してください。"Transfer Signal" をクリックすると、ウォレット上でトランザクションを実行するよう求められます(L2ガスの支払いに一定のETH価値が含まれています)。これにより、移行が開始されます。 -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +このステップを実行する場合、**ステップ3を7日以内に完了するように注意してください。7日を過ぎると、シグナルGRTは失われる可能性があります。** これは、ArbitrumのL1-L2メッセージングの仕組みに起因しています。ブリッジを介して送信されるメッセージは「再試行可能なチケット」であり、7日以内に実行される必要があり、初回の実行がArbitrumのガス価格の急上昇により再試行が必要な場合もあります。 -## Sending curation to L2: step 2 +## キュレーションを L2 に送信する: ステップ 2 -Starting the transfer: +転送を開始します: ![Send signal to L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +移行を開始すると、L1キュレーションをL2に送信するメッセージがArbitrumブリッジを介して伝播する必要があります。これには約20分かかります(ブリッジは、トランザクションを含むメインネットのブロックが潜在的なチェーンの再編から「安全」であるのを待ちます)。 -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +この待機時間が終了すると、アービトラムはL2契約の転送の自動実行を試みます。 ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## キュレーションを L2 に送信する: ステップ 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. 
In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +ほとんどの場合、ステップ1に含まれるL2ガスは、アービトラム契約でキュレーションを受け取るトランザクションを実行するのに十分であるため、このステップは自動実行されます。ただし、場合によっては、アービトラムのガス価格の急騰により、この自動実行が失敗する可能性があります。この場合、キュレーションをL2に送信する「チケット」は保留中であり、7日以内に再試行する必要があります。 -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +この場合、アービトラムにETHがあるL2ウォレットを使用して接続し、ウォレットネットワークをアービトラムに切り替え、[転送の確認] をクリックしてトランザクションを再試行する必要があります。 ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## L1 でキュレーションを取り消す -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +GRT を L2 に送信したくない場合、または GRT を手動でブリッジしたい場合は、L1 でキュレーションされた GRT を取り消すことができます。サブグラフページのバナーで、「シグナルの引き出し」を選択し、トランザクションを確認します。GRTはあなたのキュレーターアドレスに送信されます。 diff --git a/website/pages/ja/billing.mdx b/website/pages/ja/billing.mdx index da9b59c59ad5..0be594706c49 100644 --- a/website/pages/ja/billing.mdx +++ b/website/pages/ja/billing.mdx @@ -37,8 +37,12 @@ The GraphプロトコルはEthereum Mainnet上で動作しますが、[課金契 ### 暗号ウォレットを使ったGRTの追加 + + > このセクションは、あなたがすでに暗号ウォレットにGRTを持っていて、イーサリアムのメインネットにいることを想定して書かれています。GRTを持っていない場合は、GRTの入手方法[こちら](#getting-grt)を参照してください。 +暗号通貨ウォレットを使用して請求残高にGRTを追加する方法のビデオウォークスルーについては、この[ビデオ](https://youtu.be/4Bw2sh0FxCg)をご覧ください。 + 1. [サブグラフ・スタジオの課金ページ](https://thegraph.com/studio/billing/)にアクセスします。 2. ページ右上の「Connect Wallet」をクリックします。ウォレット選択ページに遷移します。ウォレットを選択し、「Connect」をクリックします。 @@ -71,6 +75,8 @@ The GraphプロトコルはEthereum Mainnet上で動作しますが、[課金契 ### マルチシグウォレットを使ったGRTの追加 + + 1. [サブグラフ・スタジオの課金ページ](https://thegraph.com/studio/billing/)にアクセスします。 2. ページの右上隅にある [Connect Wallet] ボタンをクリックします。ウォレットを選択し、「接続」をクリックします。 [Gnosis-Safe](https://gnosis-safe.io/) を使用している場合は、マルチシグと署名ウォレットを接続できます。次に、関連付けられたメッセージに署名します。これでガス代はかかりません。 @@ -97,11 +103,11 @@ The GraphプロトコルはEthereum Mainnet上で動作しますが、[課金契 ## GRTの獲得 -ここでは、GRTにクエリ費用を負担してもらう方法を紹介します。 +このセクションでは、GRTを支払ってクエリ料金を支払う方法を説明します。 ### Coinbase -CoinbaseでGRTを購入するためのステップバイステップガイドになります。 +これは、CoinbaseでGRTを購入するためのステップバイステップのガイドになります。 1. [Coinbase](https://www.coinbase.com/)にアクセスし、アカウントを作成する。 2. アカウントを作成したら、KYC(またはKnow Your Customer)として知られるプロセスを通じて、あなたの身元を確認する必要があります。これは、すべての中央集権型またはカストディ型の暗号取引所の標準的な手順です。 @@ -117,11 +123,11 @@ CoinbaseでGRTを購入するためのステップバイステップガイドに - 送信したいGRTの金額と送信先のウォレットアドレスを入力します。 - 「Continue」をクリックし、取引を確認します。-購入金額が大きい場合、Coinbaseは暗号ウォレットに全額を送金する前に7~10日待つよう要求することがありますので、ご注意ください。 -CoinbaseでのGRTの取得については、[こちら](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency)でご確認ください。 +CoinbaseでGRTを入手する詳細については、[こちら](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency)をご覧いただけます。 ### Binance -BinanceでGRTを購入するためのステップバイステップガイドになります。 +これは、BinanceでGRTを購入するためのステップバイステップのガイドになります。 1. [Binance](https://www.binance.com/en)にアクセスし、アカウントを作成する。 2. 
アカウントを作成したら、KYC(またはKnow Your Customer)として知られるプロセスを通じて、あなたの身元を確認する必要があります。これは、すべての中央集権型またはカストディ型の暗号取引所の標準的な手順です。 @@ -137,11 +143,11 @@ BinanceでGRTを購入するためのステップバイステップガイドに - 送信したいGRTの金額とホワイトリストウォレットアドレスを入力します。 - 「Continue」をクリックし、取引を確定してください。 -BinanceでのGRTの取得については、[こちら](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582)でご確認ください。 +BinanceでGRTを入手する詳細については、[こちら](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582)をご覧いただけます。 ### Uniswap -UniswapでGRTを購入する方法です。 +これがUniswapでGRTを購入する方法です。 1. [Uniswap](https://app.uniswap.org/#/swap) にアクセスし、ウォレットを接続します。 2. スワップ元となるトークンを選択します。ETHを選択します。 @@ -151,8 +157,52 @@ UniswapでGRTを購入する方法です。 5. 「Swap」をクリックします。 6. ウォレットで取引を確認し、あなたは取引が処理されるのを待ちます。 -UniswapでのGRTの取得については、[こちら](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-)でご確認ください。 +UniswapでGRTを入手する詳細については、[こちら](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-)をご覧いただけます。 + +## イーサリアムの入手 + +このセクションでは、トランザクション手数料やガスコストに支払うためにEthereum(ETH)を入手する方法を説明します。ETHは、トークンの送金や契約とのやり取りなど、Ethereumネットワークで操作を実行するために必要です。 + +### Coinbase + +これは、CoinbaseでETHを購入するためのステップバイステップのガイドになります。 + +1. [Coinbase](https://www.coinbase.com/)にアクセスし、アカウントを作成する。 +2. アカウントを作成したら、KYC(Know Your Customer)として知られるプロセスを通じて身元を確認してください。これは、すべての中央集権的またはカストディアルな暗号通貨取引所で行われる標準的な手続きです。 +3. 身元を確認したら、「Buy/Sell」ボタンをページの右上にクリックしてETHを購入してください。 +4. 購入したい通貨を選択してください。ETHを選択します。 +5. お好きな支払い方法を選択してください。 +6. 購入したいETHの数量を入力してください。 +7. 購入内容を確認し、「ETHを購入」をクリックしてください。 +8. 購入を確認すると、ETHを正常に購入することができます。 +9. Coinbaseアカウントから[MetaMask](https://metamask.io/)などの暗号通貨ウォレットにETHを送金できます。 + - ETHを暗号通貨ウォレットに送金するには、ページの右上にある「Accounts」ボタンをクリックしてください。 + - ETHアカウントの隣にある「送信」ボタンをクリックしてください。 + - 送金したいETHの金額と送金先のウォレットアドレスを入力してください。 + - 「Continue」をクリックし、取引を確定してください。 + +CoinbaseでETHを入手する詳細については、[こちら](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency)をご覧いただけます。 + +### Binance + +これは、BinanceでETHを購入するためのステップバイステップのガイドになります。 + +1. [Binance](https://www.binance.com/en)にアクセスし、アカウントを作成する。 +2. アカウントを作成したら、KYC(Know Your Customer)として知られるプロセスを通じて身元を確認してください。これは、すべての中央集権的またはカストディアルな暗号通貨取引所で行われる標準的な手続きです。 +3. 身元を確認したら、ホームページバナーの「Buy Now」ボタンをクリックしてETHを購入してください。 +4. 購入したい通貨を選択してください。ETHを選択します。 +5. お好きな支払い方法を選択してください。 +6. 購入したいETHの数量を入力してください。 +7. 購入内容を確認し、「ETHを購入」をクリックしてください。 +8. 購入を確認すると、BinanceのSpotウォレットにETHが表示されます。 +9. 
ETHをアカウントから[MetaMask](https://metamask.io/)などの暗号通貨ウォレットに引き出すことができます。 + - ETHを暗号通貨ウォレットに引き出すために、引き出しのホワイトリストに暗号通貨ウォレットのアドレスを追加してください。 + - 「ウォレット」ボタンをクリックし、その後「引き出し」をクリックし、ETHを選択してください。 + - 送金したいETHの金額と送金先のホワイトリストに登録されているウォレットアドレスを入力してください。 + - 「Continue」をクリックし、取引を確定してください。 + +BinanceでETHを入手する詳細については、[こちら](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582)をご覧いただけます。 ## Arbitrum Bridge -課金コントラクトは、Ethereum mainnetからArbitrumネットワークへのGRTのブリッジとしてのみ設計されています。ArbitrumからEthereumメインネットにGRTを戻したい場合は、[Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161)を使用する必要があります。 +請求契約は、GRTをEthereumメインネットからArbitrumネットワークに移すために設計されています。ArbitrumからGRTをEthereumメインネットに戻す場合は、[Arbitrumブリッジ](https://bridge.arbitrum.io/?l2ChainId=42161)を使用する必要があります。 diff --git a/website/pages/ja/chain-integration-overview.mdx b/website/pages/ja/chain-integration-overview.mdx new file mode 100644 index 000000000000..e3a9d2f1c8e6 --- /dev/null +++ b/website/pages/ja/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: チェーン統合プロセスの概要 +--- + +透明性のあるガバナンスベースの統合プロセスは、[integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468) を求めるブロックチェーン チーム向けに設計されました。 以下に要約するように、これは 3 段階のプロセスです。 + +## ステージ 1. 技術的統合 + +- チームは、非 EVM ベースのチェーン用の Graph Node 統合と Firehose に取り組んでいます。 [Here's how](/new-chain-integration/)。 +- チームは、プロトコルの統合プロセスを開始するために、[here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71)のフォーラムスレッドを作成します(Governance & GIPsの下にあるNew Data Sourcesのサブカテゴリ内)。デフォルトのフォーラムテンプレートの使用が必須です。 + +## ステージ 2. 統合の検証 + +- チームは、コア開発者、Graph Foundation、および [Subgraph Studio](https://thegraph.com/studio/) のようなGUIやネットワークゲートウェイのオペレーターと協力して、スムーズな統合プロセスを確保しています。これには、統合するチェーンのJSON RPCやFirehoseエンドポイントなどの必要なバックエンドインフラストラクチャを提供することが含まれます。このようなインフラストラクチャをセルフホスティングしたくないチームは、The Graphのノードオペレーター(インデクサー)のコミュニティを活用して、それを行うことができます。これに関しては、Foundationがサポートを提供できます。 +- Graph Indexer は、The Graph のテストネットで統合をテストします。 +- コア開発者とインデクサーは、安定性、パフォーマンス、およびデータの決定性を監視します。 + +## ステージ 3. メインネットの統合 + +- チームは、メインネット統合を提案するために、Graph Improvement Proposal (GIP) を提出し、[feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) に関するプルリクエスト(PR)を開始します(詳細はリンクを参照)。 +- Graph Council はリクエストを検討してメインネットのサポートを承認し、ステージ 2 の成功とコミュニティからの肯定的なフィードバックを提供します。 + +--- + +もしプロセスが難しそうに見える場合でも、心配しないでください!Graph Foundationは、協力を促進し、必要な情報を提供し、Graph Improvement Proposals(GIP)やプルリクエストなどのガバナンスプロセスを含むさまざまな段階でガイドすることにコミットしています。質問がある場合は、[info@thegraph.foundation](mailto:info@thegraph.foundation) またはDiscord(Pedro、The Graph Foundationメンバー、IndexerDAO、その他のコア開発者など)を通じてお問い合わせください。 + +The Graph Network の未来を形作る準備はできていますか? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) 今すぐ、Web3 革命の一員になりましょう。 + +--- + +## よくある質問 + +### 1. これは [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761) とどのように関連していますか? + +このプロセスは、新しいサブグラフの「データソース」にのみ適用される、サブグラフデータサービスに関連しています。 + +### 2. ネットワークがメインネットでサポートされた後に Firehose とサブストリームのサポートが追加された場合はどうなりますか? + +これは、サブストリームで動作するサブグラフに対するインデックスリワードのプロトコルサポートに影響を与えるものです。新しいFirehoseの実装は、このGIPのステージ2に概説されている方法論に従って、テストネットでテストされる必要があります。同様に、実装がパフォーマンスが良く信頼性があると仮定して、[Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)へのPR(「Substreamsデータソース」サブグラフ機能)が必要です。また、インデックスリワードのプロトコルサポートに関する新しいGIPも必要です。誰でもPRとGIPを作成できますが、Foundationは評議会の承認をサポートします。 + +### 3. このプロセスにはどのくらい時間がかかりますか? 
+ +メインネットへの移行にかかる時間は、統合開発の進捗によるもの、追加の調査が必要かどうか、テストとバグ修正、そして常にコミュニティのフィードバックを必要とするガバナンスプロセスのタイミングに応じて異なりますが、数週間を予想しています。 + +インデックスリワードのプロトコルサポートは、テスト、フィードバックの収集、および必要な場合のコアコードベースへの貢献の取り扱いに関わる関係者の能力に依存します。これは、統合の成熟度と、統合チームのレスポンスの良さ(RPC/Firehose実装の背後にいるかどうかは問わない)に直接関連しています。Foundationは、プロセス全体を通じてサポートを提供するためにここにいます。 + +### 4. 優先順位はどのように扱われますか? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ja/cookbook/arweave.mdx b/website/pages/ja/cookbook/arweave.mdx index 037242bc6bc6..fda55c355fa8 100644 --- a/website/pages/ja/cookbook/arweave.mdx +++ b/website/pages/ja/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweaveでのサブグラフ構築 --- -> グラフノードとホスティングサービスにおけるArweaveのサポートはベータ版です。Arweaveサブグラフの構築に関するご質問は[Discord](https://discord.gg/graphprotocol)にご連絡ください。 +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! このガイドでは、Arweaveブロックチェーンのインデックスを作成するためのサブグラフの構築とデプロイ方法について学びます。 @@ -83,7 +83,7 @@ dataSources: ``` - Arweave サブグラフは新しい種類のデータ ソースを導入します (`arweave`) -- ネットワークはホスティングするグラフノード上のネットワークに対応する必要があります。ホスティングサービス上では、Arweaveのメインネットは`arweave-mainnet`です。 +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave データ ソースには、オプションの source.owner フィールドが導入されています。これは、Arweave ウォレットの公開鍵です。 Arweaveデータソースは 2 種類のハンドラーをサポートしています: @@ -150,9 +150,9 @@ class Transaction { Arweave サブグラフのマッピングの記述は、Ethereum サブグラフのマッピングの記述と非常に似ています。詳細については、[こちら](/developing/creating-a-subgraph/#writing-mappings)をクリックしてください。 -## ホステッド サービスへの Arweave サブグラフのデプロイ +## Deploying an Arweave Subgraph on the hosted service -ホストされたサービスのダッシュボード上でサブグラフを作成したら、展開は「使用する」を使って行うことができます`graph deploy` CLI コマンド +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ja/cookbook/cosmos.mdx b/website/pages/ja/cookbook/cosmos.mdx index 1de8abedb949..d38cb857b148 100644 --- a/website/pages/ja/cookbook/cosmos.mdx +++ b/website/pages/ja/cookbook/cosmos.mdx @@ -178,7 +178,7 @@ Cosmos統合の全種類一覧は[こちら](https://github.com/graphprotocol/gr Cosmosのメッセージはチェーンに固有であり、シリアル化された[Protocol Buffers](https://developers.google.com/protocol-buffers/)ペイロードの形でサブグラフに渡されることに注意することが重要です。その結果、メッセージデータは処理される前にマッピング関数でデコードされる必要があります。 -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). 
+サブグラフ内のメッセージ データをデコードする方法の例は、[ここ](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts)にあります。 ## Cosmosサブグラフの作成と構築 diff --git a/website/pages/ja/cookbook/grafting.mdx b/website/pages/ja/cookbook/grafting.mdx index a9a6b78f807a..a5ee81c1b26f 100644 --- a/website/pages/ja/cookbook/grafting.mdx +++ b/website/pages/ja/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: グラフティングでコントラクトを取り替え、履歴を残 このチュートリアルでは、基本的なユースケースについて説明します。既存の契約を同一の契約に置き換えます(新しい住所ですが、コードは同じです)。次に、新しいコントラクトを追跡する「ベース」サブグラフに既存のサブグラフを移植します +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## 既存のサブグラフの構築 サブグラフの構築は、The Graphの重要な部分であり、[こちら](http://localhost:3000/en/cookbook/quick-start/)でより詳しく説明されています。このチュートリアルで使用する既存のサブグラフをビルドしてデプロイできるようにするために、以下のレポを提供します。 diff --git a/website/pages/ja/cookbook/near.mdx b/website/pages/ja/cookbook/near.mdx index cdf6fbdddff2..1a055327d9c4 100644 --- a/website/pages/ja/cookbook/near.mdx +++ b/website/pages/ja/cookbook/near.mdx @@ -199,7 +199,7 @@ $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -### Local Graph Node (based on default configuration) +### ローカル グラフ ノード (デフォルト構成に基づく) ```sh graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 @@ -217,7 +217,7 @@ graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 } ``` -### Indexing NEAR with a Local Graph Node +### ローカル グラフ ノードを使用した NEAR のインデックス作成 NEAR のインデックスを作成するグラフノードの運用には、以下のような運用要件があります: @@ -241,19 +241,19 @@ NEAR サブグラフの GraphQL エンドポイントは、既存の API イン ## よくある質問 -### How does the beta work? +### ベータ版はどのように機能しますか? NEAR サポートはベータ版です。統合の改善を続ける中で、API に変更が加えられる可能性があります。NEAR サブグラフの構築をサポートし、最新の開発状況をお知らせしますので、near@thegraph.comまでメールをお送りください。 -### Can a subgraph index both NEAR and EVM chains? +### サブグラフは NEAR チェーンと EVM チェーンの両方にインデックスを付けることができますか? いいえ、サブグラフは 1 つのチェーン/ネットワークのデータソースのみをサポートします。 -### Can subgraphs react to more specific triggers? +### サブグラフはより具体的なトリガーに反応できますか? 現在、ブロックとレシートのトリガーのみがサポートされています。指定されたアカウントへのファンクションコールのトリガーを検討しています。また、NEAR がネイティブイベントをサポートするようになれば、イベントトリガーのサポートも検討しています。 -### Will receipt handlers trigger for accounts and their sub-accounts? +### 領収書ハンドラーは、アカウントとそのサブアカウントに対してトリガーされますか? 
もし`account`が指定された場合、それは正確なアカウント名にのみマッチします。`accounts` フィールドを指定して、`suffixes` と `prefixes` でアカウントとサブアカウントにマッチさせることが可能で、例えば、次のようにするとすべての `mintbase1.near` サブアカウントにマッチすることになります。 @@ -263,21 +263,21 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### NEAR サブグラフは、マッピング中に NEAR アカウントへのビュー呼び出しを行うことができますか? これはサポートされていません。この機能がインデックス作成に必要かどうかを評価しています。 -### Can I use data source templates in my NEAR subgraph? +### NEAR サブグラフでデータ ソース テンプレートを使用できますか? これは現在サポートされていません。この機能がインデックス作成に必要かどうかを評価しています。 -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum サブグラフは「保留中」バージョンと「現在」バージョンをサポートしていますが、NEAR サブグラフの「保留中」バージョンをデプロイするにはどうすればよいですか? 「pending」は、NEAR サブグラフではまだサポートされていません。暫定的に、新しいバージョンを別の「named」サブグラフにデプロイし、それがチェーンヘッドと同期したときに、メインの「named」サブグラフに再デプロイすることができます。 -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### 私の質問に対する回答がありません。NEAR サブグラフの作成に関するヘルプはどこで入手できますか? -サブグラフの開発に関する一般的な質問であれば、[Developer documentation](/cookbook/quick-start)に多くの情報が掲載されています。それ以外の場合は、[The Graph Protocol Discord](https://discord.gg/graphprotocol)に参加して#near チャンネルで質問するか、 near@thegraph.com にメールしてください。 +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## 参考文献 diff --git a/website/pages/ja/cookbook/subgraph-debug-forking.mdx b/website/pages/ja/cookbook/subgraph-debug-forking.mdx index 559d1ef72242..134e7b12cc1a 100644 --- a/website/pages/ja/cookbook/subgraph-debug-forking.mdx +++ b/website/pages/ja/cookbook/subgraph-debug-forking.mdx @@ -18,7 +18,7 @@ title: フォークを用いた迅速かつ容易なサブグラフのデバッ ## コードを見てみましょう -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +サブグラフのデバッグに集中し続けるために、物事をシンプルにして、Ethereum Gravity スマート コントラクトのインデックスを作成する [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) を実行してみましょう。 以下は、`Gravatar`のインデックスを作成するために定義されたハンドラで、バグが全くありません。 diff --git a/website/pages/ja/cookbook/subgraph-uncrashable.mdx b/website/pages/ja/cookbook/subgraph-uncrashable.mdx index fdc30e27cd86..f50944b02a9c 100644 --- a/website/pages/ja/cookbook/subgraph-uncrashable.mdx +++ b/website/pages/ja/cookbook/subgraph-uncrashable.mdx @@ -4,7 +4,7 @@ title: 安全なサブグラフのコード生成 [Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/)は、プロジェクトのgraphqlスキーマからヘルパー関数のセットを生成するコード生成ツールです。これにより、サブグラフ内のエンティティとのすべてのインタラクションが完全に安全で一貫性のあるものになることを保証します。 -## Why integrate with Subgraph Uncrashable? 
+## Subgraph Uncrashable と統合する理由

- **継続的なアップタイム**です。誤って処理されたエンティティによってサブグラフがクラッシュすることがあり、The Graphに依存しているプロジェクトに支障をきたすことがあります。ヘルパー関数を設定して、サブグラフを「クラッシュしない」ようにし、ビジネスの継続性を確保しましょう。

diff --git a/website/pages/ja/cookbook/substreams-powered-subgraphs.mdx b/website/pages/ja/cookbook/substreams-powered-subgraphs.mdx
index 6b84c84358c8..d6f15b50efd7 100644
--- a/website/pages/ja/cookbook/substreams-powered-subgraphs.mdx
+++ b/website/pages/ja/cookbook/substreams-powered-subgraphs.mdx
@@ -4,27 +4,27 @@ title: Substreams-powered subgraphs

 [Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers.

-## Requirements
+## 要件

-This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0):
+このクックブックには、[yarn](https://yarnpkg.com/)、[ローカル サブストリーム開発に必要な依存関係](https://substreams.streamingfast.io/developers-guide/installation-requirements)、および Graph CLI の最新バージョン (>=0.52.0) が必要です。

```
npm install -g @graphprotocol/graph-cli
```

-## Get the cookbook
+## 料理本を入手

-> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph).
+> このクックブックでは、この [Substreams-powered subgraph を参考として](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph) を使用します。

```
-graph init --from-example substreams-powered-subgraph
+graph init --from-example substreams-powered-subgraph
```

-## Defining a Substreams package
+## サブストリームパッケージの定義

A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples.

-The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts.
To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): +問題の Substreams パッケージは、メインネット イーサリアム上のコントラクトのデプロイメントを検出し、新しくデプロイされたすべてのコントラクトの作成ブロックとタイムスタンプを追跡します。これを行うには、`/proto/example.proto` に専用の `Contract` タイプがあります ([プロトコル バッファーの定義の詳細](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: +Substreams パッケージのコア ロジックは、`lib.rs` の `map_contract` モジュールです。これは、すべてのブロックを処理し、元に戻されなかった Create 呼び出しをフィルタリングして、`Contracts` を返します。 ``` #[substreams::handlers::map] @@ -64,12 +64,11 @@ fn map_contract(block: eth::v2::Block) -> Result The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. +> 「substreams_entity_change」クレートには、エンティティの変更を単純に生成するための専用の「Tables」関数もあります (\[[documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html))。生成されたエンティティ変更は、対応するサブグラフの `subgraph.graphql` で定義された `schema.graphql` エンティティと互換性がある必要があります。 ``` #[substreams::handlers::map] @@ -88,7 +87,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: +この Substreams パッケージをサブグラフで使用できるように準備するには、次のコマンドを実行する必要があります。 ```bash yarn substreams:protogen # generates types in /src/pb @@ -147,19 +146,19 @@ yarn substreams:package # packages the substreams in a .spkg file # alternatively, yarn substreams:prepare calls all of the above commands ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> 基礎となるサブストリーム コマンドを理解したい場合は、これらのスクリプトは「package.json」ファイルで定義されます。 -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +これにより、「substreams.yaml」のパッケージ名とバージョンに基づいて「spkg」ファイルが生成されます。 「spkg」ファイルには、グラフ ノードがこのサブストリーム パッケージを取り込むために必要なすべての情報が含まれています。 -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. +> Substreams パッケージを更新する場合、加えた変更に応じて、`spkg` を最新にするために上記のコマンドの一部またはすべてを実行する必要がある場合があります。 -## Defining a Substreams-powered subgraph +## サブストリームによって動作するサブグラフの定義 -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. +サブストリームによって動作するサブグラフは、新しい "substreams" というデータソースの kind を導入します。このようなサブグラフは、ただ1つのデータソースのみを持つことができます -This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. 
+このデータ ソースは、インデックス付きネットワーク、相対的なファイルの場所としての Substreams パッケージ (`spkg`)、およびサブグラフ互換のエンティティ変更を生成するその Substreams パッケージ内のモジュール (この場合は、上記の Substreams パッケージからの `map_entity_changes`) を指定する必要があります。マッピングは指定されていますが、単にマッピングの種類 (「サブストリーム/グラフ エンティティ」) と apiVersion を識別するだけです。 -> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). +> 現在、Subgraph Studio と The Graph Network は、「メインネット」 (メインネット イーサリアム) にインデックスを付けるサブストリームを利用したサブグラフをサポートしています。 ```yaml specVersion: 0.0.4 @@ -180,7 +179,7 @@ dataSources: apiVersion: 0.0.5 ``` -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. +`subgraph.yaml` もスキーマ ファイルを参照します。このファイルの要件は変更されていませんが、指定されたエンティティは、`subgraph.yaml` で参照される Substreams モジュールによって生成されるエンティティの変更と互換性がある必要があります。 ```graphql type Contract @entity { @@ -194,9 +193,9 @@ type Contract @entity { } ``` -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. +上記を考慮すると、サブグラフ開発者は Graph CLI を使用して、このサブストリームを利用したサブグラフをデプロイできます。 -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). +> サブストリームを利用したサブグラフのインデックス作成メインネット Ethereum は、[Subgraph Studio](https://thegraph.com/studio/) にデプロイできます。 ```bash yarn install # install graph-cli @@ -204,11 +203,11 @@ yarn subgraph:build # build the subgraph yarn subgraph:deploy # deploy the subgraph ``` -That's it! You have built and deployed a Substreams-powered subgraph. +それでおしまい!サブストリームを利用したサブグラフを構築してデプロイしました。 -## Serving Substreams-powered subgraphs +## サブストリームを利用したサブグラフの提供 -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. These providers can be configured via a `config.toml` file: +サブストリームを利用したサブグラフを提供するには、チェーン ヘッドを追跡するための Firehose または RPC だけでなく、関連するネットワークのサブストリーム プロバイダーを使用してグラフ ノードを構成する必要があります。これらのプロバイダーは、「config.toml」ファイル経由で設定できます。 ```toml [chains.mainnet] diff --git a/website/pages/ja/cookbook/upgrading-a-subgraph.mdx b/website/pages/ja/cookbook/upgrading-a-subgraph.mdx index a5a132a757d5..c2cd2aafe541 100644 --- a/website/pages/ja/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ja/cookbook/upgrading-a-subgraph.mdx @@ -1,23 +1,23 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: 既存のサブグラフをThe Graph Networkにアップグレードする方法 --- ## イントロダクション -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +これは、ホストされているサービスからThe Graphの分散型ネットワークへのサブグラフのアップグレード方法に関するガイドです。Snapshot、Loopring、Audius、Premia、Livepeer、Uma、Curve、Lidoなどのプロジェクトを含む1,000以上のサブグラフがThe Graph Networkに成功してアップグレードされました! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. +アップグレードのプロセスは迅速であり、あなたのサブグラフは永久にThe Graph Networkでしか得られない信頼性とパフォーマンスの恩恵を受けることができます。 ### 前提条件 - 私はすでにホストされたサービスにサブグラフを展開しました -- サブグラフは、The Graph Networkで利用可能な(またはベータ版で利用可能な)チェーンのインデックスを作成しています。 -- You have a wallet with ETH to publish your subgraph on-chain. 
-- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. +- The subgraph is indexing a chain available on The Graph Network. +- サブグラフをチェーン上に公開するためのETHを持つウォレットがあります。 +- インデクサーがインデックス作業を開始できるように、サブグラフをキュレートするために約10,000 GRTを持っています。 -## Upgrading an Existing Subgraph to The Graph Network +## 既存のサブグラフをThe Graph Networkにアップグレードする方法 -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> あなたのサブグラフに特定のコマンドを見つけることができます。[Subgraph Studio](https://thegraph.com/studio/) をご覧ください。 1. 最新版の graph-cli をインストールする: @@ -29,7 +29,7 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +Subgraph.yamlの 'apiVersion' が '0.0.5' 以上であることを確認してください。 2. メイン・プロジェクトのサブグラフ・リポジトリ内で、スタジオ上でデプロイとビルドを行うためにサブグラフを認証します。 @@ -43,15 +43,15 @@ graph auth --studio graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +もしサブグラフにビルドエラーがある場合、[AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/) を参照してください。 -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. ウォレットで[Subgraph Studio](https://thegraph.com/studio/)にサインインし、サブグラフをデプロイしてください。サブグラフの名前に基づく\は、Studio UIで見つけることができます。 ```sh graph deploy --studio ``` -5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): +5. Studioのプレイグラウンドでクエリをテストしてください。以下は、[Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground) のためのいくつかの例です。 ```sh { @@ -70,21 +70,21 @@ graph deploy --studio 6. この時点で、サブグラフはSubgraph Studio上にデプロイされましたが、分散ネットワークにはまだ公開されていません。サブグラフが意図したとおりに動作しているか、右上の一時的なクエリURLを用いてテストすることができます。この名前が示すように、これは一時的なURLであり、実運用に使用すべきではありません。 -- Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. -- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. +- アップデートは、既存のサブグラフの別のバージョンをチェーン上に公開するだけです。 +- これにはコストがかかるため、公開する前に「開発クエリURL」を使用してSubgraph Studioでサブグラフをデプロイおよびテストすることを強くお勧めします。 [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b) でトランザクションの例をご覧ください。価格はおおよそ100 gweiで0.0425 ETH程度です。 +- サブグラフを更新する必要があるたびに、更新料が請求されます。これにはコストがかかるため、メインネットにデプロイする前にサブグラフをGoerliでデプロイしてテストすることを強くお勧めします。場合によっては、そのサブグラフにシグナルがない場合でも、一部のGRTが必要になることもあります。サブグラフバージョンにシグナル/キュレーションがある場合(自動移行を使用)、料金は分割されます。 7. 
「Publish」ボタンを押して、サブグラフをThe Graphの分散型ネットワーク上に公開する。 -You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. +インデクサーによるインデックス作成が行われるよう、サブグラフをGRTでキュレーションすることをお勧めします。ガスコストを節約するためには、サブグラフをネットワークに公開する同じトランザクションでキュレーションを行うことができます。高品質なサービスを提供するために、少なくとも10,000 GRTでサブグラフをキュレーションすることをおすすめします。 -And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +以上です!公開が完了すると、[The Graph Explorer](https://thegraph.com/explorer) を介して分散型ネットワーク上でサブグラフをライブで表示することができます。 Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. ### APIキーの作成 -You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). +Subgraph StudioでAPIキーを生成するには、[here](https://thegraph.com/studio/apikeys/)をクリックしてください。 ![API key creation page](/img/api-image.png) @@ -101,7 +101,7 @@ You can generate an API key in Subgraph Studio [here](https://thegraph.com/studi ![Billing pane](/img/New-Billing-Pane.png) -> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. +> 注意: GRTを請求残高に追加するための詳細な手順については、[official billing page](../billing.mdx)をご覧ください。 ### APIキーの確保 @@ -110,13 +110,13 @@ APIは2つの方法で利用を制限し、セキュリティを確保するこ 1. オーソライズド・サブグラフ 2. オーソライズド・ドメイン -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +APIキーを保護するには、[here](https://thegraph.com/studio/apikeys/test/)をクリックしてください。 ![Subgraph lockdown page](/img/subgraph-lockdown.png) ### 分散ネットワーク上の自分のサブグラフをクエリ -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. +これで、ネットワーク上のIndexersのインデックス作成状況をグラフエクスプローラで確認できます(example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers))。上部の緑の線は、投稿時に8つのインデクサーがそのサブグラフのインデックス付けに成功したことを示します。また、Indexerタブでは、どのインデクサーがあなたのサブグラフをピックアップしたかを見ることができます。 ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) @@ -124,13 +124,13 @@ Now you can check the indexing status of the Indexers on the network in Graph Ex `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. +重要: [api-key] を前述のセクションで生成した実際のAPIキーで置き換えてください。 このQuery URLをダップ内で使用して、GraphQLリクエストを送信することができます。 おめでとうございます。あなたは今、分散化のパイオニアです! -> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. 
In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: +> 注意: ネットワークの分散性のため、異なるインデクサーが異なるブロックまでインデックスを行っている可能性があります。新鮮なデータのみを受け取るために、次のようにしてクエリを提供するためにインデクサーがインデックスを行った最小ブロックを指定することができます。ブロック引数: `{ number_gte: $minBlock }`といった形です。以下の例をご覧ください。 ```graphql { @@ -140,11 +140,11 @@ Important: Make sure to replace `[api-key]` with an actual API key generated in } ``` -More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). +ネットワークの性質や再編成の処理方法に関する詳細な情報は、ドキュメント記事[Distributed Systems](/querying/distributed-systems/) に記載されています。 -## Updating a Subgraph on the Network +## ネットワーク上のサブグラフの更新 -If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. +ネットワーク上の既存のサブグラフを更新したい場合、Graph CLIを使用して新しいバージョンのサブグラフをSubgraph Studioにデプロイすることで行えます。 1. 現在のサブグラフに変更を加える。Goerliに公開してSubgraph Studio上で小さな修正をテストするのが良いアイデアでしょう。 2. 以下のようにデプロイし、コマンドに新しいバージョンを指定します(例:v0.0.1、v0.0.2 など)。 @@ -156,58 +156,58 @@ graph deploy --studio 3. Subgraph Studio のプレイグラウンドでクエリを実行し、新バージョンをテストします。 4. 新しいバージョンを The Graph Network で公開します。これにはガスが必要であることを忘れてはなりません(上のセクションで説明)。 -### Owner Update Fee: Deep Dive +### 所有者更新料金: 詳細 -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> 注: Arbitrum のキュレーションでは結合曲線は使用されません。 Arbitrum について詳しくは、[here](/arbitrum/arbitrum-faq/) をご覧ください。 -An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). +アップデートには、GRTがサブグラフの古いバージョンから新しいバージョンに移行される必要があります。つまり、毎回のアップデートごとに新しいボンディングカーブが作成されます(ボンディングカーブに関する詳細は[here](/network/curating#bonding-curve-101))。 -The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. +新しいボンディングカーブは、新しいバージョンに移行されるすべてのGRTに1%のキュレーション税を課します。オーナーはこのうち50%、すなわち1.25%を支払わなければなりません。もう1.25%は、すべてのキュレーターに料金として請求されます。このインセンティブ設計は、サブグラフのオーナーが再帰的な更新呼び出しでキュレーターの資金をすべて排出できないようにするためのものです。キュレーションの活動がない場合、自分のサブグラフにシグナルを送るためには最低でも100 GRT支払う必要があります。 例を挙げてみましょう。これは、サブグラフが積極的にキュレートされている場合にのみ当てはまります。 - サブグラフの v1 で自動移行を使用して 100,000 GRT が通知される -- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned -- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. 
+- オーナーがバージョン2にアップデートします。100,000 GRTが新しいボンディングカーブに移行され、そのうち97,500 GRTが新しいカーブに投入され、2,500 GRTが燃焼されます。 +- その後、オーナーは手数料の半分を支払うために1250 GRTを燃やします。オーナーはアップデート前にこれをウォレットに持っていなければならず、そうでない場合、アップデートは成功しません。これはアップデートと同じトランザクションで行われます。 -_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ +_このメカニズムは現在ネットワーク上で稼働していますが、コミュニティでは現在、サブグラフ開発者の更新コストを削減する方法について議論しています。_ ### サブグラフの安定したバージョンの維持 If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. -Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. +サブグラフは、外部開発者が利用しているオープン API です。オープン API は、外部開発者のアプリケーションを破壊しないように、厳格な標準に従う必要があります。グラフ ネットワークでは、サブグラフ開発者は、インデクサーと、そのサブグラフを使用している他の開発者**だけでなく**、新しいサブグラフを同期するのにかかる時間を考慮する必要があります。 ### サブグラフのメタデータの更新 新しいバージョンを公開しなくても、サブグラフのメタデータを更新できます。メタデータには、サブグラフ名、画像、説明、Web サイトの URL、ソース コードの URL、およびカテゴリが含まれます。開発者は、該当するすべてのフィールドを編集できる Subgraph Studio でサブグラフの詳細を更新することで、これを行うことができます。 -Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +**エクスプローラーでサブグラフの詳細を更新** がチェックされていることを確認し、**保存** をクリックします。これがチェックされている場合、新しいデプロイメントで新しいバージョンを公開することなく、エクスプローラー内のサブグラフの詳細を更新するオンチェーン トランザクションが生成されます。 ## グラフネットワークにサブグラフを展開する際のベストプラクティス 1. サブグラフの開発に ENS 名を活用する -- Set up your ENS [here](https://app.ens.domains/) -- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). +- ENS をセットアップする [here](https://app.ens.domains/) +- ENS 名を[here](https://thegraph.com/explorer/settings?view=display-name) の設定に追加します。 2. プロフィールが充実しているほど、サブグラフがインデックスやキュレーションされる可能性が高くなります。 ## The Graph Network のサブグラフを廃止する -Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. +[here](/managing/deprecating-a-subgraph) の手順に従って、サブグラフを非推奨にし、グラフ ネットワークから削除します。 ## The Graph Network でのサブグラフのクエリと課金について -The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. +ホストされたサービスは、開発者が制限なしでサブグラフをデプロイできるように設定されました。 -In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). 
+The Graph Network が真に分散化されるためには、プロトコルのインセンティブの中核部分としてクエリ料金を支払う必要があります。 API のサブスクライブとクエリ料金の支払いの詳細については、請求に関するドキュメントを[here](/billing/) で確認してください。

### ネットワーク上でのクエリ料の算出

これは製品 UI のライブ機能ではありませんが、1 か月に支払ってもよい金額を予想クエリ量で割ることで、クエリごとの最大予算を設定できます。

-While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph.
+クエリの予算はあなたが決定することができますが、その価格でクエリを提供する意思のあるインデクサーがいるとは限りません。ゲートウェイが、あなたが支払ってもよい価格以下でクエリを提供する意思のあるインデクサーとマッチングできた場合、あなたは予算と彼らの価格の差額を支払うことになります。その結果、より低いクエリ価格は利用可能なインデクサーのプールを減少させ、受け取るサービスの品質に影響を与える可能性があります。高いクエリ料金を持つことは有益であり、それによってキュレーションや有名なインデクサーがあなたのサブグラフに興味を持つかもしれません。

これはダイナミックで成長中の市場ですが、どのように関わるかは自分でコントロールできることを忘れないでください。プロトコルにもゲートウェイにも、上限や下限の価格は指定されていません。例えば、ネットワーク上のいくつかのdappsが支払う価格(週単位)を以下に示します。最後の列はGRTでのクエリ料を示していますのでご覧ください。例えば、Pickle Financeは1秒あたり回のリクエストがあり、1週間でGRTを支払っています。

@@ -215,11 +215,11 @@ While you get to decide on your query budget, there is no guarantee that an Inde

## その他のリソース

-If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below:
+まだ混乱している場合でも、心配する必要はありません。次のリソースを確認するか、以下の分散ネットワークへのサブグラフのアップグレードに関するビデオ ガイドをご覧ください。

-- [The Graph Network Contracts](https://github.com/graphprotocol/contracts)
-- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around
-  - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538`
-- [Subgraph Studio documentation](/deploying/subgraph-studio)
+- [グラフネットワーク契約](https://github.com/graphprotocol/contracts)
+- [キュレーションコントラクト](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - GNSがラップする基礎となるコントラクト
+  - アドレス - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538`
+- [Subgraph Studio ドキュメント](/deploying/subgraph-studio)

diff --git a/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx
index 0f397018bfcb..21e8dee194a2 100644
--- a/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx
+++ b/website/pages/ja/deploying/deploying-a-subgraph-to-studio.mdx
@@ -2,7 +2,7 @@
 title: Subgraph Studio にサブグラフをデプロイする
 ---

-> サブグラフがデータをインデックスするネットワークが、分散型ネットワークで[サポートされている](/developing/supported-chains)ことを確認します。
+> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294).

こちらがSubgraph Studioにサブグラフをデプロイする手順です:

diff --git a/website/pages/ja/deploying/hosted-service.mdx b/website/pages/ja/deploying/hosted-service.mdx
index 42684ad8d5f5..00d8b06cbfb6 100644
--- a/website/pages/ja/deploying/hosted-service.mdx
+++ b/website/pages/ja/deploying/hosted-service.mdx
@@ -2,11 +2,11 @@
 title: とはホストされたサービス?
 ---

-> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network.
Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> ホスト型サービスは 2023 年に廃止されますが、分散型ネットワークでサポートされていないネットワークでも引き続き利用できることに注意してください。より多くのネットワークがサポートされているため、開発者は[サブグラフを The Graph Network にアップグレードする](/cookbook/upgrading-a-subgraph)ことをお勧めします。各ネットワークでは、開発者がサブグラフを分散型ネットワークにアップグレードするのに十分な時間を確保できるように、ホスト型サービスと同等のサービスが段階的に廃止されます。ホスト型サービスの終了について詳しくは、[こちら](https://thegraph.com/blog/sunsetting-hosted-service)をご覧ください。 -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). +このセクションでは、[ホストされたサービス](https://thegraph.com/hosted-service/)にサブグラフをデプロイする手順を説明します。 -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +ホストされているサービスのアカウントをお持ちでない場合は、GitHub アカウントでサインアップできます。認証が完了すると、UI を介してサブグラフの作成を開始し、端末からそれらのデプロイを開始できます。ホストされたサービスは、Polygon、Gnosis Chain、BNB Chain、Optimism、Arbitrum などの多数のネットワークをサポートします。 包括的なリストについては、[サポートされているネットワーク](/developing/supported-networks/#hosted-service)をご覧ください。 @@ -16,7 +16,7 @@ If you don't have an account on the hosted service, you can sign up with your Gi ### 既存のコントラクトから -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. +選択したネットワークにスマート コントラクトを既に展開している場合は、このコントラクトから新しいサブグラフをブートストラップすることが、ホストされるサービスを開始するための良い方法となる可能性があります。 このコマンドを使用して、既存のコントラクトからすべてのイベントにインデックスを付けるサブグラフを作成できます。これは、[Etherscan](https://etherscan.io/)からコントラクト ABI をフェッチしようとします。 @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / サンプルのサブグラフは、Dani Grant による Gravity コントラクトをベースにしています。このコントラクトは、ユーザーのアバターを管理し、アバターが作成または更新されるたびに`NewGravatar`または`UpdateGravatar`イベントを発行します。サブグラフは、`Gravatar`エンティティをグラフノードストアに書き込み、イベントに応じてこれらが更新されるようにすることで、これらのイベントを処理します。[subgraph manifest](/developer/create-subgraph-hosted#the-subgraph-manifest)を見ると、スマートコントラクトからどのイベントに注意を払うべきか、マッピングなどがよくわかります。 -## Supported Networks on the hosted service +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + +## ホスト型サービスでサポートされているネットワーク 対応ネットワークの一覧は[こちら](/developing/supported-networks)で確認できます。 diff --git a/website/pages/ja/deploying/subgraph-studio-faqs.mdx b/website/pages/ja/deploying/subgraph-studio-faqs.mdx index 07e31999931e..a9c535bd95dc 100644 --- a/website/pages/ja/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/ja/deploying/subgraph-studio-faqs.mdx @@ -2,29 +2,29 @@ title: サブグラフスタジオFAQ --- -## 1. What is Subgraph Studio? +## 1. サブグラフスタジオとは? 
[Subgraph Studio](https://thegraph.com/studio/)は、サブグラフやAPIキーを作成・管理・公開するためのDappであり、サブグラフの作成・管理・公開を行う。 -## 2. How do I create an API Key? +## 2. API キーを作成するにはどうすればよいですか? APIを作成するには、Subgraph Studioに移動し、ウォレットを接続します。上部にあるAPI keysタブをクリックします。そこで、APIキーを作成することができます。 -## 3. Can I create multiple API Keys? +## 3. 複数の API キーを作成できますか? A: はい、できます。異なるプロジェクトで使用するために、[こちら](https://thegraph.com/studio/apikeys/)のリンクをご確認ください。 -## 4. How do I restrict a domain for an API Key? +## 4. API キーのドメインを制限するにはどうすればよいですか? API キーを作成後、「セキュリティ」セクションで、特定の API キーにクエリ可能なドメインを定義できます。 -## 5. Can I transfer my subgraph to another owner? +## 5. 自分のサブグラフを他のオーナーに譲渡することはできますか? はい、メインネットに公開されたサブグラフは、新しいウォレットまたはマルチシグに転送できます。これを行うには、サブグラフの詳細ページの [公開] ボタンの横にある 3 つのドットをクリックし、[所有権を譲渡する] を選択します。 サブグラフが転送されると、Studio でサブグラフを表示または編集できなくなることに注意してください。 -## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? +## 6. 使用したいサブグラフの開発者ではない場合、サブグラフのクエリ URL を見つけるにはどうすればよいですか? 各サブグラフのクエリ URL は、Graph Explorerの「Subgraph Details」で確認できます。「Query」をクリックすると、興味のあるサブグラフのクエリ URL が表示されます。ここで``というプレースホルダーを、Subgraph Studioで利用したい API キーに置き換えることができます。 diff --git a/website/pages/ja/deploying/subgraph-studio.mdx b/website/pages/ja/deploying/subgraph-studio.mdx index 7311ea370c9b..786ca5e9f854 100644 --- a/website/pages/ja/deploying/subgraph-studio.mdx +++ b/website/pages/ja/deploying/subgraph-studio.mdx @@ -32,13 +32,7 @@ Subgraph Studio では、サブグラフを完全にコントロールするこ ## Subgraph Studio でサブグラフを作成する方法 -一番のポイントです - 最初にサブグラフを作成する際には、以下の項目を入力するように指示されます: - -- サブグラフネーム -- 画像 -- 説明書き -- カテゴリー(e.g. `DeFi`, `NFTs`, `Governance`) -- ウェブサイト + ## Subgraph と The Graph Network の互換性 diff --git a/website/pages/ja/developing/creating-a-subgraph.mdx b/website/pages/ja/developing/creating-a-subgraph.mdx index 7efba3749268..c663a87c721b 100644 --- a/website/pages/ja/developing/creating-a-subgraph.mdx +++ b/website/pages/ja/developing/creating-a-subgraph.mdx @@ -24,13 +24,13 @@ Graph CLI は JavaScript で書かれており、使用するには`yarn`また `yarn`をインストールしたら、次のコマンドを実行して Graph CLI をインストールする。 -**yarn でインストールします:** +**Install with yarn:** ```bash yarn global add @graphprotocol/graph-cli ``` -**npm でインストールします:** +**Install with npm:** ```bash npm install -g @graphprotocol/graph-cli @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: マニフェストを更新する重要な項目は以下の通りです: -- `description`: サブグラフが何であるかについての人間が読める説明です。この説明は、サブグラフがホステッドサービスにデプロイされたときに、グラフエクスプローラーによって表示されます。 +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: サブグラフのマニフェストが存在するリポジトリの URL です。これは、グラフエクスプローラでも表示されます。 @@ -146,6 +154,10 @@ dataSources: - `dataSources.source.startBlock`: データソースがインデックス作成を開始するブロックの番号(オプション)です。ほとんどの場合、コントラクトが作成されたブロックの使用をお勧めします。 +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: データソースがストアに書き込むエンティティです。各エンティティのスキーマは、schema.graphql ファイルで定義されます。 - `dataSources.mapping.abis`: ソースコントラクトおよびマッピング内から対話する他のスマートコントラクトのための 1 つまたは複数の名前付き ABI ファイルです。 @@ -242,6 +254,7 @@ GraphQL API では、以下の Scalar をサポートしています: | `String` | `string`値の Scalar であり、Null 文字はサポートされておらず、自動的に削除されます。 | | `Boolean` | `boolean`値を表す Scalar。 | | `Int` | Int GraphQL の仕様では、`Int`のサイズは 32 バイトと定義されています。 | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | 大きな整数。Ethereum の`uint32`, `int64`, `uint64`, ..., `uint256` タイプに使用されます。注: `int32`, `uint24` `int8`など`uint32`以下のものは`i32`として表現されます。 | | `BigDecimal` | `BigDecimal`は、高精度の 10 進数を記号と指数で表します。指数の範囲は -6143 ~ +6144 です。有効数字 34 桁にまとめられます。 | @@ -771,6 +784,8 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { ### 対応フィルター +#### Call Filter + ```yaml filter: kind: call @@ -807,6 +822,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### マッピング関数 マッピング関数は、唯一の引数として`ethereum.Block`を受け取ります。イベント用のマッピング関数と同様に、この関数はストア内の既存のサブグラフエンティティにアクセスしたり、スマートコントラクトを呼び出したり、エンティティを作成または更新したりすることができます。 @@ -935,6 +989,8 @@ _meta { ### 既存のサブグラフへのグラフト +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). 
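
Before moving on to grafting, one note on the `context` entries shown in the manifest excerpt further above: they become available to mappings through `dataSource.context()`. Below is a minimal, hypothetical sketch — the handler name is illustrative and the `foo`/`bar` keys simply mirror that manifest example; none of this is generated code.

```typescript
import { dataSource, ethereum, log } from '@graphprotocol/graph-ts'

// Reads the `foo` (Bool) and `bar` (String) context values declared
// on the data source in the manifest excerpt above.
export function handleSomeEvent(event: ethereum.Event): void {
  let context = dataSource.context()
  let foo = context.get('foo')!.toBoolean()
  let bar = context.get('bar')!.toString()
  log.info('context foo: {}, bar: {}', [foo ? 'true' : 'false', bar])
}
```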
+ サブグラフが最初にデプロイされると、対応するチェーンのジェネシス ブロック (または各データ ソースで定義された `startBlock`) でイベントのインデックス作成が開始されます。既存のサブグラフのデータを再利用し、かなり後のブロックからインデックス作成を開始することは有益です。このインデックス作成モードは _グラフティング_ と呼ばれます。失敗した既存のサブグラフを迅速に、または一時的に再び機能させることができます。 `subgraph.yaml`のサブグラフマニフェストのトップレベルに`graft`ブロックがある場合、サブグラフはベースサブグラフにグラフトされます: @@ -964,7 +1020,7 @@ graft: ## ファイルデータソース -ファイルデータソースは、IPFSを皮切りに、インデックス作成時にオフチェーンデータに堅牢かつ拡張可能な方法でアクセスするための新しいサブグラフ機能です。 +ファイルデータソースは、堅牢で拡張可能な方法でインデックス作成中にオフチェーンデータにアクセスするための新しいサブグラフ機能です。ファイルデータソースは、IPFS および Arweave からのファイルのフェッチをサポートしています。 > また、オフチェーンデータの決定論的なインデックス作成、および任意のHTTPソースデータの導入の可能性についても基礎ができました。 @@ -976,7 +1032,7 @@ graft: > 既存の`ipfs.cat` APIを置き換えるものです。 -### Upgrade guide +### アップグレードガイド #### `graph-ts` および `graph-cli` を更新しました。 @@ -1031,7 +1087,7 @@ type TokenMetadata @entity { > [入れ子フィルター](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering)を使用すると、これらの入れ子エンティティに基づいて、親エンティティをフィルタリングすることができます。 -#### `kind: file/ipfs` で新しいテンプレート化されたデータソースを追加します。 +#### `種類: ファイル/ipfs` または `種類: ファイル/arweave` の新しいテンプレートデータソースを追加します> 目的のファイルが特定されたときに生成されるデータソースです。 @@ -1097,9 +1153,11 @@ export function handleMetadata(content: Bytes): void { チェーンベースハンドラーの実行中に、ファイルデータソースを作成できるようになりました: - 自動生成された`templates`からテンプレートをインポートする。 -- マッピング内から`TemplateName.create(cid: string)` を呼び出し、cidを有効なIPFSコンテンツ識別子とする。 +- マッピング内から `TemplateName.create(cid: string)` を呼び出します。この場合、cid は IPFS または Arweave の有効なコンテンツ識別子です -> 現在、Graph Nodeは[v0とv1のコンテンツ識別子](https://docs.ipfs.tech/concepts/content-addressing/)、およびディレクトリを持つコンテンツ識別子(例:`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3qobbq7i4er3tnxci/metadata.json`)をサポートしています。 +IPFS の場合、グラフノードは [v0 および v1 コンテンツ識別子、および](https://docs.ipfs.tech/concepts/content-addressing/)ディレクトリを持つコンテンツ識別子 (例: `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) をサポートします。 + +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). 例: @@ -1130,7 +1188,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -これは新しいファイルデータソースを作成し、Graph Nodeの設定されたIPFSエンドポイントをポーリングし、見つからない場合はリトライします。ファイルが見つかると、ファイルデータソースハンドラが実行されます。 +これにより、新しいファイル データ ソースが作成され、グラフ ノードの構成済み IPFS または Arweave エンドポイントがポーリングされ、見つからない場合は再試行されます。ファイルが見つかると、ファイルデータソースハンドラが実行されます。 この例では、親 `Token` エンティティと結果の `TokenMetadata` エンティティの間のルックアップとして CID を使用しています。 @@ -1170,7 +1228,7 @@ NFT メタデータを対応するトークンにリンクする場合、メタ ファイルデータソースのハンドラーは、`eth_call`コントラクトバインディングをインポートするファイルには存在できず、「unknown import」で失敗しました。`ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)) で失敗します。回避策としては、ファイルデータソースハンドラーを専用ファイルに作成することです。 -#### Examples +#### 例 [クリプトコヴェン・サブグラフの移動](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) diff --git a/website/pages/ja/developing/developer-faqs.mdx b/website/pages/ja/developing/developer-faqs.mdx index 861be792bf3d..1f3a6413f5d8 100644 --- a/website/pages/ja/developing/developer-faqs.mdx +++ b/website/pages/ja/developing/developer-faqs.mdx @@ -2,39 +2,39 @@ title: 開発者 FAQ --- -## 1. What is a subgraph? +## 1. 
サブグラフとは サブグラフは、ブロックチェーンデータを基に構築されたカスタムAPIです。サブグラフはGraphQLクエリ言語を使ってクエリされ、Graph CLIを使ってGraph Nodeにデプロイされます。デプロイされ、The Graphの分散型ネットワークに公開されると、インデクサーはサブグラフを処理し、サブグラフの消費者がクエリできるようにします。 -## 2. Can I delete my subgraph? +## 2. サブグラフを削除できますか? 一度作成したサブグラフの削除はできません。 -## 3. Can I change my subgraph name? +## 3. サブグラフ名を変更できますか? 一度作成したサブグラフの名前を変更することはできません。サブグラフを作成する際には、他の dapps から検索しやすく、識別しやすい名前になるよう、よく考えてから作成してください。 -## 4. Can I change the GitHub account associated with my subgraph? +## 4. サブグラフに関連付けられている GitHub アカウントを変更できますか? 一度作成したサブグラフに関連する GitHub のアカウントは変更できません。サブグラフを作成する前に、この点をよく考えてください。 -## 5. Am I still able to create a subgraph if my smart contracts don't have events? +## 5. スマート コントラクトにイベントがない場合でもサブグラフを作成できますか? スマートコントラクトを構成して、クエリしたいデータに関連するイベントを持つことを強くお勧めします。サブグラフ内のイベントハンドラは、コントラクトのイベントによってトリガされ、有用なデータを取得するための圧倒的に速い方法です。 使用しているコントラクトにイベントが含まれていない場合、サブグラフはコールハンドラとブロックハンドラを使用してインデックス作成をトリガすることができます。しかし、パフォーマンスが大幅に低下するため、これは推奨されません。 -## 6. Is it possible to deploy one subgraph with the same name for multiple networks? +## 6. 複数のネットワークに同じ名前の 1 つのサブグラフを展開することは可能ですか? 複数のネットワークには別々の名前が必要です。同じ名前で異なるサブグラフを持つことはできませんが、単一のコードベースで複数のネットワークに対応する便利な方法があります。詳しくはドキュメントをご覧ください: [Redeploying a Subgraph](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) -## 7. How are templates different from data sources? +## 7. テンプレートとデータ ソースの違いは何ですか? テンプレートは、サブグラフがインデックスを作成している間に、その場でデータソースを作成することができます。また、コントラクトの形状(ABI、イベントなど)を前もって知っているので、テンプレートでどのようにインデックスを作成するかを定義することができ、コントラクトが作成されると、サブグラフはコントラクトのアドレスを供給することで動的なデータソースを作成します。 データソース・テンプレートのインスタンス化」のセクションをご覧ください: [Data Source Templates](/developing/creating-a-subgraph#data-source-templates) -## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? +## 8. ローカル展開に最新バージョンのグラフノードを使用していることを確認するにはどうすればよいですか? 以下のコマンドを実行してください: @@ -44,29 +44,29 @@ docker pull graphprotocol/graph-node:latest **注:** docker / docker-compose は、最初に実行したときにプルされた graph-node のバージョンを常に使用しますので、最新版の graph-node を使用していることを確認するために、このコマンドを実行することが重要です。 -## 9. How do I call a contract function or access a public state variable from my subgraph mappings? +## 9. コントラクト関数を呼び出したり、サブグラフ マッピングから公開状態変数にアクセスするにはどうすればよいですか? [AssemblyScript API](/developing/assemblyscript-api) セクション内の `スマート コントラクトへのアクセス` 状態を見てください。 -## 10. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another datasource in `subgraph.yaml` after running `graph init`? +## 10. 2 つのコントラクトを持つ `graph-cli` から `graph init` を使用してサブグラフをセットアップすることは可能ですか?または、`graph init` を実行した後、`subgraph.yaml` に別のデータソースを手動で追加する必要がありますか? `graph init`は基本的な出発点として意図されており、そこから手動でデータソースを追加することができます。 -## 11. I want to contribute or add a GitHub issue. Where can I find the open source repositories? +## 11. GitHub の問題に貢献または追加したい。オープンソースのリポジトリはどこにありますか? - [graph-node](https://github.com/graphprotocol/graph-node) - [graph-cli](https://github.com/graphprotocol/graph-cli) - [graph-ts](https://github.com/graphprotocol/graph-ts) -## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? +## 12. イベントを処理するときに、エンティティの「自動生成」Id を作成するための推奨される方法は何ですか? もし、イベント中に 1 つのエンティティしか作成されず、他に利用できるものがなければ、トランザクションハッシュ+ログインデックスがユニークになります。Bytes に変換して`crypto.keccak256`に通すことで難読化することができますが、これでは一意性は高まりません。 -## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? +## 13. 複数の契約を聞く場合、契約順を選択してイベントを聞くことはできますか? サブグラフ内では、複数のコントラクトにまたがっているかどうかにかかわらず、イベントは常にブロックに表示される順序で処理されます。 -## 14. 
Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? +## 14. イベント ハンドラー内からネットワーク (メインネット、Goerli、ローカル) を区別することは可能ですか? はい、以下の例のように`graph-ts`をインポートすることで可能です。 @@ -77,23 +77,23 @@ dataSource.network() dataSource.address() ``` -## 15. Do you support block and call handlers on Goerli? +## 15. Goerli でブロック ハンドラーとコール ハンドラーをサポートしていますか? はい。 Goerli は、ブロック ハンドラー、コール ハンドラー、およびイベント ハンドラーをサポートしています。イベント ハンドラーは、他の 2 つのハンドラーよりもはるかにパフォーマンスが高く、すべての EVM 互換ネットワークでサポートされていることに注意してください。 -## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? +## 16. サブグラフ マッピングに ethers.js または他の JS ライブラリをインポートできますか? マッピングは AssemblyScript で書かれているため、現在はできません。代替案としては、生データをエンティティに格納し、JS ライブラリを必要とするロジックをクライアントで実行することが考えられます。 -## 17. Is it possible to specify what block to start indexing on? +## 17. インデックス作成を開始するブロックを指定することはできますか? はい。`subgraph.yaml`ファイルの`dataSources.source.startBlock`は、データソースがインデックス作成を開始するブロックの番号を指定します。ほとんどの場合、コントラクトが作成されたブロックを使用することをお勧めします。開始ブロック -## 18. Are there some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +## 18. インデックス作成のパフォーマンスを向上させるためのヒントはありますか? サブグラフの同期に非常に時間がかかる はい、コントラクトがデプロイされたブロックからインデックス作成を開始するオプションのスタートブロック機能をご利用ください: [Start blocks](/developing/creating-a-subgraph#start-blocks) -## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +## 19. サブグラフに直接クエリを実行して、インデックスが作成された最新のブロック番号を特定する方法はありますか? はい、あります。organization/subgraphName」を公開先の組織とサブグラフの名前に置き換えて、以下のコマンドを実行してみてください: @@ -101,19 +101,19 @@ dataSource.address() curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql ``` -## 20. What networks are supported by The Graph? +## 20. The Graph はどのネットワークをサポートしていますか? 対応ネットワークの一覧は[こちら](/developing/supported-networks)で確認できます。 -## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +## 21. 再デプロイせずにサブグラフを別のアカウントまたはエンドポイントに複製することは可能ですか? サブグラフを再デプロイする必要がありますが、サブグラフの ID(IPFS ハッシュ)が変わらなければ、最初から同期する必要はありません。 -## 22. Is this possible to use Apollo Federation on top of graph-node? +## 22. グラフノード上で Apollo Federation を使用することは可能ですか? 将来的にはサポートしたいと考えていますが、フェデレーションはまだサポートされていません。現時点でできることは、クライアント上またはプロキシサービス経由でスキーマステッチを使用することです。 -## 23. Is there a limit to how many objects The Graph can return per query? +## 23. グラフがクエリごとに返すことができるオブジェクトの数に制限はありますか? デフォルトでは、クエリの応答は 1 つのコレクションにつき 100 アイテムに制限されています。それ以上の数を受け取りたい場合は、1 コレクションあたり 1000 アイテムまで、それ以上は以下のようにページネーションすることができます: @@ -121,22 +121,18 @@ curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"or someCollection(first: 1000, skip: ) { ... } ``` -## 24. If my dapp frontend uses The Graph for querying, do I need to write my query key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? +## 24. dapp フロントエンドがクエリに The Graph を使用する場合、クエリ キーをフロントエンドに直接書き込む必要がありますか? ユーザーにクエリ料金を支払う場合はどうなりますか? 悪意のあるユーザーによってクエリ料金が非常に高くなることはありますか? 現在、dapp の推奨されるアプローチは、キーをフロントエンドに追加し、それをエンド ユーザーに公開することです。とはいえ、そのキーを _yourdapp.io_ や subgraph.ゲートウェイは現在 Edge & によって実行されています。ノード。ゲートウェイの責任の一部は、不正行為を監視し、悪意のあるクライアントからのトラフィックをブロックすることです。 -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? 
-Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). +自分または他の人がホストされたサービスにデプロイしたサブグラフを見つけるには、ホストされたサービスに移動します。 [こちら](https://thegraph.com/hosted-service)でご覧いただけます。 -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? -The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. +Graph は、ホストされるサービスに対して料金を請求することはありません。 Graph は分散型プロトコルであり、集中型サービスに対する課金は The Graph の価値観と一致していません。ホスト型サービスは常に、分散型ネットワークにアクセスするための一時的なステップでした。開発者には、快適に分散ネットワークにアップグレードするのに十分な時間があります。 -## 27. When will the Hosted Service be shut down? +## 27. How do I update a subgraph on mainnet? -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? - -If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +サブグラフ開発者の場合は、CLI を使用して新しいバージョンのサブグラフを Subgraph Studio にデプロイできます。この時点では非公開になりますが、問題がなければ、分散型の Graph Explorer に公開できます。これにより、キュレーターがシグナリングを開始できるサブグラフの新しいバージョンが作成されます。 diff --git a/website/pages/ja/developing/graph-ts/api.mdx b/website/pages/ja/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..40702b83261f --- /dev/null +++ b/website/pages/ja/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +このページでは、サブグラフのマッピングを記述する際に、どのような組み込み API を使用できるかを説明します。 すぐに使える API は 2 種類あります: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API リファレンス + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. 
+- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Ethereum、JSON、GraphQL、AssemblyScript など、異なるタイプのシステム間で変換するための低レベルプリミティブ + +### バージョン + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| バージョン | リリースノート | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### 組み込み型 + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### バイト配列 + +```typescript +'@graphprotocol/graph-ts'から{ ByteArray } をインポートします。 +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +'@graphprotocol/graph-ts'から { BigDecimal } をインポートします。 +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +'@graphprotocol/graph-ts'から { BigInt } をインポートします。 +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +'@graphprotocol/graph-ts'から { TypedMap } をインポートします。 +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +'@graphprotocol/graph-ts'から { Bytes } をインポートします。 +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### 住所 + +```typescript +'@graphprotocol/graph-ts'から { Address } をインポートします。 +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### ストア API + +```typescript +'@graphprotocol/graph-ts'から { store } をインポートします。 +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### エンティティの作成 + +Ethereum のイベントからエンティティを作成する際の一般的なパターンを以下に示します。 + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. 
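
As the note on unique IDs below points out, using the bare transaction hash assumes that no other event in the same transaction creates an entity with that hash. A hedged variant of the handler above avoids this by combining the transaction hash with the event's log index via the `concatI32` helper documented in the `Bytes` section; the import paths mirror the example above.

```typescript
import { Transfer as TransferEvent } from '../generated/ERC20/ERC20'
import { Transfer } from '../generated/schema'

export function handleTransfer(event: TransferEvent): void {
  // Combining the transaction hash with the log index keeps the ID unique
  // even when a single transaction emits several Transfer events.
  let id = event.transaction.hash.concatI32(event.logIndex.toI32())
  let transfer = new Transfer(id)

  transfer.from = event.params.from
  transfer.to = event.params.to
  transfer.amount = event.params.amount
  transfer.save()
}
```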
+ +各エンティティは、他のエンティティとの衝突を避けるために、ユニークな ID を持たなければなりません。 イベントのパラメータには、使用可能な一意の識別子が含まれているのが一般的です。 注:トランザクションのハッシュを ID として使用することは、同じトランザクション内の他のイベントがこのハッシュを ID としてエンティティを作成しないことを前提としています。 + +#### ストアからのエンティティの読み込み + +エンティティがすでに存在する場合、以下の方法でストアからロードすることができます。 + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### ブロック内で作成されたエンティティの検索 + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +ストア API を使用すると、現在のブロックで作成または更新されたエンティティの取得が容易になります。この一般的な状況は、あるハンドラーがオンチェーン イベントからトランザクションを作成し、後のハンドラーがこのトランザクションが存在する場合にアクセスしようとすることです。トランザクションが存在しない場合、サブグラフはエンティティが存在しないことを確認するためだけにデータベースにアクセスする必要があります。エンティティが同じブロック内に作成されている必要があることをサブグラフの作成者がすでに知っている場合は、loadInBlock を使用すると、このデータベースのラウンドトリップが回避されます。一部のサブグラフでは、これらのルックアップの欠落がインデックス作成時間に大きく影響する可能性があります。 + +```typescript +let id = event.transaction.hash // または ID が構築される方法 +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = 新しい転送(id) +} + +// 以前と同様に Transfer エンティティを使用します +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### 派生エンティティの検索 + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +これにより、イベント ハンドラー内から派生エンティティ フィールドをロードできるようになります。たとえば、次のスキーマがあるとします。 + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### 既存のエンティティの更新 + +既存のエンティティを更新するには 2 つの方法があります。 + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +プロパティの変更は、生成されたプロパティセッターのおかげで、ほとんどの場合、簡単です。 + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... +``` + +また、次の 2 つの命令のいずれかで、プロパティの設定を解除することも可能です。 + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. 
+ +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### ストアからのエンティティの削除 + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +mport { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +Ethereum API は、スマートコントラクト、パブリックステート変数、コントラクト関数、イベント、トランザクション、ブロック、および Ethereum データのエンコード/デコードへのアクセスを提供します。 + +#### Ethereum タイプのサポート + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +以下の例で説明します。 以下のようなサブグラフのスキーマが与えられます。 + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### イベントとブロック/トランザクションデータ + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### スマートコントラクトの状態へのアクセス + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +よくあるパターンは、イベントが発生したコントラクトにアクセスすることです。 これは以下のコードで実現できます。 + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +サブグラフの一部である他のコントラクトは、生成されたコードからインポートすることができ、有効なアドレスにバインドすることができます。 + +#### リバートされた呼び出しの処理 + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +ただし、Geth や Infura のクライアントに接続された Graph ノードでは、すべてのリバートを検出できない場合があるので、これに依存する場合は Parity のクライアントに接続された Graph ノードを使用することをお勧めします。 + +#### 符号化/復号化 ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. 
+ +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array<ethereum.Value> = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +その他の情報: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message. +- `log.info(fmt: string, args: Array<string>): void` - logs an informational message. +- `log.warning(fmt: string, args: Array<string>): void` - logs a warning. +- `log.error(fmt: string, args: Array<string>): void` - logs an error message. +- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('表示されるメッセージ。{}, {}, {}', [value.toString(), anotherValue.toString(), 'すでに文字列']) +``` + +#### 1 つまたは複数の値を記録する + +##### 1 つの値を記録する + +In the example below, the string value "A" is passed into an array to become `['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### 既存の配列から 1 つのエントリをロギングする + +以下の例では、配列に 3 つの値が含まれているにもかかわらず、引数の配列の最初の値だけがログに記録されます。 + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### 既存の配列から複数のエントリを記録する + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### 既存の配列から特定のエントリをロギングする + +配列内の特定の値を表示するには、インデックス化された値を指定する必要があります。 + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### イベント情報の記録 + +以下の例では、イベントからブロック番号、ブロックハッシュ、トランザクションハッシュをログに記録しています。 + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +IPFS のハッシュやパスが与えられた場合、IPFS からのファイルの読み込みは以下のように行われます。 + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id.toString()) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray`
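+
+For example, a minimal sketch of hashing an arbitrary string (the input value below is purely illustrative):
+
+```typescript
+import { crypto, ByteArray } from '@graphprotocol/graph-ts'
+
+// Convert the string to a ByteArray, hash it, and render the hash as a hex string
+let input = ByteArray.fromUTF8('Transfer(address,address,uint256)')
+let hash = crypto.keccak256(input)
+let hashHex = hash.toHexString()
+```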
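+
+As a rough sketch (assumed, not from the original docs), the `try_` variants can be combined with the `kind` check; here `data` is any `Bytes` value, e.g. a file returned by `ipfs.cat`, and the helper name is just for illustration:
+
+```typescript
+import { json, JSONValueKind, Bytes } from '@graphprotocol/graph-ts'
+
+// Hypothetical helper: returns the "title" field of a JSON object, or null
+function parseTitle(data: Bytes): string | null {
+  let result = json.try_fromBytes(data)
+  // isOk is false when the bytes are not valid JSON
+  if (!result.isOk || result.value.kind != JSONValueKind.OBJECT) {
+    return null
+  }
+  let title = result.value.toObject().get('title')
+  if (!title || title.isNull()) {
+    return null
+  }
+  return title.toString()
+}
+```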
+ +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above)
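+
+For illustration, a handler could read the values defined above roughly like this (a sketch only; `handleSomeEvent` and `SomeEvent` are placeholder names):
+
+```typescript
+import { dataSource } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Keys match the example manifest above
+  let context = dataSource.context()
+  let stringExample = context.getString('string_example') // 'hello'
+  let intExample = context.getI32('int_example') // 42
+}
+```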
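+
+A rough sketch of the closure limitation and a common workaround (an assumed example, not taken from the AssemblyScript documentation):
+
+```typescript
+let values = [1, 2, 3]
+let sum = 0
+
+// This won't compile: the callback cannot capture `sum` from the outer scope
+// values.forEach((value) => {
+//   sum += value
+// })
+
+// This will work: avoid the closure and use an ordinary loop instead
+for (let i = 0; i < values.length; i++) {
+  sum += values[i]
+}
+```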
+ +### タイプ 変換参照 + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### データソースのメタデータ + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### エンティティと DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key: string, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ja/developing/graph-ts/common-issues.mdx b/website/pages/ja/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..261c8f3d6a8e --- /dev/null +++ b/website/pages/ja/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: AssemblyScriptのよくある問題 +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty; however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). See the short example below. diff --git a/website/pages/ja/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/ja/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..d9133eece2ba 100644 --- a/website/pages/ja/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/ja/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: サブストリームを利用したサブグラフに関するよくある質問 --- -## What are Substreams? +## サブストリームとは何ですか? Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. Go to the [Substreams Documentation](/substreams) to learn more about Substreams. -## What are Substreams-powered subgraphs? +## サブストリームによって動作するサブグラフは何ですか? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) は、サブストリームの機能とサブグラフのクエリ可能性を組み合わせています。 サブストリームを利用したサブグラフを公開する場合、サブストリーム変換によって生成されたデータは、[エンティティの変更を出力](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs)でき、サブグラフ エンティティと互換性があります。 -If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +すでにサブグラフ開発に精通している場合は、AssemblyScript 変換層によって生成されたかのように、サブストリームを利用したサブグラフをクエリできることに注意してください。動的で柔軟な GraphQL API の提供など、サブグラフのすべての利点が得られます。 -## How are Substreams-powered subgraphs different from subgraphs? +## サブストリームを利用したサブグラフはサブグラフとどう違うのでしょうか? -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. +サブグラフは、オンチェーン イベントと、それらのイベントが Assemblyscript で記述されたハンドラーを介して変換される方法を指定するデータソースで構成されます。これらのイベントは、チェーン上でイベントが発生する順序に基づいて、順番に処理されます。 -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times.
+一方、Substreamsで動作するサブグラフは、Substreamsパッケージを参照する単一のデータソースを持ち、これはGraph Nodeによって処理されます。従来のサブグラフと比較して、Substreamsはより細かい粒度のオンチェーンデータにアクセスでき、また、大規模な並列処理の恩恵も受けることができます。これにより、処理時間が大幅に短縮される可能性があります。 -## What are the benefits of using Substreams-powered subgraphs? +## サブストリームを利用したサブグラフを使用する利点は何ですか? Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. -## What are the benefits of Substreams? +## サブストリームの利点は何ですか? -There are many benefits to using Substreams, including: +サブストリームを使用すると、次のような多くの利点があります。 -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. +- コンポーザブル: レゴ ブロックのようなサブストリーム モジュールを積み重ね、コミュニティ モジュールを基にして公開データをさらに洗練させることができます。 -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- 高パフォーマンスのインデックス作成: 並列操作の大規模なクラスター (BigQuery を考えてください) を通じて、桁違いに高速なインデックス作成を実現します。 -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- 場所を選ばずにデータをどこにでも沈める: PostgreSQL、MongoDB、Kafka、サブグラフ、フラットファイル、Googleシート。 -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- プログラム可能: コードを使用して抽出をカスタマイズし、変換時の集計を実行し、複数のシンクの出力をモデル化します。 -- Access to additional data which is not available as part of the JSON RPC +- JSON RPC の一部として利用できない追加データへのアクセス -- All the benefits of the Firehose. +- Firehose のすべての利点。 -## What is the Firehose? +## 消防ホースとは何ですか? -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +[StreamingFast](https://www.streamingfast.io/) によって開発された Firehose は、ブロックチェーンの全履歴をこれまで見たことのない速度で処理するためにゼロから設計されたブロックチェーン データ抽出レイヤーです。ファイルベースでストリーミングファーストのアプローチを提供するこれは、StreamingFast のオープンソース テクノロジ スイートの中核コンポーネントであり、サブストリームの基盤です。 -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Firehose の詳細については、[documentation](https://firehose.streamingfast.io/) にアクセスしてください。 -## What are the benefits of the Firehose? +## 消防ホースの利点は何ですか? -There are many benefits to using Firehose, including: +Firehose を使用すると、次のような多くの利点があります。 -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. +- 最低のレイテンシーとポーリングなし: ストリーミングファーストの方式で、Firehose ノードはブロック データを最初にプッシュするために競合するように設計されています。 -- Prevents downtimes: Designed from the ground up for High Availability. +- ダウンタイムの防止: 高可用性を実現するためにゼロから設計されています。 -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. 
+- ビートを見逃すことはありません: Firehose ストリーム カーソルは、フォークを処理し、どのような状況でも中断したところから続行するように設計されています。 -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- 最も豊富なデータ モデル: 残高の変更、完全なコール ツリー、内部トランザクション、ログ、ストレージの変更、ガス料金などが含まれる最適なデータ モデル。 -- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- フラット ファイルの活用: ブロックチェーン データは、利用可能な最も安価で最適化されたコンピューティング リソースであるフラット ファイルに抽出されます。 -## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## 開発者は、サブストリームを利用したサブグラフとサブストリームに関する詳細情報にどこでアクセスできますか? The [Substreams documentation](/substreams) will teach you how to build Substreams modules. -The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +[Substreams-powered subgraphs ドキュメント](/cookbook/substreams-powered-subgraphs/) では、The Graph にデプロイするためにサブグラフをパッケージ化する方法が示されています。 -## What is the role of Rust modules in Substreams? +## サブストリームにおけるRustモジュールの役割は何ですか? -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +Rust モジュールは、サブグラフの AssemblyScript マッパーに相当します。これらは同様の方法で WASM にコンパイルされますが、プログラミング モデルにより並列実行が可能になります。これらは、生のブロックチェーン データに適用する変換と集計の種類を定義します。 See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. -## What makes Substreams composable? +## サブストリームを構成可能にするものは何ですか? -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +サブストリームを使用すると、変換レイヤーで合成が行われ、キャッシュされたモジュールを再利用できるようになります。 -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. +例として、AliceはDEX価格モジュールを構築し、Bobはそれを使用して興味のあるいくつかのトークンのボリューム集計モジュールを構築し、Lisaは4つの個々のDEX価格モジュールを組み合わせて価格オラクルを作成することができます。単一のSubstreamsリクエストは、これらの個々のモジュールをまとめ、リンクしてより洗練されたデータのストリームを提供します。そのストリームはその後、サブグラフを作成し、消費者によってクエリされることができます。 -## How can you build and deploy a Substreams-powered Subgraph? +## サブストリームを利用したサブグラフを構築してデプロイするにはどうすればよいでしょうか? -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). +Substreams を利用したサブグラフを [defining](/cookbook/substreams-powered-subgraphs/) した後、Graph CLI を使用してそれを [Subgraph Studio](https://thegraph.com/studio/) にデプロイできます。 -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## サブストリームおよびサブストリームを利用したサブグラフの例はどこで見つけることができますか? -You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. +[この Github リポジトリ](https://github.com/pinax-network/awesome-substreams) にアクセスして、サブストリームとサブストリームを利用したサブグラフの例を見つけることができます。 -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? 
+## SubstreamsとSubstreamsを活用したサブグラフがThe Graph Networkにとってどのような意味を持つのでしょうか? -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +この統合は、非常に高いパフォーマンスのインデクシングと、コミュニティモジュールを活用し、それらを基に構築することによる大きな組み合わせ可能性を含む多くの利点を約束しています。 diff --git a/website/pages/ja/developing/supported-networks.json b/website/pages/ja/developing/supported-networks.json index 5e12392b8c7d..397acfebbbe9 100644 --- a/website/pages/ja/developing/supported-networks.json +++ b/website/pages/ja/developing/supported-networks.json @@ -1,9 +1,9 @@ { - "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "network": "ネットワーク", + "cliName": "CLI名", + "chainId": "チェーンID", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "分散型ネットワーク", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/ja/developing/supported-networks.mdx b/website/pages/ja/developing/supported-networks.mdx index 720d698f4097..ca23a5cf4c2e 100644 --- a/website/pages/ja/developing/supported-networks.mdx +++ b/website/pages/ja/developing/supported-networks.mdx @@ -9,16 +9,16 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +分散型ネットワークでサポートされている機能の完全なリストについては、[このページ](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)を参照してください。 -Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. +`mainnet` Ethereum のインデックスを作成するサブストリームを利用したサブグラフは、Subgraph Studio および分散型ネットワークでサポートされています。 ## グラフノード -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. 
+もしもあなたの好きなネットワークがThe Graphの分散型ネットワークでサポートされていない場合は、[Graph Node](https://github.com/graphprotocol/graph-node)を自分で実行して、任意のEVM互換ネットワークをインデックスすることができます。使用している[バージョン](https://github.com/graphprotocol/graph-node/releases)がそのネットワークをサポートしていることを確認し、必要な設定がされていることを確認してください。 Graph Nodeは、Firehose統合により、他のプロトコルのインデックスを作成することもできます。Firehoseインテグレーションは、NEAR、Arweave、Cosmosベースのネットワーク用に作成されています。 diff --git a/website/pages/ja/developing/unit-testing-framework.mdx b/website/pages/ja/developing/unit-testing-framework.mdx index 0f457a395fde..49601a107de7 100644 --- a/website/pages/ja/developing/unit-testing-framework.mdx +++ b/website/pages/ja/developing/unit-testing-framework.mdx @@ -990,9 +990,9 @@ test('Data source simple mocking example', () => { ## テストカバレッジ -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +**マッチスティック** を使用すると、サブグラフ開発者は、記述された単体テストのテスト カバレッジを計算するスクリプトを実行できます。 -The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +テスト カバレッジ ツールは、コンパイルされたテスト `wasm` バイナリを取得して、それらを `wat` ファイルに変換します。このファイルは、`subgraph.yaml` で定義されたハンドラーが呼び出されているかどうかを簡単に検査して確認できます。 AssemblyScript と WebAssembly ではコード カバレッジ (およびテスト全体) が非常に初期段階にあるため、**Matchstick** はブランチ カバレッジをチェックできません。代わりに、特定のハンドラーが呼び出された場合、そのイベント/関数が適切にモック化されているというアサーションに依存します。 ### 前提条件 @@ -1029,7 +1029,7 @@ graph test -- -c }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +これによりカバレッジ ツールが実行され、ターミナルに次のような内容が表示されるはずです。 ```sh $ graph test -c diff --git a/website/pages/ja/firehose.mdx b/website/pages/ja/firehose.mdx index 9075c7d46b30..af1cfb29dd64 100644 --- a/website/pages/ja/firehose.mdx +++ b/website/pages/ja/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose は、ファイルベースかつストリーミングファーストのアプローチでブロックチェーンデータを処理します。 +![Firehose Logo](/img/firehose-logo.png) -Firehose は、Ethereum(および多くの EVM チェーン)、NEAR、Solana、Cosmos、Arweave 向けに構築されており、現在も続々と開発中です。 +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -グラフノードの統合は複数のチェーンに対して構築されているため、サブグラフは Firehose からデータをストリームし、パフォーマンスとスケーラビリティの高いインデックス作成を行うことができます。Firehose は、The Graph のコア開発者によって構築された新しい変換技術である[サブストリーム](/substreams)も強化します。 +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -詳しくは、[firehose のドキュメント](https://firehose.streamingfast.io/)をご覧ください。 +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### はじめに + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. 
+- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ja/glossary.mdx b/website/pages/ja/glossary.mdx index f21cf08feb73..36fe211e3c6c 100644 --- a/website/pages/ja/glossary.mdx +++ b/website/pages/ja/glossary.mdx @@ -8,11 +8,11 @@ title: 用語集 - **GraphQL**:API用のクエリ言語であり、既存のデータでクエリを実行するためのランタイムです。グラフは、サブグラフのクエリにGraphQLを使用しています。 -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **エンドポイント**: サブグラフのクエリに使用できる URL。 Subgraph Studio のテスト エンドポイントは `https://api.studio.thegraph.com/query///` であり、Graph Explorer エンドポイントは `https: //gateway.thegraph.com/api//subgraphs/id/`. Graph Explorer エンドポイントは、The Graph の分散型ネットワーク上のサブグラフをクエリするために使用されます。 - **Subgraph**:[GraphQL](https://graphql.org/)を使ってクエリできる、ブロックチェーンデータ上に構築されたカスタムAPIです。開発者は、The Graphの分散型ネットワークにサブグラフを構築、デプロイ、公開することができます。その後、インデクサーはサブグラフのインデックス作成を開始し、サブグラフコンシューマーがクエリできるようにすることができます。 -- **ホスティングサービス**:The Graphの分散型ネットワークが、サービスコスト、サービス品質、開発者体験を成熟させていく過程で、サブグラフの構築とクエリのための一時的な足場となるサービスです。 +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **インデクサー**:ブロックチェーンからデータをインデックスし、GraphQLクエリを提供するためにインデックスノードを実行するネットワーク参加者です。 @@ -24,6 +24,8 @@ title: 用語集 - **インデクサーのセルフステーク**:インデクサーが分散型ネットワークに参加するためにステークするGRTの金額です。最低額は100,000GRTで、上限はありません。 +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **デリゲーター**:GRTを所有し、そのGRTをインデクサーに委任するネットワーク参加者です。これにより、インデクサーはネットワーク上のサブグラフへの出資比率を高めることができます。デリゲーターは、インデクサーがサブグラフを処理する際に受け取るインデクサー報酬の一部を受け取ります。 - **デリゲーション・タックス**。デリゲーターがインデクサーにGRTを委任する際に支払う0.5%の手数料です。手数料の支払いに使われたGRTはバーンされます。 @@ -38,27 +40,21 @@ title: 用語集 - **Subgraph Manifest**:サブグラフの GraphQL スキーマ、データ ソース、およびその他のメタデータを記述する JSON ファイルです。[こちら](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf)がその例です。 -- **リベート・プール**:サブグラフの消費者が支払ったクエリ料を、クエリ料のリベートとしてインデクサが請求できるまで保持する経済的な安全対策。残ったGRTはバーンされます。 - -- **エポック**:ネットワークにおける時間の単位。1エポックは現在6,646ブロック、約1日である。 +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **アロケーション**:インデクサは、GRTのステーク(デリゲータのステークを含む)を、The Graphの分散型ネットワークで公開されているサブグラフに割り当てることができます。アロケーションは、4つの段階のうちの1つです。 1. **アクティブ**:アロケーションは、オンチェーンで作成されたときにアクティブとみなされます。これはアロケーションを開くと呼ばれ、インデクサーが特定のサブグラフのために積極的にインデックスを作成し、クエリを提供していることをネットワークに示しています。アクティブなアロケーションは、サブグラフ上のシグナルと割り当てられたGRTの量に比例してインデックス作成報酬を発生させます。 - 2. 
**クローズド**:インデクサーは、最近の有効なインデックス証明(POI)を提出することで、与えられたサブグラフに発生したインデクサー報酬を請求することができます。これは、割り当てを終了することとして知られています。アロケーションを閉じるには、最低1エポック以上開いていなければなりません。最大割当期間は28エポックです。インデクサが28エポックを超えてアロケーションを開いたままにした場合、それはステイルアロケーションとして知られています。割り当てが**クローズド**状態にあるとき、フィッシャーはまだ、偽のデータを提供したインデクサーに異議を唱えるために紛争を開くことができます。 - - 3. **ファイナライズド**:紛争期間が終了し、インデクサーが請求できるクエリ料の払い戻しが可能となる段階。 - - 4. **請求済み**:割り当ての最終段階で、対象となるすべての報酬が配布され、そのクエリ料報酬が請求されている状態です。 + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**:サブグラフの構築、デプロイ、公開のための強力なDAPです。 -- **漁師**: ネットワーク参加者は、インデクサーのクエリ応答と POI に異議を唱えることがあります。これを漁師といいます。 Fisherman に有利に解決された紛争は、Fisherman への裁定とともに、Indexer に金銭的ペナルティをもたらし、ネットワーク内の Indexer によって実行されるインデックス作成およびクエリ作業の整合性を奨励します。ペナルティ (スラッシュ) は現在、インデクサーのセルフ ステークの 2.5% に設定されており、削減された GRT の 50% がフィッシャーマンに、残りの 50% がバーンされます。 +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **アービトレーター**:アービトレーターは、ガバナンスを介して設定されたネットワーク参加者です。アービトレーターの役割は、インデックス作成とクエリの論争の結果を決定することです。彼らの目標は、The Graph Networkの実用性と信頼性を最大化することにあります。 +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **スラッシング**: インデクサーは、不正確なインデックス証明(POI)を提供したり、不正確なデータを提供したりすると、賭けられたGRTをスラッシングされることがあります。スラッシングの割合はプロトコルパラメータで、現在はインデクサーのセルフステークの2.5%に設定されています。削減されたGRTの50%は、不正確なデータまたは不正確なPOIに異議を唱えたフィッシャーマンに支払われる。残りの50%はバーンされます。 +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.
- **インデックス作成報酬**:インデクサーがサブグラフのインデックス作成に対して受け取る報酬です。インデックス作成報酬はGRTで分配されます。 @@ -66,7 +62,7 @@ title: 用語集 - **GRT**: Graphのワークユーティリティトークン。GRTは、ネットワーク参加者にネットワークへの貢献に対する経済的インセンティブを提供します。 -- **POIまたはProof of Indexing**:インデクサーの割り当てが終了し、与えられたサブグラフで発生したインデクサー報酬を請求したい場合、有効かつ最新のインデクシング証明(POI)を提出しなければなりません。フィッシャーマンは、インデクサーの提供したPOIに異議を唱えることができます。フィッシャーマン側に有利に解決された紛争は、そのインデクサーの削減をもたらします。 +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **グラフノード**:Graph Nodeは、サブグラフにインデックスを付け、その結果得られたデータをGraphQL APIを介してクエリに利用できるようにするコンポーネントです。そのため、インデクサースタックの中心であり、グラフノードの正しい動作は、成功するインデクサを実行するために重要です。 @@ -80,10 +76,10 @@ title: 用語集 - **クールダウン期間**:デリゲーションパラメータを変更したインデクサが再度デリゲーションできるようになるまでの残り時間。 -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. -- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. +- **サブグラフを Graph Network にアップグレードする**: サブグラフをホストされたサービスから Graph Network に移動するプロセス。 -- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **サブグラフの*更新***: サブグラフのマニフェスト、スキーマ、またはマッピングを更新した新しいサブグラフ バージョンをリリースするプロセス。 -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2).
diff --git a/website/pages/ja/graphcast.mdx b/website/pages/ja/graphcast.mdx index 86a17d35da81..1b86fc7f6eaa 100644 --- a/website/pages/ja/graphcast.mdx +++ b/website/pages/ja/graphcast.mdx @@ -10,7 +10,7 @@ title: グラフキャスト Graphcast SDK (ソフトウェア開発キット) を使用すると、開発者はラジオを構築できます。これは、インデクサーが特定の目的を果たすために実行できる、ゴシップを利用したアプリケーションです。また、次のユースケースのために、いくつかのラジオを作成する (または、ラジオを作成したい他の開発者/チームにサポートを提供する) 予定です: -- サブグラフ データの整合性のリアルタイムのクロスチェック ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio))。 +- サブグラフ データの整合性のリアルタイム クロスチェック ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio))。 - サブグラフ、サブストリーム、および他のインデクサーからの Firehose データをワープ同期するためのオークションと調整の実施。 - サブグラフのリクエスト量、料金の量などを含む、アクティブなクエリ分析に関する自己報告。 - サブグラフのインデックス作成時間、ハンドラー ガスのコスト、発生したインデックス作成エラーなどを含む、インデックス作成分析に関する自己報告。 diff --git a/website/pages/ja/index.json b/website/pages/ja/index.json index 5924b3db2a59..962b9ba681ec 100644 --- a/website/pages/ja/index.json +++ b/website/pages/ja/index.json @@ -23,8 +23,8 @@ "description": "スタジオを使ってサブグラフを作成" }, "migrateFromHostedService": { - "title": "ホスティングサービスからの移行", - "description": "The Graph Networkへのサブグラフの移行" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "ホステッド サービス", - "description": "ホストサービスでのサブグラフの作成と探索" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "サポートされているネットワーク", - "description": "The Graphでは、The Graph NetworkおよびHosted Serviceにおいて、以下のネットワークをサポートしています。", - "graphNetworkAndHostedService": "グラフ ネットワークとホステッド サービス", - "hostedService": "ホステッド サービス", - "betaWarning": "ベータモードで" + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ja/managing/transferring-subgraph-ownership.mdx b/website/pages/ja/managing/transferring-subgraph-ownership.mdx index 2e53cfd2ce04..6bfbbeefb354 100644 --- a/website/pages/ja/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/ja/managing/transferring-subgraph-ownership.mdx @@ -10,7 +10,7 @@ NFTの所有者は、そのサブグラフをコントロールします。所 この機能により、開発ライフサイクルに柔軟性が増すだけでなく、マルチシグにコントロールを移動したり、コミュニティメンバーがDAOに代わって作成するなど、特定のユースケースがより便利になります。 -## Viewing your subgraph as an NFT +## サブグラフを NFT として表示する サブグラフをNFTとして表示するには、OpenSeaのようなNFTマーケットプレイスにアクセスします。 @@ -24,7 +24,7 @@ https://opensea.io/your-wallet-address https://rainbow.me/your-wallet-addres ``` -## Transferring ownership of a subgraph +## サブグラフの所有権の譲渡 サブグラフの所有権を移転するには、Subgraph Studioに組み込まれたUIを使用します。 diff --git a/website/pages/ja/mips-faqs.mdx b/website/pages/ja/mips-faqs.mdx index 0e21ef9e18fb..c82399db143f 100644 --- a/website/pages/ja/mips-faqs.mdx +++ b/website/pages/ja/mips-faqs.mdx @@ -4,15 +4,17 @@ title: MIPs FAQs ## イントロダクション -The Graphエコシステムに参加するのはエキサイティングな時です。Graph Day 2022(https://thegraph.com/graph-day/2022/) でYaniv Talが[ホストサービスの終了](https://thegraph.com/blog/sunsetting-hosted-service/)を発表しましたが、これはThe Graphエコシステムが何年も前から取り組んできた瞬間でした。 +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! -ホスティングサービスの終了と、そのすべての活動の分散型ネットワークへの移行をサポートするため、The Graph Foundationは[MIPs(Migration Infrastructure Providers)プログラム](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program)を発表しています。 +It's an exciting time to be participating in The Graph ecosystem! 
During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. -MIPsプログラムは、Ethereumメインネットを超えるチェーンをインデックスするためのリソースを提供し、The Graphプロトコルが分散型ネットワークをマルチチェーン基盤層に拡大することを支援する、インデクサーのためのインセンティブ・プログラムです。 +To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). + +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. -### 便利なリソース +### Useful Resources - [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) - [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) @@ -20,106 +22,106 @@ The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to r - [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) - [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -### 1. サブグラフが破綻していても、有効なPOI(Proof of Indexing)を生成することは可能ですか? +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -はい、確かにそうです。 +Yes, it is indeed. -文脈上、arbitration charter [詳細はこちら](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract)は、失敗したサブグラフのPOIを生成する方法を指定しています。 +For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -コミュニティメンバーの[SunTzu](https://github.com/suntzu93)が、arbitration charterの方法論に準拠してこのプロセスを自動化するスクリプトを作成しました。レポ [here](https://github.com/suntzu93/get_valid_poi_subgraph)をチェックしてください。 +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). -### 2. MIPsプログラムは、どのチェーンに最初にインセンティブを与えるのでしょうか? +### 2. Which chain will the MIPs program incentivise first? -分散型ネットワークでサポートされる最初のチェーンは、Gnosis Chainです 以前はxDAIとして知られていたGnosis Chainは、EVMベースのチェーンです。Gnosis Chainは、ノードの実行のしやすさ、Indexerの準備、The Graphとの整合性、web3での採用などを考慮して、最初のものとして選択されました。 +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. -### 3. MIPsプログラムに新しいチェーンはどのように追加されるのでしょうか? +### 3. How will new chains be added to the MIPs program? 
-新しいチェーンは、インデクサーの準備、需要、コミュニティの感情に基づいて、MIPsプログラム全体を通して発表される予定です。チェーンはまずテストネットでサポートされ、その後、メインネットでそのチェーンをサポートするためのGIPが可決されます。MIPsプログラムに参加するインデクサは、サポートするチェーンに興味があるものを選び、チェーンごとに報酬を得ることができます。MIPs参加者は、そのパフォーマンス、ネットワークのニーズに応える能力、およびコミュニティのサポートに基づいて採点されます。 +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. -### 4. ネットワークが新しいチェーンに対応できるようになったら、どのように知ることができますか? +### 4. How will we know when the network is ready for a new chain? -グラフ財団は、QoSパフォーマンス指標、ネットワークパフォーマンス、コミュニティチャンネルを監視し、準備状況を最適に評価する予定です。優先順位は、ネットワークがマルチチェーンDappsのサブグラフを移行するためのパフォーマンスニーズを満たすことを保証することです。 +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. -### 5. チェーンごとの報酬はどのように分配されますか? +### 5. How are rewards divided per chain? -ノードを同期するための要件が​​チェーンごとに異なり、クエリの量と採用が異なることを考えると、チェーンごとの報酬は、すべてのフィードバックと学習が確実に取得されるように、そのチェーンのサイクルの最後に決定されます。ただし、チェーンがネットワークでサポートされると、いつでもインデクサーはクエリ料金とインデックス作成報酬を獲得することもできます。 +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. -### 6. MIPsプログラムのすべてのチェーンにインデックスを付ける必要がありますか、それとも1つのチェーンを選んでそのインデックスを付けることができますか? +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? -好きなチェーンにインデックスをつけることができます。MIPsプログラムの目標は、インデックス作成者に、彼らが望むチェーンにインデックスを作成し、彼らが興味を持つWeb3エコシステムをサポートするためのツールと知識を提供することです。しかし、すべてのチェーンには、テストネットからメインネットまでの段階があります。インデックスを作成するチェーンについては、必ずすべてのフェーズを完了させてください。フェーズについては、[MIPsの概念ページ](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059)を参照してください。 +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. -### 7. 報酬はいつ配布されるのですか? +### 7. When will rewards be distributed? -MIPsの報酬は、パフォーマンス指標が満たされ、移行されたサブグラフがそれらのIndexerによってサポートされると、チェーンごとに配布されます。チェーンごとの総報酬は、チェーンのサイクルの途中でお知らせします。 +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. -### 8. スコアリングの仕組み? +### 8. How does scoring work? 
-インデックサーは、プログラム期間中のスコアに基づいて、リーダーボードで報酬を競うことになります。プログラムのスコアリングは、以下の項目に基づいて行われます。 +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: **Subgraph Coverage** -- チェーンごとにサブグラフの最大サポートを提供していますか? +- Are you providing maximal support for subgraphs per chain? -- MIPでは、大規模なIndexerは、サポートするチェーンごとに50%以上のサブグラフをステークすることが期待されます。 +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. **Quality Of Service** -- インデックサーは、サービス品質(レイテンシー、新鮮なデータ、アップタイムなど)が良い状態でチェーンにサービスを提供していますか? +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? -- インデクサーは、ダップ開発者のニーズに応えられるようなサポートをしていますか? +- Is the Indexer supporting dapp developers being reactive to their needs? -インデクサーは効率的に割り当てられ、ネットワーク全体の健全性に寄与していますか? +Is Indexer allocating efficiently, contributing to the overall health of the network? **Community Support** -- インデクサーは、他のインデクサーの仲間と協力して、マルチチェーンに対応するためのセットアップを手伝っているのでしょうか? +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? -- インデクサーは、プログラムを通じてコア開発者にフィードバックしたり、フォーラムでインデクサーと情報を共有しているのでしょうか? +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? -### 9. Discordのロールはどのように割り振られるのでしょうか? +### 9. How will the Discord role be assigned? -数日中にモデレーターが役割を割り振る予定です。 +Moderators will assign the roles in the next few days. -### 10. テストネットでプログラムを開始し、その後メインネットに切り替えても問題ないでしょうか?私のノードを特定し、報酬を分配する際に考慮することは可能ですか? +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? -はい、実際にそうすることが期待されています。いくつかのフェーズがGörliにあり、1つはメインネットにあります。 +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. -### 11. 参加者がメインネットのデプロイを追加するのはどの時点からでしょうか? +### 11. At what point do you expect participants to add a mainnet deployment? -フェーズ3では、メインネットインデクサーを持つことが要求されます。これに関する詳細な情報は、[近日中にこのコンセプトページで共有されます](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -### 12. 報酬は権利確定の対象になりますか? +### 12. Will rewards be subject to vesting? -プログラム終了時に分配される割合は、権利確定に従うものとします。この詳細については、「インデクサー契約書」にて共有されます。 +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. -### 13. 複数のメンバーがいるチームの場合、チームメンバー全員にMIPs Discordのロールが与えられるのでしょうか? +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -はい。 +Yes -### 14. グラフキュレータープログラムでロックされたトークンを使って、MIPsテストネットに参加することは可能でしょうか? +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -はい。 +Yes -### 15. MIPsプログラム期間中、無効なPOIに異議を唱える期間はあるのでしょうか? +### 15. During the MIPs program, will there be a period to dispute invalid POI? -決定する予定です。このページで定期的に詳細をご確認いただくか、お急ぎの場合は info@thegraph.foundation までご連絡ください。 +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation -### 17. 2つの権利確定コントラクトを組み合わせることは可能ですか? +### 17. Can we combine two vesting contracts? -いいえ、選択肢としては、一方をもう一方にデリゲートするか、2つの別々のインデクサーを稼働させるかです。 +No. 
The options are: you can delegate one to the other one or run two separate indexers. -### 18. KYCに関する質問は? +### 18. KYC Questions? -メールを info@thegraph.foundation までお寄せください。 +Please email info@thegraph.foundation -### 19. Gnosisチェーンのインデックスの準備が出来ていないのですが、準備が出来たら他のチェーンからインデックスを開始しても良いですか? +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -はい。 +Yes -### 20. サーバーを稼働させる推奨地域はありますか? +### 20. Are there recommended regions to run the servers? -私たちは地域についての推奨はしていません。場所を選ぶ際には、暗号通貨の主要な市場がどこにあるのかを考えてみるとよいでしょう。 +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. -### 21. 「ハンドラーガスコスト」とは何ですか? +### 21. What is “handler gas cost”? -ハンドラーを実行するコストの決定論的な尺度です。名前から想像されるのとは逆に、ブロックチェーン上のガスコストとは関係ありません。 +It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/ja/network/benefits.mdx b/website/pages/ja/network/benefits.mdx index 6cf3a654260a..f1dbdaa20937 100644 --- a/website/pages/ja/network/benefits.mdx +++ b/website/pages/ja/network/benefits.mdx @@ -14,7 +14,7 @@ Graph の分散型ネットワークは、堅牢なインデックス作成と - 60-98%の月額コスト削減 - インフラ構築費用0ドル - 優れたアップタイム -- 438 のインデクサーへのアクセス (およびカウント中) +- Access to hundreds of independent Indexers around the world - グローバルコミュニティによる24時間365日の技術サポート ## 以下に利点を説明します @@ -79,9 +79,9 @@ Graph の分散型ネットワークは、堅牢なインデックス作成と サブグラフ上のシグナルのキュレーションは、オプションで1回限り、ネットゼロのコストで可能です(例えば、$1,000のシグナルをサブグラフ上でキュレーションし、後で引き出すことができ、その過程でリターンを得る可能性があります)。 -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +ユーザーによっては、サブグラフを新しいバージョンに更新する必要がある場合があります。イーサリアムのガス料金のため、この記事の執筆時点では更新には約 50 ドルの費用がかかります。 -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +[Arbitrum](/arbitrum/arbitrum-faq) のガス料金はイーサリアム メインネットよりも大幅に低いことに注意してください。 ## セットアップ費用不要; 運用効率アップ。 @@ -89,8 +89,8 @@ Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower ## 信頼性と信頼性回復力 -グラフの分散型ネットワークにより、`graph-node` を自己ホストする場合には存在しない地理的冗長性へのアクセスがユーザーに提供されます。ネットワークをグローバルに保護する 168 のインデクサー (およびカウント) によって達成される 99.9% 以上のアップタイムにより、クエリは確実に処理されます。 +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. 結論: グラフ ネットワークは、ローカルで `graph-node` を実行するよりも安価で使いやすく、優れた結果を生み出します。 -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). 
+今すぐ The Graph Network の使用を開始し、[サブグラフを The Graph の分散ネットワークにアップグレードする](/cookbook/upgrading-a-subgraph)方法を学びましょう。 diff --git a/website/pages/ja/network/curating.mdx b/website/pages/ja/network/curating.mdx index b17212fbecd2..a478f08f4057 100644 --- a/website/pages/ja/network/curating.mdx +++ b/website/pages/ja/network/curating.mdx @@ -4,7 +4,7 @@ title: キュレーティング キュレーターは、グラフの分散型経済にとって重要な存在です。 キューレーターは、web3 のエコシステムに関する知識を用いて、グラフネットワークがインデックスを付けるべきサブグラフを評価し、シグナルを送ります。 キュレーターはエクスプローラーを通じてネットワークのデータを見て、シグナルを出す判断をすることができます。グラフネットワークは、良質なサブグラフにシグナルを送ったキュレーターに、サブグラフが生み出すクエリフィーのシェアを与えます。 キュレーターには、早期にシグナルを送るという経済的なインセンティブが働きます。 キュレーターからのシグナルはインデクサーにとって非常に重要で、インデクサーはシグナルを受けたサブグラフからデータを処理したり、インデックスを作成したりすることができます。 -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +シグナリングの際、キュレーターはサブグラフの特定のバージョンでシグナリングするか、自動移行を使用してシグナリングするかを決定できます。自動移行を使用して通知する場合、キュレーターの共有は常に開発者によって公開された最新バージョンに移行されます。代わりに特定のバージョンでシグナルを送信することにした場合、共有は常にこの特定のバージョンに留まります。 キュレーションはリスクを伴うことを忘れないでください。 そして、信頼できるサブグラフでキュレーションを行うよう、十分に注意してください。 サブグラフの作成はパーミッションレスであり、人々はサブグラフを作成し、好きな名前をつけることができます。 キュレーションのリスクについての詳しいガイダンスは、 [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) をご覧ください。 @@ -60,7 +60,7 @@ The Graph の場合は、 [Bancor が実装しているボンディングカー ## リスク 1. The Graph では、クエリ市場は本質的に歴史が浅く、初期の市場ダイナミクスのために、あなたの%APY が予想より低くなるリスクがあります。 -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. +2. キュレーション料金 - キュレーターがサブグラフで GRT を通知すると、1% のキュレーション税がかかります。この手数料は燃やされ、残りは結合曲線の予備供給に預けられます。 3. キュレーターが株式をバーンして GRT を撤回すると、残りの株式の GRT 評価額が引き下げられます。場合によっては、キュレーターが**一度にすべて**共有を破棄することを決定する場合があることに注意してください。この状況は、dApp 開発者がサブグラフのバージョン管理/改善とクエリを停止した場合、またはサブグラフが失敗した場合によく発生する可能性があります。その結果、残りのキュレーターは当初の GRT の一部しか引き出せない可能性があります。リスク プロファイルが低いネットワーク ロールについては、[委任者](/network/delegating)を参照してください。 4. サブグラフはバグで失敗することがあります。 失敗したサブグラフは、クエリフィーが発生しません。 結果的に、開発者がバグを修正して新しいバージョンを展開するまで待たなければならなくなります。 - サブグラフの最新バージョンに加入している場合、シェアはその新バージョンに自動移行します。 これには 0.5%のキュレーション税がかかります。 @@ -79,13 +79,13 @@ The Graph の場合は、 [Bancor が実装しているボンディングカー - キュレーターはネットワークの理解を利用して、個々のサブグラフが将来的にどのように高いまたは低いクエリボリュームを生成するかを予測することができます。 - キュレーターは、グラフ・エクスプローラーで利用可能なメトリクスも理解する必要があります。 過去のクエリボリュームやサブグラフの開発者が誰であるかといったメトリクスは、サブグラフがシグナリングする価値があるかどうかを判断するのに役立ちます。 -### 3. What’s the cost of updating a subgraph? +### 3. サブグラフの更新にかかるコストはいくらですか? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +キュレーション シェアを新しいサブグラフ バージョンに移行すると、1% のキュレーション税が発生します。キュレーターは、サブグラフの最新バージョンを購読することを選択できます。キュレーション シェアが新しいバージョンに自動移行されると、キュレーターはキュレーション税の半分も支払うことになります。 0.5%。サブグラフの更新はガスを消費するオンチェーンアクションであるためです。 -### 4. How often can I update my subgraph? +### 4. サブグラフはどれくらいの頻度で更新できますか? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +サブグラフをあまり頻繁に更新しないことをお勧めします。詳細については、上記の質問を参照してください。 ### 5. キュレーションのシェアを売却することはできますか? 
diff --git a/website/pages/ja/network/developing.mdx b/website/pages/ja/network/developing.mdx index 3ab6b40cba68..c1f29bf6dbdd 100644 --- a/website/pages/ja/network/developing.mdx +++ b/website/pages/ja/network/developing.mdx @@ -8,25 +8,25 @@ title: 現像 ネットワークに配置されたサブグラフは、ライフサイクルが定義されています。 -### Build locally +### ローカルでビルド すべてのサブグラフ開発と同様に、ローカルでの開発とテストから始まります。開発者は、`graph-cli` と `graph-ts` を利用して、The Graph Network、ホステッド サービス、またはローカル グラフ ノードのいずれを構築する場合でも、同じローカル セットアップを使用して構築できます。サブグラフ。開発者は、[Matchstick](https://github.com/LimeChain/matchstick) などのツールを単体テストに使用して、サブグラフの堅牢性を向上させることをお勧めします。 > The Graph Network には、機能とネットワーク サポートに関して一定の制約があります。 [サポートされているネットワーク](/developing/supported-networks)のサブグラフのみがインデックス作成の報酬を獲得できます。また、IPFS からデータを取得するサブグラフも資格がありません。 -### Deploy to the Subgraph Studio +### Subgraph Studio にデプロイする サブグラフを定義すると、[Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/) に構築して展開できます。展開されたサブグラフにインデックスを付け、レート制限された開発とテストに使用できるようにするサンドボックス環境。これにより、開発者は、サブグラフでインデックス エラーが発生せず、期待どおりに動作することを確認する機会が得られます。 -### Publish to the Network +### ネットワークに公開 開発者がサブグラフに満足したら、それをグラフネットワークに公開することができます。これはオンチェーンアクションであり、インデックス作成者が発見できるようにサブグラフを登録します。公開されたサブグラフは対応するNFTを持ち、これは簡単に転送できます。公開されたサブグラフには関連するメタデータがあり、他のネットワーク参加者に有用なコンテキストと情報を提供します。 -### Signal to Encourage Indexing +### 索引作成を促すシグナル 公開されたサブグラフは、シグナルを追加しないとインデックス作成者に拾われにくいです。シグナルは、与えられたサブグラフに関連するロックされたGRTで、与えられたサブグラフがクエリー量を受け取ることをインデックス作成者に示し、またその処理に利用できるインデックス作成報酬に寄与します。サブグラフの開発者は、インデックス作成を促進するために、一般的にそのサブグラフにシグナルを追加する。サードパーティのキュレーターも、そのサブグラフがクエリ量を増加させると判断した場合、そのサブグラフにシグナルを追加することができます。 -### Querying & Application Development +### クエリ& アプリケーション開発 サブグラフがインデクサーによって処理され、クエリに使用できるようになると、開発者はアプリケーションでサブグラフの使用を開始できます。開発者は、サブグラフを処理したインデクサーにクエリを転送するゲートウェイを介してサブグラフにクエリを実行し、GRT でクエリ料金を支払います。 @@ -34,20 +34,20 @@ title: 現像 例えば、クエリのレスポンスが速いインデックスを優先したり、データが最新であるインデックスを優先したりすることができます。これらの制御は、Subgraph Studio で設定します。 -### Updating Subgraphs +### サブグラフの更新 サブグラフ開発者は、バグを修正したり、新しい機能を追加したりして、サブグラフを更新することができます。サブグラフの開発者は、サブグラフの新しいバージョンをSubgraph Studioにデプロイし、制限時間内に開発・テストすることができます。 -Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +サブグラフ開発者は、更新の準備が完了すると、トランザクションを開始してサブグラフを新しいバージョンに向けることができます。サブグラフを更新すると、すべてのシグナルが新しいバージョンに移行されます (シグナルを適用したユーザーが「自動移行」を選択したと仮定します)。これには移行税もかかります。このシグナルの移行により、インデクサーは新しいバージョンのサブグラフのインデックス作成を開始するよう促されるため、すぐにクエリに使用できるようになるはずです。 -### Deprecating Subgraphs +### サブグラフの廃止 ある時点で、開発者は公開されたサブグラフが不要になったと判断することがあります。そのとき、開発者はサブグラフを非推奨とし、キュレータにシグナライズされたGRTを返します -### Diverse Developer Roles +### 多様な開発者の役割 開発者の中には、ネットワーク上のサブグラフのライフサイクルに関与し、自分のサブグラフを公開し、クエリし、反復する者もいる。サブグラフの開発に重点を置き、他の人が構築できるオープンなAPIを構築する人もいます。また、アプリケーションに焦点を当て、他の人が配置したサブグラフをクエリすることもあります。 -### Developers and Network Economics +### 開発者とネットワーク経済学 -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. 
+開発者はネットワークにおける主要な経済的主体であり、インデックス作成を促進するために GRT をロックアップし、ネットワークの主要な価値交換であるサブグラフのクエリを非常に重要にしています。サブグラフ開発者は、サブグラフが更新されるたびに GRT も書き込みます。 diff --git a/website/pages/ja/network/explorer.mdx b/website/pages/ja/network/explorer.mdx index 4fdd65814731..b3a1c9c9a6aa 100644 --- a/website/pages/ja/network/explorer.mdx +++ b/website/pages/ja/network/explorer.mdx @@ -74,7 +74,7 @@ Subgraph Studio でサブグラフを公開すると、ナビゲーション バ デリゲーターは、グラフネットワークの安全性と分散性を維持するための重要な役割を担っています。 デリゲーターは、GRT トークンを 1 人または複数のインデクサーにデリゲート(=「ステーク」)することでネットワークに参加します。 デリゲーターがいなければ、インデクサーは大きな報酬や手数料を得ることができません。 そのため、インデクサーは獲得したインデクシング報酬やクエリフィーの一部をデリゲーターに提供することで、デリゲーターの獲得を目指します。 -一方、デリゲーターは、過去の実績、インデックス作成報酬率、クエリ手数料のカット率など、さまざまな変数に基づいてインデクサーを選択します。 また、コミュニティ内での評判も関係してきます。 選ばれたインデクサーとは、 [The Graph’s Discord](https://discord.gg/graphprotocol) や [The Graph Forum](https://forum.thegraph.com/)でつながることをお勧めします。 +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! ![エクスプローラーイメージ 7](/img/Delegation-Overview.png) @@ -87,7 +87,7 @@ Subgraph Studio でサブグラフを公開すると、ナビゲーション バ - 現在プロトコルに保持している GRT 総量 - 最後にデリゲートした日 -If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). +委任者になる方法について詳しく知りたい場合は、もう探す必要はありません。 [公式ドキュメント](/network/delegating)または[The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers)にアクセスするだけです。 ## ネットワーク diff --git a/website/pages/ja/network/indexing.mdx b/website/pages/ja/network/indexing.mdx index 761ae8354422..ca3453fcb6e9 100644 --- a/website/pages/ja/network/indexing.mdx +++ b/website/pages/ja/network/indexing.mdx @@ -2,7 +2,7 @@ title: インデキシング --- -インデクサは、グラフネットワークのノードオペレータであり、グラフトークン(GRT)を賭けて、インデックス作成や問い合わせ処理のサービスを提供します。 インデクサーは、そのサービスの対価として、クエリフィーやインデックス作成の報酬を得ることができます。 また、Cobbs-Douglas Rebate Function に基づいて、ネットワーク貢献者全員にその成果に応じて分配される Rebate Pool からも報酬を得ることもできます。 +インデクサーは、The Graph Network内のノードオペレーターであり、インデックス化とクエリ処理のサービスを提供するためにGraph Token(GRT)をステークします。インデクサーは、そのサービスに対するクエリ料金とインデックスリワードを獲得します。また、指数的なリベート関数に従ってリベートされるクエリ料金も獲得します。 プロトコルにステークされた GRT は解凍期間が設けられており、インデクサーが悪意を持ってアプリケーションに不正なデータを提供したり、不正なインデックスを作成した場合には、スラッシュされる可能性があります。 また、インデクサーはデリゲーターからステークによる委任を受けて、ネットワークに貢献することができます。 @@ -26,7 +26,7 @@ title: インデキシング インデキシング報酬は、年間 3%の発行量に設定されているプロトコル・インフレから得られます。 報酬は、それぞれのサブグラフにおけるすべてのキュレーション・シグナルの割合に基づいてサブグラフに分配され、そのサブグラフに割り当てられたステークに基づいてインデクサーに分配されます。 **特典を受けるためには、仲裁憲章で定められた基準を満たす有効な POI(Proof of Indexing)で割り当てを終了する必要があります。** -報酬を計算するために、コミュニティによって多数のツールが作成されています。[ コミュニティ ガイド コレクション](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c)に整理されたコレクションがあります。また、[Discord サーバー](https://discord.gg/graphprotocol)の #Delegators チャンネルと #Indexers チャンネルでツールの最新リストを見つけることができます。インデクサー ソフトウェア スタックと統合された[推奨割り当てオプティマイザー](https://github.com/graphprotocol/AllocationOpt.jl)。 +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). 
You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. ### POI(proof of indexing)とは何ですか? @@ -81,17 +81,17 @@ Use Etherscan to call `getRewards()`: ### クエリフィーリベートとは何ですか、またいつ配布されますか? -クエリフィーは、割り当てが終了するたびにゲートウェイが徴収し、サブグラフのクエリフィーリベートプールに蓄積されます。 リベートプールは、インデクサーがネットワークのために獲得したクエリフィーの量にほぼ比例してステークを割り当てるように促すためのものです。 プール内のクエリフィーのうち、特定のインデクサーに割り当てられる部分はコブス・ダグラス生産関数を用いて計算されます。 インデクサーごとの分配額は、プールへの貢献度とサブグラフでのステークの割り当ての関数となります。 +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -割り当てが終了し、争議期間が経過すると、リベートをインデクサーが請求できるようになります。 請求されたクエリフィーのリベートは、クエリフィーカットとデリゲーションプールの比率に基づいて、インデクサーとそのデリゲーターに分配されます。 +割り当てが閉じられると、リベートはインデクサーによって請求されることができるようになります。請求されると、クエリ料金のリベートは、クエリ料金のカットと指数的なリベート関数に基づいて、インデクサーとその委任者に分配されます。 ### クエリフィーカットとインデキシングリワードカットとは? `クエリフィーカット` と`インデキシングリワードカット` の値は、インデクサーが クールダウンブロックと共に設定できるデリゲーションパラメータで、インデクサーとそのデリゲーター間の GRT の分配を制御するためのものです。 デリゲーションパラメータの設定方法については、[Staking in the Protocol](/network/indexing#stake-in-the-protocol)の最後のステップを参照してください。 -- **クエリフィーカット** - サブグラフに蓄積されたクエリフィーリベートのうち、インデクサーに分配される割合です。 これが 95%に設定されていると、割り当てが要求されたときに、インデクサはクエリフィー・リベート・プールの 95%を受け取り、残りの 5%はデリゲータに渡されます。 +- **queryFeeCut** - クエリ料金のリベートの%を示します。これが95%に設定されている場合、インデクサーは割り当てが閉じられた際に獲得するクエリ料金の95%を受け取り、残りの5%は委任者に支払われます。 -- **インデキシング・リワードカット** - サブグラフに蓄積されたインデキシング・リワードのうち、インデクサーに分配される割合です。 これが 95%に設定されていると、割り当てが終了したときに、インデクサがインデキシング・リワードプールの 95%を受け取り、残りの 5%をデリゲータが分け合うことになります。 +- **indexingRewardCut** - インデックスリワードの%を示します。これが95%に設定されている場合、インデクサーは割り当てが閉じられた際に獲得するインデックスリワードの95%を受け取り、残りの5%は委任者で分割されます。 ### インデクサーはどのサブグラフにインデックスを付けるかをどう見分けるのですか? @@ -374,7 +374,7 @@ docker-compose up #### はじめに -インデクサーエージェントとインデクサーサービスは、グラフノードインフラストラクチャーと同居している必要があります。 ここでは、NPM パッケージやソースを使ってベアメタル上で実行する方法と、Google Cloud Kubernetes Engine 上で kubernetes や docker を使って実行する方法を説明します。 これらの設定例があなたのインフラに適用できない場合は、コミュニティガイドを参照するか、[Discord](https://discord.gg/graphprotocol)でお問い合わせください。 インデクサーコンポーネントを起動する前に、[プロトコルのステーク](/indexing#stake-in-the-protocol) を忘れないでください。 +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! 
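Before moving on to the setup steps, here is a quick numeric sketch of how the `queryFeeCut` / `indexingRewardCut` delegation parameters described above divide earnings between an Indexer and its Delegators. On-chain these cuts are expressed in parts-per-million (as in the `setDelegationParameters(950000, 600000, 500)` example later on this page); the `splitByCut` helper below is illustrative only, not part of the indexer tooling.

```tsx
// Cuts are parts-per-million: 950000 PPM = 95%.
const PPM = 1_000_000

// Hypothetical helper: splits a reward amount between the Indexer and its Delegators
// according to a cut expressed in PPM (applies to queryFeeCut and indexingRewardCut alike).
function splitByCut(totalGrt: number, cutPpm: number) {
  const indexer = (totalGrt * cutPpm) / PPM
  return { indexer, delegators: totalGrt - indexer }
}

// With indexingRewardCut = 950000 (95%), 1,000 GRT of rewards splits 950 / 50.
console.log(splitByCut(1_000, 950_000)) // { indexer: 950, delegators: 50 }
```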
#### NPM パッケージから @@ -661,21 +661,21 @@ ActionType { ソースからの使用例: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` 割り当て管理でサポートされるアクションタイプは、入力要件が異なることに注意してください。 @@ -797,8 +797,4 @@ setDelegationParameters(950000, 600000, 500) - **Closed** - インデクサーは、1 エポックが経過した時点で自由に割り当てをクローズすることができます([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) また、インデクサエージェントは、**maxAllocationEpochs**(現在は 28 日)が経過した時点で自動的に割り当てをクローズします。 割り当てが有効な POI(Proof of Indexing)とともにクローズされると、そのインデクサー報酬がインデクサーとそのデリゲーターに分配されます(詳細は下記の「報酬の分配方法」を参照してください) -- **Finalized** - 割り当てがクローズすると、争議期間が設けられます。 その後、割り当てが**finalized**したとみなされ、クエリフィーのリベートを請求することができます(claim()) インデクサーエージェントは、ネットワークを監視して**finalized** した割り当てを検出し、設定可能な(オプションの)しきい値 **—-allocation-claim-threshold**を超えていれば、それを請求できます。 - -- **請求** - アロケーションの最終状態で、アクティブなアロケーションとしての期間が終了し、全ての適格な報酬が配布され、クエリ料の払い戻しが請求されます。 - インデクサーは、チェーン上に配置を作成する前に、チェーンヘッドにサブグラフの配置を同期させるために、オフチェーン同期機能を利用することを推奨します。この機能は、同期に28エポック以上かかる可能性があるサブグラフや、不定期に失敗する可能性があるサブグラフに特に有効です。 diff --git a/website/pages/ja/new-chain-integration.mdx b/website/pages/ja/new-chain-integration.mdx index c5934efa6f87..421fb74e4696 100644 --- a/website/pages/ja/new-chain-integration.mdx +++ b/website/pages/ja/new-chain-integration.mdx @@ -1,39 +1,39 @@ --- -title: Integrating New Networks +title: 新しいネットワークの統合 --- -Graph Node can currently index data from the following chain types: +Graph Nodeは現在、以下のチェーンタイプからデータをインデックス化できます: - Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) - NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) - Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) - Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) -If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. +もしご興味があるチェーンがあれば、統合はGraph Nodeの設定とテストの問題です。 -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +もしも異なるチェーンタイプに興味がある場合は、新しいGraph Nodeを構築する必要があります。私たちの推奨するアプローチは、まず対象のチェーン用の新しいFirehoseを開発し、それをGraph Nodeと統合することです。詳細は以下に記載しています。 -** 1. EVM JSON-RPC** +**1. 
EVM JSON-RPC** -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). +ブロックチェーンが EVM と同等であり、クライアント/ノードが標準の EVM JSON-RPC API を公開している場合、グラフ ノードは新しいチェーンのインデックスを作成できるはずです。 詳細については、「EVM JSON-RPC のテスト」(new-chain-integration#testing-an-evm-json-rpc) を参照してください。 **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. -## Difference between EVM JSON-RPC & Firehose +## EVM JSON-RPC と Firehose の違い -While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. +これらの2つの方法は、サブグラフに適していますが、[Substreams](substreams/), を使用して開発者がビルドする場合、常にFirehoseが必要です。これには、[Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/) のようなサブストリームを活用したサブグラフの構築が含まれます。さらに、FirehoseはJSON-RPCと比較して、改善されたインデックス化速度を提供します。 -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +新しいEVMチェーンの統合者は、サブストリームの利点とその大規模な並列化されたインデックス化能力を考慮して、Firehoseベースのアプローチも検討することができます。両方をサポートすることで、開発者は新しいチェーンに対してサブストリームまたはサブグラフのどちらを構築するかを選択できるようになります。 -> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. 
(It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **注意**: EVM チェーンの Firehose ベースの統合では、インデクサーがチェーンのアーカイブ RPC ノードを実行してサブグラフに適切にインデックスを付ける必要があります。 これは、通常「eth_call」RPC メソッドによってアクセスできるスマート コントラクト状態を Firehose が提供できないためです。 (eth_calls は [開発者にとって良い習慣ではない](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/) であることを思い出してください) --- -## Testing an EVM JSON-RPC +## EVM JSON-RPC のテスト -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: +Graph NodeがEVMチェーンからデータを取り込むためには、RPCノードは以下のEVM JSON RPCメソッドを公開する必要があります。 - `eth_getLogs` - `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): @@ -43,32 +43,32 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex - `eth_getTransactionReceipt`, in a JSON-RPC batch request - _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ -### Graph Node Configuration +### Graph Node の設定 -**Start by preparing your local environment** +**ローカル環境を準備することから始めます** 1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. -3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ +2. [この行](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) を変更して、新しいネットワーク名と EVM JSON RPC 準拠の URL を含めます。 + > 環境変数名自体は変更しないでください。ネットワーク名が異なる場合でも、「ethereum」という名前のままである必要があります。 +3. IPFSノードを実行するか、The Graphが使用するものを使用してください: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**サブグラフをローカルにデプロイして統合をテストします。** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` +2. 簡単なサブグラフの例を作成します。 いくつかのオプションを以下に示します。 + 1. 事前にパックされた [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) スマート コントラクトとサブグラフは良い出発点です。 + 2. 既存のスマート コントラクトまたは Solidity 開発環境からローカル サブグラフをブートストラップする [グラフ プラグインで Hardhat を使用](https://github.com/graphprotocol/hardhat-graph) +3. [`dataSources.network`](http://dataSources.network) を以前にグラフ ノードに渡した同じ名前に変更して、結果の `subgraph.yaml` を調整します。 +4. グラフ ノードでサブグラフを作成します: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. サブグラフをGraph Nodeに公開するには、次のコマンドを使用します:graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT -Graph Node should be syncing the deployed subgraph if there are no errors. 
Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. +Graph Nodeはエラーがない場合、デプロイされたサブグラフを同期するはずです。同期が完了するのを待ってから、ログに表示されたAPIエンドポイントに対していくつかのGraphQLクエリを送信してください。 --- -## Integrating a new Firehose-enabled chain +## 新しい Firehose 対応チェーンの統合 -Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: +新しいチェーンを統合することは、Firehoseアプローチを使用しても可能です。これは、非EVMチェーン向けの現在の最良のオプションであり、サブストリームサポートの要件でもあります。追加のドキュメントでは、Firehoseの動作方法、新しいチェーンへのFirehoseサポートの追加、およびGraph Nodeとの統合に焦点を当てています。統合者に推奨されるドキュメント: 1. [General docs on Firehose](firehose/) 2. [Adding Firehose support for a new chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) diff --git a/website/pages/ja/operating-graph-node.mdx b/website/pages/ja/operating-graph-node.mdx index 4885a39afc71..d491c6607c34 100644 --- a/website/pages/ja/operating-graph-node.mdx +++ b/website/pages/ja/operating-graph-node.mdx @@ -22,7 +22,7 @@ title: オペレーティンググラフノード 一部のサブグラフは完全なノードのみを必要とする場合がありますが、一部のサブグラフには追加の RPC 機能を必要とするインデックス機能が含まれる場合があります。具体的には、インデックス作成の一部として `eth_calls` を作成するサブグラフには、[EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) をサポートするアーカイブ ノードが必要になります。、および `callHandlers` を持つサブグラフ、または `call` フィルタを持つ `blockHandlers` には、`trace_filter` サポートが必要です ([トレース モジュールのドキュメントはこちら](https://openethereum.github.io/JSONRPC-trace-module))。 -**近日公開です。Network Firehoses** - Firehose は、順序付けられた、しかしフォークを意識したブロックのストリームを提供する gRPC サービスで、The Graph のコア開発者により、大規模で高性能なインデックス作成をより良くサポートするために開発されました。これは現在、インデクサーの要件ではありませんが、インデクサーは、ネットワークの完全サポートに先立って、この技術に慣れることが推奨されています。Firehose の詳細については、[こちら](https://firehose.streamingfast.io/)を参照してください。 +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFSノード @@ -114,7 +114,7 @@ indexers = [ "<.. list of all indexing nodes ..>" ] #### 複数のグラフノード -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestor), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). 
+グラフ ノードのインデックス作成は水平方向にスケーリングでき、グラフ ノードの複数のインスタンスを実行してインデックス作成とクエリを異なるノードに分割します。これは、起動時に別の `node_id` で構成されたグラフ ノードを実行するだけで (たとえば、Docker Compose ファイルで)、それを `config.toml` ファイルで使用できます。 [専用クエリ ノード](#dedicated-query-nodes)、[ブロック インジェスター](#dedicated-block-ingestor)、[デプロイ ルール](#deployment-rules)を使用したノード間でのサブグラフの分割を指定します。 > なお、複数のGraph Nodeはすべて同じデータベースを使用するように設定することができ、Shardingによって水平方向に拡張することができます。 @@ -293,7 +293,7 @@ graphmanコマンドは公式コンテナに含まれており、グラフノー グラフノードは、プロバイダからのリフェッチを節約するために、ストア内の特定のデータをキャッシュします。ブロックは、`eth_calls`の結果と同様にキャッシュされます(後者は特定のブロックのものとしてキャッシュされます)。このキャッシュは、わずかに変更されたサブグラフの "再同期" 時にインデックス作成速度を劇的に向上させることができます。 -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +ただし、場合によっては、イーサリアム ノードが一定期間誤ったデータを提供した場合、それがキャッシュに入り、誤ったデータやサブグラフの失敗につながる可能性があります。この場合、インデクサーは `graphman` を使用して汚染されたキャッシュをクリアし、影響を受けたサブグラフを巻き戻して、(できれば) 正常なプロバイダーから新しいデータをフェッチします。 TX受信欠落イベントなど、ブロックキャッシュの不整合が疑われる場合。 @@ -306,7 +306,7 @@ TX受信欠落イベントなど、ブロックキャッシュの不整合が疑 サブグラフがインデックス化されると、インデクサはサブグラフの専用クエリエントポイントを介してクエリを提供することが期待できます。もしインデクサがかなりの量のクエリを提供することを望むなら、専用のクエリノードを推奨します。また、クエリ量が非常に多い場合、インデクサーはレプリカシャードを構成して、クエリがインデックス作成プロセスに影響を与えないようにしたいと思うかもしれません。 -However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. +ただし、専用のクエリ ノードとレプリカを使用しても、特定のクエリの実行に時間がかかる場合があり、場合によってはメモリ使用量が増加し、他のユーザーのクエリ時間に悪影響を及ぼします。 「銀の弾丸」は一つではなく、遅いクエリを防止、診断、対処するための様々なツールがあります。 diff --git a/website/pages/ja/publishing/publishing-a-subgraph.mdx b/website/pages/ja/publishing/publishing-a-subgraph.mdx index 92a0f48caf98..5d0bd2e459cb 100644 --- a/website/pages/ja/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ja/publishing/publishing-a-subgraph.mdx @@ -6,11 +6,11 @@ title: 分散型ネットワークへのサブグラフの公開 サブグラフを分散型ネットワークに公開すると、[キュレーター](/network/curating)がキュレーションを開始したり、[インデクサー](/network/indexing)がインデックスを作成したりできるようになります。 -分散型ネットワークにサブグラフを公開する方法については、[こちらのビデオ](https://youtu.be/HfDgC2oNnwo?t=580)をご覧ください。 + 対応ネットワークの一覧は[こちら](/developing/supported-networks)で確認できます。 -## Publishing a subgraph +## サブグラフの公開 サブグラフは、Subgraph Studio のダッシュボードから**Publish** ボタンをクリックすることで、直接分散型ネットワークに公開することができます。 サブグラフが公開されると、[Graph Explorer](https://thegraph.com/explorer/)で閲覧できるようになります。 @@ -20,7 +20,7 @@ title: 分散型ネットワークへのサブグラフの公開 - 既存のサブグラフの新バージョンを公開する場合は、上記と同じルールが適用されます。 -## Curating your subgraph +## サブグラフのキュレーション > 10,000GRTのサブグラフは、できるだけ早くインデックス化され、クエリに利用できるように、自分でキュレーションすることが推奨されます。 @@ -28,6 +28,6 @@ Subgraph Studioでは、同じトランザクションでGRTをサブグラフ ![キュレーションプール](/img/curate-own-subgraph-tx.png) -## Updating metadata for a published subgraph +## パブリッシュされたサブグラフのメタデータの更新 サブグラフが分散型ネットワークに公開されると、サブグラフの Subgraph Studio ダッシュボードで更新を行うことにより、いつでもメタデータを変更することができます。 変更を保存し、更新内容をネットワークに公開すると、グラフエクスプローラーに反映されます。 デプロイメントが変更されていないため、新しいバージョンは作成されません。 diff --git a/website/pages/ja/querying/graphql-api.mdx b/website/pages/ja/querying/graphql-api.mdx index b35754bf250a..bb65c50d2929 100644 --- a/website/pages/ja/querying/graphql-api.mdx +++ b/website/pages/ja/querying/graphql-api.mdx @@ -8,7 +8,7 @@ Graph Protocol で使用される GraphQL Query API について説明します サブグラフのスキーマには、`Entities`と呼ばれるタイプが定義されています。各`Entity`タイプには、トップレベルの`Query`タイプに`entity`と`entities`フィールドが生成されます。なお、The Graph 
を使用する際には、`graphql`の`query` の先頭にクエリを含める必要はありません。 -### Examples +### 例 スキーマで定義された 1 つの`Token`エンティティに対するクエリ: @@ -21,9 +21,9 @@ Graph Protocol で使用される GraphQL Query API について説明します } ``` -> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. +> **注:** 単一のエンティティを照会する場合、`id` フィールドは必須であり、文字列でなければなりません。 -Query all `Token` entities: +すべての `Token` エンティティをクエリします。 ```graphql { @@ -34,9 +34,9 @@ Query all `Token` entities: } ``` -### Sorting +### 並べ替え -When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. +コレクションをクエリする場合、`orderBy` パラメータを使用して特定の属性で並べ替えることができます。さらに、`orderDirection` を使用してソート方向を指定できます。昇順の場合は `asc`、降順の場合は `desc` です。 #### 例 @@ -49,11 +49,11 @@ When querying a collection, the `orderBy` parameter may be used to sort by a spe } ``` -#### Example for nested entity sorting +#### ネストされたエンティティの並べ替えの例 -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. +グラフ ノード [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) の時点で、エンティティを並べ替えることができますネストされたエンティティに基づいています。 -In the following example, we sort the tokens by the name of their owner: +次の例では、所有者の名前でトークンを並べ替えます。 ```graphql { @@ -66,19 +66,19 @@ In the following example, we sort the tokens by the name of their owner: } ``` -> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. +> 現在、`@entity` および `@derivedFrom` フィールドで、1 レベルの深い `String` または `ID` 型で並べ替えることができます。残念ながら、[1 レベルの深さのエンティティのインターフェイスによる並べ替え](https://github.com/graphprotocol/graph-node/pull/4058)、配列およびネストされたエンティティであるフィールドによる並べ替えは、まだサポートされていません。 -### Pagination +### ページネーション -When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. +コレクションをクエリする場合、`first` パラメータを使用して、コレクションの先頭から改ページすることができます。デフォルトのソート順は、作成時間順ではなく、英数字の昇順の ID 順であることに注意してください。 -Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +さらに、 `skip` パラメーターを使用してエンティティをスキップし、ページ分割することができます。例えば`first:100` は最初の 100 個のエンティティを示し、`first:100, skip:100` は次の 100 個のエンティティを示します。 -Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. +クエリは一般にパフォーマンスが低いため、非常に大きな `skip` 値を使用しないでください。多数のアイテムを取得するには、最後の例で示したように、属性に基づいてエンティティをページングする方がはるかに優れています。 -#### Example using `first` +#### `first` を使用した例 -Query the first 10 tokens: +最初の 10 個のトークンを照会します。 ```graphql { @@ -89,11 +89,11 @@ Query the first 10 tokens: } ``` -To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. 
+コレクションの途中にあるエンティティのグループをクエリするには、`skip` パラメータを `first` パラメータと組み合わせて使用​​して、最初から指定された数のエンティティをスキップできます。コレクションの。 -#### Example using `first` and `skip` +#### `first` と `skip` を使用した例 -Query 10 `Token` entities, offset by 10 places from the beginning of the collection: +コレクションの先頭から 10 桁ずれた 10 個の `Token` エンティティをクエリします。 ```graphql { @@ -104,9 +104,9 @@ Query 10 `Token` entities, offset by 10 places from the beginning of the collect } ``` -#### Example using `first` and `id_ge` +#### `first` と `id_ge` を使用した例 -If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. For example, a client would retrieve a large number of tokens using this query: +クライアントが多数のエンティティを取得する必要がある場合は、属性に基づいてクエリを実行し、その属性でフィルター処理する方がはるかに効率的です。たとえば、クライアントは次のクエリを使用して多数のトークンを取得します。 ```graphql query manyTokens($lastID: String) { @@ -117,15 +117,15 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +初めて、`lastID = ""` でクエリを送信し、後続のリクエストでは `lastID` を最後の `id` 属性に設定します。前のリクエストのエンティティ。このアプローチは、`skip` 値を増やして使用するよりもはるかに優れたパフォーマンスを発揮します。 -### Filtering +### フィルタリング -You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. +クエリで `where` パラメータを使用して、さまざまなプロパティをフィルタリングできます。 `where` パラメータ内で複数の値をフィルタリングできます。 -#### Example using `where` +#### `where` を使用した例 -Query challenges with `failed` outcome: +`failed` 結果のクエリ チャレンジ: ```graphql { @@ -139,9 +139,9 @@ Query challenges with `failed` outcome: } ``` -You can use suffixes like `_gt`, `_lte` for value comparison: +値の比較には、`_gt`、`_lte` などのサフィックスを使用できます。 -#### Example for range filtering +#### 範囲フィルタリングの例 ```graphql { @@ -153,11 +153,11 @@ You can use suffixes like `_gt`, `_lte` for value comparison: } ``` -#### Example for block filtering +#### ブロックフィルタリングの例 -You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. +`_change_block(number_gte: Int)` でエンティティをフィルタリングすることもできます - これは、指定されたブロック内またはそれ以降に更新されたエンティティをフィルタリングします。 -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +これは、前回のポーリング以降など、変更されたエンティティのみをフェッチする場合に役立ちます。または、サブグラフでエンティティがどのように変化しているかを調査またはデバッグするのに役立ちます (ブロック フィルターと組み合わせると、特定のブロックで変更されたエンティティのみを分離できます)。 ```graphql { @@ -169,11 +169,11 @@ This can be useful if you are looking to fetch only entities which have changed, } ``` -#### Example for nested entity filtering +#### ネストされたエンティティ フィルタリングの例 -Filtering on the basis of nested entities is possible in the fields with the `_` suffix. +`_` サフィックスが付いたフィールドでは、ネストされたエンティティに基づくフィルタリングが可能です。 -This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. 
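Looping back to the `first` / `id_gt` pagination pattern above: the prose describes sending `lastID = ""` first and then reusing the last entity's `id`, but does not show the client side. Below is a hedged TypeScript sketch of that loop; the `execute` callback is a stand-in for whatever GraphQL client you use, and the entity fields mirror the `manyTokens` example from this section.

```tsx
// Hedged sketch of the `first` / `id_gt` pagination loop described above.
type Token = { id: string; owner: string }

async function fetchAllTokens(
  execute: (query: string, variables: Record<string, unknown>) => Promise<{ tokens: Token[] }>
): Promise<Token[]> {
  const query = /* GraphQL */ `
    query manyTokens($lastID: String) {
      tokens(first: 1000, where: { id_gt: $lastID }) {
        id
        owner
      }
    }
  `
  const all: Token[] = []
  let lastID = ''
  while (true) {
    const { tokens } = await execute(query, { lastID })
    all.push(...tokens)
    if (tokens.length < 1000) break // last page reached
    lastID = tokens[tokens.length - 1].id // cursor for the next request
  }
  return all
}
```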
+これは、子レベルのエンティティが指定された条件を満たすエンティティのみをフェッチする場合に役立ちます。 ```graphql { @@ -187,13 +187,13 @@ This can be useful if you are looking to fetch only entities whose child-level e } ``` -#### Logical operators +#### 論理演算子 -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. +Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) の時点で、複数のグループをグループ化できます同じ `where` 引数で `and` または `or` 演算子を使用して複数の基準に基づいて結果をフィルタリングします。 ##### `AND` 演算子 -In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. +次の例では、`outcome` `succeeded` および `number` が `100` 以上のチャレンジをフィルタリングしています。 ```graphql { @@ -207,7 +207,7 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> **シンタックス シュガー:** コンマで区切られた部分式を渡すことで `and` 演算子を削除することで、上記のクエリを簡素化できます。 > > ```graphql > { @@ -223,7 +223,7 @@ In the following example, we are filtering for challenges with `outcome` `succee ##### `OR` 演算子 -In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +次の例では、`outcome` `succeeded` または `number` が `100` 以上のチャレンジをフィルタリングしています。 ```graphql { @@ -237,11 +237,11 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. +> **注意**:クエリを構築する際には、`または`演算子の使用によるパフォーマンスへの影響を考慮することが重要です。`または`は検索結果を広げるための便利なツールとなり得ますが、重要なコストも伴います。`または`の主な問題の1つは、クエリの遅延を引き起こす可能性があることです。これは、`または`がデータベースに複数のインデックスをスキャンする必要があるため、時間のかかるプロセスとなるからです。これらの問題を避けるために、開発者は可能な限りまたはの代わりにかつ演算子を使用することが推奨されます。これにより、より正確なフィルタリングが可能となり、より高速で正確なクエリが実行できるでしょう。 -#### All Filters +#### すべてのフィルター -Full list of parameter suffixes: +パラメータのサフィックスの全リスト: ``` _ @@ -266,21 +266,21 @@ _not_ends_with _not_ends_with_nocase ``` -> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. +> 一部の接尾辞は、特定のタイプでのみサポートされていることに注意してください。たとえば、`Boolean` は `_not`、`_in`、および `_not_in` のみをサポートしますが、`_` はサポートしません。オブジェクト型とインターフェイス型でのみ使用できます。 -In addition, the following global filters are available as part of `where` argument: +さらに、次のグローバル フィルターを `where` 引数の一部として使用できます。 ```gr _change_block(number_gte: Int) ``` -### Time-travel queries +### タイムトラベル クエリ -You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. 
The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. +デフォルトである最新のブロックだけでなく、過去の任意のブロックについてもエンティティの状態を照会できます。クエリが発生するブロックは、クエリのトップレベル フィールドに `block` 引数を含めることで、ブロック番号またはブロック ハッシュのいずれかで指定できます。 -The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +そのようなクエリの結果は時間の経過とともに変化しません。つまり、特定の過去のブロックでクエリを実行しても、いつ実行されたとしても同じ結果が返されます。ただし、チェーンの先頭に非常に近いブロックでクエリを実行する場合を除いては、そのブロックがメインチェーン上にないことが判明し、チェーンが再構築される場合に結果が変わる可能性があります。ブロックが最終的とみなせるようになると、クエリの結果は変わらなくなります。 -Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +現在の実装には、これらの保証を破る可能性がある特定の制限がまだ存在することに注意してください。実装は常に特定のブロックハッシュがメインチェーン上に存在しないことを判断できるわけではなく、また、まだ最終的とみなせないブロックのブロックハッシュによるクエリの結果が、同時に実行されるブロックの再構築によって影響を受ける可能性があります。これらの制限は、ブロックが最終的であり、メインチェーン上に存在することが確認されている場合には、ブロックハッシュによるクエリの結果に影響を与えません。詳細は[この問題](https://github.com/graphprotocol/graph-node/issues/1405)で説明されています。 #### 例 @@ -296,7 +296,7 @@ Note that the current implementation is still subject to certain limitations tha } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. +このクエリは、ブロック番号 8,000,000 を処理した直後に存在していた Challenge エンティティとそれに関連する Application エンティティを返します。 #### 例 @@ -312,15 +312,15 @@ This query will return `Challenge` entities, and their associated `Application` } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. +このクエリは `Challenge` エンティティとそれに関連付けられた `Application` エンティティを返します。これは、指定されたハッシュでブロックを処理した直後に存在していたためです。 -### Fulltext Search Queries +### 全文検索クエリ -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. +フルテキスト検索クエリフィールドは、サブグラフスキーマに追加してカスタマイズできる、表現力豊かなテキスト検索 API を提供します。サブグラフにフルテキスト検索を追加するには、「[Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields)」を参照してください。 -Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. 
+全文検索クエリには、検索語を提供するための必須フィールド `text` が 1 つあります。この `text` 検索フィールドでは、いくつかの特別な全文演算子を使用できます。 -Fulltext search operators: +全文検索演算子: | シンボル | オペレーター | 説明書き | | --- | --- | --- | @@ -329,9 +329,9 @@ Fulltext search operators: | `<->` | `Follow by` | 2 つの単語の間の距離を指定します。 | | `:*` | `プレフィックス` | プレフィックス検索語を使って、プレフィックスが一致する単語を検索します(2 文字必要) | -#### Examples +#### 例 -Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. +`or` 演算子を使用すると、このクエリはフルテキスト フィールドに「anarchism」または「crumpet」のいずれかのバリエーションを持つブログ エンティティにフィルター処理されます。 ```graphql { @@ -344,7 +344,7 @@ Using the `or` operator, this query will filter to blog entities with variations } ``` -The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" +`follow by` 演算子は、フルテキスト ドキュメント内で特定の距離だけ離れた単語を指定します。次のクエリは、"decentralize" の後に "philosophy" が続くすべてのブログを返します。 ```graphql { @@ -357,7 +357,7 @@ The `follow by` operator specifies a words a specific distance apart in the full } ``` -Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". +全文演算子を組み合わせて、より複雑なフィルターを作成します。口実検索演算子を follow by このサンプル クエリと組み合わせて使用​​すると、"lou" で始まり、その後に "music" が続く単語を持つすべてのブログ エンティティが一致します。 ```graphql { @@ -370,27 +370,27 @@ Combine fulltext operators to make more complex filters. With a pretext search o } ``` -### Validation +### 認証 -Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. +グラフ ノードは、受信した GraphQL クエリの [仕様ベース](https://spec.graphql.org/October2021/#sec-Validation) の検証を実装します[graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules),これはに基づいています[graphql-js リファレンス実装](https://github.com/graphql/graphql-js/tree/main/src/validation).検証ルールに失敗したクエリは、標準エラーで失敗します - にアクセスしてください詳細については、[GraphQL 仕様](https://spec.graphql.org/October2021/#sec-Validation)をご覧ください。 ## スキーマ -The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +データ ソースのスキーマ、つまりクエリに使用できるエンティティ タイプ、値、および関係は、[GraphQL インターフェイス定義言語 (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System)。 -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. +GraphQL スキーマは通常、`クエリ`、`サブスクリプション`、および `ミューテーション` のルート タイプを定義します。グラフは `クエリ` のみをサポートします。サブグラフのルート `Query` タイプは、サブグラフ マニフェストに含まれる GraphQL スキーマから自動的に生成されます。 -> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. 
+> **注:** 開発者はアプリケーションから基盤となるブロックチェーンに対して直接トランザクションを発行することが期待されるため、API はミューテーションを公開しません。 -### Entities +### エンティティ -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. +スキーマ内の `@entity` ディレクティブを持つすべての GraphQL タイプはエンティティとして扱われ、 `ID` フィールドが必要です。 -> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. +> **注:** 現在、スキーマ内のすべてのタイプに `@entity` ディレクティブが必要です。将来的には、`@entity` ディレクティブのない型を値オブジェクトとして扱いますが、これはまだサポートされていません。 -### Subgraph Metadata +### サブグラフ メタデータ -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +すべてのサブグラフには、サブグラフ メタデータへのアクセスを提供する、自動生成された `_Meta_` オブジェクトがあります。これは、次のように照会できます。 ```graphQL { @@ -406,14 +406,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +ブロックが提供されている場合、メタデータはそのブロックのものであり、そうでない場合は、最新のインデックス付きブロックが使用されます。提供される場合、ブロックはサブグラフの開始ブロックの後にあり、最後にインデックス付けされたブロック以下でなければなりません。 -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` は、`subgraph.yaml` ファイルの IPFS CID に対応する一意の ID です。 -`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` は、最新のブロックに関する情報を提供します (`_meta` に渡されたブロック制約を考慮します): - hash: ブロックのハッシュ - number: ブロック番号 - timestamp: 可能であれば、ブロックのタイムスタンプ (これは現在、EVMネットワークのインデックスを作成するサブグラフでのみ利用可能) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` は、サブグラフが過去のブロックでインデックス作成エラーに遭遇したかどうかを識別するブール値です diff --git a/website/pages/ja/querying/querying-best-practices.mdx b/website/pages/ja/querying/querying-best-practices.mdx index 0c9e887135a1..5cbb9bba5ebd 100644 --- a/website/pages/ja/querying/querying-best-practices.mdx +++ b/website/pages/ja/querying/querying-best-practices.mdx @@ -67,18 +67,18 @@ query [operationName]([variableName]: [variableType]) { ### GraphQL APIへのクエリの送信 -GraphQL is a language and set of conventions that transport over HTTP. +GraphQLは、HTTPを介して転送される言語と一連の規約です。 -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). 
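As a concrete illustration of the plain-`fetch` option mentioned above, the sketch below POSTs a query to a subgraph endpoint over standard GraphQL-over-HTTP. The endpoint URL is a placeholder and error handling is kept minimal.

```tsx
// Minimal sketch: querying a GraphQL API with the standard fetch API.
// Replace the placeholder URL with your subgraph's query endpoint.
const SUBGRAPH_URL = 'https://example.com/subgraphs/name/your-subgraph' // placeholder

async function fetchTokens() {
  const response = await fetch(SUBGRAPH_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      query: /* GraphQL */ `
        {
          tokens(first: 5) {
            id
            owner
          }
        }
      `,
    }),
  })
  const { data, errors } = await response.json()
  if (errors) throw new Error(JSON.stringify(errors))
  return data.tokens
}
```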
+これは、標準の`fetch`(ネイティブであれば、`@whatwg-node/fetch`や`isomorphic-fetch`を介しても)を使用して、GraphQL APIにクエリを送信できることを意味します。 -However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +ただし、「[アプリケーションからのクエリ](/querying/querying-from-an-application)」で述べたように、以下のような固有の機能をサポートする`graph-client`を使用することをおすすめします。 - クロスチェーンのサブグラフ処理:1回のクエリで複数のサブグラフからクエリを実行可能 - [自動ブロック追跡](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [自動ページング](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - 完全なタイプ付け結果 -Here's how to query The Graph with `graph-client`: +`graph-client` を使用してグラフをクエリする方法は次のとおりです。 ```tsx import { execute } from '../.graphclient' @@ -102,9 +102,9 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +その他の GraphQL クライアントの代替手段については、[「アプリケーションからのクエリ」](/querying/querying-from-an-application) で説明します。 -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +GraphQL クエリ構文の基本ルールを説明したので、今度は GraphQL クエリ記述のベスト プラクティスを見てみましょう。 --- @@ -112,7 +112,7 @@ Now that we covered the basic rules of GraphQL queries syntax, let's now look at ### 常に静的なクエリを記述 -A common (bad) practice is to dynamically build query strings as follows: +一般的な (悪い) プラクティスは、次のようにクエリ文字列を動的に構築することです。 ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... ``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +上記のスニペットは有効な GraphQL クエリを生成しますが、**多くの欠点**があります。 - クエリ全体を**理解するのが難しくなります**。 - 開発者は、**文字列補間を安全にサニタイズする責任がある**ということです。 - リクエストパラメータの一部として変数の値を送信しないでください。**サーバー側でのキャッシュの可能性を防止** - それは ** ツールがクエリを静的に分析するのを防ぐ** (例: Linter、またはタイプ生成ツール) です。 -For this reason, it is recommended to always write queries as static strings: +このため、クエリは常に静的文字列として記述することをお勧めします: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +そうすることで**多くのメリット**がもたらされます: - **読みやすく、メンテナンスしやすい**クエリ - GraphQLの**サーバーは、変数のサニタイズを処理します** - サーバーレベルで**変数がキャッシュできます**。 - **ツールでクエリを静的に分析できる**(これについては、次のセクションで詳しく説明します。) -**Note: How to include fields conditionally in static queries** +**注: 静的クエリに条件付きでフィールドを含める方法** -We might want to include the `owner` field only on a particular condition. +特定の条件でのみ `owner` フィールドを含めることができます。 -For this, we can leverage the `@include(if:...)` directive as follows: +このために、次のように `@include(if:...)` ディレクティブを利用できます: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,21 +191,21 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +注: 反対のディレクティブは `@skip(if: ...)` です。 ### パフォーマンスに関するヒント -**"Ask for what you want"** +**"欲しいものを聞いてください"** -GraphQL became famous for its "Ask for what you want" tagline. +GraphQL は、「欲しいものを聞いてください」というキャッチフレーズで有名になりました。 -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +このため、GraphQLでは、個々にリストすることなくすべての利用可能なフィールドを取得する方法はありません。 -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +GraphQL APIをクエリする際には、実際に使用するフィールドのみをクエリするように常に考えてください。 -A common cause of over-fetching is collections of entities. 
By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. +過剰なデータ取得の一般的な原因は、エンティティのコレクションです。デフォルトでは、クエリはコレクション内のエンティティを100個取得しますが、通常、実際に使用される量(たとえば、ユーザーに表示される量)よりもはるかに多いです。そのため、クエリはほぼ常に`first`を明示的に設定し、実際に必要なだけのエンティティを取得するようにする必要があります。これは、クエリ内のトップレベルのコレクションだけでなく、さらにエンティティのネストされたコレクションにも当てはまります。 -For example, in the following query: +たとえば、次のクエリでは: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +応答には、100 個のトークンごとに 100 個のトランザクションが含まれる可能性があります。 -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +アプリケーションが 10 トランザクションのみを必要とする場合、クエリではトランザクション フィールドに `first: 10` を明示的に設定する必要があります。 -**Combining multiple queries** +**複数のクエリを組み合わせる** -Your application might require querying multiple types of data as follows: +アプリケーションでは、次のように複数の種類のデータをクエリする必要がある場合があります: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +この実装は完全に有効ですが、GraphQL API を使用した 2 つの往復が必要になります。 -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +幸いなことに、次のように同じ GraphQL リクエストで複数のクエリを送信することも有効です: ```graphql import { execute } from "your-favorite-graphql-client" @@ -277,13 +277,13 @@ query GetTokensandCounters { ` ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +このアプローチは、ネットワークに費やす時間を減少させる(APIへの往復を省略する)ため、**全体的なパフォーマンスを向上させます**。また、**より簡潔な実装**を提供します。 ### GraphQLフラグメントの活用 -A helpful feature to write GraphQL queries is GraphQL Fragment. +GraphQL クエリを作成するのに役立つ機能は、GraphQL Fragment です。 -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +次のクエリを見ると、いくつかのフィールドが複数のSelection-Sets(`{ ... }`)で繰り返されていることがわかります: ```graphql query { @@ -303,12 +303,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +このような繰り返しフィールド (`id`、`active`、`status`) は、多くの問題を引き起こします。 - より広範囲なクエリに対応するために読みにくくなります - クエリに基づいて TypeScript 型を生成するツールを使用する場合 (_前のセクションで詳しく説明します_)、`newDelegate` および `oldDelegate` は、2 つの異なるインライン インターフェイスになります。 -A refactored version of the query would be the following: +クエリのリファクタリングされたバージョンは次のようになります: ```graphql query { @@ -332,15 +332,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. +GraphQLの`fragment`を使用すると、可読性が向上します(特に大規模な場合)し、さらにはより良いTypeScriptの型生成にも結びつきます。 -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). 
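To give a rough idea of what the type-generation step produces for the `DelegateItem` fragment above: the exact output depends on your codegen tool and on the subgraph schema, so the field types below are assumptions for illustration only, not the actual generated code.

```tsx
// Assumed shape only — a generator such as GraphQL Code Generator would emit
// something along these lines for the DelegateItem fragment; field types are guesses.
export type DelegateItemFragment = {
  id: string
  active: boolean
  status: string
}
```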
+型生成ツールを使用すると、上記のクエリは適切な`DelegateItemFragment`型を生成します(_最後の「ツール」セクションを参照_)。 ### GraphQLフラグメントの注意点 -**Fragment base must be a type** +**フラグメントベースは型である必要があります** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +フラグメントは、適用できない型、つまり**フィールドを持たない型**に基づくことはできません。 ```graphql fragment MyFragment on BigInt { @@ -348,11 +348,11 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt` は**スカラー** (ネイティブの「プレーン」タイプ) であり、フラグメントのベースとして使用できません。 -**How to spread a Fragment** +**フラグメントを拡散する方法** -Fragments are defined on specific types and should be used accordingly in queries. +フラグメントは特定のタイプに定義されているため、クエリではそれに応じて使用する必要があります。 例: @@ -375,17 +375,17 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` と `oldDelegate` のタイプは `Transcoder` です。 -It is not possible to spread a fragment of type `Vote` here. +ここでタイプ `Vote` のフラグメントを拡散することはできません。 -**Define Fragment as an atomic business unit of data** +**フラグメントをデータのアトミックなビジネス単位として定義する** -GraphQL Fragment must be defined based on their usage. +GraphQL フラグメントは、その使用法に基づいて定義する必要があります。 -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +ほとんどのユースケースでは、1つのタイプに対して1つのフラグメントを定義すること(繰り返しフィールドの使用または型生成の場合)で十分です。 -Here is a rule of thumb for using Fragment: +Fragment を使用する場合の経験則は次のとおりです: - 同じ型のフィールドがクエリ内で繰り返される場合、それらをFragmentでグループ化します。 - 同じフィールドが繰り返される場合、複数のフラグメントを作成します。 @@ -415,31 +415,31 @@ fragment VoteWithPoll on Vote { ### GraphQL ウェブベースのエクスプローラ -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +クエリをアプリケーション内で実行して繰り返しテストするのは手間がかかる場合があります。そのため、クエリをアプリケーションに追加する前に、[The Graph Explorer](https://thegraph.com/explorer)を使用してクエリをテストすることを躊躇しないでください。The Graph Explorerは、クエリをテストするための事前に設定されたGraphQLプレイグラウンドを提供します。 -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +クエリをデバッグやテストするより柔軟な方法を探している場合、[Altair](https://altair.sirmuel.design/)や[GraphiQL](https://graphiql-online.com/graphiql)などの類似のWebベースのツールも利用できます。 ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +上記で述べたベストプラクティスと構文ルールに従うためには、以下のワークフローとIDEツールを使用することを強くお勧めします。 **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) を使用すると、手間をかけずに GraphQL のベスト プラクティスを常に把握できるようになります。 -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +[「operations-recommended」](https://github.com/dotansimha/graphql-eslint#available-configs) 構成をセットアップすると、次のような重要なルールが適用されます。 - `@graphql-eslint/fields-on-correct-type`: フィールドは適切なタイプで使用されているか? - `@graphql-eslint/no-unused variables`: 与えられた変数は未使用のままであるべきか? 
- ともっと -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +これにより、 プレイグラウンドでクエリをテストしたり、本番環境で実行したりせずに**エラーをキャッチできる** ようになります。 ### IDE plugins -**VSCode and GraphQL** +**VSCodeとGraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +The [GraphQL VSCode Extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is a great addition to your development workflow, allowing you to: - 構文の強調表示 - オートコンプリートの提案 @@ -447,15 +447,15 @@ The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemNa - snippets - フラグメントと入力タイプの定義に移動 -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +`graphql-eslint`を使用している場合、[ESLint VSCode拡張機能](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint)はエラーや警告を正しくコード内に表示するために必須です。 -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij および GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQLプラグイン](https://plugins.jetbrains.com/plugin/8097-graphql/)は、以下を提供することで、GraphQLを使用する際のエクスペリエンスを大幅に向上させます。 - 構文の強調表示 - オートコンプリートの提案 - スキーマに対する検証 - snippets -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +詳細は、この[WebStorm の記事](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/)で、プラグインの主な機能をすべて紹介しています。 diff --git a/website/pages/ja/querying/querying-from-an-application.mdx b/website/pages/ja/querying/querying-from-an-application.mdx index 797f5a4c4ef6..500df957a1b6 100644 --- a/website/pages/ja/querying/querying-from-an-application.mdx +++ b/website/pages/ja/querying/querying-from-an-application.mdx @@ -33,11 +33,11 @@ The Graphは、独自のGraphQLクライアント`graph-client`を提供し、 - [自動ページング](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - 完全なタイプ付け結果 -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +さらに、ApolloやURQLなどの人気のあるGraphQLクライアントと統合され、すべての環境(React、Angular、Node.js、React Native)と互換性があり、`graph-client`を使用することで、The Graphとの対話の最良のエクスペリエンスが得られます。 -Let's look at how to fetch data from a subgraph with `graphql-client`. 
+以下は、`graphql-client`を使用してサブグラフからデータを取得する方法を見てみましょう。 -To get started, make sure to install The Graph Client CLI in your project: +始める前に、プロジェクトにThe Graph Client CLIをインストールしてください。 ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +`.graphql` ファイルでクエリを定義します (または、`.js` または `.ts` ファイルにインラインで)。 ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +次に、設定ファイル(`.graphclientrc.yml`と呼ばれる)を作成し、The Graphが提供するGraphQLエンドポイントを指定します。例えば: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +以下のThe Graph Client CLIコマンドを実行すると、型付けされたJavaScriptコードが生成され、すぐに使用できる状態になります。 ```sh graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +最後に、生成された型付きのGraphQLドキュメントを使用するために、`.ts`ファイルを更新してください。 ```tsx import React, { useEffect } from 'react' @@ -134,17 +134,17 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ 重要なお知らせ** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client`は、Apolloクライアント、URQL、React Queryなどの他のGraphQLクライアントと完全に統合されています。公式リポジトリには[例があります](https://github.com/graphprotocol/graph-client/tree/main/examples)。 -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +ただし、別のクライアントを選択する場合は、**クロスチェーンサブグラフ処理や自動ページネーションといった、The Graphのクエリングの中核的な機能を利用できないことに注意してください。** ### Apollo クライアント -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[Apollo Client](https://www.apollographql.com/docs/)は、フロントエンドエコシステムで広く使われているGraphQLクライアントです。 -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +React、Angular、Vue、Ember、iOS、およびAndroid向けに利用可能なApollo Clientは、最も重いクライアントですが、GraphQLを基にした高度なUIを構築するための多くの機能を提供します。 - 高度なエラー処理 - ページネーション @@ -152,7 +152,7 @@ Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, altho - 楽観的な UI - ローカル状態管理 -Let's look at how to fetch data from a subgraph with Apollo client in a web project. 
+Apollo Clientを使用してウェブプロジェクトでサブグラフからデータを取得する方法を見てみましょう。 First, install `@apollo/client` and `graphql`: @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +変数を使用するには、クエリに`variables`引数を渡すことができます: ```javascript const tokensQuery = ` @@ -226,16 +226,16 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +もう一つの選択肢は[URQL](https://formidable.com/open-source/urql/)で、Node.js、React/Preact、Vue、およびSvelteの環境で利用でき、より高度な機能が備わっています。 - 柔軟なキャッシュ システム - Extensible design(新しい機能の追加を容易にする) - Lightweight bundle(Apollo Clientの約5倍の軽さ) - ファイルアップロードとオフラインモードに対応 -Let's look at how to fetch data from a subgraph with URQL in a web project. +URQLを使用してウェブプロジェクトでサブグラフからデータを取得する方法を見てみましょう。 -First, install `urql` and `graphql`: +まず、`urql`と`graphql`をインストールします: ```sh npm install urql graphql diff --git a/website/pages/ja/querying/querying-the-graph.mdx b/website/pages/ja/querying/querying-the-graph.mdx index eba4ad158642..47bb4143483d 100644 --- a/website/pages/ja/querying/querying-the-graph.mdx +++ b/website/pages/ja/querying/querying-the-graph.mdx @@ -19,7 +19,7 @@ title: The Graphのクエリ } ``` -## Using The Graph Explorer +## グラフ エクスプローラーの使用 分散型グラフエクスプローラに公開されているサブグラフには、それぞれ固有のクエリ URL が設定されており、サブグラフの詳細ページに移動し、右上の「クエリ」ボタンをクリックすることで確認できます。 これは、サブグラフの詳細ページに移動し、右上の「クエリ」ボタンをクリックすると、サブグラフの固有のクエリ URL と、そのクエリの方法を示すサイドペインが表示されます。 diff --git a/website/pages/ja/querying/querying-the-hosted-service.mdx b/website/pages/ja/querying/querying-the-hosted-service.mdx index f782ad8fb4ba..dfa22ac5f424 100644 --- a/website/pages/ja/querying/querying-the-hosted-service.mdx +++ b/website/pages/ja/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: ホスティングサービスのクエリ --- -サブグラフがデプロイされた状態で、[Graph Explorer](https://thegraph.com/hosted-service/)にアクセスすると、[GraphiQL](https://github.com/graphql/graphiql)インターフェースが表示され、サブグラフにデプロイされた GraphQL API を探索して、クエリを発行したり、スキーマを表示したりすることができます。 +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. 以下に例を示しますが、サブグラフのエンティティへのクエリの方法については、[Query API](/querying/graphql-api)を参照してください。 @@ -19,9 +19,9 @@ title: ホスティングサービスのクエリ } ``` -## Using The Hosted Service +## Using the hosted service -Graph Explorer とその GraphQL playground は、ホステッドサービス上に展開されたサブグラフを探索したり、クエリするのに便利な機能です。 +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 主な機能を以下にご紹介します: diff --git a/website/pages/ja/querying/querying-with-python.mdx b/website/pages/ja/querying/querying-with-python.mdx new file mode 100644 index 000000000000..9f6914483d5b --- /dev/null +++ b/website/pages/ja/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. 
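To make the automated-pagination claim concrete, here is a minimal sketch of how it looks in practice. It reuses the same Aave v2 subgraph endpoint as the example further down this page, but the `deposits` field and its `timestamp`/`amountUSD` attributes are assumptions based on the Messari standard lending schema rather than something stated in this document — treat them as placeholders for entities from your own subgraph.

```python
from subgrounds import Subgrounds

sg = Subgrounds()

# Same Aave v2 subgraph used in the example below on this page.
aave_v2 = sg.load_subgraph(
    "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum")

# Asking for 2,500 deposit events exceeds what a single GraphQL request
# can return, so Subgrounds transparently splits the call into several
# paginated requests and stitches the results back together.
recent_deposits = aave_v2.Query.deposits(
    orderBy=aave_v2.Deposit.timestamp,
    orderDirection='desc',
    first=2500,
)

# Results come back as a pandas DataFrame, ready for analysis.
df = sg.query_df([
    recent_deposits.timestamp,
    recent_deposits.amountUSD,
])
print(df.describe())
```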
+ +## はじめに + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/ja/quick-start.mdx b/website/pages/ja/quick-start.mdx new file mode 100644 index 000000000000..208227db71b5 --- /dev/null +++ b/website/pages/ja/quick-start.mdx @@ -0,0 +1,167 @@ +--- +title: クイックスタート +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +このガイドは、次のことを前提として書かれています。 + +- 選択したネットワーク上のスマート コントラクト アドレス +- サブグラフをキュレートする GRT +- クリプトウォレット + +## 1. Subgraph Studio でサブグラフを作成する + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +接続したら、「サブグラフの作成」をクリックして開始できます。選択したネットワークを選択し、[続行] をクリックします。 + +## 2. Graph CLI をインストールする + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +ローカル マシンで、次のいずれかのコマンドを実行します。 + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. サブグラフの初期化 + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). 
+ +サブグラフを初期化すると、CLI ツールは次の情報を要求します。 + +- プロトコル: サブグラフがデータのインデックスを作成するプロトコルを選択します +- サブグラフ スラッグ: サブグラフの名前を作成します。サブグラフ スラッグは、サブグラフの識別子です。 +- サブグラフを作成するディレクトリ: ローカル ディレクトリを選択します +- Ethereum ネットワーク (オプション): サブグラフがデータのインデックスを作成する EVM 互換ネットワークを指定する必要がある場合があります。 +- コントラクト アドレス: データを照会するスマート コントラクト アドレスを見つけます。 +- ABI: ABI が自動入力されない場合は、JSON ファイルとして手動で入力する必要があります +- 開始ブロック: サブグラフがブロックチェーン データをインデックス化する間、時間を節約するために開始ブロックを入力することをお勧めします。コントラクトが展開されたブロックを見つけることで、開始ブロックを見つけることができます。 +- 契約名: 契約の名前を入力します +- コントラクト イベントをエンティティとしてインデックス付けする: これを true に設定することをお勧めします。発行されたすべてのイベントのサブグラフにマッピングが自動的に追加されるためです。 +- 別の契約を追加 (オプション): 別の契約を追加できます + +次のコマンドを実行して、既存のコントラクトからサブグラフを初期化します。 + +```sh +graph init --studio +``` + +サブグラフを初期化する際に予想されることの例については、次のスクリーンショットを参照してください。 + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. サブグラフを書く + +前述のコマンドでは、サブグラフを作成するための出発点として使用できる scaffold サブグラフを作成します。 サブグラフに変更を加える際には、主に 3 つのファイルを使用します: + +- マニフェスト (subgraph.yaml) - マニフェストは、サブグラフがインデックスするデータソースを定義します。 +- スキーマ (schema.graphql) - GraphQL スキーマは、サブグラフから取得するデータを定義します. +- AssemblyScript Mappings (mapping.ts) - データソースからのデータを、スキーマで定義されたエンティティに変換するコードです。 + +サブグラフの書き方については、[サブグラフの作成](/developing/creating-a-subgraph) をご覧ください。 + +## 5. Subgraph Studio にデプロイする + +サブグラフが作成されたら、次のコマンドを実行します。 + +```sh +$ graph codegen +$ graph build +``` + +- サブグラフの認証とデプロイを行います。 デプロイキーは、Subgraph Studio の Subgraph ページにあります。 + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. サブグラフをテストする + +プレイグラウンド セクションでサンプル クエリを作成して、サブグラフをテストできます。 + +ログは、サブグラフにエラーがあるかどうかを示します。運用サブグラフのログは次のようになります。 + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. サブグラフをグラフの分散型ネットワークに公開する + +サブグラフが Subgraph Studio にデプロイされたら、それをテストして、本番環境に配置する準備ができたら、それを分散ネットワークに公開できます。 + +Subgraph Studio で、サブグラフをクリックします。サブグラフのページでは、右上の公開ボタンをクリックできます。 + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +サブグラフにクエリを実行する前に、インデクサーはサブグラフに対するクエリの提供を開始する必要があります。このプロセスを合理化するために、GRT を使用して独自のサブグラフをキュレートできます。 + +執筆時点では、独自のサブグラフを 10,000 GRT でキュレートして、できるだけ早くインデックスを作成してクエリに使用できるようにすることをお勧めします。 + +ガスのコストを節約するために、サブグラフを The Graph の分散型ネットワークに公開するときにこのボタンを選択すると、公開したのと同じトランザクションでサブグラフをキュレートできます。 + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. 
サブグラフをクエリする + +これで、GraphQL クエリをサブグラフのクエリ URL に送信することで、サブグラフにクエリを実行できます。これは、クエリ ボタンをクリックして見つけることができます。 + +API キーを持っていない場合は、開発とステージングに使用できる無料のレート制限された一時クエリ URL を介して、dapp からクエリを実行できます。 + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ja/release-notes/graphql-validations-migration-guide.mdx b/website/pages/ja/release-notes/graphql-validations-migration-guide.mdx index 95c613525383..a31e06102542 100644 --- a/website/pages/ja/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/ja/release-notes/graphql-validations-migration-guide.mdx @@ -1,62 +1,62 @@ --- -title: GraphQL Validations migration guide +title: GraphQL 検証移行ガイド --- -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). +まもなく「graph-node」は [GraphQL Validations 仕様](https://spec.graphql.org/June2018/#sec-Validation) を 100% カバーします。 -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. +以前のバージョンの「graph-node」は、すべての検証をサポートしておらず、より適切な応答を提供していました。そのため、あいまいな場合、「graph-node」は無効な GraphQL 操作コンポーネントを無視していました。 -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. +GraphQL Validations サポートは、今後の新機能と The Graph Network の大規模なパフォーマンスの柱です。 -It will also ensure determinism of query responses, a key requirement on The Graph Network. +また、The Graph Network の重要な要件であるクエリ応答の決定性も保証されます。 -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. +**GraphQL Validations を有効にすると、The Graph API に送信された既存のクエリの一部が壊れます**。 -To be compliant with those validations, please follow the migration guide. +これらの検証に準拠するには、移行ガイドに従ってください。 -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. +> ⚠️ 検証がロールアウトされる前にクエリを移行しないと、エラーが返され、フロントエンド/クライアントが壊れる可能性があります。 -## Migration guide +## 移行ガイド -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. +CLI 移行ツールを使用して、GraphQL 操作の問題を見つけて修正できます。または、GraphQL クライアントのエンドポイントを更新して、`https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` エンドポイントを使用することもできます。このエンドポイントに対してクエリをテストすると、クエリの問題を見つけるのに役立ちます。 -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. +> [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) または [GraphQL Code Generator](https://the-guild.dev) を使用している場合、すべてのサブグラフを移行する必要はありません。 /graphql/codegen)、クエリが有効であることを既に確認しています。 -## Migration CLI tool +## 移行 CLI ツール -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** +**GraphQL 操作エラーのほとんどは、事前にコードベースで見つけることができます。** -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. 
+このため、開発中または CI で GraphQL 操作を検証するためのスムーズなエクスペリエンスを提供します。 -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) は、特定のスキーマに対して GraphQL 操作を検証するのに役立つシンプルな CLI ツールです。 -### **Getting started** +### **入門** -You can run the tool as follows: +ツールは次のように実行できます。 ```bash npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql ``` -**Notes:** +**ノート:** -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). +- $GITHUB_USER、$SUBGRAPH_NAME を適切な値に設定または置き換えます。のように: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- 提供されているプレビュー スキーマ URL (https://api-next.thegraph.com/) は大幅にレート制限されており、すべてのユーザーが新しいバージョンに移行すると廃止されます。 **本番環境では使用しないでください。** +- 操作は、次の拡張子を持つファイルで識別されます [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` オプション)。 -### CLI output +### CLI 出力 -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: +`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI ツールは、GraphQL 操作エラーを次のように出力します。 -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) +![CLIからのエラー出力](https://i.imgur.com/x1cBdhq.png) -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). +エラーごとに、説明、ファイル パスと位置、および解決例へのリンクが表示されます (次のセクションを参照)。 -## Run your local queries against the preview schema +## プレビュー スキーマに対してローカル クエリを実行する -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. +検証がオンになっている「graph-node」バージョンを実行するエンドポイント「https://api-next.thegraph.com/」を提供します。 -You can try out queries by sending them to: +クエリを次の宛先に送信して試すことができます。 - `https://api-next.thegraph.com/subgraphs/id/` @@ -64,23 +64,23 @@ You can try out queries by sending them to: - `https://api-next.thegraph.com/subgraphs/name//` -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. +検証エラーがあるとフラグが立てられたクエリを処理するには、Altair や [GraphiQL](https://cloud.hasura.io/public/graphiql) などの好きな GraphQL クエリ ツールを使用して、クエリを試してみてください。これらのツールは、実行前であっても、UI でこれらのエラーをマークします。 -## How to solve issues +## 問題を解決する方法 -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. 
+以下に、既存の GraphQL 操作で発生する可能性があるすべての GraphQL 検証エラーを示します。 -### GraphQL variables, operations, fragments, or arguments must be unique +### GraphQL の変数、操作、フラグメント、または引数は一意である必要があります -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. +操作に GraphQL 変数、操作、フラグメント、および引数の一意のセットが含まれるようにするためのルールを適用しました。 -A GraphQL operation is only valid if it does not contain any ambiguity. +GraphQL 操作は、あいまいさが含まれていない場合にのみ有効です。 -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. +これを実現するには、GraphQL 操作の一部のコンポーネントが一意でなければならないことを確認する必要があります。 -Here's an example of a few invalid operations that violates these rules: +これらの規則に違反するいくつかの無効な操作の例を次に示します。 -**Duplicate Query name (#UniqueOperationNamesRule)** +**クエリ名が重複しています (#UniqueOperationNamesRule)** ```graphql # The following operation violated the UniqueOperationName @@ -95,7 +95,7 @@ query myData { } ``` -_Solution:_ +_解決:_ ```graphql query myData { @@ -108,7 +108,7 @@ query myData2 { } ``` -**Duplicate Fragment name (#UniqueFragmentNamesRule)** +**フラグメント名の重複 (#UniqueFragmentNamesRule)** ```graphql # The following operation violated the UniqueFragmentName @@ -127,7 +127,7 @@ fragment MyFields { } ``` -_Solution:_ +_解決:_ ```graphql query myData { @@ -145,7 +145,7 @@ fragment MyFieldsName { # assign a unique name to fragment } ``` -**Duplicate variable name (#UniqueVariableNamesRule)** +**重複した変数名 (#UniqueVariableNamesRule)** ```graphql # The following operation violates the UniqueVariables @@ -155,7 +155,7 @@ query myData($id: String, $id: Int) { } ``` -_Solution:_ +_解決:_ ```graphql query myData($id: String) { @@ -165,7 +165,7 @@ query myData($id: String) { } ``` -**Duplicate argument name (#UniqueArgument)** +**引数名が重複しています (#UniqueArgument)** ```graphql # The following operation violated the UniqueArguments @@ -176,7 +176,7 @@ query myData($id: ID!) { } ``` -_Solution:_ +_解決:_ ```graphql query myData($id: ID!) { @@ -186,9 +186,9 @@ query myData($id: ID!) { } ``` -**Duplicate anonymous query (#LoneAnonymousOperationRule)** +**重複した匿名クエリ (#LoneAnonymousOperationRule)** -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: +また、2 つの匿名操作を使用すると、応答構造の競合により、「LoneAnonymousOperation」ルールに違反します。 ```graphql # This will fail if executed together in @@ -202,7 +202,7 @@ query { } ``` -_Solution:_ +_解決:_ ```graphql query { @@ -211,7 +211,7 @@ query { } ``` -Or name the two queries: +または、2 つのクエリに名前を付けます。 ```graphql query FirstQuery { @@ -223,15 +223,15 @@ query SecondQuery { } ``` -### Overlapping Fields +### 重複するフィールド -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. +GraphQL 選択セットは、最終的な結果セットを正しく解決する場合にのみ有効と見なされます。 -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
+特定の選択セットまたはフィールドが、選択されたフィールドまたは使用された引数のいずれかによってあいまいさを生み出す場合、GraphQL サービスは操作の検証に失敗します。 -Here are a few examples of invalid operations that violate this rule: +この規則に違反する無効な操作の例をいくつか示します。 -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** +**競合するフィールド エイリアス (#OverlappingFieldsCanBeMergedRule)** ```graphql # Aliasing fields might cause conflicts, either with @@ -245,7 +245,7 @@ query { } ``` -_Solution:_ +_解決:_ ```graphql query { @@ -256,7 +256,7 @@ query { } ``` -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** +**引数を持つフィールドの競合 (#OverlappingFieldsCanBeMergedRule)** ```graphql # Different arguments might lead to different data, @@ -269,7 +269,7 @@ query { } ``` -_Solution:_ +_解決:_ ```graphql query { @@ -280,7 +280,7 @@ query { } ``` -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: +また、より複雑なユースケースでは、最終的に予想されるセットで競合を引き起こす可能性のある 2 つのフラグメントを使用して、この規則に違反する可能性があります。 ```graphql query { @@ -299,7 +299,7 @@ fragment B on Type { } ``` -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: +それに加えて、`@skip` や `@include` などのクライアント側の GraphQL ディレクティブは、あいまいさにつながる可能性があります。次に例を示します。 ```graphql fragment mergeSameFieldsWithSameDirectives on Dog { @@ -308,15 +308,15 @@ fragment mergeSameFieldsWithSameDirectives on Dog { } ``` -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) +[アルゴリズムの詳細については、こちらをご覧ください](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) -### Unused Variables or Fragments +### 未使用の変数またはフラグメント -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. +GraphQL 操作も、操作で定義されたすべてのコンポーネント (変数、フラグメント) が使用されている場合にのみ有効と見なされます。 -Here are a few examples for GraphQL operations that violates these rules: +これらのルールに違反する GraphQL 操作の例をいくつか示します: -**Unused variable** (#NoUnusedVariablesRule) +**未使用の変数** (#NoUnusedVariablesRule) ```graphql # Invalid, because $someVar is never used. @@ -325,7 +325,7 @@ query something($someVar: String) { } ``` -_Solution:_ +_解決:_ ```graphql query something { @@ -333,7 +333,7 @@ query something { } ``` -**Unused Fragment** (#NoUnusedFragmentsRule) +**未使用のフラグメント** (#NoUnusedFragmentsRule) ```graphql # Invalid, because fragment AllFields is never used. @@ -347,7 +347,7 @@ fragment AllFields { # unused :( } ``` -_Solution:_ +_解決:_ ```graphql # Invalid, because fragment AllFields is never used. @@ -358,14 +358,14 @@ query something { # remove the `AllFields` fragment ``` -### Invalid or missing Selection-Set (#ScalarLeafsRule) +### 無効または欠落している選択セット (#ScalarLeafsRule) -Also, a GraphQL field selection is only valid if the following is validated: +また、GraphQL フィールドの選択は、以下が検証された場合にのみ有効です: -- An object field must-have selection set specified. -- An edge field (scalar, enum) must not have a selection set specified. 
+- オブジェクト フィールドには選択セットが指定されている必要があります。 +- エッジ フィールド (スカラー、列挙型) には、選択セットが指定されていてはなりません。 -Here are a few examples of violations of these rules with the following Schema: +次のスキーマでこれらの規則に違反する例をいくつか示します: ```graphql schema { @@ -384,7 +384,7 @@ schema { } ``` -**Invalid Selection-Set** +**無効な選択セット** ```graphql query { @@ -396,7 +396,7 @@ query { } ``` -_Solution:_ +_解決:_ ```graphql query { @@ -406,7 +406,7 @@ query { } ``` -**Missing Selection-Set** +**選択セットがありません** ```graphql query { @@ -417,7 +417,7 @@ query { } ``` -_Solution:_ +_解決:_ ```graphql query { @@ -430,11 +430,11 @@ query { } ``` -### Incorrect Arguments values (#VariablesInAllowedPositionRule) +### 引数の値が正しくない (#VariablesInAllowedPositionRule) -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. +ハードコーディングされた値を引数に渡す GraphQL 操作は、スキーマで定義された値に基づいて有効である必要があります。 -Here are a few examples of invalid operations that violate these rules: +これらの規則に違反する無効な操作の例をいくつか示します: ```graphql query purposes { @@ -457,22 +457,22 @@ query purposes($name: Int!) { } ``` -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) +### 不明な型、変数、フラグメント、またはディレクティブ (#UnknownX) -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. +不明なタイプ、変数、フラグメント、またはディレクティブが使用されている場合、GraphQL API はエラーを発生させます。 -Those unknown references must be fixed: +これらの不明な参照は修正する必要があります: -- rename if it was a typo -- otherwise, remove +- タイプミスだった場合の名前の変更 +- それ以外の場合は、削除します -### Fragment: invalid spread or definition +### フラグメント: 無効なスプレッドまたは定義 -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** +**無効なフラグメント スプレッド (#PossibleFragmentSpreadsRule)** -A Fragment cannot be spread on a non-applicable type. +Fragment は、適用できない型に展開できません。 -Example, we cannot apply a `Cat` fragment to the `Dog` type: +たとえば、`Cat` フラグメントを `Dog` タイプに適用することはできません。 ```graphql query { @@ -486,14 +486,14 @@ fragment CatSimple on Cat { } ``` -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** +**無効なフラグメント定義 (#FragmentsOnCompositeTypesRule)** -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. +すべての Fragment は、(`on ...` を使用して) 複合型、つまり、オブジェクト、インターフェイス、またはユニオンで定義する必要があります。 -The following examples are invalid, since defining fragments on scalars is invalid. +スカラーでのフラグメントの定義は無効であるため、次の例は無効です。 ```graphql -fragment fragOnScalar on Int { +ragment fragOnScalar on Int { # we cannot define a fragment upon a scalar (`Int`) something } @@ -506,13 +506,13 @@ fragment inlineFragOnScalar on Dog { } ``` -### Directives usage +### ディレクティブの使用 -**Directive cannot be used at this location (#KnownDirectivesRule)** +**ディレクティブはこの場所では使用できません (#KnownDirectivesRule)** -Only GraphQL directives (`@...`) supported by The Graph API can be used. +The Graph API でサポートされている GraphQL ディレクティブ (`@...`) のみを使用できます。 -Here is an example with The GraphQL supported directives: +以下は、GraphQL がサポートするディレクティブの例です: ```graphql query { @@ -523,13 +523,13 @@ query { } ``` -_Note: `@stream`, `@live`, `@defer` are not supported._ +\_注: `@stream`、`@live`、`@defer` はサポートされていません。 -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** +**ディレクティブは、この場所で 1 回だけ使用できます (#UniqueDirectivesPerLocationRule)** -The directives supported by The Graph can only be used once per location. 
+The Graph でサポートされているディレクティブは、場所ごとに 1 回だけ使用できます。 -The following is invalid (and redundant): +以下は無効です (そして冗長です): ```graphql query { diff --git a/website/pages/ja/substreams.mdx b/website/pages/ja/substreams.mdx index 97aa76b701ea..83139831b10c 100644 --- a/website/pages/ja/substreams.mdx +++ b/website/pages/ja/substreams.mdx @@ -2,8 +2,43 @@ title: サブストリーム --- -サブストリームは、The Graph プロトコルのコア開発者が開発した新技術で、インデックス付きのブロックチェーンデータの極めて高速な消費と処理を可能にするために構築されたものです。サブストリームは現在オープンベータ版で、複数のブロックチェーンでテストと開発が可能です。 +![Substreams Logo](/img/substreams-logo.png) -[substreams documentation](https://substreams.streamingfast.io/) で詳細を確認し、サブストリームの構築を開始してください。 +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### はじめに + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ja/sunrise.mdx b/website/pages/ja/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ja/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? 
+ +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. 
This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. 
This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ja/tokenomics.mdx b/website/pages/ja/tokenomics.mdx index 214e60c950fd..384065acd920 100644 --- a/website/pages/ja/tokenomics.mdx +++ b/website/pages/ja/tokenomics.mdx @@ -11,13 +11,13 @@ description: The Graph Networkは、強力なトークノミクスでインセ B2B2C モデルに似ていますが、参加者の分散型ネットワークによって強化されています。ネットワーク参加者は協力して、GRT 報酬と引き換えにエンド ユーザーにデータを提供します。 GRT は、データのプロバイダーとコンシューマーを調整するワーク ユーティリティ トークンです。 GRT は、ネットワーク内のデータ プロバイダーとコンシューマーを調整するためのユーティリティとして機能し、プロトコル参加者がデータを効果的に整理するように動機付けます。 -The Graph を使用することで、ユーザーはブロックチェーンのデータに簡単にアクセスでき、必要な特定の情報に対してのみ料金を支払うことができます。グラフは、今日の web3 エコシステムの多くの[人気のあるアプリケーション](https://thegraph.com/explorer)で使用されています。 +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. 
The Graphは、Googleがウェブをインデックスするのと同様に、ブロックチェーンデータをインデックスします。実際、あなたは気づかないうちにすでにThe Graphを利用しているかもしれません。サブグラフからデータを取得するダップのフロントエンドを見たことがあるなら、サブグラフからデータを照会したことになります。 グラフは、ブロックチェーンのデータをより身近なものにし、その交換のためのマーケットプレイスを実現するという重要な役割を担っています。 -## The Roles of Network Participants +## ネットワーク参加者の役割 主なネットワーク参加者は4つの種類があります: @@ -33,7 +33,7 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 ![トークノミクス図](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## 委任者 (受動的に GRT を獲得) インデクサーは、ネットワーク上のサブグラフでインデクサーのステークを増やす委任者によって GRT を委任されます。その見返りとして、委任者はすべてのクエリ料金とインデクサーからのインデックス作成報酬の一定の割合を獲得します。各インデクサーは、委任者に報いるカットを個別に設定し、委任者を引き付けるためにインデクサー間の競争を生み出します。ほとんどのインデクサーは、年間 9 ~ 12% のオファーを提供しています @@ -43,7 +43,7 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 これを読んでいる方は、[ネットワーク参加者ページ](https://thegraph.com/explorer/participants/indexers)にアクセスして、GRTを自分の選んだインデクサーに委任することで、今すぐデリゲーターになることができるのです。 -## Curators (Earn GRT) +## キュレーター (GRT を獲得) キュレーターは、高品質のサブグラフを特定し、それを「キュレーション」(GRTシグナルを送ること)することで、そのサブグラフから将来発生する全てのクエリ料の一定割合を保証するキュレーションシェアを獲得します。独立したネットワーク参加者であれば誰でもキュレーターになることができますが、通常、サブグラフの開発者は、自分のサブグラフがインデックスされることを保証したいため、自分のサブグラフの最初のキュレーターの一人になります。 @@ -51,17 +51,17 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 キュレーターは、新しいサブグラフをキュレーションするときに1%のキュレーション税を支払います。このキュレーション税はバーンされ、GRTの供給量を減少させます。 -## Developers +## 開発者 ディベロッパーは、ブロックチェーンのデータを取得するためにサブグラフを構築し、クエリを実行します。サブグラフはオープンソースであるため、ディベロッパーは既存のサブグラフに問い合わせを行い、ブロックチェーンのデータを自分のDappsにロードすることができます。ディベロッパーは、クエリを実行する際に、ネットワーク参加者に配布されるGRTで支払いを行う -### Creating a subgraph +### サブグラフの作成 ディベロッパーは[サブグラフ](/developing/creating-a-subgraph/)を作成し、ブロックチェーン上のデータをインデックス化することができます。サブグラフは、どのデータをコンシューマーに提供すべきかについて、インデクサーに指示するものです。 サブグラフの構築とテストが完了したら、開発者は[The Graphの分散型ネットワークでサブグラフを公開することができます](/publishing/publishing-a-subgraph/)。 -### Querying an existing subgraph +### 既存のサブグラフのクエリ サブグラフが The Graph の分散型ネットワークに[公開](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/)されると、誰でも API キーを作成し、 GRT を請求残高に追加し、サブグラフをクエリします。 @@ -69,13 +69,13 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 ネットワークに支払われるクエリーフィーの1%がバーンされます。 -## Indexers (Earn GRT) +## インデクサー (GRT を獲得) インデクサーは、The Graphのバックボーンです。彼らは、The Graphの分散型ネットワークを支える独立したハードウェアとソフトウェアを操作しています。インデクサーは、サブグラフからの指示に基づいてコンシューマーにデータを提供します。 インデクサーは、2つの方法でGRT報酬を獲得することができます: -1. クエリ料:サブグラフデータのクエリに対して、開発者またはユーザが支払うGRT。クエリ料はリベートプールに預けられ、インデクサに分配されます。 +1. クエリ料金:開発者やユーザーがサブグラフデータのクエリに支払うGRTのことです。クエリ料金は、指数的リベート関数に従って直接インデクサーに分配されます(GIPは[こちら](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)を参照)。 2. 
インデックス作成報酬:年間3%の発行額は、インデックスを作成しているサブグラフの数に応じて、インデクサーに分配されます。この報酬は、インデクサーのインセンティブとなり、時にはクエリ料が機能する前にサブグラフのインデックスを作成し、データを正確にインデックスしたことを証明するPOI(Proofs of Indexing)を蓄積して提出します。 @@ -87,7 +87,7 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 インデクサーが受け取る報酬の量は、最初のステーク、受け入れられた委任、サービスの質、その他多くの要因に基づいて変化する可能性があります。以下のグラフは、The Graphの分散型ネットワーク上でアクティブなインデクサーから公開されたデータです。 -### The Indexer stake & reward of allnodes-com.eth +### インデクサーステーク&allnodes-com.ethの報酬 ![インデキシングステークと報酬](/img/indexing-stake-and-income.png) @@ -95,7 +95,7 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 > なお、[Arbitrumの移行](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551)が完了すると改善され、ガス代はネットワークに参加するための負担が大幅に軽減されることになります。 -## Token Supply: Burning & Issuance +## トークン供給: 燃焼& 発行 最初のトークン供給量は100億GRTで、サブグラフにステークを割り当てるIndexerに報いるため、毎年3%の新規発行を目標としています。つまり、GRTトークンの総供給量は、ネットワークへの貢献に対してインデクサーに新しいトークンが発行されることにより、毎年3%ずつ増加することになります。 @@ -105,6 +105,6 @@ The Graphは、Googleがウェブをインデックスするのと同様に、 このような定期的に発生するバーニング活動に加え、GRTトークンには、インデクサーの悪意ある行動や無責任な行動にペナルティを与えるスラッシングの仕組みも用意されています。インデクサーがスラッシングされた場合、そのエポックのインデックス報酬の50%が焼却され(残りの半分は漁師に渡る)、自己ステークは2.5%スラッシングされ、その半分がバーンされます。これにより、インデクサーはネットワークの最善の利益のために行動し、そのセキュリティと安定性に貢献するという強いインセンティブを確保することができます。 -## Improving the Protocol +## プロトコルの改善 グラフネットワークは常に進化しており、プロトコルの経済的な設計の改善は、すべてのネットワーク参加者に最高の体験を提供するために常に行われています。グラフ評議会はプロトコルの変更を監督し、コミュニティメンバーも参加することが推奨されています。[グラフフォーラム](https://forum.thegraph.com/)で、プロトコルの改良に参加してください。 diff --git a/website/pages/ko/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ko/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..fa428219ae8e 100644 --- a/website/pages/ko/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ko/arbitrum/l2-transfer-tools-faq.mdx @@ -2,314 +2,410 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### L2 전송 도구란 무엇입니까? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. 
+These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. -## Subgraph Transfer +### 이더리움 메인넷에서 사용하는 지갑을 그대로 사용할 수 있나요? -## How do I transfer my subgraph? +당신이 사용하는 경우 [EOA](https://ethereum.org/ko/개발자/문서/계정/#계정-유형) 만약 당신의 이더리움 메인넷 지갑이 단순한 지갑이라면, 당신은 동일한 주소를 사용할 수 있습니다. 만약 당신의 이더리움 메인넷 지갑이 계약(예: 다중 서명 지갑)인 경우, 당신은 당신의 이체가 전송될 Arbitrum 지갑 주소: /arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2를 지정해야 합니다. 잘못된 주소로의 이체는 영구적인 손실을 초래할 수 있으므로 주소를 주의깊게 확인하십시오. 만약 당신이 L2에서 다중 서명 지갑을 사용하고 싶다면, 반드시 Arbitrum One에 다중 서명 계약을 배포하십시오. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### 만약 7일 안에 이체를 완료하지 못하면 어떻게 되나요? -3. Confirm subgraph transfer on Arbitrum\* +L2 전송 도구는 Arbitrum의 기본 메커니즘을 사용하여 L1에서 L2로 메시지를 보냅니다. 이 메커니즘은 "재시도 가능한 티켓"이라고 하며 Arbitrum GRT 브리지를 포함한 모든 네이티브 토큰 브리지를 사용하여 사용됩니다. 재시도 가능한 티켓에 대해 자세히 읽을 수 있습니다 [Arbitrum 문서](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +자산(하위 그래프, 스테이크, 위임 또는 큐레이션) 을 L2로 이전하면 L2에서 재시도 가능한 티켓을 생성하는 Arbitrum GRT 브리지를 통해 메시지가 전송됩니다. 전송 도구에는 거래에 일부 ETH 값이 포함되어 있으며, 이는 1) 티켓 생성 비용을 지불하고 2) L2에서 티켓을 실행하기 위해 가스 비용을 지불하는 데 사용됩니다. 그러나 티켓이 L2에서 실행될 준비가 될 때까지 가스 가격이 시간에 따라 달라질 수 있으므로 이 자동 실행 시도가 실패할 수 있습니다. 그런 일이 발생하면 Arbitrum 브릿지는 재시도 가능한 티켓을 최대 7일 동안 유지하며 누구나 티켓 "사용"을 재시도할 수 있습니다(Arbitrum에 브릿지된 일부 ETH가 있는 지갑이 필요함). -5. Update Query URL (recommended) +이것이 모든 전송 도구에서 '확인' 단계라고 부르는 것입니다. 자동 실행이 성공하는 경우가 가장 많기 때문에 대부분의 경우 자동으로 실행되지만 제대로 진행되었는지 다시 확인하는 것이 중요합니다. 성공하지 못하고 7일 이내에 성공적인 재시도가 없으면 Arbitrum 브릿지는 티켓을 폐기하며 귀하의 자산(하위 그래프, 지분, 위임 또는 큐레이션)은 손실되어 복구할 수 없습니다. Graph 코어 개발자는 이러한 상황을 감지하고 너무 늦기 전에 티켓을 교환하기 위해 모니터링 시스템을 갖추고 있지만 전송이 제 시간에 완료되도록 하는 것은 궁극적으로 귀하의 책임입니다. 거래를 확인하는 데 문제가 있는 경우 [이 양식을 사용하여 문의하세요](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) 핵심 개발자들이 도와드릴 것입니다. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. 
+ +## 하위 그래프 전송 + +### 내 서브그래프를 어떻게 이전하나요? + + + +동영상YouTube 삽입 + +1. 이더리움 메인넷에서 전송 시작 + +2. 확인을 위해 20분 정도 기다리세요 + +3. Arbitrum에서 하위 그래프 전송 확인\* + +4. Arbitrum에 하위 그래프 게시 완료 + +5. 쿼리 URL 업데이트(권장) \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### 어디에서 이전을 시작해야 합니까? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +[Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) 또는 하위 그래프 세부정보 페이지에서 전송을 시작할 수 있습니다. 하위 그래프 세부 정보 페이지에서 "하위 그래프 전송" 버튼을 클릭하여 전송을 시작하세요. -## How long do I need to wait until my subgraph is transferred +### 내 하위 그래프가 전송될 때까지 얼마나 기다려야 합니까? -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +환승 시간은 약 20분 정도 소요됩니다. Arbitrum 브리지는 브리지 전송을 자동으로 완료하기 위해 백그라운드에서 작동하고 있습니다. 경우에 따라 가스 비용이 급증할 수 있으며 거래를 다시 확인해야 합니다. -## Will my subgraph still be discoverable after I transfer it to L2? +### 내 하위 그래프를 L2로 전송한 후에도 계속 검색할 수 있나요? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +귀하의 하위 그래프는 해당 하위 그래프가 게시된 네트워크에서만 검색 가능합니다. 예를 들어, 귀하의 하위 그래프가 Arbitrum One에 있는 경우 Arbitrum One의 Explorer에서만 찾을 수 있으며 Ethereum에서는 찾을 수 없습니다. 올바른 네트워크에 있는지 확인하려면 페이지 상단의 네트워크 전환기에서 Arbitrum One을 선택했는지 확인하세요. 이전 후 L1 하위 그래프는 더 이상 사용되지 않는 것으로 표시됩니다. -## Does my subgraph need to be published to transfer it? +### 내 하위 그래프를 전송하려면 게시해야 합니까? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +하위 그래프 전송 도구를 활용하려면 하위 그래프가 이미 이더리움 메인넷에 게시되어 있어야 하며 하위 그래프를 소유한 지갑이 소유한 일부 큐레이션 신호가 있어야 합니다. 하위 그래프가 게시되지 않은 경우 Arbitrum One에 직접 게시하는 것이 좋습니다. 관련 가스 요금은 상당히 낮아집니다. 게시된 하위 그래프를 전송하고 싶지만 소유자 계정이 이에 대한 신호를 큐레이팅하지 않은 경우 해당 계정에서 소액(예: 1 GRT)을 신호로 보낼 수 있습니다. "자동 마이그레이션" 신호를 선택했는지 확인하세요. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Arbitrum으로 이전한 후 내 서브그래프의 이더리움 메인넷 버전은 어떻게 되나요? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. 
However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +귀하의 하위 그래프를 Arbitrum으로 이전한 후에는 Ethereum 메인넷 버전이 더 이상 사용되지 않습니다. 48시간 이내에 쿼리 URL을 업데이트하는 것이 좋습니다. 그러나 타사 dapp 지원이 업데이트될 수 있도록 메인넷 URL이 작동하도록 유지하는 유예 기간이 있습니다. -## After I transfer, do I also need to re-publish on Arbitrum? +### 양도한 후에 Arbitrum에 다시 게시해야 합니까? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +20분의 이체 기간이 지나면 이체를 완료하려면 UI에서 거래를 통해 이체를 확인해야 하지만 이체 도구가 이를 안내합니다. L1 엔드포인트는 전송 기간과 그 이후의 유예 기간 동안 계속 지원됩니다. 편리할 때 엔드포인트를 업데이트하는 것이 좋습니다. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### L2에서 Ethereum Ethereum 메인넷과 게시 및 버전 관리가 동일합니까? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### 내 하위 그래프의 큐레이션이 내 하위 그래프와 함께 이동하나요? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +자동 마이그레이션 신호를 선택한 경우 자체 큐레이션의 100%가 하위 그래프와 함께 Arbitrum One으로 이동됩니다. 하위 그래프의 모든 큐레이션 신호는 전송 시 GRT로 변환되며, 큐레이션 신호에 해당하는 GRT는 L2 하위 그래프의 신호 생성에 사용됩니다. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +다른 큐레이터는 GRT 일부를 인출할지, 아니면 L2로 전송하여 동일한 하위 그래프의 신호를 생성할지 선택할 수 있습니다. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### 이전 후 구독을 이더리움 메인넷으로 다시 이동할 수 있나요? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +이전되면 이 하위 그래프의 Ethereum 메인넷 버전은 더 이상 사용되지 않습니다. 메인넷으로 다시 이동하려면 다시 메인넷에 재배포하고 게시해야 합니다. 그러나 인덱싱 보상은 결국 Arbitrum One에 전적으로 배포되므로 이더리움 메인넷으로 다시 이전하는 것은 권장되지 않습니다. -## Why do I need bridged ETH to complete my transfer? +### 전송을 완료하려면 브리지된 ETH가 필요한 이유는 무엇입니까? -Gas fees on Arbitrum One are paid using bridged ETH (i.e. 
ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +Arbitrum One의 가스 요금은 브리지된 ETH(즉, Arbitrum One에 브리지된 ETH)를 사용하여 지불됩니다. 그러나 이더리움 메인넷에 비해 가스 비용은 상당히 낮습니다. -## Curation Signal +## 대표단 -## How do I transfer my curation? +### 위임을 어떻게 이전하나요? -To transfer your curation, you will need to complete the following steps: + -1. Initiate signal transfer on Ethereum mainnet +위임을 이전하려면 다음 단계를 완료해야 합니다. -2. Specify an L2 Curator address\* +1. 이더리움 메인넷에서 위임 이전 시작 +2. 확인을 위해 20분 정도 기다리세요 +3. Arbitrum에서 위임 이전 확인 -3. Wait 20 minutes for confirmation +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*If necessary - i.e. you are using a contract address. +### 이더리움 메인넷에서 공개 할당으로 전송을 시작하면 보상은 어떻게 되나요? -## How will I know if the subgraph I curated has moved to L2? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### 현재 위임한 인덱서가 Arbitrum One에 없으면 어떻게 됩니까? -## What if I do not wish to move my curation to L2? +L2 전송 도구는 귀하가 위임한 인덱서가 자신의 지분을 Arbitrum으로 전송한 경우에만 활성화됩니다. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### 위임자는 다른 인덱서에게 위임할 수 있나요? -## How do I know my curation successfully transferred? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +### L2에서 위임할 인덱서를 찾을 수 없으면 어떻게 되나요? -## Can I transfer my curation on more than one subgraph at a time? +L2 전송 도구는 이전에 위임한 인덱서를 자동으로 감지합니다. -There is no bulk transfer option at this time. +### 이전 인덱서 대신 새 인덱서나 여러 인덱서에 내 위임을 혼합하고 일치시키거나 '확산'할 수 있나요? -## Indexer Stake +L2 전송 도구는 항상 이전에 위임한 동일한 인덱서로 위임을 이동합니다. L2로 이동한 후에는 위임을 취소하고 해동 기간을 기다린 후 위임을 분할할지 결정할 수 있습니다. -## How do I transfer my stake to Arbitrum? +### 휴지 기간이 적용되나요, 아니면 L2 위임 전송 도구를 사용한 후 즉시 철회할 수 있나요? -To transfer your stake, you will need to complete the following steps: +Transfer Tool을 사용하면 즉시 L2로 이동할 수 있습니다. 위임을 취소하려면 해동 기간을 기다려야 합니다. 그러나 인덱서가 모든 지분을 L2로 이전한 경우 이더리움 메인넷에서 즉시 출금할 수 있습니다. -1. Initiate stake transfer on Ethereum mainnet +### 위임을 양도하지 않으면 보상에 부정적인 영향을 미칠 수 있나요? -2. 
Wait 20 minutes for confirmation +향후 모든 네트워크 참여는 Arbitrum One으로 이동할 것으로 예상됩니다. -3. Confirm stake transfer on Arbitrum +### 내 위임을 L2로 이전하는 데 시간이 얼마나 걸리나요? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +예! 베스팅 컨트랙트는 L2 가스 지불에 필요한 ETH를 전달할 수 없기 때문에 프로세스가 조금 다르므로 미리 입금해야 합니다. 귀하의 베스팅 계약이 완전히 베스팅되지 않은 경우 먼저 L2에서 상대방 베스팅 계약을 초기화해야 하며 위임을 이 L2 베스팅 계약으로만 전송할 수 있습니다. Explorer의 UI는 베스팅 잠금 지갑을 사용하여 Explorer에 연결한 경우 이 프로세스를 안내할 수 있습니다. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## How much time do I have to confirm my stake transfer to Arbitrum? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### 위임세가 있나요? -## What if I have open allocations? +아니요. L2에서 받은 토큰은 위임세를 부과하지 않고 지정된 위임자를 대신하여 지정된 인덱서에 위임됩니다. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. 
+### Will my unrealized rewards be transferred when I transfer my delegation? -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## How long will it take to transfer my stake? +### Is moving delegations to L2 mandatory? Is there a deadline? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## Do I have to index on Arbitrum before I transfer my stake? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## Can Delegators move their delegation before I move my indexing stake? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### I don't see a button to transfer my delegation. Why is that? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? 
-## How do I transfer my delegation? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -To transfer your delegation, you will need to complete the following steps: +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -1. Initiate delegation transfer on Ethereum mainnet +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -2. Wait 20 minutes for confirmation +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -3. Confirm delegation transfer on Arbitrum +## 큐레이션 신호 -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### 내 큐레이션을 어떻게 이전하나요? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +큐레이션을 전송하려면 다음 단계를 완료해야 합니다. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +1. 이더리움 메인넷에서 신호 전송 시작 -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +2. L2 큐레이터 주소 지정\* -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +3. 확인을 위해 20분 정도 기다리세요 -## Do Delegators have the option to delegate to another Indexer? +\*필요한 경우 - 즉, 계약 주소를 사용하고 있습니다. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### 내가 큐레이트한 하위 그래프가 L2로 이동했는지 어떻게 알 수 있나요? -## What if I can't find the Indexer I'm delegating to on L2? +하위 세부정보 페이지를 보면 해당 하위 하위가 이전되었음을 알리는 배너가 표시됩니다. 메시지에 따라 큐레이션을 전송할 수 있습니다. 이동한 하위 그래프의 하위 그래프 세부정보 페이지에서도 이 정보를 찾을 수 있습니다. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### 큐레이션을 L2로 옮기고 싶지 않으면 어떻게 되나요? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +하위 그래프가 더 이상 사용되지 않으면 신호를 철회할 수 있는 옵션이 있습니다. 마찬가지로 하위 그래프가 L2로 이동한 경우 이더리움 메인넷에서 신호를 철회하거나 L2로 신호를 보낼 수 있습니다. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### 내 큐레이션이 성공적으로 전송되었는지 어떻게 알 수 있나요? 
-## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +L2 전송 도구가 시작된 후 약 20분 후에 Explorer를 통해 신호 세부 정보에 액세스할 수 있습니다. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### 한 번에 두 개 이상의 하위 그래프에 대한 내 큐레이션을 전송할 수 있나요? -## Can my rewards be negatively impacted if I do not transfer my delegation? +현재는 대량 전송 옵션이 없습니다. -It is anticipated that all network participation will move to Arbitrum One in the future. +## 인덱서 스테이크 -## How long does it take to complete the transfer of my delegation to L2? +### 내 지분을 Arbitrum으로 어떻게 이전하나요? -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +> 고지 사항: 현재 인덱서에서 GRT의 일부를 언스테이킹 중인 경우 L2 전송 도구를 사용할 수 없습니다. + + + +지분을 양도하려면 다음 단계를 완료해야 합니다. + +1. 이더리움 메인넷에서 지분 이전 시작 + +2. 확인을 위해 20분 정도 기다리세요 + +3. Arbitrum에서 지분 이전 확인 + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### 내 지분이 모두 이전되나요? + +양도할 지분의 양을 선택할 수 있습니다. 모든 지분을 한 번에 이전하기로 선택한 경우 먼저 열려 있는 할당을 모두 종료해야 합니다. + +여러 거래를 통해 지분의 일부를 양도할 계획이라면 항상 동일한 수취인 주소를 지정해야 합니다. -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +참고: 이체 도구를 처음 사용할 때 L2의 최소 지분 요구 사항을 충족해야 합니다. 인덱서는 최소 100,000 GRT를 보내야 합니다(이 함수를 처음 호출할 때). L1에 지분 일부를 남겨 두는 경우 최소 100,000 GRT를 초과해야 하며 (대표단과 함께) 공개 할당량을 감당하기에 충분해야 합니다. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Arbitrum으로의 지분 이전을 확인하는 데 얼마나 시간이 걸리나요? -## Is there any delegation tax? +\*\*\* Arbitrum에서 지분 이전을 완료하려면 거래를 확인해야 합니다. 이 단계는 7일 이내에 완료되어야 합니다. 그렇지 않으면 지분이 손실될 수 있습니다. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### 공개 할당이 있으면 어떻게 되나요? -## Vesting Contract Transfer +지분을 전부 보내지 않는 경우, L2 전송 도구는 최소 100,000 GRT가 이더리움 메인넷에 남아 있고 남은 지분과 위임이 열려 있는 할당량을 감당하기에 충분한지 확인합니다. GRT 잔액이 최소 금액 + 공개 할당을 충족하지 못하는 경우 공개 할당을 종료해야 할 수도 있습니다. -## How do I transfer my vesting contract? +### 전송 도구를 사용하면 전송하기 전에 이더리움 메인넷에서 언스테이크를 해제하는 데 28일을 기다려야 합니까? -To transfer your vesting, you will need to complete the following steps: +아니요, 스테이크를 L2로 즉시 전송할 수 있습니다. 
전송 도구를 사용하기 전에 스테이크를 해제하고 기다릴 필요가 없습니다. 28일 대기 기간은 이더리움 메인넷 또는 L2에서 스테이크를 지갑으로 다시 인출하려는 경우에만 적용됩니다. -1. Initiate the vesting transfer on Ethereum mainnet +### 지분을 이전하는 데 얼마나 걸리나요? -2. Wait 20 minutes for confirmation +L2 전송 도구가 지분 전송을 완료하는 데 약 20분이 소요됩니다. -3. Confirm vesting transfer on Arbitrum +### 지분을 양도하기 전에 Arbitrum에서 색인을 생성해야 합니까? -## How do I transfer my vesting contract if I am only partially vested? +인덱싱을 설정하기 전에 먼저 지분을 효과적으로 이전할 수 있지만, L2의 하위 그래프에 할당하고 이를 인덱싱하고 POI를 제시할 때까지는 L2에서 어떤 보상도 청구할 수 없습니다. -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### 내가 인덱싱 지분을 이동하기 전에 위임자가 자신의 위임을 이동할 수 있나요? -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +아니요, 위임자가 위임된 GRT를 Arbitrum으로 전송하려면 위임하는 인덱서가 L2에서 활성화되어 있어야 합니다. -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +### GRT 베스팅 계약/토큰 잠금 지갑을 사용하는 경우 지분을 양도할 수 있나요? -4. Withdraw any remaining ETH from the transfer tool contract +예! 베스팅 컨트랙트는 L2 가스 비용을 지불하는 데 필요한 ETH를 전달할 수 없기 때문에 프로세스가 조금 다릅니다. 따라서 미리 입금해야 합니다. 귀하의 베스팅 계약이 완전히 베스팅되지 않은 경우 먼저 L2에서 상대방 베스팅 계약을 초기화해야 하며 지분을 이 L2 베스팅 계약으로만 이전할 수 있습니다. Explorer의 UI는 베스팅 잠금 지갑을 사용하여 Explorer에 연결한 경우 이 프로세스를 안내할 수 있습니다. -## How do I transfer my vesting contract if I am fully vested? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -For those that are fully vested, the process is similar: +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -2. Set your L2 address with a call to the transfer tool contract +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +## 베스팅 계약 양도 -4. Withdraw any remaining ETH from the transfer tool contract +### 베스팅 계약을 어떻게 이전하나요? -## Can I transfer my vesting contract to Arbitrum? +베스팅을 이전하려면 다음 단계를 완료해야 합니다. -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +1. 이더리움 메인넷에서 베스팅 전송을 시작합니다. -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +2. 확인을 위해 20분 정도 기다리세요 -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +3. Arbitrum에서 베스팅 양도 확인 -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. 
+### 부분적으로만 베스팅된 경우 베스팅 계약을 어떻게 이전합니까? -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +1. 이체 도구 계약에 일부 ETH를 입금하십시오. (UI는 합리적인 금액을 추정하는 데 도움이 될 수 있습니다.) -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +2. L2 베스팅 잠금을 초기화하기 위해 전송 도구 계약을 통해 일부 잠긴 GRT를 L2로 보냅니다. 그러면 L2 수취인 주소도 설정됩니다. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +3. L1Stake 계약의 "잠긴" 전송 도구 기능을 통해 스테이크/위임권을 L2로 보냅니다. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +4. 전송 도구 계약에서 남은 ETH를 인출하세요. -## Can I specify a different beneficiary for my vesting contract on L2? +### 완전히 베스팅된 경우 베스팅 계약을 어떻게 이전합니까? -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. + -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +완전히 베스팅된 경우 프로세스는 유사합니다. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +1. 이체 도구 계약에 일부 ETH를 입금하십시오. (UI는 합리적인 금액을 추정하는 데 도움이 될 수 있습니다.) -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +2. 전송 도구 계약을 호출하여 L2 주소를 설정하세요. -This allows you to transfer your stake or delegation to any L2 address. +3. L1 스테이킹 계약의 "잠긴" 전송 도구 기능을 통해 스테이크/위임금을 L2로 보냅니다. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +4. 전송 도구 계약에서 남은 ETH를 인출하세요. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### 베스팅 계약을 Arbitrum으로 이전할 수 있나요? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +베스팅 계약의 GRT 잔액을 L2의 베스팅 계약으로 이전할 수 있습니다. 이는 베스팅 계약에서 L2로 지분이나 위임을 이전하기 위한 전제 조건입니다. 베스팅 계약은 0이 아닌 금액의 GRT를 보유해야 합니다(필요한 경우 1 GRT와 같은 소액을 전송할 수 있음). -1. 
Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +L1 베스팅 계약에서 L2로 GRT를 전송할 때 보낼 금액을 선택할 수 있으며 원하는 만큼 이 작업을 수행할 수 있습니다. L2 베스팅 계약은 GRT를 처음 전송할 때 초기화됩니다. -2. Revoke protocol access to the vesting contract (needed for the next step) +이체는 베스팅 계약 계정에 연결할 때 Explorer 프로필에 표시되는 이체 도구를 사용하여 수행됩니다. -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +계약이 완전히 확정된 베스팅 일정이 끝날 때까지 L2 베스팅 계약에서 GRT를 해제/철회할 수 없다는 점에 유의하세요. 그 전에 GRT를 해제해야 하는 경우 해당 목적에 사용할 수 있는 다른 전송 도구를 사용하여 GRT를 L1 베스팅 계약으로 다시 전송할 수 있습니다. -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +베스팅 계약 잔액을 L2로 이전하지 않았고 베스팅 계약이 완전히 베스팅된 경우, 베스팅 계약을 L2로 이전해서는 안 됩니다. 대신 전송 도구를 사용하여 L2 지갑 주소를 설정하고 L2의 일반 지갑으로 지분이나 위임을 직접 전송할 수 있습니다. -5. Wait 20 minutes for confirmation +### 나는 베스팅 계약을 사용해 메인넷에 스테이킹하고 있습니다. 내 지분을 Arbitrum으로 이전할 수 있나요? -6. Confirm the balance transfer on L2 +예, 하지만 계약이 아직 베스팅 중인 경우 L2 베스팅 계약이 소유하도록 지분을 양도할 수 있습니다. 먼저 Explorer의 베스팅 계약 이전 도구를 사용하여 일부 GRT 잔액을 이전하여 이 L2 계약을 초기화해야 합니다. 계약이 완전히 확정된 경우 지분을 L2의 어떤 주소로든 전송할 수 있지만 L2 가스 비용을 지불하려면 L2 전송 도구에 대해 미리 설정하고 일부 ETH를 입금해야 합니다. -\*If necessary - i.e. you are using a contract address. +### 나는 메인넷에 위임하기 위해 베스팅 계약을 사용하고 있습니다. 내 위임을 Arbitrum으로 이전할 수 있나요? + +예. 하지만 계약이 아직 베스팅 중인 경우 L2 베스팅 계약이 소유하도록 위임을 양도할 수 있습니다. 먼저 Explorer의 베스팅 계약 이전 도구를 사용하여 일부 GRT 잔액을 이전하여 이 L2 계약을 초기화해야 합니다. 계약이 완전히 확정된 경우 L2의 어떤 주소로든 위임을 전송할 수 있지만, L2 가스 비용을 지불하려면 L2 전송 도구에 대한 일부 ETH를 미리 설정하고 예치해야 합니다. + +### L2 베스팅 계약에 대해 다른 수혜자를 지정할 수 있나요? + +예. 처음 잔액을 이체하고 L2 베스팅 계약을 설정할 때 L2 수혜자를 지정할 수 있습니다. 이 수혜자가 Arbitrum One에서 거래를 수행할 수 있는 지갑인지 확인하세요. 즉, Arbitrum One에 배포된 EOA 또는 다중 서명이어야 합니다. + +귀하의 계약이 완전히 확정된 경우 L2에 가득 계약을 설정하지 않습니다. 대신 L2 지갑 주소를 설정하게 되며 이는 Arbitrum의 스테이크 또는 위임을 위한 수신 지갑이 됩니다. + +### 내 계약은 완전히 귀속되었습니다. 내 지분이나 위임을 L2 베스팅 계약이 아닌 다른 주소로 이전할 수 있나요? + +예. 베스팅 계약 잔액을 L2로 이전하지 않았고 베스팅 계약이 완전히 베스팅된 경우, 베스팅 계약을 L2로 이전해서는 안 됩니다. 대신 전송 도구를 사용하여 L2 지갑 주소를 설정하고 L2의 일반 지갑으로 지분이나 위임을 직접 전송할 수 있습니다. + +이를 통해 스테이크나 위임을 어떤 L2 주소로든 전송할 수 있습니다. + +### 내 베스팅 계약은 아직 베스팅 중입니다. 베스팅 계약 잔액을 L2로 어떻게 이전합니까? + +이 단계는 귀하의 계약이 아직 베스팅 중이거나 계약이 아직 베스팅 중일 때 이전에 이 프로세스를 사용한 경우에만 적용됩니다. + +베스팅 계약을 L2로 이전하려면 L2 베스팅 계약을 초기화하는 이전 도구를 사용하여 GRT 잔액을 L2로 보내십시오. + +1. 일부 ETH를 전송 도구 계약에 입금하십시오. (이는 L2 가스 비용을 지불하는 데 사용됩니다.) + +2. 베스팅 계약에 대한 프로토콜 액세스 취소(다음 단계에 필요) + +3. 베스팅 계약에 프로토콜 액세스 권한을 부여합니다(귀하의 계약이 전송 도구와 상호 작용할 수 있도록 허용합니다). + +4. L2 수취인 주소\*를 지정하고 이더리움 메인넷에서 잔액 이체를 시작하세요. + +5. 확인을 위해 20분 정도 기다리세요 + +6. L2 잔액 이체 확인 + +\*필요한 경우 - 즉, 계약 주소를 사용하고 있습니다. \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. 
+ +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### 베스팅 계약을 다시 L1으로 이동할 수 있나요? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +귀하의 베스팅 계약은 아직 L1에 있으므로 그렇게 할 필요가 없습니다. 전송 도구를 사용하면 L1 베스팅 계약과 연결된 L2에서 새 계약을 생성하고 두 계약 간에 GRT를 주고받을 수 있습니다. -## Why do I need to move my vesting contract to begin with? +### 우선 베스팅 계약을 이동해야 하는 이유는 무엇입니까? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +이 계정이 L2에 대한 지분이나 위임을 소유할 수 있도록 L2 베스팅 계약을 설정해야 합니다. 그렇지 않으면 베스팅 계약을 "탈출"하지 않고 지분/위임권을 L2로 이전할 수 있는 방법이 없습니다. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### 부분적으로만 확정된 계약을 현금화하려고 하면 어떻게 됩니까? 이것이 가능한가? -This is not a possibility. You can move funds back to L1 and withdraw them there. +이것은 가능성이 없습니다. 자금을 다시 L1으로 옮기고 그곳에서 인출할 수 있습니다. -## What if I don't want to move my vesting contract to L2? +### 베스팅 계약을 L2로 옮기고 싶지 않으면 어떻게 하나요? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +L1에서는 계속 스테이킹/위임할 수 있습니다. 시간이 지남에 따라 Arbitrum에서 프로토콜이 확장됨에 따라 L2로 이동하여 보상을 활성화하는 것을 고려할 수 있습니다. 이러한 전송 도구는 프로토콜에서 지분을 보유하고 위임할 수 있는 계약을 베스팅하기 위한 것입니다. 계약이 스테이킹이나 위임을 허용하지 않거나 취소 가능한 경우에는 사용 가능한 전송 도구가 없습니다. 가능한 경우 L1에서 GRT를 인출할 수 있습니다. 
diff --git a/website/pages/ko/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ko/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/ko/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ko/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/ko/billing.mdx b/website/pages/ko/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/ko/billing.mdx +++ b/website/pages/ko/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). 
This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/ko/chain-integration-overview.mdx b/website/pages/ko/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/ko/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). 
+- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. 
+ +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ko/cookbook/arweave.mdx b/website/pages/ko/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/ko/cookbook/arweave.mdx +++ b/website/pages/ko/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ko/cookbook/cosmos.mdx b/website/pages/ko/cookbook/cosmos.mdx index ef21e4bc0855..dc7ec0ffae7f 100644 --- a/website/pages/ko/cookbook/cosmos.mdx +++ b/website/pages/ko/cookbook/cosmos.mdx @@ -198,7 +198,7 @@ $ graph build Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: -**Hosted Service** +**호스팅 서비스** ```bash graph create account/subgraph-name --product hosted-service diff --git a/website/pages/ko/cookbook/grafting.mdx b/website/pages/ko/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/ko/cookbook/grafting.mdx +++ b/website/pages/ko/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/ko/cookbook/near.mdx b/website/pages/ko/cookbook/near.mdx index 879e8e5c15aa..ef4e22867469 100644 --- a/website/pages/ko/cookbook/near.mdx +++ b/website/pages/ko/cookbook/near.mdx @@ -193,7 +193,7 @@ $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # u The node configuration will depend on where the subgraph is being deployed. -### Hosted Service +### 호스팅 서비스 ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? 
-If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/ko/cookbook/upgrading-a-subgraph.mdx b/website/pages/ko/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/ko/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ko/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ko/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/ko/deploying/hosted-service.mdx b/website/pages/ko/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/ko/deploying/hosted-service.mdx +++ b/website/pages/ko/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. 
+ +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ko/deploying/subgraph-studio.mdx b/website/pages/ko/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/ko/deploying/subgraph-studio.mdx +++ b/website/pages/ko/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/ko/developing/creating-a-subgraph.mdx b/website/pages/ko/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/ko/developing/creating-a-subgraph.mdx +++ b/website/pages/ko/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. 
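The `dataSources.context` entry added in this hunk can be read back inside mappings at runtime. The following is a minimal sketch (not part of the original page) of how the `foo` and `bar` keys from the manifest snippet above might be accessed, assuming the `dataSource.context()` helper from `graph-ts`; the block handler shown is only a placeholder for whichever handler the data source actually declares.

```typescript
import { dataSource, ethereum, log } from '@graphprotocol/graph-ts'

// Placeholder handler: any handler belonging to this data source can read its context.
export function handleBlock(block: ethereum.Block): void {
  let context = dataSource.context()
  let bar = context.getString('bar')        // declared with `type: String` in the manifest
  let foo = context.get('foo')!.toBoolean() // declared with `type: Bool` in the manifest
  if (foo) {
    log.info('data source context: bar = {} at block {}', [bar, block.number.toString()])
  }
}
```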
@@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. 
A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. 
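+
+As a minimal sketch of the Arweave case (the template name `TokenMetadata`, the contract name, and the `arweaveTxId` event parameter are hypothetical), spawning an Arweave-backed file data source uses the same `create` call, passing an Arweave transaction ID instead of an IPFS CID:
+
+```typescript
+import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'
+import { Mint as MintEvent } from '../generated/ContractName/ContractName'
+
+export function handleMint(event: MintEvent): void {
+  // event.params.arweaveTxId is assumed to hold an Arweave transaction ID
+  TokenMetadataTemplate.create(event.params.arweaveTxId)
+}
+```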
diff --git a/website/pages/ko/developing/developer-faqs.mdx b/website/pages/ko/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/ko/developing/developer-faqs.mdx +++ b/website/pages/ko/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/ko/developing/graph-ts/api.mdx b/website/pages/ko/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/ko/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
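+// Persist the entity; if an entity with this ID already exists,
+// the changes above are merged into the stored version (see point 2 above)
+transfer.save()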
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ko/developing/graph-ts/common-issues.mdx b/website/pages/ko/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/ko/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/ko/developing/supported-networks.mdx b/website/pages/ko/developing/supported-networks.mdx index 58ce56345f7c..4719cd60efe4 100644 --- a/website/pages/ko/developing/supported-networks.mdx +++ b/website/pages/ko/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: 지원되는 네트워크들 --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/ko/firehose.mdx b/website/pages/ko/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/ko/firehose.mdx +++ b/website/pages/ko/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. 
-Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ko/glossary.mdx b/website/pages/ko/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/ko/glossary.mdx +++ b/website/pages/ko/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. 
[Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). 
This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide.

-- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network.
+- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network.

-- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.
+- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.

- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT.

@@ -66,7 +62,7 @@ title: Glossary

- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network.

-- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.
+- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.

- **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer.

@@ -80,10 +76,10 @@ title: Glossary

- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again.

-- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake.
+- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network-related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake.

- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network.
- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/ko/graphcast.mdx b/website/pages/ko/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/ko/graphcast.mdx +++ b/website/pages/ko/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/ko/index.json b/website/pages/ko/index.json index 9e28e13d5001..cac80059433b 100644 --- a/website/pages/ko/index.json +++ b/website/pages/ko/index.json @@ -4,7 +4,7 @@ "shortcuts": { "aboutTheGraph": { "title": "About The Graph", - "description": "Learn more about The Graph" + "description": "The Graph에 대해 더 알아보기" }, "quickStart": { "title": "Quick Start", @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -32,16 +32,16 @@ "description": "Learn about The Graph’s network roles.", "roles": { "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "title": "개발자", + "description": "dapp내에서 새로운 subgraph를 만들거나 이미 존재하는 subgraph들을 사용합니다." }, "indexer": { "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "description": "데이터 인덱싱 혹은 쿼리 제공을 위해 노드를 운영합니다." }, "curator": { "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "description": "서브그래프들에 신호를 보냄으로써 데이터를 구성합니다." 
}, "delegator": { "title": "Delegator", @@ -62,16 +62,15 @@ "description": "Explore subgraphs and interact with the protocol" }, "hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "호스팅 서비스", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "지원되는 네트워크들", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ko/mips-faqs.mdx b/website/pages/ko/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/ko/mips-faqs.mdx +++ b/website/pages/ko/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). diff --git a/website/pages/ko/network/benefits.mdx b/website/pages/ko/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/ko/network/benefits.mdx +++ b/website/pages/ko/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/ko/network/indexing.mdx b/website/pages/ko/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/ko/network/indexing.mdx +++ b/website/pages/ko/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. 
Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn indexing rewards for their services, as well as query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure Indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed, the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed, with the other 5% going to the Delegators.
-- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. 
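+
+As noted above, when an allocation is closed its indexing rewards and query fee rebates are split between the Indexer and its Delegators according to the `indexingRewardCut` and `queryFeeCut` parameters. As a rough, back-of-the-envelope illustration of that split, the sketch below uses hypothetical GRT amounts together with the 95% cuts from the example above:
+
+```python
+# Illustrative only: the GRT amounts are hypothetical and the cuts mirror the 95% example above.
+def split_rewards(amount_grt: float, cut_percent: float) -> tuple[float, float]:
+    """Return (indexer_share, delegator_share) for a given cut percentage."""
+    indexer_share = amount_grt * cut_percent / 100
+    return indexer_share, amount_grt - indexer_share
+
+query_fees = 1_000.0        # GRT in query fee rebates for a closed allocation
+indexing_rewards = 4_000.0  # GRT in indexing rewards for the same allocation
+
+print(split_rewards(query_fees, 95))        # (950.0, 50.0)   with queryFeeCut = 95%
+print(split_rewards(indexing_rewards, 95))  # (3800.0, 200.0) with indexingRewardCut = 95%
+```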
diff --git a/website/pages/ko/new-chain-integration.mdx b/website/pages/ko/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/ko/new-chain-integration.mdx +++ b/website/pages/ko/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is to develop a new Firehose for the chain in question and then integrate that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly scalable blockchain indexing solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/ko/operating-graph-node.mdx b/website/pages/ko/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/ko/operating-graph-node.mdx +++ b/website/pages/ko/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)).
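+
+To make the EIP-1898 requirement above concrete, the following is a minimal sketch of an `eth_call` pinned to a specific block hash, the style of historical call such subgraphs rely on; the RPC endpoint, contract address, calldata, and block hash are placeholders:
+
+```python
+import json
+
+import requests  # third-party HTTP client, assumed to be installed
+
+RPC_URL = "http://localhost:8545"  # placeholder archive-node endpoint
+
+payload = {
+    "jsonrpc": "2.0",
+    "id": 1,
+    "method": "eth_call",
+    "params": [
+        {"to": "0x0000000000000000000000000000000000000000", "data": "0x"},
+        # EIP-1898 allows a block object (blockHash or blockNumber) instead of a tag like "latest"
+        {"blockHash": "0x" + "00" * 32},
+    ],
+}
+
+response = requests.post(RPC_URL, json=payload, timeout=10)
+print(json.dumps(response.json(), indent=2))
+```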
-**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes diff --git a/website/pages/ko/publishing/publishing-a-subgraph.mdx b/website/pages/ko/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/ko/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ko/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ko/querying/querying-the-hosted-service.mdx b/website/pages/ko/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/ko/querying/querying-the-hosted-service.mdx +++ b/website/pages/ko/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 
Some of the main features are detailed below: diff --git a/website/pages/ko/querying/querying-with-python.mdx b/website/pages/ko/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/ko/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following example. It grabs a subgraph for the Aave v2 protocol, queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD), and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seamlessly save your data as CSVs for further analysis. diff --git a/website/pages/ko/quick-start.mdx b/website/pages/ko/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/ko/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks).
+ +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. +- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. 
That said, you are free to choose any string as your version label, such as `v1`, `version1`, or `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have your API key yet, you can query from your dapp via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ko/substreams.mdx b/website/pages/ko/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/ko/substreams.mdx +++ b/website/pages/ko/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams.
+Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ko/sunrise.mdx b/website/pages/ko/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ko/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks into The Graph. + +### What are the phases of the sunrise of decentralized data?
+ +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. 
In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. 
As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ko/tokenomics.mdx b/website/pages/ko/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/ko/tokenomics.mdx +++ b/website/pages/ko/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. 
Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/mr/arbitrum/arbitrum-faq.mdx b/website/pages/mr/arbitrum/arbitrum-faq.mdx index 35a903c670ee..839367111dc0 100644 --- a/website/pages/mr/arbitrum/arbitrum-faq.mdx +++ b/website/pages/mr/arbitrum/arbitrum-faq.mdx @@ -1,78 +1,78 @@ --- -title: आर्बिट्रम FAQ +title: Arbitrum FAQ --- -तुम्हाला आर्बिट्रम बिलिंग FAQ वर जायचे असल्यास [here](#billing-on-arbitrum-faqs) क्लिक करा. +Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. -## ग्राफ एक L2 सोल्यूशन का अंमलात आणत आहे? +## Why is The Graph implementing an L2 Solution? -L2 वर आलेख स्केलिंग करून, नेटवर्क सहभागी अपेक्षा करू शकतात: +By scaling The Graph on L2, network participants can expect: -- गॅस फीवर 26x पेक्षा जास्त बचत +- Upwards of 26x savings on gas fees -- जलद व्यवहार गती +- Faster transaction speed -- Ethereum कडून सुरक्षा वारशाने मिळाली +- Security inherited from Ethereum Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. -ग्राफ समुदायाने [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) चर्चेच्या निकालानंतर गेल्या वर्षी आर्बिट्रमसोबत पुढे जाण्याचा निर्णय घेतला. +The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. -## L2 वर ग्राफ वापरण्यासाठी मला काय करावे लागेल? +## What do I need to do to use The Graph on L2? -वापरकर्ते खालीलपैकी एक पद्धत वापरून त्यांचे GRT आणि ETH ब्रिज करतात: +Users bridge their GRT and ETH  using one of the following methods: -- [द ग्राफ ब्रिज ऑन आर्बिट्रम](https://bridge.arbitrum.io/?l2ChainId=42161) +- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -L2 वर आलेख वापरण्याचा फायदा घेण्यासाठी, साखळी दरम्यान टॉगल करण्यासाठी हा ड्रॉपडाउन स्विचर वापरा. +To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. -![आर्बिट्रम टॉगल करण्यासाठी ड्रॉपडाउन स्विचर](/img/arbitrum-screenshot-toggle.png) +![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) ## सबग्राफ डेव्हलपर, डेटा कंझ्युमर, इंडेक्सर, क्युरेटर किंवा डेलिगेटर म्हणून, मला आता काय करावे लागेल? 
-कोणतीही त्वरित कारवाई आवश्यक नाही, तथापि, नेटवर्क सहभागींना L2 च्या फायद्यांचा लाभ घेण्यासाठी आर्बिट्रममध्ये जाण्यास प्रोत्साहित केले जाते. +There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. -कोर डेव्हलपर टीम L2 ट्रान्स्फर टूल्स तयार करण्यासाठी काम करत आहेत ज्यामुळे डेलिगेशन, क्युरेशन आणि सबग्राफ आर्बिट्रममध्ये हलवणे लक्षणीय सोपे होईल. नेटवर्क सहभागी 2023 च्या उन्हाळ्यापर्यंत L2 हस्तांतरण साधने उपलब्ध होण्याची अपेक्षा करू शकतात. +Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. -10 एप्रिल 2023 पर्यंत, सर्व इंडेक्सिंग रिवॉर्ड्सपैकी 5% आर्बिट्रमवर टाकले जात आहेत. जसजसा नेटवर्क सहभाग वाढतो, आणि काउन्सिलने त्यास मान्यता दिली, तसतसे अनुक्रमणिका रिवॉर्ड्स हळूहळू इथरियममधून आर्बिट्रममध्ये स्थलांतरित होतील, अखेरीस संपूर्णपणे आर्बिट्रमकडे जातील. +10 एप्रिल 2023 पर्यंत, सर्व इंडेक्सिंग रिवॉर्ड्सपैकी 5% आर्बिट्रमवर टाकले जात आहेत. जसजसा नेटवर्क सहभाग वाढतो, आणि काउन्सिलने त्याला मान्यता दिली तसतसे, अनुक्रमणिका बक्षिसे हळूहळू इथरियममधून आर्बिट्रममध्ये बदलली जातील, अखेरीस संपूर्णपणे आर्बिट्रमकडे जातील. -## मी L2 वर नेटवर्कमध्ये सहभागी होऊ इच्छित असल्यास, मी काय करावे? +## If I would like to participate in the network on L2, what should I do? -कृपया L2 वर [नेटवर्कची चाचणी](https://testnet.thegraph.com/explorer) मदत करा आणि [Discord](https://discord.gg/graphprotocol) मधील तुमच्या अनुभवाबद्दल फीडबॅक कळवा. +Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). -## नेटवर्कचे L2 स्केलिंग करण्याशी संबंधित काही जोखीम आहेत का? +## Are there any risks associated with scaling the network to L2? -सर्व स्मार्ट करारांचे कसून [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -प्रत्येक गोष्टीची कसून चाचणी केली गेली आहे आणि सुरक्षित आणि अखंड संक्रमण सुनिश्चित करण्यासाठी एक आकस्मिक योजना तयार आहे. तपशील [here] \(https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-) आढळू शकतात सुरक्षा-विचार-20. +Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## इथरियमवरील विद्यमान सबग्राफ कार्य करत राहतील? +## Will existing subgraphs on Ethereum continue to work? होय, ग्राफ नेटवर्क कॉन्ट्रॅक्ट्स नंतरच्या तारखेला पूर्णपणे आर्बिट्रममध्ये जाईपर्यंत इथरियम आणि आर्बिट्रम दोन्हीवर समांतरपणे कार्य करतील. -## GRT ला आर्बिट्रमवर नवीन स्मार्ट कॉन्ट्रॅक्ट तैनात केले जाईल का? +## Will GRT have a new smart contract deployed on Arbitrum? -होय, GRT कडे अतिरिक्त [Arbitrum वर स्मार्ट करार आहे](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). तथापि, इथरियम मेननेट [GRT करार](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) कार्यरत राहील. +Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). 
However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. -## आर्बिट्रम FAQ वर बिलिंग +## Billing on Arbitrum FAQs -## माझ्या बिलिंग शिलकीमधील GRT बद्दल मला काय करावे लागेल? +## What do I need to do about the GRT in my billing balance? -काहीही नाही! तुमचा GRT सुरक्षितपणे आर्बिट्रममध्ये स्थलांतरित केला गेला आहे आणि तुम्ही हे वाचता तेव्हा प्रश्नांसाठी पैसे देण्यासाठी वापरला जात आहे. +Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. -## माझे फंड आर्बिट्रममध्ये सुरक्षितपणे स्थलांतरित झाले आहेत हे मला कसे कळेल? +## How do I know my funds have migrated securely to Arbitrum? -सर्व GRT बिलिंग शिल्लक आधीच आर्बिट्रममध्ये यशस्वीरित्या स्थलांतरित केले गेले आहेत. तुम्ही Arbitrum वर बिलिंग करार पाहू शकता [here] (https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## आर्बिट्रम ब्रिज सुरक्षित आहे हे मला कसे कळेल? +## How do I know the Arbitrum bridge is secure? -सर्व वापरकर्त्यांसाठी सुरक्षितता आणि सुरक्षितता सुनिश्चित करण्यासाठी पुलाचे [भारी लेखापरीक्षण](https://code4rena.com/contest/2022-10-the-graph-l2-bridge-contest) केले गेले आहे. +The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. ## मी माझ्या इथरियम मेननेट वॉलेटमधून नवीन GRT जोडत असल्यास मला काय करावे लागेल? -तुमच्या आर्बिट्रम बिलिंग बॅलन्समध्ये GRT जोडणे [सबग्राफ स्टुडिओ](https://thegraph.com/studio/) मध्ये एका क्लिकच्या अनुभवाने करता येते. तुम्ही तुमच्या GRT ला आर्बिट्रममध्ये सहजपणे ब्रिज करू शकता आणि तुमच्या API की एका व्यवहारात भरू शकता. +Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. -GRT जोडणे, पैसे काढणे किंवा मिळवणे याबद्दल अधिक तपशीलवार सूचनांसाठी [बिलिंग पृष्ठ](https://thegraph.com/docs/en/billing/) ला भेट द्या. +Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. diff --git a/website/pages/mr/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/mr/arbitrum/l2-transfer-tools-faq.mdx index 583617a90856..87576353e44b 100644 --- a/website/pages/mr/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/mr/arbitrum/l2-transfer-tools-faq.mdx @@ -1,20 +1,44 @@ --- -title: L2 हस्तांतरण साधने FAQ +title: L2 Transfer Tools FAQ --- -> L2 ट्रान्सफर टूल्स अजून रिलीझ झालेले नाहीत. ते 2023 च्या उन्हाळ्यात उपलब्ध होतील अशी अपेक्षा आहे. +## सामान्य -## L2 हस्तांतरण साधने काय आहेत? +### L2 हस्तांतरण साधने काय आहेत? -Arbitrum One वर प्रोटोकॉल लागू करून योगदानकर्त्यांना नेटवर्कमध्ये सहभागी होण्यासाठी ग्राफने 26x स्वस्त केले आहे. L2 कडे जाणे सोपे करण्यासाठी L2 हस्तांतरण साधने कोर devs द्वारे तयार केली गेली. प्रत्येक प्रोटोकॉल सहभागीसाठी, L2 वर जाताना, वितळण्याचा कालावधी टाळून किंवा मॅन्युअली माघार घेणे आणि GRT ब्रिज करणे टाळून अनुभव अखंडित करण्यासाठी हस्तांतरण सहाय्यकांचा संच सामायिक केला जाईल. तुमची भूमिका The Graph मध्ये काय आहे आणि तुम्ही L2 मध्ये काय हस्तांतरित करत आहात यावर अवलंबून या साधनांसाठी तुम्हाला चरणांच्या विशिष्ट संचाचे अनुसरण करणे आवश्यक आहे. 
+The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## मी इथरियम मेननेटवर वापरतो तेच वॉलेट मी वापरू शकतो का? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -तुम्ही [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) वॉलेट वापरत असल्यास तुम्ही तोच पत्ता वापरू शकता. जर तुमचे इथरियम मेननेट वॉलेट करार असेल (उदा. मल्टीसिग) तर तुम्ही [आर्बिट्रम वॉलेट पत्ता]\(/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the- निर्दिष्ट करणे आवश्यक आहे. graph-on-l2) जिथे तुमचे हस्तांतरण पाठवले जाईल. कृपया पत्ता काळजीपूर्वक तपासा कारण चुकीच्या पत्त्यावर कोणतेही हस्तांतरण कायमचे नुकसान होऊ शकते. तुम्हाला L2 वर मल्टीसिग वापरायचे असल्यास, तुम्ही आर्बिट्रम वन वर मल्टीसिग कॉन्ट्रॅक्ट तैनात केल्याची खात्री करा. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### मी इथरियम मेननेटवर वापरतो तेच वॉलेट मी वापरू शकतो का? + +आपल्याला [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) वॉलेट वापरत असल्यास, आपल्याला त्या एका आधीच्या पत्त्याचा वापर करू शकता. आपल्य्या इथे Ethereum मुख्यनेट वॉलेट कंट्रॅक्ट असल्यास (उदा. मल्टीसिग), तर आपल्याला आपल्या स्थानांतरणाच्या लक्ष्यासाठी [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) सूचित करावा लागेल. कृपया पत्त्याची वाचन सावध घ्या कारण कोणत्याही चुकीच्या पत्त्याला स्थायी नुकसान होऊ शकतो. आपल्याला L2 वर मल्टीसिग वापरायचं असल्यास, कृपया सुनिश्चित करा की आपल्याला Arbitrum One वर मल्टीसिग कॉन्ट्रॅक्ट डिप्लॉय करण्याची आवश्यकता आहे. + +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### मी 7 दिवसात माझे हस्तांतरण पूर्ण केले नाही तर काय होईल? + +L2 ट्रांस्फर टूल्स आपल्याला L1 वरून L2ला संदेश पाठविण्याच्या अर्बिट्रमच्या स्वभाविक विधानाचा वापर करतात. हा विधान "पुनः प्रयासयोग्य पर्याय" म्हणून ओळखला जातो आणि हा सर्व स्थानिक टोकन ब्रिजेस, अर्बिट्रम GRT ब्रिज यासह सहाय्यक आहे. आपण पुनः प्रयासयोग्य पर्यायांबद्दल अधिक माहिती [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) वाचू शकता. + +आपल्याला आपल्या संपत्तींच्या (सबग्राफ, स्टेक, प्रतिनिधित्व किंवा पुरवणी) L2ला स्थानांतरित केल्यास, एक संदेश अर्बिट्रम GRT ब्रिजमध्ये पाठविला जातो ज्याने L2वर पुनः प्रयासयोग्य पर्याय तयार करतो. स्थानांतरण उपकरणात्रूटील वैल्यूत्या किंवा संचलनसाठी काही ईटीएच वॅल्यू आहे, ज्यामुळे 1) पर्याय तयार करण्यासाठी पैसे देणे आणि 2) L2मध्ये पर्याय संचालित करण्यासाठी गॅस देणे ह्याचा वापर केला जातो. परंतु, पर्याय संचालनाच्या काळात गॅसची किंमते वेळेत बदलू शकतात, ज्यामुळे ही स्वयंप्रयत्न किंवा संचालन प्रयत्न अपयशी होऊ शकतात. 
जेव्हा ती प्रक्रिया अपयशी होते, तेव्हा अर्बिट्रम ब्रिज किंवा 7 दिवसापर्यंत पुन्हा प्रयत्न करण्याची क्षमता आहे, आणि कोणत्याही व्यक्ती त्या "पुनर्मिलन" पर्यायाचा प्रयत्न करू शकतो (त्यासाठी अर्बिट्रमवर काही ईटीएच स्थानांतरित केलेले असणे आवश्यक आहे). + +ही आपल्याला सगळ्या स्थानांतरण उपकरणांमध्ये "पुष्टीकरण" चरण म्हणून ओळखता - आपल्याला अधिकांशपेक्षा अधिक आपल्याला स्वयंप्रयत्न सध्याच्या वेळेत स्वयंप्रयत्न सध्याच्या वेळेत स्वतः संचालित होईल, परंतु आपल्याला येते कि ते दिले आहे ह्याची तपासणी करणे महत्वपूर्ण आहे. आपल्याला किंवा 7 दिवसात कोणत्याही सफल पुनर्मिलनाचे प्रयत्न केले त्यामुळे प्रयत्नशील नसत्या आणि त्या 7 दिवसांत कोणताही प्रयत्न नसत्याने, अर्बिट्रम ब्रिजने पुनर्मिलन पर्यायाचा त्याग केला आहे, आणि आपली संपत्ती (सबग्राफ, स्टेक, प्रतिनिधित्व किंवा पुरवणी) वेळेत विचली जाईल आणि पुनर्प्राप्त केली जाऊ शकणार नाही. ग्राफचे मुख्य डेव्हलपर्सन्सने या परिस्थितियांच्या जाणीवपणे प्राणीसमूह ठरविले आहे आणि त्याच्या अगोदर पुनर्मिलन केले जाईल, परंतु याच्यातून, आपल्याला आपल्या स्थानांतरणाची पूर्ण करण्याची जबाबदारी आहे. आपल्याला आपल्या व्यवहाराची पुष्टी करण्यात किंवा संचालनाची समस्या आहे का, कृपया [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) वापरून संपूर्ण डेव्हलपर्सन्सची मदत करण्याची क्षमता आहे. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. ## सबग्राफ हस्तांतरण -## मी माझा सबग्राफ कसा हस्तांतरित करू? +### मी माझा सबग्राफ कसा हस्तांतरित करू? + + तुमचा सबग्राफ हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: @@ -24,292 +48,364 @@ Arbitrum One वर प्रोटोकॉल लागू करून यो 3. आर्बिट्रमवर सबग्राफ हस्तांतरणाची पुष्टी करा\* -4. आर्बिट्रमवर प्रकाशन सबग्राफ समाप्त करा +4. आर्बिट्रम वर सबग्राफ प्रकाशित करणे समाप्त करा 5. क्वेरी URL अपडेट करा (शिफारस केलेले) -\*लक्षात ठेवा की तुम्ही ७ दिवसांच्या आत हस्तांतरणाची पुष्टी केली पाहिजे अन्यथा तुमचा सबग्राफ गमावला जाऊ शकतो. बर्‍याच प्रकरणांमध्ये, ही पायरी आपोआप चालेल, परंतु आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास मॅन्युअल पुष्टीकरण आवश्यक असू शकते. या प्रक्रियेदरम्यान काही समस्या असल्यास, मदतीसाठी संसाधने असतील: support@thegraph.com वर किंवा [Discord](https://discord.gg/graphprotocol) वर समर्थनाशी संपर्क साधा. +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## मी माझे हस्तांतरण कोठून सुरू करावे? +### मी माझे हस्तांतरण कोठून सुरू करावे? 

-तुम्ही [सबग्राफ स्टुडिओ](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) किंवा कोणत्याही सबग्राफ तपशील पेजवरून तुमचे हस्तांतरण सुरू करू शकता. हस्तांतरण सुरू करण्यासाठी सबग्राफ तपशील पृष्ठावरील "हस्तांतरण सबग्राफ" बटणावर क्लिक करा.
+तुम्ही [Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) किंवा कोणत्याही सबग्राफ तपशील पृष्ठावरून तुमचे हस्तांतरण सुरू करू शकता. हस्तांतरण सुरू करण्यासाठी सबग्राफ तपशील पृष्ठावरील "Transfer Subgraph" बटणावर क्लिक करा.

-## माझा सबग्राफ हस्तांतरित होईपर्यंत मला किती वेळ प्रतीक्षा करावी लागेल
+### माझा सबग्राफ हस्तांतरित होईपर्यंत मला किती वेळ प्रतीक्षा करावी लागेल

-हस्तांतरण वेळ अंदाजे 20 मिनिटे घेते. पुलाचे हस्तांतरण स्वयंचलितपणे पूर्ण करण्यासाठी आर्बिट्रम ब्रिज पार्श्वभूमीत काम करत आहे. काही प्रकरणांमध्ये, गॅसची किंमत वाढू शकते आणि तुम्हाला पुन्हा व्यवहाराची पुष्टी करावी लागेल.
+हस्तांतरणाला साधारणपणे सुमारे 20 मिनिटे लागतात. हस्तांतरण स्वयंचलितपणे पूर्ण करण्यासाठी आर्बिट्रम ब्रिज पार्श्वभूमीत काम करत असतो. काही प्रकरणांमध्ये गॅसच्या किमती वाढू शकतात आणि तुम्हाला व्यवहाराची पुन्हा पुष्टी करावी लागेल.

-## मी L2 मध्ये हस्तांतरित केल्यानंतर माझा सबग्राफ अजूनही शोधण्यायोग्य असेल का?
+### मी L2 मध्ये हस्तांतरित केल्यानंतर माझा सबग्राफ अजूनही शोधण्यायोग्य असेल का?

-तुमचा सबग्राफ ज्या नेटवर्कवर प्रकाशित झाला आहे त्यावरच शोधण्यायोग्य असेल. उदाहरणार्थ, जर तुमचा सबग्राफ आर्बिट्रम वन वर असेल, तर तुम्ही तो फक्त आर्बिट्रम वन वर एक्सप्लोररमध्ये शोधू शकता आणि तो इथरियमवर शोधू शकणार नाही. कृपया तुम्ही योग्य नेटवर्कवर असल्याची खात्री करण्यासाठी पृष्ठाच्या शीर्षस्थानी नेटवर्क स्विचरमध्ये तुम्ही आर्बिट्रम वन निवडले असल्याची खात्री करा. हस्तांतरणानंतर, L1 सबग्राफ बहिष्कृत म्हणून दिसेल.
+तुमचा सबग्राफ तो ज्या नेटवर्कवर प्रकाशित झाला आहे त्या नेटवर्कवरच शोधण्यायोग्य असेल. उदाहरणार्थ, तुमचा सबग्राफ आर्बिट्रम वन वर असेल, तर तो फक्त आर्बिट्रम वन वरील एक्सप्लोररमध्येच सापडेल; इथरियमवर तो सापडणार नाही. तुम्ही योग्य नेटवर्कवर आहात याची खात्री करण्यासाठी पृष्ठाच्या वरील नेटवर्क स्विचरमध्ये आर्बिट्रम वन निवडलेले आहे याची खात्री करा. हस्तांतरणानंतर L1 सबग्राफ बहिष्कृत (deprecated) म्हणून दिसेल.

-## माझा सबग्राफ हस्तांतरित करण्यासाठी प्रकाशित करणे आवश्यक आहे का?
+### माझा सबग्राफ हस्तांतरित करण्यासाठी प्रकाशित करणे आवश्यक आहे का?

-सबग्राफ ट्रान्सफर टूलचा लाभ घेण्यासाठी, तुमचा सबग्राफ आधीच इथरियम मेननेटवर प्रकाशित केलेला असणे आवश्यक आहे आणि सबग्राफच्या मालकीच्या वॉलेटच्या मालकीचे काही क्युरेशन सिग्नल असणे आवश्यक आहे. जर तुमचा सबग्राफ प्रकाशित झाला नसेल, तर तुम्ही फक्त Arbitrum One वर थेट प्रकाशित करण्याची शिफारस केली जाते - संबंधित गॅस फी खूपच कमी असेल. जर तुम्हाला प्रकाशित सबग्राफ हस्तांतरित करायचा असेल परंतु मालकाच्या खात्याने त्यावर कोणताही सिग्नल क्युरेट केलेला नसेल, तर तुम्ही त्या खात्यातून लहान रक्कम (उदा. 1 GRT) सिग्नल करू शकता; "स्वयं-स्थलांतर" सिग्नल निवडण्याची खात्री करा.
+सबग्राफ ट्रान्सफर टूलचा लाभ घेण्यासाठी तुमचा सबग्राफ आधीच इथरियम मेननेटवर प्रकाशित केलेला असणे आवश्यक आहे आणि सबग्राफच्या मालक वॉलेटकडे त्यावर काही क्युरेशन सिग्नल असणे आवश्यक आहे. तुमचा सबग्राफ प्रकाशित झालेला नसेल, तर तो थेट आर्बिट्रम वन वर प्रकाशित करण्याची शिफारस केली जाते - संबंधित गॅस फी लक्षणीयरीत्या कमी असेल. प्रकाशित सबग्राफ हस्तांतरित करायचा असेल, पण मालक खात्याने त्यावर कोणताही सिग्नल क्युरेट केलेला नसेल, तर तुम्ही त्या खात्यातून लहान रक्कम (उदा. 1 GRT) सिग्नल करू शकता; "auto-migrating" सिग्नल निवडल्याची खात्री करा.

-## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum?
+### मी आर्बिट्रमवर हस्तांतरण केल्यानंतर माझ्या सबग्राफच्या इथरियम मेननेट आवृत्तीचे काय होते?

-तुमचा सबग्राफ आर्बिट्रममध्ये हस्तांतरित केल्यानंतर, इथरियम मेननेट आवृत्ती नापसंत केली जाईल. आम्ही तुम्हाला तुमची क्वेरी URL 48 तासांच्या आत अपडेट करण्याची शिफारस करतो. तथापि, एक वाढीव कालावधी आहे जो तुमची मेननेट URL कार्यरत ठेवतो जेणेकरून कोणताही तृतीय-पक्ष dapp समर्थन अद्यतनित केला जाऊ शकतो.
+तुमचा सबग्राफ आर्बिट्रमवर हस्तांतरित केल्यानंतर, इथरियम मेननेट आवृत्ती बहिष्कृत केली जाईल. तुमची क्वेरी URL 48 तासांच्या आत अपडेट करण्याची शिफारस केली जाते. तथापि, तृतीय-पक्ष dapp मधील समर्थन अद्यतनित करता यावे यासाठी तुमची मेननेट URL काही वाढीव कालावधीसाठी कार्यरत ठेवली जाते.

-## मी हस्तांतरित केल्यानंतर, मला आर्बिट्रमवर पुन्हा प्रकाशित करण्याची देखील आवश्यकता आहे का?
+### मी हस्तांतरित केल्यानंतर, मला आर्बिट्रमवर पुन्हा प्रकाशित करण्याची देखील आवश्यकता आहे का?

-20 मिनिटांच्या ट्रान्सफर विंडोनंतर, तुम्हाला ट्रान्झॅक्शनसह ट्रान्सफरची पुष्टी करावी लागेल, परंतु ट्रान्सफर टूल तुम्हाला यामध्ये मार्गदर्शन करेल. तुमचा L1 एंडपॉइंट ट्रान्सफर विंडो दरम्यान सपोर्ट करत राहील आणि त्यानंतरच्या वाढीव कालावधीत. तुमच्यासाठी सोयीस्कर असेल तेव्हा तुम्ही तुमचा एंडपॉइंट अपडेट करावा असे प्रोत्साहन दिले जाते.
+20 मिनिटांच्या ट्रान्सफर विंडोनंतर, हस्तांतरण पूर्ण करण्यासाठी तुम्हाला UI मध्ये एका व्यवहाराद्वारे हस्तांतरणाची पुष्टी करावी लागेल; ट्रान्सफर टूल तुम्हाला यामध्ये मार्गदर्शन करेल. ट्रान्सफर विंडो आणि त्यानंतरच्या वाढीव कालावधीत तुमचा L1 एंडपॉइंट समर्थित राहील. तुमच्या सोयीनुसार तुम्ही तुमचा एंडपॉइंट अपडेट करणे अपेक्षित आहे.

-## Will there be down-time on my endpoint while republishing?
+### Will my endpoint experience downtime while re-publishing?

-तुमचा सबग्राफ L2 वर हलवण्‍यासाठी ट्रान्स्फर टूल वापरताना कोणताही डाउन टाइम नसावा. तुमचा L1 एंडपॉइंट ट्रान्स्फर विंडो आणि नंतर वाढीव कालावधी दरम्यान सपोर्ट करत राहील. तुमच्यासाठी सोयीस्कर असेल तेव्हा तुम्ही तुमचा एंडपॉइंट अपडेट करावा असे प्रोत्साहन दिले जाते.
+It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2.

-## प्रकाशन आणि आवृत्ती L2 वर Ethereum Ethereum mainnet प्रमाणेच आहे का?
+### L2 वर प्रकाशन आणि आवृत्तीकरण इथरियम मेननेटप्रमाणेच आहे का?

-होय. Subgraph Studio मध्ये प्रकाशित करताना तुमचे प्रकाशित नेटवर्क म्हणून Arbitrum One निवडण्याची खात्री करा. स्टुडिओमध्ये, नवीनतम एंडपॉइंट उपलब्ध असेल जो सबग्राफच्या नवीनतम अद्यतनित आवृत्तीकडे निर्देश करेल.
+Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph.

-## माझ्या सबग्राफचे क्युरेशन माझ्या सबग्राफसह हलवेल का?
+### माझ्या सबग्राफचे क्युरेशन माझ्या सबग्राफसोबत हलवले जाईल का?

-तुम्ही ऑटो-माइग्रेटिंग सिग्नल निवडले असल्यास, तुमचे स्वतःचे 100% क्युरेशन तुमच्या सबग्राफसह आर्बिट्रम वन वर हलवले जाईल. हस्तांतरणाच्या वेळी सर्व सबग्राफचे क्युरेशन सिग्नल GRT मध्ये रूपांतरित केले जातील आणि तुमच्या क्युरेशन सिग्नलशी संबंधित GRT L2 सबग्राफवर मिंट सिग्नल करण्यासाठी वापरला जाईल.
+तुम्ही ऑटो-माइग्रेटिंग सिग्नल निवडले असल्यास, तुमचे स्वतःचे 100% क्युरेशन तुमच्या सबग्राफसोबत आर्बिट्रम वन वर हलवले जाईल.
सबग्राफच्या सर्व स्थानांतरण संकेताच्या स्थानांतरणाच्या क्षणी जीआरटीत रूपांतरित केली जाईल, आणि आपल्या पुरवणीसंकेताशी संबंधित जीआरटी आपल्याला L2 सबग्राफवर संकेत वितरित करण्यासाठी वापरली जाईल. -इतर क्युरेटर निवडू शकतात की त्यांचा GRT चा अंश काढून घ्यायचा किंवा त्याच सबग्राफवरील मिंट सिग्नलला L2 मध्ये हस्तांतरित करा. +इतर क्युरेटर्सनी त्याच्या भागाची GRT वापरून घेण्याची किंवा त्याच्या सबग्राफवर सिग्नल मिंट करण्यासाठी त्याची GRT L2वर हस्तांतरित करण्याची परवानगी आहे. -## मी हस्तांतरित केल्यानंतर मी माझा सबग्राफ परत इथरियम मेननेटवर हलवू शकतो का? +### तुम्ही आपले सबग्राफ L2 वर हस्तांतरित केल्यानंतर पुन्हा Ethereum मुख्य नेटवर्कवर परत करू शकता का? -एकदा हस्तांतरित केल्यावर, या सबग्राफची तुमची इथरियम मेननेट आवृत्ती नापसंत केली जाईल. तुम्हाला मेननेटवर परत जायचे असल्यास, तुम्हाला मेननेटवर पुन्हा तैनात करणे आणि प्रकाशित करणे आवश्यक आहे. तथापि, Ethereum mainnet वर परत हस्तांतरित करणे जोरदारपणे निरुत्साहित आहे कारण अनुक्रमणिका बक्षिसे अखेरीस पूर्णपणे Arbitrum One वर वितरित केली जातील. +स्थानांतरित केल्यानंतर, आपल्या आर्बिट्रम वनवरच्या सबग्राफची एथेरियम मुख्यनेट आवृत्ती विकलप म्हणून दिली जाईल. आपल्याला मुख्यनेटवर परत जाण्याची इच्छा आहे किंवा, आपल्याला मुख्यनेटवर परत जाण्याची इच्छा आहे तर आपल्याला पुन्हा डिप्लॉय आणि प्रकाशित करण्याची आवश्यकता आहे. परंतु आर्बिट्रम वनवर परत गेल्याच्या बदलाच्या दिल्लाला मुख्यनेटवरील सूचना पूर्णपणे त्यात दिलेली आहे. -## माझे हस्तांतरण पूर्ण करण्यासाठी मला ब्रिज्ड ETH का आवश्यक आहे? +### माझे हस्तांतरण पूर्ण करण्यासाठी मला ब्रिज्ड ETH का आवश्यक आहे? -आर्बिट्रम वन वरील गॅस फी ब्रिज्ड ईटीएच (म्हणजे ईटीएच ज्याला आर्बिट्रम वनवर ब्रिज केले गेले आहे) वापरून दिले जाते. तथापि, इथरियम मेननेटच्या तुलनेत गॅस फी लक्षणीयरीत्या कमी आहे. +आर्बिट्रम वनवरील गॅस फीस आर्बिट्रम वनवर स्थानांतरित केलेल्या इथरियम (अर्थात Arbitrum One वर स्थानांतरित केलेल्या) ईटीएचवारे चुकतात. परंतु ते गॅस फीस अत्यंत खूप किमान आहेत त्यासाठी ज्यातरी ईथरियम मुख्यनेटवरच्या गॅस फीसेसारखे आहे. -## क्युरेशन सिग्नल +## प्रतिनिधी -## मी माझे क्युरेशन कसे हस्तांतरित करू? +### मी माझे प्रतिनिधी मंडळ कसे हस्तांतरित करू? -तुमचे क्युरेशन हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: + -1. इथरियम मेननेटवर सिग्नल ट्रान्सफर सुरू करा +तुमच्या प्रतिनियुक्तीची हस्तांतरण करण्यासाठी, तुम्हाला खालील प्रक्रिया पूर्ण करण्याची आवश्यकता आहे: -2. L2 क्युरेटर पत्ता निर्दिष्ट करा\* +1. Ethereum mainnet वर शिष्टमंडळ हस्तांतरण सुरू करा +2. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा +3. आर्बिट्रमवर प्रतिनिधी मंडळाच्या हस्तांतरणाची पुष्टी करा -3. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*आवश्यक असल्यास - म्हणजे तुम्ही कराराचा पत्ता वापरत आहात. +### मी इथरियम मेननेटवर खुल्या वाटपासह हस्तांतरण सुरू केल्यास माझ्या पुरस्कारांचे काय होईल? -## मी क्युरेट केलेला सबग्राफ L2 वर गेला असल्यास मला कसे कळेल? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. 
If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -सबग्राफ तपशील पृष्ठ पाहताना, एक बॅनर तुम्हाला सूचित करेल की हा सबग्राफ हस्तांतरित केला गेला आहे. तुम्ही तुमचे क्युरेशन हस्तांतरित करण्यासाठी सूचना फॉलो करू शकता. तुम्ही ही माहिती हलवलेल्या कोणत्याही सबग्राफच्या सबग्राफ तपशील पृष्ठावर देखील शोधू शकता. +### मी सध्या ज्या इंडेक्सरला नियुक्त करतो तो आर्बिट्रम वन वर नसल्यास काय होईल? -## मी माझे क्युरेशन L2 वर हलवू इच्छित नसल्यास काय करावे? +L2 स्थानांतरण साध्य असेल तेव्हा जर तुम्ही प्रतिनियुक्त केलेल्या इंडेक्सरने स्वतःच्या स्टेकला Arbitrum वर स्थानांतरित केलेल्या आहे, तर आपल्याला L2 स्थानांतरण साध्य होईल. -जेव्हा सबग्राफ नापसंत केला जातो तेव्हा तुम्हाला तुमचा सिग्नल मागे घेण्याचा पर्याय असतो. त्याचप्रमाणे, जर सबग्राफ L2 वर गेला असेल, तर तुम्ही Ethereum mainnet मध्ये तुमचे सिग्नल मागे घेणे किंवा L2 ला सिग्नल पाठवणे निवडू शकता. +### क्या स्थानिक लेखकों को अन्य इंडेक्सर को प्रतिष्ठित करने का विकल्प है? -## माझे क्युरेशन यशस्वीरित्या हस्तांतरित झाले हे मला कसे कळेल? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -L2 ट्रान्सफर टूल सुरू झाल्यानंतर अंदाजे 20 मिनिटांनंतर सिग्नल तपशील एक्सप्लोररद्वारे प्रवेशयोग्य असतील. +### जर मी L2 वर माझ्याला प्रतिष्ठित करणारे इंडेक्सर शोधू शकत नसल्यास काय होईल? -## मी माझे क्युरेशन एका वेळी एकापेक्षा जास्त सबग्राफवर हस्तांतरित करू शकतो का? +L2 हस्तांतरण साधन स्वतःच्या तंतून आपण आधीपासून विनिमय केलेल्या विनिमयकर्त्याला स्वत: प्रमाणे ओळखतो. -यावेळी मोठ्या प्रमाणात हस्तांतरण पर्याय नाही. +### तुम्ही नवीन किंवा विविध अनुक्रमणिकांकडून आपल्याच्या प्रतिनियुक्तीला मिश्रित व किंवा 'पसरवा' द्यायला सक्षम असाल का, किंवा आपल्याच्या अगोदरच्या अनुक्रमणिकाकडून? -## इंडेक्सर स्टेक +L2 स्थानांतरण उपकरण आपल्या देगा लगेच किंवा तुम्ही आत्ताच दिलेल्या इंडेक्सरला हे स्वत: स्थानांतरित करेल. एकदा आपल्याला L2 वर स्थानांतरित केलं आहे, तुम्ही देगा किंवा पुनर्मित्रीकरण करून, थॉइंग कालाची प्रतीक्षा करून, आणि तुम्हाला आपल्या देगा विभागण्याचा निर्णय घेण्याची संधी आहे. -## मी माझे स्टेक आर्बिट्रमला कसे हस्तांतरित करू? +### मी कूलडाउन कालावधीच्या अधीन आहे किंवा मी L2 प्रतिनिधी हस्तांतरण साधन वापरल्यानंतर लगेच पैसे काढू शकतो? -तुमचा हिस्सा हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: +स्थानांतरण साध्य करणारे टूल आपल्याला तुरंत L2 वर स्थानांतरित करण्याची परवाह देतो. जर आपल्याला पुन्हा प्रतिनियुक्ती करण्याची इच्छा असली तर आपल्याला थॉइंग पेरिअडसाठी प्रतीक्षा करावी लागते. परंतु, जर कोणत्याही इंडेक्सरने आपल्या सर्व स्टेकला L2 वर स्थानांतरित केलं आहे, तर आपल्याला Ethereum मुख्य नेटवरील तुरंत विथ्ड्रॉ करण्याची संधी आहे. -1. इथरियम मेननेटवर स्टेक ट्रान्सफर सुरू करा +### मी माझ्या प्रतिनिधींचे हस्तांतरण न केल्यास माझ्या पुरस्कारांवर नकारात्मक परिणाम होऊ शकतो का? -2. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा +आपल्याला वाटले आहे की भविष्यात सर्व नेटवर्क सहभाग आर्बिट्रम वनमध्ये होईल. -3. आर्बिट्रमवर स्टेक ट्रान्सफरची पुष्टी करा +### माझ्या प्रतिनियुक्तीची L2ला हस्तांतरिती किती वेळ लागतो? -\*लक्षात ठेवा की तुम्ही 7 दिवसांच्या आत हस्तांतरणाची पुष्टी केली पाहिजे अन्यथा तुमचा स्टेक गमावला जाऊ शकतो. बर्‍याच प्रकरणांमध्ये, ही पायरी आपोआप चालेल, परंतु आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास मॅन्युअल पुष्टीकरण आवश्यक असू शकते. या प्रक्रियेदरम्यान काही समस्या असल्यास, मदतीसाठी संसाधने असतील: support@thegraph.com वर किंवा [Discord](https://discord.gg/graphprotocol) वर समर्थनाशी संपर्क साधा. +A 20-minute confirmation is required for delegation transfer. 
Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## माझे सर्व स्टेक ट्रान्सफर होतील का? +### जर आपल्याला GRT वेस्टिंग कॉन्ट्रॅक्ट/टोकन लॉक वॉलेटचा वापर आहे, तर आपल्याला प्रतिनियुक्ती हस्तांतरित करण्याची क्षमता आहे का? -तुमचा किती हिस्सा हस्तांतरित करायचा हे तुम्ही निवडू शकता. तुम्ही तुमचे सर्व स्टेक एकाच वेळी हस्तांतरित करणे निवडल्यास, तुम्हाला प्रथम कोणतेही खुले वाटप बंद करावे लागेल. +होय! प्रक्रिया काही वेगवेगळी आहे कारण वेस्टिंग कॉन्ट्रॅक्ट्स L2 गॅससाठी आवश्यक असलेले ETH फॉरवर्ड करू शकत नाहीत, म्हणजे आपल्याला त्यापूर्वीक ठेवावं आवश्यक आहे. जर आपल्या वेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे वेस्टेड नसलेला आहे, तर आपल्याला पहिल्यांदाच L2 वरील वेस्टिंग कॉन्ट्रॅक्टवर एक सहकार्यक वेस्टिंग कॉन्ट्रॅक्ट सुरू करण्याची आवश्यकता आहे आणि तुम्ही फक्त हे ल2 वेस्टिंग कॉन्ट्रॅक्टला प्रतिनियुक्ती स्थानांतरित करण्याच्या सर्वोत्तम वेळी होईल. वेस्टिंग लॉक वॉलेट वापरून Explorer ला कनेक्ट केल्यास, Explorer वरील UI तुमच्याला हे प्रक्रियेत मार्गदर्शन करू शकतो. -तुम्‍ही अनेक व्‍यवहारांवर तुमच्‍या स्‍टेकचे काही भाग हस्‍तांतरित करण्‍याची योजना करत असल्‍यास, तुम्‍ही नेहमी समान लाभार्थी पत्ता नमूद करणे आवश्‍यक आहे. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -टीप: तुम्ही पहिल्यांदा ट्रान्सफर टूल वापरता तेव्हा तुम्ही L2 वर किमान स्टेक आवश्यकता पूर्ण केल्या पाहिजेत. इंडेक्सर्सनी किमान 100k GRT पाठवणे आवश्यक आहे (हे फंक्शन पहिल्यांदा कॉल करताना). L1 वर स्टेकचा काही भाग सोडल्यास, तो किमान 100k GRT पेक्षा जास्त असणे आवश्यक आहे आणि तुमचे खुले वाटप कव्हर करण्यासाठी पुरेसे (तुमच्या प्रतिनिधींसह) असणे आवश्यक आहे. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## आर्बिट्रममध्ये माझे स्टेक हस्तांतरण निश्चित करण्यासाठी मला किती वेळ लागेल? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* तुम्ही आर्बिट्रमवर स्टेक ट्रान्सफर पूर्ण करण्यासाठी तुमच्या व्यवहाराची पुष्टी करणे आवश्यक आहे. ही पायरी 7 दिवसांच्या आत पूर्ण करणे आवश्यक आहे अन्यथा स्टेक गमावला जाऊ शकतो. +### काही डेलिगेशन कर आहे का? -## माझ्याकडे खुले वाटप असल्यास काय? +नाही. L2 वर प्राप्त झालेले टोकन्स निर्दिष्ट डेलिगेटरच्या नावे घेतल्याने निर्दिष्ट इंडेक्सरवर डेलिगेट केले जातात आणि कोणत्याही डेलिगेशन करारक वर्गासाठी कर केले जात नाही. -तुम्ही तुमचा सर्व स्टेक पाठवत नसल्यास, L2 ट्रान्सफर टूल हे प्रमाणित करेल की किमान 100k GRT इथरियम मेननेटमध्ये राहते आणि तुमचे उर्वरित स्टेक आणि डेलिगेशन कोणत्याही खुल्या वाटपासाठी पुरेसे आहे. तुमची GRT शिल्लक किमान + ओपन ऍलोकेशन समाविष्ट करत नसल्यास तुम्हाला ओपन ऍलोकेशन बंद करावे लागेल. +### Will my unrealized rewards be transferred when I transfer my delegation? 
-## हस्तांतरण साधने वापरून, हस्तांतरण करण्यापूर्वी Ethereum mainnet वर अनस्टेक करण्यासाठी 28 दिवस प्रतीक्षा करणे आवश्यक आहे का? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -नाही, तुम्ही तुमचा स्टेक ताबडतोब L2 मध्ये हस्तांतरित करू शकता, हस्तांतरण साधन वापरण्यापूर्वी अनस्टेक करण्याची आणि प्रतीक्षा करण्याची गरज नाही. तुम्हाला Ethereum mainnet किंवा L2 वर तुमच्या वॉलेटमधील स्टेक परत घ्यायचा असेल तरच 28 दिवसांची प्रतीक्षा लागू होते. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## माझा हिस्सा हस्तांतरित करण्यासाठी किती वेळ लागेल? +### Is moving delegations to L2 mandatory? Is there a deadline? -L2 ट्रान्स्फर टूलला तुमचा स्टेक हस्तांतरित करणे पूर्ण होण्यासाठी अंदाजे 20 मिनिटे लागतील. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## मी माझा हिस्सा हस्तांतरित करण्यापूर्वी मला आर्बिट्रमवर इंडेक्स करावे लागेल का? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -इंडेक्सिंग सेट करण्यापूर्वी तुम्ही तुमचा स्टेक प्रभावीपणे हस्तांतरित करू शकता, परंतु जोपर्यंत तुम्ही L2 वर सबग्राफचे वाटप करत नाही, त्यांना इंडेक्स करत नाही आणि POI सादर करत नाही तोपर्यंत तुम्ही L2 वर कोणत्याही रिवॉर्डचा दावा करू शकणार नाही. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## मी माझा इंडेक्सिंग स्टेक हलवण्यापूर्वी प्रतिनिधी त्यांचे प्रतिनिधी हलवू शकतात का? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -नाही, प्रतिनिधींना त्यांचे प्रतिनिधी GRT आर्बिट्रमकडे हस्तांतरित करण्यासाठी, ते ज्या इंडेक्सरकडे प्रतिनिधीत्व करत आहेत ते L2 वर सक्रिय असणे आवश्यक आहे. +### I don't see a button to transfer my delegation. Why is that? -## मी GRT वेस्टिंग कॉन्ट्रॅक्ट/टोकन लॉक वॉलेट वापरत असल्यास मी माझा स्टेक ट्रान्सफर करू शकतो का? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -होय! प्रक्रिया थोडी वेगळी आहे, कारण वेस्टिंग कॉन्ट्रॅक्ट्स L2 गॅससाठी पैसे भरण्यासाठी आवश्यक असलेला ETH फॉरवर्ड करू शकत नाही, म्हणून तुम्हाला ते आधीच जमा करणे आवश्यक आहे. जर तुमचा वेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे निहित नसेल, तर तुम्हाला प्रथम L2 वर काउंटरपार्ट व्हेस्टिंग कॉन्ट्रॅक्ट देखील सुरू करावा लागेल आणि फक्त या L2 व्हेस्टिंग कॉन्ट्रॅक्टमध्ये स्टेक ट्रान्सफर करू शकाल. जेव्हा तुम्ही वेस्टिंग लॉक वॉलेट वापरून एक्सप्लोररशी कनेक्ट केले असेल तेव्हा एक्सप्लोररवरील UI तुम्हाला या प्रक्रियेद्वारे मार्गदर्शन करू शकते. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. 
​ -## शिष्टमंडळ +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -## मी माझे प्रतिनिधी मंडळ कसे हस्तांतरित करू? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -तुमचे प्रतिनिधीत्व हस्तांतरित करण्यासाठी, तुम्हाला खालील पायऱ्या पूर्ण कराव्या लागतील: +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -1. Ethereum mainnet वर शिष्टमंडळ हस्तांतरण सुरू करा +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + +## क्युरेशन सिग्नल + +### मी माझे क्युरेशन कसे हस्तांतरित करू? + +तुमच्या कुरेशनची हस्तांतरण करण्यासाठी, तुम्हाला खालील प्रक्रिया पूर्ण करण्याची आवश्यकता आहे: + +1. इथरियम मेननेटवर सिग्नल ट्रान्सफर सुरू करा + +2. L2 क्युरेटर पत्ता निर्दिष्ट करा\* + +3. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा + +\*आवश्यक असल्यास - उदा. तुम्ही एक कॉन्ट्रॅक्ट पत्ता वापरत आहात. + +### मी क्युरेट केलेला सबग्राफ L2 वर गेला असल्यास मला कसे कळेल? + +सबग्राफ तपशील पृष्ठाची पाहणी केल्यास, एक बॅनर आपल्याला सूचित करेल की हा सबग्राफ स्थानांतरित केलेला आहे. आपल्याला सुचवल्यास, आपल्या पुरवणीचे स्थानांतरण करण्यासाठी प्रॉम्प्ट अनुसरण करू शकता. आपल्याला ह्या माहितीला सापडण्याची किंवा स्थानांतरित केलेल्या कोणत्याही सबग्राफच्या तपशील पृष्ठावर मिळवू शकता. + +### मी माझे क्युरेशन L2 वर हलवू इच्छित नसल्यास काय करावे? + +कोणत्याही सबग्राफला प्राकृतिक रितीने प्रतिसादित केल्यानंतर, आपल्याला आपल्या सिग्नलला वापरून घेण्याची पर्वाह आहे. तसेच, आपल्याला जर सबग्राफ L2 वर हस्तांतरित केलेला असेल तर, आपल्याला आपल्या सिग्नलला ईथेरियम मेननेटवरून वापरून घेण्याची किंवा L2 वर सिग्नल पाठवण्याची पर्वाह आहे. + +### माझे क्युरेशन यशस्वीरित्या हस्तांतरित झाले हे मला कसे कळेल? + +L2 हस्तांतरण साधन सुरू केल्यानंतर, संकेत तपशील २० मिनिटांनंतर Explorer मध्ये पहिल्या दिशेने प्रवेशक्षम होईल. + +### किंवा तुम्ही एकापेक्षा अधिक सबग्राफवर एकावेळी आपल्या कुरेशनची हस्तांतरण करू शकता का? + +यावेळी मोठ्या प्रमाणात हस्तांतरण पर्याय नाही. + +## इंडेक्सर स्टेक + +### मी माझे स्टेक आर्बिट्रमला कसे हस्तांतरित करू? + +> अस्वीकरण: तुम्ही सध्या तुमच्या इंडेक्सरवर तुमच्या GRT चा कोणताही भाग अनस्टॅक करत असल्यास, तुम्ही L2 ट्रान्सफर टूल्स वापरू शकणार नाही. + + + +तुमचा हिस्सा हस्तांतरित करण्यासाठी, तुम्हाला खालील चरण पूर्ण करावे लागतील: + +1. इथरियम मेननेटवर स्टेक ट्रान्सफर सुरू करा 2. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा -3. आर्बिट्रमवर प्रतिनिधी मंडळाच्या हस्तांतरणाची पुष्टी करा +3. आर्बिट्रमवर स्टेक ट्रान्सफरची पुष्टी करा + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*\*\*\*तुम्ही आर्बिट्रमवर प्रतिनिधी हस्तांतरण पूर्ण करण्यासाठी व्यवहाराची पुष्टी करणे आवश्यक आहे. ही पायरी 7 दिवसांच्या आत पूर्ण करणे आवश्यक आहे अन्यथा प्रतिनिधिमंडळ गमावले जाऊ शकते. बर्‍याच प्रकरणांमध्ये, ही पायरी आपोआप चालेल, परंतु आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास मॅन्युअल पुष्टीकरण आवश्यक असू शकते. 
या प्रक्रियेदरम्यान काही समस्या असल्यास, मदतीसाठी संसाधने असतील: support@thegraph.com वर किंवा [Discord](https://discord.gg/graphprotocol) वर समर्थनाशी संपर्क साधा. +### सर्व माझ्या स्टेकची हस्तांतरण होईल का? -## मी इथरियम मेननेटवर खुल्या वाटपासह हस्तांतरण सुरू केल्यास माझ्या पुरस्कारांचे काय होईल? +आपल्याला आपल्या स्टेकच्या किती ट्रान्सफर करायचं तुमच्या स्वतःच्या निर्णयाक्रमाने घेतला जाऊ शकतो. जर आपण आपला संपूर्ण स्टेक एकदम ट्रान्सफर करण्याचा निर्णय घेतला तर, तुम्हाला सर्व उघड्या आवंटनसह पहिल्यांदा बंद करावा लागेल. -तुम्ही ज्या इंडेक्सरला सुपूर्द करत आहात तो अजूनही L1 वर कार्यरत असल्यास, जेव्हा तुम्ही आर्बिट्रममध्ये हस्तांतरित करता तेव्हा तुम्ही इथरियम मेननेटवरील खुल्या वाटपातून कोणतेही प्रतिनिधी पुरस्कार गमावाल. याचा अर्थ असा की तुम्ही, जास्तीत जास्त, शेवटच्या २८ दिवसांच्या कालावधीतील रिवॉर्ड गमावाल. जर तुम्ही इंडेक्सरने वाटप बंद केल्यानंतर लगेचच हस्तांतरणाची वेळ दिली तर तुम्ही हे सुनिश्चित करू शकता की ही किमान रक्कम आहे. तुमच्‍या इंडेक्सरसोबत संप्रेषण चॅनेल असल्‍यास, तुमच्‍या स्‍थानांतरणासाठी सर्वोत्तम वेळ शोधण्‍यासाठी त्‍यांच्‍याशी चर्चा करण्‍याचा विचार करा. +जर आपली विचारायला आहे की आपला स्टेक विभागांत काढण्याच्या किंवा मल्टिपल व्यवहारांत काढण्याच्या योजना आहे, तरीही आपल्याला नेहमीच समान लाभार्थ्य पत्ता स्पष्टपणे सूचित करायचं आवश्यक आहे. -## मी सध्या ज्या इंडेक्सरला नियुक्त करतो तो आर्बिट्रम वन वर नसल्यास काय होईल? +सूचना: आपल्याला पहिल्यांदा ट्रान्सफर टूल वापरण्याच्या वेळी L2 वरील किमान स्टेक आवश्यकता आहे. इंडेक्सर्सने (ही क्रिया पहिल्यांदा केल्याने) किमान 100 हजार GRT पाठवावं लागेल. L1 वर किंवा त्यातला किमान 100 हजार GRT आणि आपल्या सुनिवादांसह आपल्या उघड्या आवंटनांची कवर करणारी सापडणारी पर्याप्त स्टेक असावी. -L2 हस्तांतरण साधन केवळ तेव्हाच सक्षम केले जाईल जेव्हा तुम्ही नियुक्त केलेल्या इंडेक्सरने त्यांचा स्वतःचा हिस्सा आर्बिट्रमकडे हस्तांतरित केला असेल. +### आर्बिट्रममध्ये माझे स्टेक हस्तांतरण निश्चित करण्यासाठी मला किती वेळ लागेल? -## प्रतिनिधींना दुसर्‍या इंडेक्सरकडे सोपवण्याचा पर्याय आहे का? +\*\*\* आपल्याला आपल्या स्टेकची हस्तांतरण पूर्ण करण्यासाठी आपल्या व्यवहाराची पुष्टी करण्याची आवश्यकता आहे, ही पाने 7 दिवसांमध्ये पूर्ण केली पाहिजे किंवा स्टेक हरून जाऊ शकतो. -जर तुम्हाला दुसर्‍या इंडेक्सरकडे सोपवायचे असेल, तर तुम्ही आर्बिट्रमवर त्याच इंडेक्सरवर हस्तांतरित करू शकता, नंतर अस्वीकृत करा आणि वितळण्याच्या कालावधीची प्रतीक्षा करा. यानंतर, तुम्ही नियुक्त करण्यासाठी दुसरा सक्रिय इंडेक्सर निवडू शकता. +### माझ्याकडे खुले वाटप असल्यास काय? -## मी L2 वर ज्या इंडेक्सरला मी नियुक्त करत आहे तो मला सापडला नाही तर? +जर आपला स्टेक सर्व काढणार नसल्यास, L2 ट्रान्सफर टूल तपासणार आहे की किमान 100 हजार GRT ईथेरियम मेननेटवर अद्याप आहे आणि आपला शिल्लक स्टेक आणि सुनिवाद आपल्या उघड्या आवंटनांची कवर करणारी पर्याप्त आहे किंवा नाही, त्यातल्या न्यायमूलक आवंटने बंद करण्याची आवश्यकता आहे जर आपले GRT शिल्लक किमती + उघड्या आवंटनांची कवर करत नसल्यास. -L2 ट्रान्स्फर टूल आपोआप इंडेक्सर ओळखेल ज्यावर तुम्ही आधी नियुक्त केले होते. +### ट्रांसफर टूल्सचा वापर करताना, हस्तांतरण करण्यापूर्वी 28 दिवसांच्या काळापासून इथेरियम मुख्य नेटवर्कवरून अनस्टेक करणे आवश्यक आहे का? -## मी आधीच्या इंडेक्सरऐवजी नवीन किंवा अनेक इंडेक्सर्समध्ये माझे प्रतिनिधी मिक्स आणि मॅच किंवा 'स्प्रेड' करू शकेन का? +नाही, आपल्याला आपल्या स्थानांतरणासाठी स्थानांतरण उपकरणाचा वापर करण्यापूर्वी स्थानांतरित करण्याची आवश्यकता नाही. 28 दिवसांच्या प्रतिमानात, आपल्याला आपल्य्या वॉलेटवर स्थानांतरित करण्याच्या आवश्यकता आहे, ईथेरियम मुख्यनेट किंवा L2 वर. -L2 हस्तांतरण साधन नेहमी तुमचे प्रतिनिधीत्व त्याच इंडेक्सरवर हलवेल ज्याला तुम्ही पूर्वी नियुक्त केले होते. 
एकदा तुम्ही L2 वर गेल्यावर, तुम्ही अस्वीकृत करू शकता, वितळण्याच्या कालावधीची प्रतीक्षा करू शकता आणि तुम्हाला तुमच्या प्रतिनिधी मंडळाचे विभाजन करायचे आहे का ते ठरवू शकता. +### स्टेक हस्तांतरणासाठी किती वेळ लागेल? -## मी कूलडाउन कालावधीच्या अधीन आहे किंवा मी L2 प्रतिनिधी हस्तांतरण साधन वापरल्यानंतर लगेच पैसे काढू शकतो? +L2 स्थानांतरण उपकरणाने आपल्याच्या स्थानांतरणाच्या प्रक्रियेच्या पूर्णत्वाकरीता आणखी आधीपासून २० मिनिटे लागणार आहेत. -हस्तांतरण साधन तुम्हाला ताबडतोब L2 वर जाण्याची परवानगी देते. जर तुम्ही अस्वीकृत करू इच्छित असाल तर तुम्हाला वितळण्याच्या कालावधीची प्रतीक्षा करावी लागेल. तथापि, जर इंडेक्सरने त्यांचे सर्व स्टेक L2 मध्ये हस्तांतरित केले असतील, तर तुम्ही Ethereum mainnet वर त्वरित पैसे काढू शकता. +### माझ्या शेअर्स हस्तांतरित करण्यापूर्वी मला Arbitrum वर सूचीबद्ध करण्याची आवश्यकता आहे का? -## मी माझ्या प्रतिनिधींचे हस्तांतरण न केल्यास माझ्या पुरस्कारांवर नकारात्मक परिणाम होऊ शकतो का? +आपल्याला स्वारूपण ठरविण्यापूर्वीच आपले स्टेक प्रभावीपणे स्थानांतरित करू शकता, परंतु L2 वर कोणत्या उत्पादनाची मागणी करण्याची अनुमती नसेल तोंद, ते लागू करण्यास आपल्याला L2 वरील सबग्राफ्सला आवंटन देण्याची, त्यांची सूचीबद्धीकरण करण्याची आणि POIs प्रस्तुत करण्याची आवश्यकता आहे, ते तुम्ही L2 वर कोणत्याही प्रामोड पावण्याच्या पर्यायी नसेल. -असा अंदाज आहे की भविष्यात सर्व नेटवर्क सहभाग आर्बिट्रम वनमध्ये जाईल. +### मी माझा इंडेक्सिंग स्टेक हलवण्यापूर्वी प्रतिनिधी त्यांचे प्रतिनिधी हलवू शकतात का? -## माझ्या प्रतिनिधी मंडळाचे L2 मध्ये हस्तांतरण पूर्ण होण्यासाठी किती वेळ लागेल? +नाही, सुचवलेल्या Delegators ला आपल्या हस्तांतरित GRT ला Arbitrum वर त्यांच्या ध्येयकांना सहमती आहे, त्यासाठी त्यांच्याकडून L2 वर सक्रिय असणार्या अनुक्रमणिकेला आवश्यक आहे. -शिष्टमंडळ हस्तांतरणासाठी 20-मिनिटांची पुष्टी आवश्यक आहे. कृपया लक्षात ठेवा की 20-मिनिटांच्या कालावधीनंतर, तुम्ही परत यावे आणि 7 दिवसांच्या आत हस्तांतरण प्रक्रियेची 3 पायरी पूर्ण केली पाहिजे. आपण हे करण्यात अयशस्वी झाल्यास, आपले प्रतिनिधीत्व गमावले जाऊ शकते. लक्षात ठेवा की बहुतेक प्रकरणांमध्ये हस्तांतरण साधन तुमच्यासाठी ही पायरी आपोआप पूर्ण करेल. अयशस्वी स्वयं-प्रयत्नाच्या बाबतीत, तुम्हाला ते व्यक्तिचलितपणे पूर्ण करावे लागेल. या प्रक्रियेदरम्यान कोणतीही समस्या उद्भवल्यास, काळजी करू नका, आम्ही मदतीसाठी येथे आहोत: आमच्याशी support@thegraph.com किंवा [Discord](https://discord.gg/graphprotocol) वर संपर्क साधा. +### मी GRT वेस्टिंग कॉन्ट्रॅक्ट/टोकन लॉक वॉलेट वापरत असल्यास मी माझा स्टेक ट्रान्सफर करू शकतो का? -## मी GRT वेस्टिंग कॉन्ट्रॅक्ट/टोकन लॉक वॉलेट वापरत असल्यास मी माझे प्रतिनिधीत्व हस्तांतरित करू शकतो का? +होय! प्रक्रिया काही वेगवेगळी आहे, कारण वेस्टिंग कॉन्ट्रॅक्ट्स L2 गॅससाठी आवश्यक असलेले ETH प्रायोगिक नही जाऊ शकतात, म्हणजे आपल्याला त्यापूर्वीक ठेवावं आवश्यक आहे. आपल्या वेस्टिंग कॉन्ट्रॅक्ट आपल्याला पूर्णत: वेस्टेड नसल्यास, आपल्याला पहिल्यांदाच L2 वरील वेस्टिंग कॉन्ट्रॅक्टवर आपल्या वेस्टिंग कॉन्ट्रॅक्टला ट्रांसफर करण्याची आवश्यकता आहे आणि तुम्हाला हे L2 वेस्टिंग कॉन्ट्रॅक्ट सुरू करण्याची आवश्यकता आहे. Explorer वरील UI तुमच्याला हे प्रक्रियेत नेत्रुत्व करण्याच्या प्रक्रियेत मार्गदर्शन करू शकतो, जेथे आपल्याला वेस्टिंग लॉक वॉलेट वापरून Explorer ला कनेक्ट केल्यास. -होय! प्रक्रिया थोडी वेगळी आहे कारण वेस्टिंग कॉन्ट्रॅक्ट्स L2 गॅससाठी पैसे भरण्यासाठी आवश्यक असलेला ETH फॉरवर्ड करू शकत नाही, म्हणून तुम्हाला ते आधी जमा करणे आवश्यक आहे. जर तुमचा वेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे निहित नसेल, तर तुम्हाला प्रथम L2 वर काउंटरपार्ट व्हेस्टिंग कॉन्ट्रॅक्ट देखील सुरू करावे लागेल आणि केवळ या L2 व्हेस्टिंग कॉन्ट्रॅक्टमध्ये प्रतिनिधी हस्तांतरित करू शकाल. 
जेव्हा तुम्ही वेस्टिंग लॉक वॉलेट वापरून एक्सप्लोररशी कनेक्ट केले असेल तेव्हा एक्सप्लोररवरील UI तुम्हाला या प्रक्रियेद्वारे मार्गदर्शन करू शकते. +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -## काही डेलिगेशन कर आहे का? +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -क्र. L2 वर प्राप्त टोकन्स निर्दिष्ट प्रतिनिधीच्या वतीने निर्दिष्ट निर्देशांकाला प्रतिनिधी कर आकारल्याशिवाय दिले जातात. +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? + +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## वेस्टिंग कॉन्ट्रॅक्ट ट्रान्सफर -## मी माझा वेस्टिंग करार कसा हस्तांतरित करू? +### मी माझा वेस्टिंग करार कसा हस्तांतरित करू? -तुमची वेस्टिंग हस्तांतरित करण्यासाठी, तुम्हाला खालील पायऱ्या पूर्ण कराव्या लागतील: +तुमच्या वेस्टिंगची हस्तांतरण करण्यासाठी, तुम्हाला खालील प्रक्रिया पूर्ण करण्याची आवश्यकता आहे: -1. इथरियम मेननेटवर व्हेस्टिंग ट्रान्सफर सुरू करा +1. ईथेरियम मेननेटवर वेस्टिंग हस्तांतरण सुरू करा 2. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा -3. आर्बिट्रमवर वेस्टिंग ट्रान्सफरची पुष्टी करा +3. आर्बिट्रमवर वेस्टिंग ट्रांस्फरची पुष्टी करा + +### मी केवळ अंशतः निहित असल्यास माझा वेस्टिंग करार कसा हस्तांतरित करू? -## मी केवळ अंशतः निहित असल्यास माझा वेस्टिंग करार कसा हस्तांतरित करू? + -1. हस्तांतरण साधन करारामध्ये काही ETH जमा करा (UI वाजवी रकमेचा अंदाज लावण्यास मदत करू शकते) +1. हस्तांतरण साधन संविदाच्या एकांत्रीत थेट ETH जमा करा (UI एक उचित मात्रा आकलन करण्यात मदत करू शकते) -2. L2 वेस्टिंग लॉक सुरू करण्यासाठी L2 ला ट्रान्सफर टूल कॉन्ट्रॅक्टद्वारे काही लॉक केलेले GRT पाठवा. हे त्यांचे L2 लाभार्थी पत्ता देखील सेट करेल. +2. लॉक्ड GRT किंवा बंद झालेल्या GRTला स्थानांतरण उपकरण कॉन्ट्रॅक्टद्वारे L2ला पाठवा, ल2ला स्थानांतरित करण्यासाठी. ही प्रक्रिया त्यांना त्याच्या L2 वेस्टिंग लॉक ची सुरुवात करण्याचीची पर्याय देतेल. हे त्याच्या L2 लाभार्थ्याच्या पत्त्यासहही सेट करेल. -3. L1Staking करारातील "लॉक्ड" ट्रान्सफर टूल फंक्शनद्वारे त्यांचे स्टेक/डेलिगेशन L2 ला पाठवा. +3. त्यांनी आपल्या स्थानिकीकरण/अधिग्रहाची धार लॉक केल्याशिवाय, "लॉक्ड" हस्तांतरण साधने L1Staking कर्मचेरीतील कार्य करून L2ला पाठवा. -4. ट्रान्स्फर टूल कॉन्ट्रॅक्टमधून उर्वरित ETH मागे घ्या +4. कोणत्याही शिल्लक ETH बाकी आहे त्याच्या ट्रांस्फर टूल कॉन्ट्रॅक्टमधून विचला -## मी पूर्ण निहित असल्यास माझा वेस्टिंग करार कसा हस्तांतरित करू? +### मी पूर्ण निहित असल्यास माझा वेस्टिंग करार कसा हस्तांतरित करू? + + पूर्णपणे निहित असलेल्यांसाठी, प्रक्रिया समान आहे: -1. हस्तांतरण साधन करारामध्ये काही ETH जमा करा (UI वाजवी रकमेचा अंदाज लावण्यास मदत करू शकते) +1. हस्तांतरण साधन संविदाच्या एकांत्रीत थेट ETH जमा करा (UI एक उचित मात्रा आकलन करण्यात मदत करू शकते) -2. ट्रान्सफर टूल कॉन्ट्रॅक्टला कॉल करून तुमचा L2 पत्ता सेट करा +2. तुमचा L2 पत्ता ट्रांस्फर टूल कॉन्ट्रॅक्टला कॉल करून सेट करा -3. L1 स्टॅकिंग कॉन्ट्रॅक्टमधील "लॉक्ड" ट्रान्सफर टूल फंक्शनद्वारे तुमचा स्टेक/डेलिगेशन L2 ला पाठवा. +3. आपली शेअर/प्रतिनियुक्ती "लॉक" केलेल्या हस्तांतरण साधन फंक्शन्सद्वारे L1 Staking संविदातील L2 वर पाठवा. -4. ट्रान्स्फर टूल कॉन्ट्रॅक्टमधून उर्वरित ETH मागे घ्या +4. कोणत्याही शिल्लक ETH बाकी आहे त्याच्या ट्रांस्फर टूल कॉन्ट्रॅक्टमधून विचला -## मी माझा वेस्टिंग कॉन्ट्रॅक्ट आर्बिट्रमकडे हस्तांतरित करू शकतो का? +### मी माझा वेस्टिंग कॉन्ट्रॅक्ट आर्बिट्रमकडे हस्तांतरित करू शकतो का? 
-तुम्ही तुमच्या व्हेस्टिंग कॉन्ट्रॅक्टची GRT शिल्लक L2 मधील व्हेस्टिंग कॉन्ट्रॅक्टमध्ये ट्रान्सफर करू शकता. तुमच्या व्हेस्टिंग कॉन्ट्रॅक्टमधून L2 मध्ये स्टेक किंवा डेलिगेशन हस्तांतरित करण्यासाठी ही एक पूर्व शर्त आहे. वेस्टिंग कॉन्ट्रॅक्टमध्ये GRT ची शून्य रक्कम असणे आवश्यक आहे (आवश्यक असल्यास तुम्ही 1 GRT सारखी छोटी रक्कम हस्तांतरित करू शकता). +तुम्ही आपल्या वेस्टिंग कॉन्ट्रॅक्टच्या GRT शिल्लकाची L2 वरील एक वेस्टिंग कॉन्ट्रॅक्टला स्थानांतरित करू शकता. हे आपल्याला आपल्या वेस्टिंग कॉन्ट्रॅक्टकिंवा L2 वरील आपल्या स्थानांतरण किंवा प्रतिनियुक्तीसाठी अपायरीक्षक आहे. वेस्टिंग कॉन्ट्रॅक्टला एक अवैध GRT अकाउंट ठेवणे आवश्यक आहे (जर आवश्यक असल्यास, तुम्ही त्याच्याकडून 1 GRT असे एक सारखे किंवा त्याच्यापुढे स्थानांतरित करू शकता). -जेव्हा तुम्ही तुमच्या L1 व्हेस्टिंग कॉन्ट्रॅक्टमधून L2 मध्ये GRT हस्तांतरित करता, तेव्हा तुम्ही पाठवायची रक्कम निवडू शकता आणि तुम्हाला पाहिजे तितक्या वेळा तुम्ही हे करू शकता. तुम्ही पहिल्यांदा GRT हस्तांतरित करता तेव्हा L2 वेस्टिंग करार सुरू केला जाईल. +तुम्ही आपल्या L1 वेस्टिंग कॉन्ट्रॅक्टमधून L2 ला GRT स्थानांतरित करता, तुम्ही पाठविण्याच्या रक्कम निवडू शकता आणि तुम्हाला तुमच्याकडून जेव्हा आवश्यक असेल त्या अनेकदमी वेळा हे करू शकता. L2 वेस्टिंग कॉन्ट्रॅक्ट पहिल्यांदाच आपल्याला GRT स्थानांतरित करताना प्रारंभ केला जाईल. -ट्रान्सफर टूल वापरून हस्तांतरण केले जाते जे तुम्ही वेस्टिंग कॉन्ट्रॅक्ट खात्याशी कनेक्ट केल्यावर तुमच्या एक्सप्लोरर प्रोफाइलवर दिसेल. +हे स्थानांतरण वेस्टिंग कॉन्ट्रॅक्ट खात्याच्या संपर्क केल्याने दिलेल्या ट्रॅन्स्फर टूलच्या माध्यमाने केले जातात. जेव्हा आपल्याला वेस्टिंग कॉन्ट्रॅक्ट खात्याच्या संपर्काने कनेक्ट करता, तेव्हा आपल्या एक्सप्लोरर प्रोफाइलवर दिलेल्या ट्रॅन्स्फर टूलचा दृश्य होईल. -कृपया लक्षात घ्या की तुमचा करार पूर्णपणे निहित झाल्यावर तुमची वेस्टिंग टाइमलाइन संपेपर्यंत तुम्ही L2 व्हेस्टिंग कॉन्ट्रॅक्टमधून GRT रिलीझ/माघार घेऊ शकणार नाही. त्याआधी तुम्हाला GRT रिलीझ करायची असल्यास, तुम्ही GRT परत L1 वेस्टिंग कॉन्ट्रॅक्टमध्ये हस्तांतरित करू शकता जे त्या उद्देशासाठी उपलब्ध असलेले दुसरे हस्तांतरण साधन वापरून. +कृपया लक्षात घ्या कि आपल्याला आपल्य्या वेस्टिंग वेळापत्रिकेच्या शेवटी त्याच्या वेस्टेड असल्यावर तुम्ही L2 वेस्टिंग कॉन्ट्रॅक्टमधून GRT मोजण्याची किंवा विथ्ड्रॉ करण्याची क्षमता नसेल. जर आपल्याला त्यापूर्वी ग्रीटी छोडण्याची आवश्यकता असली तर आपण त्या उद्देशाने उपलब्ध आहे त्यासाठी इतर एक ट्रॅन्स्फर टूलचा वापर करू शकता, ज्याचा त्याच्यासाठी उपलब्ध आहे. -तुम्ही कोणतेही वेस्टिंग कॉन्ट्रॅक्ट बॅलन्स L2 मध्ये हस्तांतरित केले नसेल आणि तुमचा व्हेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे निहित असेल, तर तुम्ही तुमचा व्हेस्टिंग कॉन्ट्रॅक्ट L2 मध्ये हस्तांतरित करू नये. त्याऐवजी, तुम्ही L2 वॉलेट पत्ता सेट करण्यासाठी हस्तांतरण साधने वापरू शकता आणि L2 वर या नियमित वॉलेटमध्ये तुमचा हिस्सा किंवा प्रतिनिधी थेट हस्तांतरित करू शकता. +जर आपल्याला वेस्टिंग कॉन्ट्रॅक्टच्या शिल्लकाची L2 वर कोणतीही अकाउंट स्थानांतरित केली नाही आणि आपला वेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे वेस्टेड आहे, तर आपल्याला आपल्या वेस्टिंग कॉन्ट्रॅक्टला L2 वर स्थानांतरित करू नये. परंतु, तुम्ही आपल्याला L2 वरील एक सामान्य वॉलेटवर प्रतिनियुक्ती किंवा स्थानांतरण स्थापित करण्यासाठी ट्रॅन्सफर टूल्स वापरू शकता, आणि या साधारण वॉलेटवर तुमची स्थानांतरण किंवा प्रतिनियुक्ती सीधे L2 वर स्थानांतरित करू शकता. -## मी मेननेटवर स्टेक करण्यासाठी माझा वेस्टिंग कॉन्ट्रॅक्ट वापरत आहे. मी माझा हिस्सा आर्बिट्रमकडे हस्तांतरित करू शकतो का? +### मी मेननेटवर स्टेक करण्यासाठी माझा वेस्टिंग कॉन्ट्रॅक्ट वापरत आहे. मी माझा हिस्सा आर्बिट्रमकडे हस्तांतरित करू शकतो का? -होय, परंतु जर तुमचा करार अजूनही निहित असेल, तर तुम्ही फक्त स्टेक हस्तांतरित करू शकता जेणेकरून ते तुमच्या L2 व्हेस्टिंग कराराच्या मालकीचे असेल. 
एक्सप्लोररवरील व्हेस्टिंग कॉन्ट्रॅक्ट ट्रान्सफर टूल वापरून काही GRT शिल्लक हस्तांतरित करून तुम्ही प्रथम हा L2 करार सुरू करणे आवश्यक आहे. जर तुमचा करार पूर्णपणे निहित असेल, तर तुम्ही तुमचा स्टेक L2 मधील कोणत्याही पत्त्यावर हस्तांतरित करू शकता, परंतु तुम्ही ते आधीच सेट केले पाहिजे आणि L2 गॅसचे पैसे देण्यासाठी L2 हस्तांतरण साधनासाठी काही ETH जमा केले पाहिजेत. +होय, परंतु जर आपल्या कॉन्ट्रॅक्टला अद्याप वेस्टिंग आहे, तर आपण केवळ त्याच्याकडून स्थानांतरण करू शकता, ज्याची स्वामित्व आपल्या L2 वेस्टिंग कॉन्ट्रॅक्टच्या आहे. तुम्हाला सध्याच्या प्रक्रियेकरीता हे L2 कॉन्ट्रॅक्ट आपल्याला Explorer वरील वेस्टिंग कॉन्ट्रॅक्ट ट्रांसफर टूलच्या मदतीने केले पाहिजे. जर आपल्या कॉन्ट्रॅक्टला पूर्णपणे वेस्टेड आहे, तर तुम्ही तुमच्या L2 वरील किंवा किमान ETH ला L2 गॅससाठी भरून त्याच्यापुढे स्थापित करावं आवश्यक आहे आणि आपल्याला त्याच्याकडून स्थानांतरित करायला आवश्यक आहे. -## मी मेननेटवर प्रतिनिधी करण्यासाठी माझा वेस्टिंग कॉन्ट्रॅक्ट वापरत आहे. मी माझे प्रतिनिधी मंडळ आर्बिट्रमकडे हस्तांतरित करू शकतो का? +### मी मेननेटवर प्रतिनिधी करण्यासाठी माझा वेस्टिंग कॉन्ट्रॅक्ट वापरत आहे. मी माझे प्रतिनिधी मंडळ आर्बिट्रमकडे हस्तांतरित करू शकतो का? -होय, परंतु तुमचा करार अद्यापही निहित असल्यास, तुम्ही केवळ प्रतिनिधी हस्तांतरित करू शकता जेणेकरून ते तुमच्या L2 व्हेस्टिंग कराराच्या मालकीचे असेल. एक्सप्लोररवरील व्हेस्टिंग कॉन्ट्रॅक्ट ट्रान्सफर टूल वापरून काही GRT शिल्लक हस्तांतरित करून तुम्ही प्रथम हा L2 करार सुरू करणे आवश्यक आहे. जर तुमचा करार पूर्णपणे निहित असेल, तर तुम्ही तुमचे प्रतिनिधीमंडळ L2 मधील कोणत्याही पत्त्यावर हस्तांतरित करू शकता, परंतु तुम्ही ते आधीच सेट केले पाहिजे आणि L2 गॅसचे पैसे देण्यासाठी L2 हस्तांतरण साधनासाठी काही ETH जमा करणे आवश्यक आहे. +होय, परंतु जर आपल्याच्या कॉन्ट्रॅक्टला अद्याप वेस्टिंग आहे, तर आपण केवळ त्याच्याकडून प्रतिनियुक्ती स्थानांतरित करू शकता, ज्याची स्वामित्व आपल्या L2 वेस्टिंग कॉन्ट्रॅक्टच्या आहे. तुम्हाला सध्याच्या प्रक्रियेकरीता हे L2 कॉन्ट्रॅक्ट आपल्याला Explorer वरील वेस्टिंग कॉन्ट्रॅक्ट ट्रांसफर टूलच्या मदतीने केले पाहिजे. जर आपल्या कॉन्ट्रॅक्टला पूर्णपणे वेस्टेड आहे, तर तुम्ही तुमच्या L2 वरील किंवा किमान ETH ला L2 गॅससाठी भरून त्याच्यापुढे स्थापित करावं आवश्यक आहे आणि आपल्याला त्याच्याकडून स्थानांतरित करायला आवश्यक आहे. -## L2 वर माझ्या वेस्टिंग करारासाठी मी वेगळा लाभार्थी निर्दिष्ट करू शकतो का? +### L2 वर माझ्या वेस्टिंग करारासाठी मी वेगळा लाभार्थी निर्दिष्ट करू शकतो का? -होय, जेव्हा तुम्ही पहिल्यांदा शिल्लक हस्तांतरित करता आणि तुमचा L2 वेस्टिंग करार सेट करता तेव्हा तुम्ही L2 लाभार्थी निर्दिष्ट करू शकता. हे लाभार्थी एक वॉलेट आहे जे आर्बिट्रम वन वर व्यवहार करू शकते याची खात्री करा, म्हणजे तो एक EOA किंवा आर्बिट्रम वन वर तैनात केलेला मल्टीसिग असावा. +होय, पहिल्यांदाच आपल्याच्या शिल्लकाच्या स्थानांतरित करण्याच्या वेळी आपल्याला आपल्या L2 वेस्टिंग कॉन्ट्रॅक्टमध्ये बॅनेफिशरचा स्पष्ट उल्लेख करू शकता. कृपया खाते निर्वाचित केला पाहिजे आणि खाता Arbitrum One वर व्यवहार करू शकतो, अर्थात ते एक EOA किंवा Arbitrum One वर डिप्लॉय केलेल्या मल्टीसिगच्या एक महत्त्वाच्या काम करू शकतो. -जर तुमचा करार पूर्णपणे निहित असेल, तर तुम्ही L2 वर व्हेस्टिंग कॉन्ट्रॅक्ट सेट करणार नाही; त्याऐवजी, तुम्ही L2 वॉलेट पत्ता सेट कराल आणि आर्बिट्रमवरील तुमच्या स्टेक किंवा डेलिगेशनसाठी हे प्राप्त करणारे वॉलेट असेल. +जर आपल्या कॉन्ट्रॅक्टला पूर्णपणे वेस्टेड आहे, तर आपल्याला L2 वर वेस्टिंग कॉन्ट्रॅक्ट स्थापित करायला हवाच नाही; त्यामुळे, आपल्याला एल 2 वॉलेट पत्ता सेट करायला हवा आणि त्याचं अर्बिट्रमवर आपल्या स्टेक किंवा प्रतिनियुक्तीसाठी प्राप्त केलेल्या वॉलेट होईल. -## माझा करार पूर्णपणे निहित आहे. मी माझा स्टेक किंवा डेलिगेशन L2 वेस्टिंग कॉन्ट्रॅक्ट नसलेल्या दुसर्‍या पत्त्यावर हस्तांतरित करू शकतो का? +### माझा करार पूर्णपणे निहित आहे. 
मी माझा स्टेक किंवा डेलिगेशन L2 वेस्टिंग कॉन्ट्रॅक्ट नसलेल्या दुसर्‍या पत्त्यावर हस्तांतरित करू शकतो का? -होय. तुम्ही कोणतेही वेस्टिंग कॉन्ट्रॅक्ट बॅलन्स L2 मध्ये हस्तांतरित केले नसेल आणि तुमचा व्हेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे निहित असेल, तर तुम्ही तुमचा व्हेस्टिंग कॉन्ट्रॅक्ट L2 मध्ये हस्तांतरित करू नये. त्याऐवजी, तुम्ही L2 वॉलेट पत्ता सेट करण्यासाठी हस्तांतरण साधने वापरू शकता आणि L2 वर या नियमित वॉलेटमध्ये तुमचा हिस्सा किंवा प्रतिनिधी थेट हस्तांतरित करू शकता. +होय. जर आपल्याला कोणतेही वेस्टिंग कॉन्ट्रॅक्ट शिल्लक L2 वर स्थानांतरित केलेले नाहीत आणि आपल्याचा वेस्टिंग कॉन्ट्रॅक्ट पूर्णपणे वेस्टेड आहे, तर आपल्याला आपल्या वेस्टिंग कॉन्ट्रॅक्टच्या L2 वर स्थानांतरित करू नये. परंतु, तुम्ही त्यापूर्वी L2 वरील एक साधारण वॉलेट पत्ता सेट करण्याच्या ट्रॅन्सफर टूल्स वापरून स्थानांतरण करू शकता, आणि या साधारण वॉलेटवर तुमची स्थानांतरण किंवा प्रतिनियुक्ती सीधे L2 वर स्थानांतरित करू शकता. -हे तुम्हाला तुमचा स्टेक किंवा डेलिगेशन कोणत्याही L2 पत्त्यावर हस्तांतरित करण्यास अनुमती देते. +हे आपल्याला आपल्या स्टेक किंवा डिलिगेशनला कोणत्याही L2 पत्त्यावर हस्तांतरित करण्याची परवानगी देते. -## माझा व्हेस्टिंग कॉन्ट्रॅक्ट अजूनही व्हेस्टिंग आहे. मी माझी व्हेस्टिंग कॉन्ट्रॅक्ट शिल्लक L2 मध्ये कशी हस्तांतरित करू? +### माझ्या वेस्टिंग करारपत्राची किंमत अद्याप वेस्टिंग होत आहे. माझ्या वेस्टिंग करारपत्राची शिल्लक शिल्लक कसे L2 वर हस्तांतरित करू शकतो? -जर तुमचा करार अजूनही निहित असेल किंवा तुमचा करार अजूनही निहित होता तेव्हा तुम्ही ही प्रक्रिया वापरली असेल तरच या पायऱ्या लागू होतात. +या क्रियांचे अर्ज केवळ तेव्हा लागू होतात जेव्हा आपला करार अजिबात दिलेला आहे, किंवा जेव्हा आपल्याला आपल्या कराराची वेस्टिंग सुरू आहे तेव्हा पुन्हा या प्रक्रियेचा वापर केला आहे. -तुमचा व्हेस्टिंग कॉन्ट्रॅक्ट L2 मध्ये ट्रान्सफर करण्यासाठी, तुम्ही ट्रान्सफर टूल्स वापरून कोणतीही GRT बॅलन्स L2 ला पाठवाल, ज्यामुळे तुमचा L2 व्हेस्टिंग कॉन्ट्रॅक्ट सुरू होईल: +तुमच्या वेस्टिंग करारपत्राचा L2 वर हस्तांतर करण्यासाठी, तुम्ही हस्तांतरण साधने वापरून लोटसपूर्ण L2 वर GRT शिल्लक हस्तांतरित करणार, ज्यामुळे तुमचा L2 वेस्टिंग करारपत्र प्रारंभ होईल: -1. ट्रान्सफर टूल कॉन्ट्रॅक्टमध्ये काही ETH जमा करा (हे L2 गॅससाठी पैसे देण्यासाठी वापरले जाईल) +1. हस्तांतरण साधनात ETH किंमती जमा करा (ह्यामुळे L2 गॅससाठी पैसे देण्यात येईल) -2. व्हेस्टिंग कॉन्ट्रॅक्टचा प्रोटोकॉल प्रवेश रद्द करा (पुढील चरणासाठी आवश्यक) +2. प्रोटोकॉलला वेस्टिंग करारक्षमतेसाठी प्रवेश सुरक्षित करा (पुढील पायर्या साठी आवश्यक) -3. व्हेस्टिंग कॉन्ट्रॅक्टला प्रोटोकॉल ऍक्सेस द्या (तुमच्या कॉन्ट्रॅक्टला ट्रान्सफर टूलशी संवाद साधण्याची अनुमती देईल) +3. करारपत्राच्या प्रोटोकॉलला प्रवेश परवानगी द्या (ह्यामुळे आपल्या करारपत्राने हस्तांतरण साधने संवाद साधू शकतील) -4. L2 लाभार्थीचा पत्ता निर्दिष्ट करा\* आणि Ethereum mainnet वर शिल्लक हस्तांतरण सुरू करा +4. L2 लाभार्थ्याचा पत्ता सूचीत करा\* आणि इथे Ethereum मुख्यनेटवर शिल्लक हस्तांतरण प्रारंभ करा 5. पुष्टीकरणासाठी 20 मिनिटे प्रतीक्षा करा 6. L2 वर शिल्लक हस्तांतरणाची पुष्टी करा -\*आवश्यक असल्यास - म्हणजे तुम्ही कराराचा पत्ता वापरत आहात. +\*आवश्यक असल्यास - उदा. तुम्ही एक कॉन्ट्रॅक्ट पत्ता वापरत आहात. + +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? 
+ +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. -\*\*\*\*तुम्ही आर्बिट्रमवर शिल्लक हस्तांतरण पूर्ण करण्यासाठी तुमच्या व्यवहाराची पुष्टी करणे आवश्यक आहे. ही पायरी ७ दिवसांच्या आत पूर्ण करणे आवश्यक आहे अन्यथा शिल्लक गमावली जाऊ शकते. बर्‍याच प्रकरणांमध्ये, ही पायरी आपोआप चालेल, परंतु आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास मॅन्युअल पुष्टीकरण आवश्यक असू शकते. या प्रक्रियेदरम्यान काही समस्या असल्यास, मदतीसाठी संसाधने असतील: support@thegraph.com वर किंवा [Discord](https://discord.gg/graphprotocol) वर समर्थनाशी संपर्क साधा. +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. -## मी माझा वेस्टिंग कॉन्ट्रॅक्ट परत L1 वर हलवू शकतो का? +### मी माझा वेस्टिंग कॉन्ट्रॅक्ट परत L1 वर हलवू शकतो का? -असे करण्याची गरज नाही कारण तुमचा वेस्टिंग कॉन्ट्रॅक्ट अजूनही L1 मध्ये आहे. जेव्हा तुम्ही हस्तांतरण साधने वापरता, तेव्हा तुम्ही फक्त L2 मध्ये एक नवीन करार तयार करता जो तुमच्या L1 व्हेस्टिंग कॉन्ट्रॅक्टशी जोडलेला असतो आणि तुम्ही या दोघांमध्ये GRT पाठवू शकता. +तुम्हाला तो करावयाचं आवश्यक नाही कारण आपला वेस्टिंग कॉन्ट्रॅक्ट अद्याप L1 वर आहे. जेव्हा आपण ट्रॅन्सफर टूल्स वापरता, तेव्हा आपल्याला फक्त आपल्या L1 वेस्टिंग कॉन्ट्रॅक्टसह संबंधित असलेल्या L2 ला एक नवीन कॉन्ट्रॅक्ट तयार करावा लागतो, आणि तुम्ही त्याच्याकडून GRT दोन्हेकऱ्यांच्या वर्गाच्या किंवा लक्षात घेण्याच्या किंवा त्याच्यामध्ये पाठवू शकता. -## मला माझ्या व्हेस्टिंग कॉन्ट्रॅक्टला सुरुवात करण्यासाठी हलवण्याची गरज का आहे? +### प्रारंभ करण्याच्या संदर्भात माझ्या वेस्टिंग करारक्षमतेची हातभार का आवश्यक आहे? -तुम्हाला L2 वेस्टिंग कॉन्ट्रॅक्ट सेट अप करणे आवश्यक आहे जेणेकरून हे खाते L2 वर तुमचा स्टेक किंवा डेलिगेशन घेऊ शकेल. अन्यथा, तुमचा हिस्सा/प्रतिनिधी L2 कडे हस्तांतरित करण्याचा कोणताही मार्ग वेस्टिंग करारापासून "पळून" न जाता. +तुम्हाला त्याच्याकडून स्थानांतरित करायला, आपल्याला तुमचा L2 वेस्टिंग कॉन्ट्रॅक्ट स्थापित करायला हवं आणि या खात्याच्या माध्यमाने आपल्याला L2 वर आपली स्थानांतरण किंवा प्रतिनियुक्ती असण्याची किंमत वाढवू लागेल. अन्यथा, तुमच्या वेस्टिंग कॉन्ट्रॅक्टला "अच्छादित" करण्याचा कोणताही मार्ग नसेल. 
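The "0 GRT" answer above describes manually sending a small amount of GRT to the vesting contract so that the L2 Transfer Tools have a nonzero balance to work with. A minimal, illustrative sketch of such an ERC-20 transfer with ethers v6; the RPC URL, private key, GRT token address, and vesting contract address below are placeholders you would substitute, and sending from an exchange or another wallet as described above works just as well.

```typescript
import { Contract, JsonRpcProvider, Wallet, parseUnits } from "ethers";

// Placeholder values: fill in your own RPC URL, a funded wallet key,
// the GRT token contract address, and your L1 vesting contract address.
const provider = new JsonRpcProvider("https://eth-mainnet.example/rpc");
const signer = new Wallet(process.env.PRIVATE_KEY!, provider);
const GRT_TOKEN = "0x...";        // GRT ERC-20 token contract on Ethereum mainnet
const VESTING_CONTRACT = "0x..."; // your token-lock / vesting contract address

// Minimal ERC-20 ABI: only the transfer function is needed here.
const erc20Abi = ["function transfer(address to, uint256 amount) returns (bool)"];

async function topUpVestingContract(): Promise<void> {
  const grt = new Contract(GRT_TOKEN, erc20Abi, signer);
  // GRT uses 18 decimals, so "1" GRT is parsed accordingly.
  const tx = await grt.transfer(VESTING_CONTRACT, parseUnits("1", 18));
  console.log(`Sent 1 GRT to the vesting contract, tx: ${tx.hash}`);
  await tx.wait();
  console.log("Transfer confirmed; the L2 Transfer Tool should now see a nonzero balance.");
}

topUpVestingContract().catch(console.error);
```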
-## माझे करार अंशतः निहित असताना मी पैसे काढण्याचा प्रयत्न केल्यास काय होईल? हे शक्य आहे का? +### पूर्णपणे वेस्टेड नसलेल्या काराराची आपल्याला पुरस्कृत केल्यास ती काय होईल? कोणत्या प्रकारे काढू शकता? -ही शक्यता नाही. तुम्ही निधी परत L1 मध्ये हलवू शकता आणि ते तेथे काढू शकता. +हे संभाव्य नाही. तुम्ही धन वापस L1वर पाठवू शकता आणि त्यावर ते काढू शकता. -## मला माझा वेस्टिंग कॉन्ट्रॅक्ट L2 वर हलवायचा नसेल तर? +### मला माझा वेस्टिंग कॉन्ट्रॅक्ट L2 वर हलवायचा नसेल तर? -तुम्ही L1 वर स्टॅकिंग/डेलिगेट करत राहू शकता. कालांतराने, तुम्ही Arbitrum वर प्रोटोकॉल स्केल म्हणून रिवॉर्ड्स सक्षम करण्यासाठी L2 वर जाण्याचा विचार करू शकता. लक्षात घ्या की ही हस्तांतरण साधने व्हेस्टिंग कॉन्ट्रॅक्ट्ससाठी आहेत ज्यांना प्रोटोकॉलमध्ये भाग घेण्याची आणि नियुक्त करण्याची परवानगी आहे. जर तुमचा करार स्टॅकिंग किंवा डेलिगेटिंगला परवानगी देत नाही किंवा रद्द करण्यायोग्य असेल, तर कोणतेही हस्तांतरण साधन उपलब्ध नाही. उपलब्ध असताना तुम्ही तुमचा GRT L1 वरून काढू शकता. +तुम्ही L1 वर स्टेकिंग/डेलीगेटिंग केल्यामुळे ठेवू शकता. वेळेच्या वेळी, तुम्ही अर्बिट्रमवर प्रोटोकॉलला मोजण्यासाठी L2 ला स्थानांतरित करण्याची लक्षात घेऊ शकता, कारण प्रोटोकॉल अर्बिट्रमवर स्थानिक आसापास वाढत आहे. नक्की करा की या स्थानांतरण टूल्स वेस्टिंग कॉन्ट्रॅक्टसाठी आहेत ज्यामध्ये प्रोटोकॉलमध्ये स्टेकिंग किंवा डेलीगेटिंग करण्याची परवानगी आहे. जर आपल्या कॉन्ट्रॅक्टला स्टेकिंग किंवा डेलीगेटिंग करण्याची किंमत नसली तर न येथे नक्की ट्रांसफर टूल उपलब्ध आहे. तुम्हाला आपल्या लाभल्यास तुम्ही L1 वरून तुमच्या GRT अचलीस करू शकता. diff --git a/website/pages/mr/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/mr/arbitrum/l2-transfer-tools-guide.mdx index 2e5be6b4752d..7bc557e33b06 100644 --- a/website/pages/mr/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/mr/arbitrum/l2-transfer-tools-guide.mdx @@ -1,50 +1,50 @@ --- -title: L2 हस्तांतरण साधने मार्गदर्शक +title: L2 Transfer Tools Guide --- -> L2 ट्रान्सफर टूल्स अजून रिलीझ झालेले नाहीत. ते 2023 च्या उन्हाळ्यात उपलब्ध होतील अशी अपेक्षा आहे. - ग्राफने Arbitrum One वर L2 वर जाणे सोपे केले आहे. प्रत्येक प्रोटोकॉल सहभागीसाठी, सर्व नेटवर्क सहभागींसाठी L2 मध्ये हस्तांतरण अखंडपणे करण्यासाठी L2 हस्तांतरण साधनांचा संच आहे. तुम्ही काय हस्तांतरित करत आहात त्यानुसार या साधनांसाठी तुम्हाला चरणांच्या विशिष्ट संचाचे अनुसरण करणे आवश्यक आहे. या साधनांबद्दलच्या काही वारंवार प्रश्नांची उत्तरे [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq) मध्ये दिली आहेत. FAQ मध्ये साधने कशी वापरायची, ते कसे कार्य करतात आणि ते वापरताना लक्षात ठेवण्यासारख्या गोष्टींचे सखोल स्पष्टीकरण असते. ## तुमचा सबग्राफ आर्बिट्रम (L2) वर कसा हस्तांतरित करायचा + + ## तुमचे सबग्राफ हस्तांतरित करण्याचे फायदे -ग्राफचा समुदाय आणि मुख्य devs गेल्या वर्षभरात आर्बिट्रममध्ये जाण्यासाठी [तयारी करत आहेत](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). आर्बिट्रम, एक लेयर 2 किंवा "L2" ब्लॉकचेन, इथरियमकडून सुरक्षितता वारशाने मिळवते परंतु गॅसचे शुल्क खूपच कमी करते. +मागील वर्षापासून, The Graph चे समुदाय आणि मुख्य डेव्हलपर [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)करीत होते त्याच्या गोष्टीसाठी आर्बिट्रमवर जाण्याची. आर्बिट्रम, एक श्रेणी 2 किंवा "L2" ब्लॉकचेन, ईथेरियमकिडून सुरक्षा अनुभवतो परंतु काही लोअर गॅस फी प्रदान करतो. -जेव्हा आपण आपला सबग्रॅफ द ग्राफ नेटवर्कवर प्रकाशित करता आहात किंवा अद्यतित करता, तेव्हा आपलं स्मार्ट कॉन्ट्रॅक्टस संरचनासाठी प्रोटोकॉलवर अंतरक्रिया करता आहात आणि यासाठी ईथरियमवर गॅस वापरावे लागतं आहे. आपलं सबग्रॅफ आर्बिट्रमवर स्थानांतरित करण्यासारखं त्याचं भविष्यातील कोणताही अद्यतित करण्यासाठी खूप कमी गॅस फी आवश्यक असेल. 
या कमी फी आणि L2वरील क्युरेशन बॉन्डिंग कर्व्स समतलं असलंयाने, इतर क्युरेटर्सला आपल्या सबग्रॅफवर क्युरेट करणे सोपे होतं, ज्यामुळे आपल्या सबग्रॅफवर इंडेक्सर्सला अधिक पुरस्कार मिळतात. हे खर्चकमी वातावरण इंडेक्सर्सला सबग्रॅफवर इंडेक्स करण्यास आणि सेव करण्यासह खूप सोपं करतं. अर्बिट्रमवर इंडेक्सिंग प्रतिसाद आणि ईथेरियम मेननेटवर कमी होतं जातील, यामुळे अधिक आणि अधिक इंडेक्सर्सला त्यांचा भागांतर करण्याचं व त्यांच्या ऑपरेशन ल2वर सेट करण्याचं जातंय.
+When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. Because your subgraph is on Arbitrum, any future updates to it will require much less gas. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2.
## सिग्नल, तुमचा L1 सबग्राफ आणि क्वेरी URL सह काय होते हे समजून घेणे
-सबग्राफ आर्बिट्रमला हस्तांतरित करताना आर्बिट्रम GRT ब्रिजचा वापर केला जातो, जो पर्यायाने L2 ला सबग्राफ पाठवण्यासाठी मूळ आर्बिट्रम ब्रिज वापरतो. "हस्तांतरण" मेननेटवरील सबग्राफचे अवमूल्यन करेल आणि ब्रिज वापरून L2 वर सबग्राफ पुन्हा तयार करण्यासाठी माहिती पाठवेल. यामध्ये सबग्राफ मालकाचा सिग्नल केलेला GRT देखील समाविष्ट असेल, जे हस्तांतरण स्वीकारण्यासाठी पुलासाठी शून्यापेक्षा जास्त असणे आवश्यक आहे.
+Transferring the subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signalled GRT, which must be more than zero for the bridge to accept the transfer.
-तुम्ही सबग्राफ हस्तांतरित करणे निवडता तेव्हा, हे सबग्राफचे सर्व क्युरेशन सिग्नल GRT मध्ये रूपांतरित करेल. हे मेननेटवरील सबग्राफ "नापस्य" करण्यासारखे आहे. तुमच्या क्युरेशनशी संबंधित GRT सबग्राफसह L2 वर पाठवला जाईल, जिथे ते तुमच्या वतीने सिग्नल देण्यासाठी वापरले जातील.
+When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where it will be used to mint signal on your behalf.
-इतर क्युरेटर निवडू शकतात की त्यांचा GRT चा अंश काढून घ्यायचा किंवा त्याच सबग्राफवरील मिंट सिग्नलला L2 मध्ये हस्तांतरित करा. जर सबग्राफ मालकाने त्यांचा सबग्राफ L2 वर हस्तांतरित केला नाही आणि कॉन्ट्रॅक्ट कॉलद्वारे तो व्यक्तिचलितपणे नापसंत केला, तर क्युरेटर्सना सूचित केले जाईल आणि ते त्यांचे क्युरेशन मागे घेण्यास सक्षम असतील.
+Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, Curators will be notified and will be able to withdraw their curation.
-सबग्राफ हस्तांतरित होताच, सर्व क्युरेशन GRT मध्ये रूपांतरित केल्यामुळे, इंडेक्सर्सना यापुढे सबग्राफ अनुक्रमित करण्यासाठी पुरस्कार मिळणार नाहीत. तथापि, असे इंडेक्सर्स असतील जे 1) ट्रान्सफर केलेले सबग्राफ 24 तास देत राहतील आणि 2) लगेच L2 वर सबग्राफ इंडेक्स करणे सुरू करतील. या इंडेक्सर्सकडे आधीपासून सबग्राफ इंडेक्स केलेला असल्याने, सबग्राफ सिंक होण्याची प्रतीक्षा करण्याची गरज नाही आणि L2 सबग्राफची चौकशी करणे शक्य होईल.
+Once the subgraph is transferred, since all the curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately.
-L2 सबग्राफसाठी क्वेरी वेगळ्या URL वर करणे आवश्यक आहे (`arbitrum-gateway.thegraph.com` वर), परंतु L1 URL किमान 48 तास काम करत राहील. त्यानंतर, L1 गेटवे L2 गेटवेकडे (काही काळ) क्वेरी फॉरवर्ड करेल, परंतु यामुळे लेटन्सी वाढेल त्यामुळे तुमच्या सर्व क्वेरी शक्य तितक्या लवकर नवीन URL वर स्विच करण्याची शिफारस केली जाते.
+Queries to the L2 subgraph will need to be made to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will keep working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency, so it is recommended to switch all your queries to the new URL as soon as possible.
## तुमचे L2 वॉलेट निवडत आहे
-जेव्हा तुम्ही तुमचा सबग्राफ मेननेटवर प्रकाशित केला होता, तेव्हा तुम्ही सबग्राफ तयार करण्यासाठी कनेक्ट केलेले वॉलेट वापरले होते आणि या वॉलेटचे NFT होते जे या सबग्राफचे प्रतिनिधित्व करते आणि तुम्हाला अपडेट प्रकाशित करण्याची परवानगी देते.
+When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates.
सबग्राफ आर्बिट्रममध्ये हस्तांतरित करताना, तुम्ही वेगळे वॉलेट निवडू शकता जे L2 वर या सबग्राफ NFT चे मालक असेल.
-तुम्ही मेटामास्क (बाह्य मालकीचे खाते किंवा EOA, म्हणजे स्मार्ट करार नसलेले वॉलेट) सारखे "नियमित" वॉलेट वापरत असल्यास, हे पर्यायी आहे आणि L1 प्रमाणेच मालकाचा पत्ता ठेवण्याची शिफारस केली जाते.
+If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as on L1.
-जर तुम्ही मल्टीसिग (उदा. सेफ) सारखे स्मार्ट कॉन्ट्रॅक्ट वॉलेट वापरत असाल, तर वेगळा L2 वॉलेट पत्ता निवडणे अनिवार्य आहे, कारण हे खाते फक्त मेननेटवर अस्तित्वात असण्याची शक्यता आहे आणि तुम्ही व्यवहार करू शकणार नाही. हे वॉलेट वापरून आर्बिट्रमवर. तुम्हाला स्मार्ट कॉन्ट्रॅक्ट वॉलेट किंवा मल्टीसिग वापरणे सुरू ठेवायचे असल्यास, Arbitrum वर एक नवीन वॉलेट तयार करा आणि त्याचा पत्ता तुमच्या सबग्राफचा L2 मालक म्हणून वापरा.
+If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph.
-**आपण नियंत्रित करत असलेला वॉलेट पत्ता वापरणे खूप महत्वाचे आहे आणि तो Arbitrum वर व्यवहार करू शकतो. अन्यथा, सबग्राफ गमावला जाईल आणि पुनर्प्राप्त केला जाऊ शकत नाही.**
+**It is very important to use a wallet address that you control and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.**
## हस्तांतरणाची तयारी: काही ETH ब्रिजिंग
-सबग्राफ हस्तांतरित करण्यामध्ये ब्रिजद्वारे व्यवहार पाठवणे आणि नंतर आर्बिट्रमवर दुसरा व्यवहार करणे समाविष्ट आहे. पहिला व्यवहार मेननेटवर ETH वापरतो आणि L2 वर संदेश प्राप्त झाल्यावर गॅसचे पैसे देण्यासाठी काही ETH समाविष्ट करतो. तथापि, जर हा गॅस अपुरा असेल तर, तुम्हाला पुन्हा व्यवहार करण्याचा प्रयत्न करावा लागेल आणि गॅससाठी थेट L2 वर पैसे द्यावे लागतील (हे खाली "चरण 3: हस्तांतरणाची पुष्टी करणे" आहे). ही पायरी **हस्तांतरण सुरू केल्यापासून ७ दिवसांच्या आत अंमलात आणणे आवश्यक आहे**. शिवाय, दुसरा व्यवहार ("चरण 4: L2 वर हस्तांतरण पूर्ण करणे") थेट आर्बिट्रमवर केले जाईल. या कारणांसाठी, तुम्हाला आर्बिट्रम वॉलेटवर काही ETH ची आवश्यकता असेल. तुम्ही मल्टीसिग किंवा स्मार्ट कॉन्ट्रॅक्ट खाते वापरत असल्यास, ETH हे नियमित (EOA) वॉलेटमध्ये असणे आवश्यक आहे जे तुम्ही व्यवहार करण्यासाठी वापरत आहात, मल्टीसिग वॉलेटमध्येच नाही.
+Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself.
-तुम्ही काही एक्सचेंजेसवर ETH खरेदी करू शकता आणि ते थेट Arbitrum वर काढू शकता किंवा तुम्ही मेननेट वॉलेटवरून L2 वर ETH पाठवण्यासाठी Arbitrum ब्रिज वापरू शकता: [bridge.arbitrum.io](http://bridge.arbitrum.io). आर्बिट्रमवरील गॅस फी कमी असल्याने, तुम्हाला फक्त थोड्या प्रमाणात आवश्यक आहे. तुमचा व्यवहार मंजूर होण्यासाठी तुम्ही कमी थ्रेशोल्डपासून (0. 01 ETH) सुरुवात करण्याची शिफारस केली जाते.
+You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are so low, you should only need a small amount. It is recommended that you start at a low threshold (e.g. 0.01 ETH) for your transaction to be approved.
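If you want to double-check that the bridged ETH has actually arrived before you start the transfer, a quick sanity check is to read your receiving wallet's balance straight from an Arbitrum RPC endpoint. The sketch below is illustrative only: it assumes `curl` is available, uses the public Arbitrum One endpoint `https://arb1.arbitrum.io/rpc` (any Arbitrum RPC you trust works), and the wallet address is a placeholder.

```bash
# Sketch: confirm the receiving wallet holds enough ETH on Arbitrum before starting the transfer.
# Replace 0xYourL2WalletAddress with the L2 address you chose above.
curl -s https://arb1.arbitrum.io/rpc \
  -X POST \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0xYourL2WalletAddress","latest"],"id":1}'
# The "result" field is the balance in wei, hex-encoded:
# 0x2386f26fc10000 == 10^16 wei == 0.01 ETH, the low threshold suggested above.
```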
## सबग्राफ ट्रान्सफर टूल शोधत आहे
@@ -52,7 +52,7 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
![transfer tool](/img/L2-transfer-tool1.png)
-तुम्ही सबग्राफ असलेल्या वॉलेटशी आणि एक्सप्लोररवरील सबग्राफच्या पृष्ठावर कनेक्ट केलेले असल्यास ते एक्सप्लोररवर देखील उपलब्ध आहे:
+It is also available on Explorer if you're connected with the wallet that owns a subgraph, on that subgraph's page on Explorer:
![Transferring to L2](/img/transferToL2.png)
@@ -60,19 +60,19 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
## पायरी 1: हस्तांतरण सुरू करत आहे
-हस्तांतरण सुरू करण्यापूर्वी, L2 वरील सबग्राफ कोणत्या पत्त्यावर असेल हे तुम्ही ठरवले पाहिजे (वरील "तुमचे L2 वॉलेट निवडणे" पहा), आणि आर्बिट्रमवर आधीच ब्रिज केलेल्या गॅससाठी काही ETH असण्याची जोरदार शिफारस केली जाते (पहा "हस्तांतरणाची तयारी: ब्रिजिंग काही ETH" वर).
+Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommended to have some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above).
-तसेच कृपया लक्षात घ्या की सबग्राफ हस्तांतरित करण्यासाठी सबग्राफच्या मालकीच्या खात्यासह सबग्राफवर शून्य नॉन सिग्नल असणे आवश्यक आहे; जर तुम्ही सबग्राफवर संकेत दिलेला नसेल तर तुम्हाला थोडे क्युरेशन जोडावे लागेल (1 GRT सारखी थोडी रक्कम जोडणे पुरेसे आहे).
+Also please note that transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signalled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would be enough).
-ट्रान्सफर टूल उघडल्यानंतर, तुम्ही "वॉलेट अॅड्रेस प्राप्त करत आहे" फील्डमध्ये L2 वॉलेट पत्ता इनपुट करण्यास सक्षम असाल - **तुम्ही येथे योग्य पत्ता प्रविष्ट केल्याची खात्री करा**. ट्रान्स्फर सबग्राफ वर क्लिक केल्याने तुम्हाला तुमच्या वॉलेटवर व्यवहार करण्यासाठी सूचित केले जाईल (लक्षात ठेवा की L2 गॅससाठी काही ETH मूल्य समाविष्ट केले आहे); हे हस्तांतरण सुरू करेल आणि तुमचा L1 सबग्राफ काढून टाकेल (पहा "सिग्नल, तुमचा L1 सबग्राफ आणि क्वेरी URL सह काय होते ते समजून घेणे" पडद्यामागे काय चालले आहे याबद्दल अधिक तपशीलांसाठी वरील).
+After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes).
-तुम्ही ही पायरी अंमलात आणल्यास, **तुम्ही 7 दिवसांपेक्षा कमी वेळात पायरी 3 पूर्ण करेपर्यंत पुढे जाण्याची खात्री करा, अन्यथा सबग्राफ आणि तुमचा सिग्नल GRT गमावला जाईल.** हे आर्बिट्रमवर L1-L2 मेसेजिंग कसे कार्य करते यामुळे आहे: संदेश ब्रिजवरून पाठविलेली "पुन्हा प्रयत्न करण्यायोग्य तिकिटे" आहेत जी 7 दिवसांच्या आत कार्यान्वित करणे आवश्यक आहे आणि आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास प्रारंभिक अंमलबजावणीसाठी पुन्हा प्रयत्न करणे आवश्यक आहे.
+If you execute this step, **make sure you continue until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum.
![Start the transfer to L2](/img/startTransferL2.png)
## पायरी 2: सबग्राफ L2 वर येण्याची वाट पाहत आहे
-तुम्ही ट्रान्सफर सुरू केल्यानंतर, तुमचा L1 सबग्राफ L2 ला पाठवणारा मेसेज आर्बिट्रम ब्रिजद्वारे प्रसारित झाला पाहिजे. यास अंदाजे 20 मिनिटे लागतात (संभाव्य साखळी पुनर्रचनांपासून व्यवहार "सुरक्षित" होण्यासाठी पूल मेननेट ब्लॉकची वाट पाहतो).
+After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs).
ही प्रतीक्षा वेळ संपल्यानंतर, आर्बिट्रम L2 करारांवर हस्तांतरण स्वयं-अंमलबजावणी करण्याचा प्रयत्न करेल.
@@ -80,7 +80,7 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
## पायरी 3: हस्तांतरणाची पुष्टी करणे
-बहुतेक प्रकरणांमध्ये, ही पायरी स्वयं-अंमलबजावणी होईल कारण चरण 1 मध्ये समाविष्ट केलेला L2 गॅस आर्बिट्रम कॉन्ट्रॅक्ट्सवर सबग्राफ प्राप्त करणार्‍या व्यवहाराची अंमलबजावणी करण्यासाठी पुरेसा असावा. तथापि, काही प्रकरणांमध्ये, हे शक्य आहे की आर्बिट्रमवरील गॅसच्या किमती वाढल्याने ही स्वयं-अंमलबजावणी अयशस्वी होईल. या प्रकरणात, तुमचा सबग्राफ L2 ला पाठवणारे "तिकीट" प्रलंबित असेल आणि 7 दिवसांच्या आत पुन्हा प्रयत्न करणे आवश्यक आहे.
+In most cases, this step will auto-execute, because the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and will require a retry within 7 days.
असे असल्यास, तुम्हाला आर्बिट्रमवर काही ETH असलेले L2 वॉलेट वापरून कनेक्ट करावे लागेल, तुमचे वॉलेट नेटवर्क आर्बिट्रमवर स्विच करा आणि व्यवहाराचा पुन्हा प्रयत्न करण्यासाठी "हस्तांतरण पुष्टी करा" वर क्लिक करा.
@@ -88,13 +88,13 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
## पायरी 4: L2 वर हस्तांतरण पूर्ण करणे
-या टप्प्यावर, तुमचा सबग्राफ आणि GRT आर्बिट्रमवर प्राप्त झाला आहे, परंतु सबग्राफ अद्याप प्रकाशित झालेला नाही. तुम्ही रिसीव्हिंग वॉलेट म्हणून निवडलेले L2 वॉलेट वापरून तुम्हाला कनेक्ट करावे लागेल, तुमचे वॉलेट नेटवर्क आर्बिट्रमवर स्विच करा आणि "सबग्राफ प्रकाशित करा" वर क्लिक करा
+At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph".
![Publish the subgraph](/img/publishSubgraphL2TransferTools.png)
![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png)
-हे सबग्राफ प्रकाशित करेल जेणेकरुन आर्बिट्रमवर कार्यरत असलेले इंडेक्सर्स ते सर्व्ह करू शकतील. हे L1 वरून हस्तांतरित केलेल्या GRT वापरून क्युरेशन सिग्नल देखील जोडेल.
+This will publish the subgraph so that Indexers operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that was transferred from L1.
## पायरी 5: क्वेरी URL अपडेट करत आहे
@@ -102,43 +102,43 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
`https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]`
-लक्षात घ्या की आर्बिट्रमवरील सबग्राफ आयडी तुमच्या मेननेटवर असलेल्या सबग्राफ आयडीपेक्षा वेगळा असेल, परंतु तुम्हाला तो नेहमी एक्सप्लोरर किंवा स्टुडिओवर मिळू शकेल. वर नमूद केल्याप्रमाणे ("सिग्नल, तुमचा L1 सबग्राफ आणि क्वेरी URL सह काय होते ते समजून घेणे" पहा) जुनी L1 URL थोड्या काळासाठी समर्थित असेल, परंतु सबग्राफ समक्रमित होताच तुम्ही तुमच्या क्वेरी नवीन पत्त्यावर L2 वर स्विच कराव्यात.
+Note that the subgraph ID on Arbitrum will be different from the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs"), the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2.
## तुमचे क्युरेशन आर्बिट्रम (L2) वर कसे हस्तांतरित करावे
## L2 मध्ये सबग्राफ ट्रान्सफरवरील क्युरेशनचे काय होते हे समजून घेणे
-सबग्राफचा मालक जेव्हा सबग्राफ आर्बिट्रममध्ये हस्तांतरित करतो, तेव्हा सबग्राफचे सर्व सिग्नल एकाच वेळी GRT मध्ये रूपांतरित केले जातात. हे "स्वयं-स्थलांतरित" सिग्नलला लागू होते, म्हणजेच सबग्राफ आवृत्ती किंवा उपयोजनासाठी विशिष्ट नसलेले परंतु सबग्राफच्या नवीनतम आवृत्तीचे अनुसरण करणारे सिग्नल.
+When the owner of a subgraph transfers the subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph.
-सिग्नल ते GRT मधील हे रूपांतरण सबग्राफ मालकाने L1 मधील सबग्राफ नापसंत केल्यास काय होईल सारखेच आहे. जेव्हा सबग्राफ नापसंत केला जातो किंवा हस्तांतरित केला जातो, तेव्हा सर्व क्युरेशन सिग्नल एकाच वेळी "बर्न" केले जातात (क्युरेशन बाँडिंग वक्र वापरून) आणि परिणामी GRT GNS स्मार्ट कॉन्ट्रॅक्टद्वारे धारण केला जातो (म्हणजे सबग्राफ अपग्रेड आणि स्वयं-स्थलांतरित सिग्नल हाताळणारा करार). त्यामुळे त्या सबग्राफवरील प्रत्येक क्युरेटरचा सबग्राफसाठी त्यांच्याकडे असलेल्या शेअर्सच्या प्रमाणात त्या GRT वर दावा आहे.
+This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph.
सबग्राफ मालकाशी संबंधित या GRT चा एक अंश सबग्राफसह L2 ला पाठविला जातो.
-या टप्प्यावर, क्युरेट केलेले GRT कोणतेही अधिक क्वेरी शुल्क जमा करणार नाही, त्यामुळे क्युरेटर त्यांचे GRT मागे घेणे किंवा L2 वरील त्याच सबग्राफमध्ये हस्तांतरित करणे निवडू शकतात, जेथे ते नवीन क्युरेशन सिग्नल मिंट करण्यासाठी वापरले जाऊ शकते. हे करण्याची घाई नाही कारण जीआरटी अनिश्चित काळासाठी मदत होऊ शकते आणि प्रत्येकाला त्यांच्या समभागांच्या प्रमाणात रक्कम मिळते, मग ते केव्हाही करतात.
+At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it.
## तुमचे L2 वॉलेट निवडत आहे
-तुम्ही तुमचा क्युरेटेड GRT L2 वर हस्तांतरित करण्याचे ठरवल्यास, तुम्ही वेगळे वॉलेट निवडू शकता जे L2 वर क्युरेशन सिग्नलचे मालक असेल.
+If you choose to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2.
-तुम्ही मेटामास्क (बाह्य मालकीचे खाते किंवा EOA, म्हणजे स्मार्ट करार नसलेले वॉलेट) सारखे "नियमित" वॉलेट वापरत असल्यास, हे पर्यायी आहे आणि L1 प्रमाणेच क्युरेटर पत्ता ठेवण्याची शिफारस केली जाते.
+If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as on L1.
-जर तुम्ही मल्टीसिग (उदा. सुरक्षित) सारखे स्मार्ट कॉन्ट्रॅक्ट वॉलेट वापरत असाल, तर वेगळा L2 वॉलेट पत्ता निवडणे अनिवार्य आहे, कारण हे खाते फक्त मेननेटवर अस्तित्वात असण्याची शक्यता आहे आणि तुम्ही व्यवहार करू शकणार नाही. हे वॉलेट वापरून आर्बिट्रमवर. तुम्हाला स्मार्ट कॉन्ट्रॅक्ट वॉलेट किंवा मल्टीसिग वापरणे सुरू ठेवायचे असल्यास, आर्बिट्रमवर नवीन वॉलेट तयार करा आणि त्याचा पत्ता L2 प्राप्त करणारा वॉलेट पत्ता म्हणून वापरा.
+If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address.
-**तुम्ही नियंत्रित करता असा वॉलेट पत्ता वापरणे खूप महत्वाचे आहे आणि तो आर्बिट्रमवर व्यवहार करू शकतो, अन्यथा क्युरेशन गमावले जाईल आणि परत मिळवता येणार नाही.**
+**It is very important to use a wallet address that you control and that can make transactions on Arbitrum, otherwise the curation will be lost and cannot be recovered.**
## L2 वर क्युरेशन पाठवत आहे: पायरी 1
-हस्तांतरण सुरू करण्यापूर्वी, L2 वरील क्युरेशनचा मालक कोणता पत्ता असेल हे तुम्ही ठरवले पाहिजे (वरील "तुमचे L2 वॉलेट निवडणे" पहा), आणि तुम्हाला पुन्हा एकदा अंमलबजावणी करण्याचा प्रयत्न करण्याची आवश्यकता असल्यास आर्बिट्रमवर आधीच ब्रिज केलेल्या गॅससाठी काही ETH असण्याची शिफारस केली जाते. L2 वर संदेश. तुम्ही काही एक्सचेंजेसवर ETH खरेदी करू शकता आणि ते थेट Arbitrum वर काढू शकता किंवा तुम्ही मेननेट वॉलेटमधून L2 वर ETH पाठवण्यासाठी आर्बिट्रम ब्रिज वापरू शकता: [bridge.arbitrum.io](http://bridge.arbitrum.io) - आर्बिट्रमवरील गॅस फी खूप कमी असल्याने, तुम्हाला फक्त थोड्या प्रमाणात आवश्यक आहे, उदा. 0.01 ETH कदाचित पुरेसे असेल.
+Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended to have some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough.
-तुम्ही क्युरेट केलेला सबग्राफ L2 वर हस्तांतरित झाला असल्यास, तुम्हाला एक्सप्लोररवर एक संदेश दिसेल ज्यामध्ये तुम्ही हस्तांतरित केलेल्या सबग्राफसाठी क्युरेट करत आहात.
+If the subgraph that you curate has been transferred to L2, you will see a message on Explorer telling you that you're curating a transferred subgraph.
-सबग्राफ पृष्ठ पाहताना, तुम्ही क्युरेशन मागे घेणे किंवा हस्तांतरित करणे निवडू शकता. "Transfer Signal to Arbitrum" वर क्लिक केल्याने ट्रान्सफर टूल उघडेल.
+When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool.
![Transfer signal](/img/transferSignalL2TransferTools.png)
-ट्रान्सफर टूल उघडल्यानंतर, तुमच्या वॉलेटमध्ये काही ETH जोडण्यासाठी तुम्हाला सूचित केले जाईल. त्यानंतर तुम्ही L2 वॉलेटचा पत्ता "प्राप्त होत असलेला वॉलेट पत्ता" फील्डमध्ये टाकण्यास सक्षम असाल - **तुम्ही येथे योग्य पत्ता प्रविष्ट केल्याची खात्री करा**. ट्रान्सफर सिग्नलवर क्लिक केल्याने तुम्हाला तुमच्या वॉलेटवर व्यवहार करण्यासाठी सूचित केले जाईल (लक्षात ठेवा की L2 गॅससाठी काही ETH मूल्य समाविष्ट केले आहे); हे हस्तांतरण सुरू करेल.
+After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer.
-तुम्ही ही पायरी अंमलात आणल्यास, **7 दिवसांपेक्षा कमी वेळात पायरी 3 पूर्ण करेपर्यंत तुम्ही पुढे जात असल्याची खात्री करा, अन्यथा तुमचा सिग्नल GRT गमावला जाईल.** हे आर्बिट्रमवर L1-L2 मेसेजिंग कसे कार्य करते: याद्वारे पाठवले जाणारे संदेश यामुळे आहे ब्रिज ही "पुन्हा प्रयत्न करण्यायोग्य तिकिटे" आहेत जी 7 दिवसांच्या आत कार्यान्वित करणे आवश्यक आहे आणि आर्बिट्रमवर गॅसच्या किमतीत वाढ झाल्यास प्रारंभिक अंमलबजावणीसाठी पुन्हा प्रयत्न करणे आवश्यक आहे.
+If you execute this step, **make sure you continue until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum.
## L2 वर क्युरेशन पाठवत आहे: पायरी 2
@@ -146,7 +146,7 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR
![Send signal to L2](/img/sendingCurationToL2Step2First.png)
-तुम्ही ट्रान्सफर सुरू केल्यानंतर, तुमचा L1 क्युरेशन L2 ला पाठवणारा मेसेज आर्बिट्रम ब्रिजद्वारे प्रसारित होणे आवश्यक आहे. यास अंदाजे 20 मिनिटे लागतात (संभाव्य साखळी पुनर्रचनांपासून व्यवहार "सुरक्षित" होण्यासाठी पूल मेननेट ब्लॉकची वाट पाहतो).
+After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs).
@@ -154,7 +154,7 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR ## L2 वर क्युरेशन पाठवत आहे: पायरी 3 -बहुतेक प्रकरणांमध्ये, ही पायरी स्वयं-अंमलबजावणी होईल कारण चरण 1 मध्ये समाविष्ट केलेला L2 गॅस आर्बिट्रम कॉन्ट्रॅक्ट्सवर क्युरेशन प्राप्त करणार्‍या व्यवहाराची अंमलबजावणी करण्यासाठी पुरेसा असावा. तथापि, काही प्रकरणांमध्ये, हे शक्य आहे की आर्बिट्रमवरील गॅसच्या किमती वाढल्याने ही स्वयं-अंमलबजावणी अयशस्वी होईल. या प्रकरणात, तुमचे क्युरेशन L2 ला पाठवणारे "तिकीट" प्रलंबित असेल आणि 7 दिवसांच्या आत पुन्हा प्रयत्न करणे आवश्यक आहे. +अधिकांशपेक्षा आपल्याला ह्या पायथ्याच्या आयातनाच्या प्रक्रियेचा या पायथ्यात स्वतः क्रियान्वित होईल, किंवा प्रवेशणीला तसेच समाविष्ट केलेल्या L2 गॅससाठी पर्याप्त असावा आहे, आर्बिट्रमच्या करारांवर क्रियान्वित केलेल्या ट्रांझॅक्शनला. किंवा किंवा, किंवा, किंवा, आर्बिट्रमवर गॅसच्या किंमतीमध्ये वाढीसाठी किंवा दिल्याने ह्या स्वतः क्रियान्वयाच्या प्रक्रियेस संपल्याच्या पायथ्यात याची अपवादज असू शकते. हे प्रकरण असल्यास, आपल्याला L2 वर कुरेशन पाठवता येईल तो प्रलंबित असून, 7 दिवसांच्या आताच्या पुन्हा प्रयत्नाची आवश्यकता असेल. असे असल्यास, तुम्हाला आर्बिट्रमवर काही ETH असलेले L2 वॉलेट वापरून कनेक्ट करावे लागेल, तुमचे वॉलेट नेटवर्क आर्बिट्रमवर स्विच करा आणि व्यवहाराचा पुन्हा प्रयत्न करण्यासाठी "हस्तांतरण पुष्टी करा" वर क्लिक करा. @@ -162,4 +162,4 @@ L2 सबग्राफसाठी क्वेरी वेगळ्या UR ## L1 वर तुमचे क्युरेशन मागे घेत आहे -तुम्ही तुमचा GRT L2 वर न पाठवण्यास प्राधान्य दिल्यास, किंवा तुम्ही GRT मॅन्युअली ब्रिज करत असाल, तर तुम्ही L1 वर तुमचा क्युरेट केलेला GRT मागे घेऊ शकता. सबग्राफ पृष्ठावरील बॅनरवर, "सिग्नल मागे घ्या" निवडा आणि व्यवहाराची पुष्टी करा; जीआरटी तुमच्या क्युरेटरच्या पत्त्यावर पाठवली जाईल. +जर आपल्याला आपल्या GRT ला L2 वर पाठवायचं आवडत नसलं तर किंवा आपल्याला GRT ला मॅन्युअली ब्रिज करण्याची प्राथमिकता आहे, तर आपल्याला L1 वरील आपल्या क्युरेटेड GRT ला काढून घ्यायला दिले आहे. सबग्राफच्या पृष्ठाच्या बॅनरवरून "Withdraw Signal" निवडा आणि व्यवस्थापन प्रक्रियेची पुष्टी करा; GRT आपल्या क्युरेटर पत्त्याला पाठविला जाईल. diff --git a/website/pages/mr/billing.mdx b/website/pages/mr/billing.mdx index 9b73dcaf7adc..74f44263d43c 100644 --- a/website/pages/mr/billing.mdx +++ b/website/pages/mr/billing.mdx @@ -1,15 +1,15 @@ --- -title: बिलिंग +title: Billing --- -> पावत्या साप्ताहिक आधारावर तयार केल्या जातात. +> Invoices are generated on a weekly basis. -तुमच्या क्वेरी फी भरण्यासाठी दोन पर्याय आहेत: +There are two options for paying for your query fees: -- [बॅंक्सासह फियाट चलनासह पैसे देणे](#billing-with-banxa) -- [क्रिप्टो वॉलेटसह पैसे देणे](#billing-on-arbitrum) +- [Paying with fiat currency with Banxa](#billing-with-banxa) +- [Paying with crypto wallet](#billing-on-arbitrum) -## बॅंक्सासह बिलिंग +## Billing with Banxa बॅंक्सा तुम्हाला एक्सचेंजची गरज टाळण्यास आणि तुमच्या पसंतीच्या फियाट चलनाचा वापर करून तुमच्या क्वेरी शुल्कासाठी पैसे देण्यास सक्षम करते. फियाट चलन GRT मध्ये रूपांतरित केले जाईल, बिलिंग करारावरील तुमच्या खात्यातील शिल्लक जोडले जाईल आणि तुमच्या API कीशी संबंधित प्रश्नांसाठी पैसे देण्यासाठी वापरले जाईल. @@ -17,33 +17,37 @@ title: बिलिंग तुम्ही त्यांचे [दस्तऐवज](https://docs.banxa.com/docs) वाचून Banxa बद्दल अधिक जाणून घेऊ शकता. -### बॅंक्सासह क्वेरी फी भरणे +### Paying for query fees with Banxa -1. [सबग्राफ स्टुडिओ](https://thegraph.com/studio/billing/?show=Deposit) मध्ये “कार्डने पैसे द्या” पर्याय निवडा. -2. तुमच्या खात्यातील शिल्लकमध्ये जोडण्यासाठी जीआरटीची रक्कम प्रविष्ट करा. +1. Select “Pay with Card” option in [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). +2. Enter the amount of GRT to be added to your account balance. 3. 'Banxa' बटणावर क्लिक करा सह सुरू ठेवा. 4. 
पेमेंट पद्धतीसह बँक्सावर आवश्यक बँकिंग माहिती प्रविष्ट करा & पसंतीचे फियाट चलन. -5. व्यवहार पूर्ण करा. +5. Finish the transaction. व्यवहार पूर्ण होण्यासाठी 10 मिनिटे लागू शकतात. व्यवहाराची पुष्टी झाल्यानंतर, खरेदी केलेला GRT आपोआप आर्बिट्रमवरील तुमच्या खात्यातील शिल्लकमध्ये जोडला जाईल. -## आर्बिट्रमवर बिलिंग +## Billing on Arbitrum ग्राफ प्रोटोकॉल इथरियम मेननेटवर कार्यरत असताना, [बिलिंग करार](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) [आर्बिट्रम](https://arbitrum.io/ वर राहतो व्यवहाराची वेळ आणि खर्च कमी करण्यासाठी) नेटवर्क. तुम्हाला तुमच्या API की मधून व्युत्पन्न केलेली क्वेरी फी भरावी लागेल. बिलिंग करार वापरून, तुम्ही हे करू शकाल: -- तुमच्या खात्यातील शिल्लकमधून GRT जोडा आणि काढा. +- Add and withdraw GRT from your account balance. - तुम्ही तुमच्या खात्यातील शिल्लकमध्ये किती GRT जोडले आहे, तुम्ही किती काढले आहे आणि तुमच्या इनव्हॉइसवर आधारित तुमच्या शिल्लकांचा मागोवा ठेवा. - जोपर्यंत तुमच्या खात्यातील शिल्लक पुरेशी GRT आहे तोपर्यंत व्युत्पन्न केलेल्या क्वेरी शुल्कावर आधारित चलन स्वयंचलितपणे भरा. -### क्रिप्टो वॉलेट वापरून GRT जोडणे +### Adding GRT using a crypto wallet + + > तुमच्या क्रिप्टो वॉलेटमध्ये तुमच्याकडे आधीपासूनच GRT आहे आणि तुम्ही Ethereum mainnet वर आहात असे गृहीत धरून हा विभाग लिहिलेला आहे. तुमच्याकडे GRT नसल्यास, तुम्ही GRT कसे मिळवायचे ते [येथे](#getting-grt) शिकू शकता. -1. [सबग्राफ स्टुडिओ बिलिंग पेज](https://thegraph.com/studio/billing/) वर जा. +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. पृष्ठाच्या वरच्या उजव्या कोपर्यात "कनेक्ट वॉलेट" बटणावर क्लिक करा. तुम्हाला वॉलेट निवड पृष्ठावर पुनर्निर्देशित केले जाईल. तुमचे वॉलेट निवडा आणि "कनेक्ट" वर क्लिक करा. -3. पृष्ठाच्या मध्यभागी असलेल्या 'GRT जोडा' बटणावर क्लिक करा. एक साइड पॅनेल दिसेल. +3. Click the 'Add GRT' button at the center of the page. A side panel will appear. 4. तुम्हाला तुमच्या खात्यातील शिल्लकमध्ये जोडायची असलेली GRT रक्कम एंटर करा. "मॅक्स" बटणावर क्लिक करून तुम्ही तुमच्या खात्यातील शिल्लकमध्ये जोडू इच्छित GRT ची कमाल रक्कम देखील निवडू शकता. @@ -51,39 +55,41 @@ title: बिलिंग 6. तुमच्या खात्यातील शिल्लकमध्ये GRT जोडण्यासाठी 'खात्यातील शिल्लकमध्ये GRT जोडा' वर क्लिक करा. तुमच्या वॉलेटमध्ये संबंधित व्यवहारावर सही करा. यामुळे गॅसचा खर्च होईल. -7. एकदा व्यवहाराची पुष्टी झाल्यानंतर, तुम्हाला एका तासाच्या आत तुमच्या खात्यातील शिल्लकमध्ये GRT जोडलेला दिसेल. +7. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. -### क्रिप्टो वॉलेट वापरून GRT काढणे +### Withdrawing GRT using a crypto wallet > तुम्ही [सबग्राफ स्टुडिओ](https://thegraph.com/studio/billing/) वर तुमच्या खात्यातील शिल्लकमध्ये GRT जमा केला आहे आणि तुम्ही आर्बिट्रम नेटवर्कवर आहात असे गृहीत धरून हा विभाग लिहिलेला आहे. -1. [सबग्राफ स्टुडिओ बिलिंग पेज](https://thegraph.com/studio/billing/) वर जा. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. पृष्ठाच्या वरच्या उजव्या कोपर्यात "कनेक्ट वॉलेट" बटणावर क्लिक करा. तुमचे वॉलेट निवडा आणि "कनेक्ट" वर क्लिक करा. 3. पृष्ठाच्या मध्यभागी असलेल्या 'GRT जोडा' बटणाच्या पुढील ड्रॉपडाउनवर क्लिक करा. GRT काढा निवडा. एक साइड पॅनेल दिसेल. -4. तुम्ही काढू इच्छित असलेली GRT रक्कम एंटर करा. +4. Enter the amount of GRT you would like to withdraw. 5. तुमच्या खात्यातील शिल्लकमधून GRT काढण्यासाठी 'GRT काढा' वर क्लिक करा. तुमच्या वॉलेटमध्ये संबंधित व्यवहारावर सही करा. यामुळे गॅसचा खर्च होईल. GRT तुमच्या आर्बिट्रम वॉलेटवर पाठवला जाईल. 6. 
व्यवहाराची पुष्टी झाल्यानंतर, तुम्हाला तुमच्या आर्बिट्रम वॉलेटमध्ये तुमच्या खात्यातील शिल्लक रकमेतून GRT काढलेला दिसेल. -### मल्टीसिग वॉलेट वापरून GRT जोडत आहे +### Adding GRT using a multisig wallet -1. [सबग्राफ स्टुडिओ बिलिंग पेज](https://thegraph.com/studio/billing/) वर जा. + + +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. पृष्ठाच्या वरच्या उजव्या कोपर्यात "कनेक्ट वॉलेट" बटणावर क्लिक करा. तुमचे वॉलेट निवडा आणि "कनेक्ट" वर क्लिक करा. तुम्ही [Gnosis-Safe](https://gnosis-safe.io/) वापरत असल्यास, तुम्ही तुमचे मल्टीसिग तसेच तुमचे साइनिंग वॉलेट कनेक्ट करण्यात सक्षम व्हाल. त्यानंतर, संबंधित संदेशावर स्वाक्षरी करा. यामुळे गॅसची किंमत लागणार नाही. -3. पृष्ठाच्या मध्यभागी असलेल्या 'GRT जोडा' बटणावर क्लिक करा. एक साइड पॅनेल दिसेल. +3. Click the 'Add GRT' button at the center of the page. A side panel will appear. -4. एकदा व्यवहाराची पुष्टी झाल्यानंतर, तुम्हाला एका तासाच्या आत तुमच्या खात्यातील शिल्लकमध्ये GRT जोडलेला दिसेल. +4. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. -### मल्टीसिग वॉलेट वापरून GRT काढणे +### Withdrawing GRT using a multisig wallet > हा विभाग तुम्ही [सबग्राफ स्टुडिओ](https://thegraph.com/studio/billing/) वर तुमच्या खात्यातील शिल्लकमध्ये GRT जमा केला आहे आणि तुम्ही Ethereum mainnet वर आहात असे गृहीत धरून लिहिलेले आहे. -1. [सबग्राफ स्टुडिओ बिलिंग पेज](https://thegraph.com/studio/billing/) वर जा. +1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. पृष्ठाच्या वरच्या उजव्या कोपर्यात "कनेक्ट वॉलेट" बटणावर क्लिक करा. तुमचे वॉलेट निवडा आणि "कनेक्ट" वर क्लिक करा. @@ -93,66 +99,110 @@ title: बिलिंग 5. तुमच्या खात्यातील शिल्लकमधून GRT काढण्यासाठी 'GRT काढा' वर क्लिक करा. तुमच्या वॉलेटमध्ये संबंधित व्यवहारावर सही करा. यामुळे गॅसचा खर्च होईल. -6. एकदा व्यवहाराची पुष्टी झाल्यानंतर, तुम्हाला एका तासाच्या आत तुमच्या आर्बिट्रम वॉलेटमध्ये GRT जोडलेला दिसेल. +6. Once the transaction is confirmed, you'll see the GRT added to your Arbitrum wallet within an hour. -## GRT मिळवत आहे +## Getting GRT -हा विभाग तुम्हाला क्वेरी फी भरण्यासाठी GRT कसे मिळवायचे ते दाखवेल. +This section will show you how to get GRT to pay for query fees. -### कॉइनबेस +### Coinbase -Coinbase वर GRT खरेदी करण्यासाठी हे चरण-दर-चरण मार्गदर्शक असेल. +This will be a step by step guide for purchasing GRT on Coinbase. -1. [Coinbase](https://www.coinbase.com/) वर जा आणि खाते तयार करा. +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. 2. एकदा तुम्ही खाते तयार केल्यानंतर, तुम्हाला KYC (किंवा तुमच्या ग्राहकाला जाणून घ्या) या नावाने ओळखल्या जाणार्‍या प्रक्रियेद्वारे तुमची ओळख सत्यापित करणे आवश्यक आहे. सर्व केंद्रीकृत किंवा कस्टोडियल क्रिप्टो एक्सचेंजसाठी ही एक मानक प्रक्रिया आहे. -3. एकदा तुम्ही तुमची ओळख सत्यापित केल्यानंतर, तुम्ही GRT खरेदी करू शकता. पृष्ठाच्या वरच्या उजव्या बाजूला असलेल्या "खरेदी/विक्री" बटणावर क्लिक करून तुम्ही हे करू शकता. -4. तुम्ही खरेदी करू इच्छित चलन निवडा. GRT निवडा. -5. पेमेंट पद्धत निवडा. तुमची पसंतीची पेमेंट पद्धत निवडा. -6. तुम्हाला खरेदी करायची असलेली GRT रक्कम निवडा. -7. तुमच्या खरेदीचे पुनरावलोकन करा. तुमच्या खरेदीचे पुनरावलोकन करा आणि "GRT खरेदी करा" वर क्लिक करा. -8. तुमच्या खरेदीची पुष्टी करा. तुमच्या खरेदीची पुष्टी करा आणि तुम्ही यशस्वीरित्या GRT खरेदी कराल. -9. तुम्ही तुमच्या खात्यातून तुमच्या क्रिप्टो वॉलेटमध्ये जीआरटी ट्रान्सफर करू शकता जसे की [मेटामास्क](https://metamask.io/). +3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. +4. 
Select the currency you want to purchase. Select GRT. +5. Select the payment method. Select your preferred payment method. +6. Select the amount of GRT you want to purchase. +7. Review your purchase. Review your purchase and click "Buy GRT". +8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. +9. You can transfer the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - तुमच्या क्रिप्टो वॉलेटमध्ये GRT हस्तांतरित करण्यासाठी, पृष्ठाच्या वरच्या उजव्या बाजूला असलेल्या "खाते" बटणावर क्लिक करा. - - GRT खात्याच्या पुढे असलेल्या "पाठवा" बटणावर क्लिक करा. - - तुम्हाला जीआरटी पाठवायची आहे आणि वॉलेटचा पत्ता एंटर करा. + - Click on the "Send" button next to the GRT account. + - Enter the amount of GRT you want to send and the wallet address you want to send it to. - "सुरू ठेवा" वर क्लिक करा आणि तुमच्या व्यवहाराची पुष्टी करा. -कृपया लक्षात घ्या की मोठ्या खरेदी रकमेसाठी, Coinbase ला तुम्हाला संपूर्ण रक्कम क्रिप्टो वॉलेटमध्ये हस्तांतरित करण्यापूर्वी 7-10 दिवस प्रतीक्षा करावी लागेल. -तुम्ही Coinbase वर GRT मिळवण्याबद्दल अधिक जाणून घेऊ शकता [येथे](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i- buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -Binance वर GRT खरेदी करण्यासाठी हे चरण-दर-चरण मार्गदर्शक असेल. +This will be a step by step guide for purchasing GRT on Binance. -1. [Binance](https://www.binance.com/en) वर जा आणि खाते तयार करा. +1. Go to [Binance](https://www.binance.com/en) and create an account. 2. एकदा तुम्ही खाते तयार केल्यानंतर, तुम्हाला KYC (किंवा तुमच्या ग्राहकाला जाणून घ्या) या नावाने ओळखल्या जाणार्‍या प्रक्रियेद्वारे तुमची ओळख सत्यापित करणे आवश्यक आहे. सर्व केंद्रीकृत किंवा कस्टोडियल क्रिप्टो एक्सचेंजसाठी ही एक मानक प्रक्रिया आहे. 3. एकदा तुम्ही तुमची ओळख सत्यापित केल्यानंतर, तुम्ही GRT खरेदी करू शकता. तुम्ही होमपेज बॅनरवरील "Buy Now" बटणावर क्लिक करून हे करू शकता. -4. तुम्हाला एका पृष्ठावर नेले जाईल जेथे तुम्ही खरेदी करू इच्छित चलन निवडू शकता. GRT निवडा. +4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. 5. तुमची पसंतीची पेमेंट पद्धत निवडा. तुम्ही युरो, यूएस डॉलर्स आणि बरेच काही यांसारख्या विविध फियाट चलनांसह देय देण्यास सक्षम असाल. -6. तुम्हाला खरेदी करायची असलेली GRT रक्कम निवडा. -7. तुमच्या खरेदीचे पुनरावलोकन करा आणि "GRT खरेदी करा" वर क्लिक करा. +6. Select the amount of GRT you want to purchase. +7. Review your purchase and click "Buy GRT". 8. तुमच्या खरेदीची पुष्टी करा आणि तुम्ही तुमच्या Binance Spot Wallet मध्ये तुमचा GRT पाहण्यास सक्षम असाल. 9. तुम्ही तुमच्या खात्यातून तुमच्या क्रिप्टो वॉलेटमध्ये GRT काढू शकता जसे की [MetaMask](https://metamask.io/). - तुमच्या क्रिप्टो वॉलेटमधील GRT [मागे घेण्यासाठी](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570), पैसे काढण्याच्या श्वेतसूचीमध्ये तुमचा क्रिप्टो वॉलेट पत्ता जोडा. - - "वॉलेट" बटणावर क्लिक करा, पैसे काढा क्लिक करा आणि GRT निवडा. + - Click on the "wallet" button, click withdraw, and select GRT. - तुम्हाला पाठवायची असलेली GRT ची रक्कम आणि तुम्हाला तो पाठवायचा असलेला व्हाइटलिस्ट केलेला वॉलेट पत्ता एंटर करा. - - "सुरू ठेवा" वर क्लिक करा आणि तुमच्या व्यवहाराची पुष्टी करा. + - Click "Continue" and confirm your transaction. 
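Once the withdrawal is confirmed, you can verify on-chain that the GRT reached your own wallet by reading `balanceOf(address)` on the GRT token contract (its mainnet address, `0xc944E90C64B2c07662A292be6244BDf05Cda44a7`, is also listed in the Uniswap section below). The snippet below is only a hedged sketch: it assumes `curl`, uses Cloudflare's public Ethereum gateway as an example RPC endpoint, and the padded wallet address is a placeholder you must fill in.

```bash
# Sketch: read your GRT balance after withdrawing from the exchange.
# balanceOf(address) has the 4-byte selector 0x70a08231; the argument is your
# wallet address without the 0x prefix, left-padded with zeros to 32 bytes.
GRT=0xc944E90C64B2c07662A292be6244BDf05Cda44a7
WALLET=000000000000000000000000<your-40-hex-char-address-without-0x>
curl -s https://cloudflare-eth.com \
  -X POST \
  -H "Content-Type: application/json" \
  --data "{\"jsonrpc\":\"2.0\",\"method\":\"eth_call\",\"params\":[{\"to\":\"$GRT\",\"data\":\"0x70a08231$WALLET\"},\"latest\"],\"id\":1}"
# The "result" is your balance as a 32-byte hex number in GRT's 18-decimal base units.
```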
-तुम्ही Binance वर GRT मिळवण्याबद्दल [येथे अधिक जाणून घेऊ शकता ](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -अशा प्रकारे तुम्ही Uniswap वर GRT खरेदी करू शकता. +This is how you can purchase GRT on Uniswap. -1. [Uniswap](https://app.uniswap.org/#/swap) वर जा आणि तुमचे वॉलेट कनेक्ट करा. -2. तुम्हाला ज्या टोकनवरून स्वॅप करायचे आहे ते निवडा. ETH निवडा. -3. तुम्हाला स्वॅप करायचे असलेले टोकन निवडा. GRT निवडा. +1. Go to [Uniswap](https://app.uniswap.org/#/swap) and connect your wallet. +2. Select the token you want to swap from. Select ETH. +3. Select the token you want to swap to. Select GRT. - तुम्ही योग्य टोकनसाठी स्वॅप करत असल्याची खात्री करा. GRT स्मार्ट करार पत्ता आहे: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` -4. तुम्ही स्वॅप करू इच्छित असलेल्या ETH ची रक्कम प्रविष्ट करा. -5. "स्वॅप" वर क्लिक करा. -6. तुमच्या वॉलेटमधील व्यवहाराची पुष्टी करा आणि तुम्ही व्यवहारावर प्रक्रिया होण्याची वाट पहा. +4. Enter the amount of ETH you want to swap. +5. Click "Swap". +6. Confirm the transaction in your wallet and you wait for the transaction to process. + +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. 
Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. -तुम्ही Uniswap वर GRT मिळवण्याबद्दल [येथे](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) अधिक जाणून घेऊ शकता. +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). -## आर्बिट्रम ब्रिज +## Arbitrum Bridge -बिलिंग कॉन्ट्रॅक्ट फक्त GRT ला इथरियम मेननेट ते आर्बिट्रम नेटवर्कपर्यंत जोडण्यासाठी डिझाइन केले आहे. तुम्ही तुमचा GRT Arbitrum वरून Ethereum mainnet वर हस्तांतरित करू इच्छित असल्यास, तुम्हाला [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161) वापरावे लागेल. +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/mr/chain-integration-overview.mdx b/website/pages/mr/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/mr/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. 
Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. 
diff --git a/website/pages/mr/cookbook/arweave.mdx b/website/pages/mr/cookbook/arweave.mdx index 2611a753e483..0547a2aa5ce4 100644 --- a/website/pages/mr/cookbook/arweave.mdx +++ b/website/pages/mr/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Arweave वर सबग्राफ तयार करणे --- -> ग्राफ नोडमध्ये आणि होस्ट केलेल्या सेवेवर आर्वीव्ह सपोर्ट बीटामध्ये आहे: कृपया Arweave सबग्राफ तयार करण्याबाबत कोणतेही प्रश्न असल्यास आमच्याशी [Discord](https://discord.gg/graphprotocol) वर संपर्क साधा! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! या मार्गदर्शकामध्ये, तुम्ही Arweave ब्लॉकचेन इंडेक्स करण्यासाठी सबग्राफ कसे तयार करावे आणि कसे तैनात करावे ते शिकाल. @@ -83,7 +83,7 @@ dataSources: ``` - आर्वीव्ह सबग्राफ नवीन प्रकारचे डेटा स्रोत सादर करतात (`arweave`) -- नेटवर्क होस्टिंग ग्राफ नोडवरील नेटवर्कशी संबंधित असावे. होस्ट केलेल्या सेवेवर, Arweave चे mainnet `arweave-mainnet` आहे +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave डेटा स्रोत पर्यायी source.owner फील्ड सादर करतात, जी Arweave वॉलेटची सार्वजनिक की आहे Arweave डेटा स्रोत दोन प्रकारच्या हँडलरला समर्थन देतात: @@ -150,9 +150,9 @@ class Transaction { Arweave Subgraph चे मॅपिंग लिहिणे हे Ethereum Subgraph चे मॅपिंग लिहिण्यासारखेच आहे. अधिक माहितीसाठी, क्लिक करा [येथे](/developing/creating-a-subgraph/#writing-mappings). -## होस्ट केलेल्या सेवेवर आर्वीव्ह सबग्राफ तैनात करणे +## Deploying an Arweave Subgraph on the hosted service -एकदा तुमचा सबग्राफ Hosed सेवा डॅशबोर्डवर तयार झाला की, तुम्ही `graph deploy` CLI कमांड वापरून तैनात करू शकता. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash आलेख उपयोजित --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/mr/cookbook/grafting.mdx b/website/pages/mr/cookbook/grafting.mdx index a53c351d0803..971f572af23e 100644 --- a/website/pages/mr/cookbook/grafting.mdx +++ b/website/pages/mr/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: करार बदला आणि त्याचा इतिहास या ट्युटोरियलमध्ये, आपण मूलभूत वापराचे केस कव्हर करणार आहोत. आम्‍ही सध्‍याच्‍या कराराची जागा एकसमान कराराने (नवीन पत्‍त्‍यासह, परंतु समान कोडसह) बदलू. त्यानंतर, नवीन कराराचा मागोवा घेणाऱ्या "बेस" सबग्राफवर विद्यमान सबग्राफ कलम करा. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. 
+ +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## विद्यमान सबग्राफ तयार करणे सबग्राफ तयार करणे हा आलेखाचा एक आवश्यक भाग आहे, ज्याचे [येथे](http://localhost:3000/en/cookbook/quick-start/) सखोल वर्णन केले आहे. या ट्यूटोरियलमध्ये वापरलेले विद्यमान सबग्राफ तयार आणि तैनात करण्यात सक्षम होण्यासाठी, खालील रेपो प्रदान केला आहे: diff --git a/website/pages/mr/cookbook/near.mdx b/website/pages/mr/cookbook/near.mdx index d14cc44cf75a..f8aa501820bb 100644 --- a/website/pages/mr/cookbook/near.mdx +++ b/website/pages/mr/cookbook/near.mdx @@ -44,8 +44,8 @@ title: NEAR वर सबग्राफ तयार करणे सबग्राफ विकासादरम्यान दोन प्रमुख आज्ञा आहेत: ```bash -$ ग्राफ codegen # मॅनिफेस्टमध्ये ओळखल्या गेलेल्या स्कीमा फाइलमधून प्रकार व्युत्पन्न करते -$ ग्राफ बिल्ड # असेंबलीस्क्रिप्ट फायलींमधून वेब असेंब्ली तयार करते आणि /बिल्ड फोल्डरमध्ये सर्व सबग्राफ फाइल्स तयार करते +$ graph codegen # मॅनिफेस्टमध्ये ओळखल्या गेलेल्या स्कीमा फाइलमधून प्रकार व्युत्पन्न करते +$ graph build # असेंबलीस्क्रिप्ट फायलींमधून वेब असेंब्ली तयार करते आणि /बिल्ड फोल्डरमध्ये सर्व सबग्राफ फाइल्स तयार करते ``` ### सबग्राफ मॅनिफेस्ट व्याख्या @@ -277,7 +277,7 @@ NEARサブグラフの「保留中」機能はまだサポートされていま ### माझा प्रश्न उत्तर दिला नाही, NEAR सबग्राफ तयार करण्यासाठी अधिक मदत कुठे मिळेल? -जर सबग्राफ डेव्हलपमेंटचा सामान्य प्रश्न असेल तर, बाकीच्या [डेव्हलपर दस्तऐवजीकरण](/cookbook/quick-start) मध्ये बरीच माहिती आहे. अन्यथा कृपया [द ग्राफ प्रोटोकॉल डिस्कॉर्ड](https://discord.gg/graphprotocol) मध्ये सामील व्हा आणि #जवळच्या चॅनेलवर विचारा किंवा near@thegraph.com वर ईमेल करा. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## संदर्भ diff --git a/website/pages/mr/cookbook/subgraph-debug-forking.mdx b/website/pages/mr/cookbook/subgraph-debug-forking.mdx index babcd8fe7ba0..ae08df06ed38 100644 --- a/website/pages/mr/cookbook/subgraph-debug-forking.mdx +++ b/website/pages/mr/cookbook/subgraph-debug-forking.mdx @@ -90,7 +90,7 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -1. काळजीपूर्वक तपासणी केल्यानंतर माझ्या लक्षात आले की माझ्या दोन हँडलरमध्ये `Gravatar` चे अनुक्रमणिका करताना वापरल्या जाणार्‍या `id` प्रस्तुतीकरणांमध्ये काही जुळत नाही. `HandleNewGravatar` हे हेक्स (`event.params.id.toHex()` मध्ये रूपांतरित करत असताना, `handleUpdatedGravatar` int32 (`इव्हेंट) वापरते. params.id.toI32()`) ज्यामुळे `HandleUpdatedGravatar` "Gravatar सापडला नाही!" ने घाबरून जातो. मी त्या दोघांना `id` हेक्समध्ये रूपांतरित करतो. +1. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. 2. 
मी बदल केल्यानंतर मी माझा सबग्राफ स्थानिक ग्राफ नोडवर तैनात करतो,**_अयशस्वी सबग्राफ फोर्किंग_** आणि सेटिंग `dataSources.source.startBlock``subgraph.yaml` मध्ये `6190343` ला: ```bash diff --git a/website/pages/mr/cookbook/substreams-powered-subgraphs.mdx b/website/pages/mr/cookbook/substreams-powered-subgraphs.mdx index c63cea35c2cf..6b84c84358c8 100644 --- a/website/pages/mr/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/mr/cookbook/substreams-powered-subgraphs.mdx @@ -1,30 +1,30 @@ --- -title: सबस्ट्रीम-चालित सबग्राफ +title: Substreams-powered subgraphs --- -[Substreams](/substreams) ब्लॉकचेन डेटावर प्रक्रिया करण्यासाठी एक नवीन फ्रेमवर्क आहे, जो ग्राफ नेटवर्कसाठी स्ट्रीमिंगफास्टने विकसित केला आहे. सबस्ट्रीम्स मॉड्युल्स घटक बदल आउटपुट करू शकतात, जे सबग्राफ घटकांशी सुसंगत आहेत. सबग्राफ अशा सबस्ट्रीम मॉड्यूलचा डेटा स्रोत म्हणून वापर करू शकतो, ज्यामुळे सबग्राफ डेव्हलपरसाठी सबस्ट्रीम्सचा इंडेक्सिंग गती आणि अतिरिक्त डेटा येतो. +[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. -## आवश्यकता +## Requirements -या कूकबुकसाठी [yarn](https://yarnpkg.com/), [स्थानिक सबस्ट्रीम डेव्हलपमेंटसाठी आवश्यक असलेले अवलंबित्व](https://substreams.streamingfast.io/developers-guide/installation-requirements) आणि नवीनतम आवृत्ती आवश्यक आहे आलेख CLI (>=0.52.0): +This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): ``` npm install -g @graphprotocol/graph-cli ``` -## कूकबुक मिळवा +## Get the cookbook -> हे कूकबुक हे [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). +> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). ``` graph init --from-example substreams-powered-subgraph ``` -## सबस्ट्रीम पॅकेज परिभाषित करणे +## Defining a Substreams package -सबस्ट्रीम पॅकेज प्रकारांचे बनलेले आहे ([Protocol Buffers](https://protobuf.dev/) म्हणून परिभाषित केलेले), मॉड्यूल (रस्टमध्ये लिहिलेले), आणि एक `substreams.yaml` फाइल जी प्रकारांचा संदर्भ देते आणि मॉड्यूल कसे ट्रिगर केले जातात ते निर्दिष्ट करते. [सबस्ट्रीम्स डेव्हलपमेंटबद्दल अधिक जाणून घेण्यासाठी सबस्ट्रीम दस्तऐवजीकरणाला भेट द्या](/substreams), आणि awesome-substreams](https://github.com/pinax-network/awesome-substreams) आणि [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) उदाहरणांसाठी अधिक तपासा. +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. 
-प्रश्नातील सबस्ट्रीम्स पॅकेज मेननेट इथरियमवर कॉन्ट्रॅक्ट डिप्लॉयमेंट शोधते, सर्व नवीन तैनात केलेल्या कॉन्ट्रॅक्ट्ससाठी क्रिएशन ब्लॉक आणि टाइमस्टॅम्पचा मागोवा घेते. हे करण्यासाठी, `/proto/example.proto` मध्ये एक समर्पित `करार` प्रकार आहे ([प्रोटोकॉल बफर परिभाषित करण्याबद्दल अधिक जाणून घ्या](https://protobuf.dev/programming-guides/proto3/#simple)): +The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -सबस्ट्रीम पॅकेजचे मुख्य तर्क हे `lib.rs` मधील `map_contract` मॉड्यूल आहे, जे प्रत्येक ब्लॉकवर प्रक्रिया करते, परत न आलेले कॉल तयार करण्यासाठी फिल्टर करते, `Contracts` परत करते: +The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result `substreams_entity_change` क्रेटमध्ये फक्त अस्तित्वातील बदल जनरेट करण्यासाठी समर्पित `टेबल्स` फंक्शन आहे ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). व्युत्पन्न केलेले घटक बदल संबंधित सबग्राफच्या `subgraph.graphql` मध्ये परिभाषित केलेल्या `schema.graphql` घटकांशी सुसंगत असले पाहिजेत. +> The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. ``` #[substreams::handlers::map] @@ -88,7 +88,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -हे सबस्ट्रीम पॅकेज सबग्राफद्वारे वापरण्यासाठी तयार करण्यासाठी, तुम्ही खालील आदेश चालवा: +To prepare this Substreams package for consumption by a subgraph, you must run the following commands: ```bash yarn substreams:protogen # generates types in /src/pb @@ -147,19 +147,19 @@ yarn substreams:package # packages the substreams in a .spkg file # alternatively, yarn substreams:prepare calls all of the above commands ``` -> जर तुम्हाला अंतर्निहित सबस्ट्रीम कमांड्स समजून घ्यायच्या असतील तर या स्क्रिप्ट्स `package.json` फाईलमध्ये परिभाषित केल्या आहेत +> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands -हे `substreams.yaml` मधील पॅकेज नाव आणि आवृत्तीवर आधारित `spkg` फाइल तयार करते. `spkg` फाईलमध्ये ग्राफ नोडला हे सबस्ट्रीम पॅकेज अंतर्भूत करण्यासाठी आवश्यक असलेली सर्व माहिती आहे. +This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. -> तुम्ही सबस्ट्रीम पॅकेज अपडेट केल्यास, तुम्ही केलेल्या बदलांवर अवलंबून, तुम्हाला वरीलपैकी काही किंवा सर्व कमांड्स चालवाव्या लागतील जेणेकरून `spkg` अद्ययावत असेल. +> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. 
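Before wiring the package into the subgraph manifest, it can help to confirm that the `.spkg` actually contains the modules you expect. The following is a sketch only: it assumes the StreamingFast `substreams` CLI is installed locally, and the file name is a placeholder for whatever `yarn substreams:package` produced from your package name and version.

```bash
# Sketch: inspect the packaged Substreams before referencing it from subgraph.yaml.
# Replace the file name with the .spkg generated by `yarn substreams:package`.
substreams info ./my-package-v0.1.0.spkg
# The output should list the package's modules (e.g. map_contract and graph_out),
# along with their inputs and output types.
```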
-## सबस्ट्रीम-सक्षम सबग्राफ परिभाषित करणे +## Defining a Substreams-powered subgraph -सबस्ट्रीम-समर्थित सबग्राफ डेटा स्त्रोताचा एक नवीन \`प्रकार' सादर करतात, "सबस्ट्रीम". अशा सबग्राफमध्ये फक्त एक डेटा स्रोत असू शकतो. +Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. -या डेटा स्रोताने अनुक्रमित नेटवर्क, सबस्ट्रीम पॅकेज (`spkg`) सापेक्ष फाइल स्थान म्हणून निर्दिष्ट करणे आवश्यक आहे आणि सबस्ट्रीम पॅकेजमधील मॉड्यूल जे सबग्राफ-सुसंगत घटक बदल घडवून आणते (या प्रकरणात वरील सबस्ट्रीम पॅकेजमधून `map_entity_changes`)). मॅपिंग निर्दिष्ट केले आहे, परंतु फक्त मॅपिंग प्रकार ("substreams/graph-entities") आणि apiVersion ओळखते. +This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. -> सध्या सबग्राफ स्टुडिओ आणि ग्राफ नेटवर्क सबस्ट्रीम-समर्थित सबग्राफला समर्थन देतात जे `मेननेट` (Mainnet Ethereum) इंडेक्स करतात. +> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). ```yaml specVersion: 0.0.4 @@ -180,7 +180,7 @@ dataSources: apiVersion: 0.0.5 ``` -`subgraph.yaml` स्कीमा फाईलचाही संदर्भ देते. या फाईलच्या आवश्यकता अपरिवर्तित आहेत, परंतु निर्दिष्ट केलेल्या घटक `subgraph.yaml` मध्ये संदर्भित सबस्ट्रीम मॉड्यूलद्वारे निर्मित घटक बदलांशी सुसंगत असणे आवश्यक आहे. +The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. ```graphql type Contract @entity { @@ -194,9 +194,9 @@ type Contract @entity { } ``` -वरील दिलेले, सबग्राफ डेव्हलपर हे सबस्ट्रीम-सक्षम सबग्राफ उपयोजित करण्यासाठी ग्राफ CLI वापरू शकतात. +Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. -> सबस्ट्रीम-सक्षम सबग्राफ इंडेक्सिंग मेननेट इथरियम [Subgraph Studio](https://thegraph.com/studio/) मध्ये तैनात केले जाऊ शकतात. +> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). ```bash yarn install # install graph-cli @@ -204,11 +204,11 @@ yarn subgraph:build # build the subgraph yarn subgraph:deploy # deploy the subgraph ``` -बस एवढेच! तुम्ही सबस्ट्रीम-संचालित सबग्राफ तयार आणि तैनात केले आहे. +That's it! You have built and deployed a Substreams-powered subgraph. -## सबस्ट्रीम-समर्थित सबग्राफ सेवा देत आहे +## Serving Substreams-powered subgraphs -सबस्ट्रीम-संचालित सबग्राफ सर्व्ह करण्यासाठी, ग्राफ नोड संबंधित नेटवर्कसाठी सबस्ट्रीम प्रदात्यासह कॉन्फिगर केले जाणे आवश्यक आहे, तसेच चेन हेडचा मागोवा घेण्यासाठी फायरहोस किंवा RPC. हे प्रदाते `config.toml` फाईलद्वारे कॉन्फिगर केले जाऊ शकतात: +In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. 
These providers can be configured via a `config.toml` file: ```toml [chains.mainnet] diff --git a/website/pages/mr/cookbook/upgrading-a-subgraph.mdx b/website/pages/mr/cookbook/upgrading-a-subgraph.mdx index 623fe448de16..0ba97bb23d22 100644 --- a/website/pages/mr/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/mr/cookbook/upgrading-a-subgraph.mdx @@ -1,5 +1,5 @@ --- -title: ग्राफ नेटवर्कवर विद्यमान सबग्राफ श्रेणीसुधारित करणे +title: Upgrading an Existing Subgraph to The Graph Network --- ## परिचय @@ -11,11 +11,11 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### पूर्वतयारी - तुम्ही होस्ट केलेल्या सेवेवर एक सबग्राफ आधीच तैनात केला आहे. -- सबग्राफ ग्राफ नेटवर्कवर उपलब्ध (किंवा बीटामध्ये उपलब्ध) साखळी अनुक्रमित करत आहे. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. -## ग्राफ नेटवर्कवर विद्यमान सबग्राफ श्रेणीसुधारित करणे +## Upgrading an Existing Subgraph to The Graph Network > You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). @@ -160,54 +160,54 @@ graph deploy --studio > Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). -अद्यतनासाठी GRT ला सबग्राफच्या जुन्या आवृत्तीवरून नवीन आवृत्तीवर स्थलांतरित करणे आवश्यक आहे. याचा अर्थ असा की प्रत्येक अपडेटसाठी, नवीन बाँडिंग वक्र तयार केले जाईल (बॉन्डिंग वक्रांवर अधिक [here](/network/curating#bonding-curve-101)). +An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. चला एक उदाहरण बनवूया, जर तुमचा सबग्राफ सक्रियपणे क्युरेट केला जात असेल तरच असे होईल: - सबग्राफच्या v1 वर 100,000 GRT स्वयं-स्थलांतर वापरून सिग्नल केला जातो -- V2 वर मालक अद्यतने. 100,000 GRT नवीन बाँडिंग वक्रमध्ये स्थलांतरित केले जाते, जेथे 97,500 GRT नवीन वक्रमध्ये टाकले जातात आणि 2,500 GRT बर्न केले जातात -- आपल्याला नंतरच्या अद्यतनासाठी मालकाने १२५० GRT बर्न करण्याची आवश्यकता असते, तिथे ५०% फीसाच्या पैकी ज्यासाठी भरण करावं लागतं. आपल्याला अद्यतन सफलतेने करायचं आहे तर तुमच्या वॉलेटमध्ये आपलं हे असलं पाहिजे, नाहीतर अद्यतन सफलतेने होईल. हे अद्यतन करण्याचं प्रक्रिया स्वतंत्र ट्रांझेक्शनमध्ये घडत. +- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned +- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. 
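For readers who want to sanity-check the figures, the arithmetic in the update example above can be sketched as a small calculation. The rate and the owner's share below are inferred from the example's own numbers (2,500 GRT burned and 1,250 GRT owner-paid on a 100,000 GRT migration); treat them as illustrative, not as authoritative protocol parameters.

```typescript
// Sketch of the update example above; the rate is taken from its figures,
// not read from the protocol contracts.
function estimateUpdateCost(signalledGrt: number, taxRate: number = 0.025) {
  const curationTax = signalledGrt * taxRate // GRT burned when signal migrates to the new curve
  return {
    migratedToNewCurve: signalledGrt - curationTax, // lands on the v2 bonding curve
    burnedOnMigration: curationTax,                 // absorbed by the curators as a fee
    ownerShare: curationTax / 2,                    // must be in the owner's wallet before the update
  }
}

// Mirrors the example: 100,000 GRT signalled on v1
// -> 97,500 GRT migrated, 2,500 GRT burned, owner pays 1,250 GRT.
console.log(estimateUpdateCost(100_000))
```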
-_सध्या ही यंत्रणा नेटवर्कवर लाइव्ह असताना, समुदाय सध्या सबग्राफ डेव्हलपरसाठी अपडेटची किंमत कमी करण्याच्या मार्गांवर चर्चा करत आहे._ +_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ ### सबग्राफची स्थिर आवृत्ती राखणे -आपल्या सबग्राफला अनेक बदल करत असल्यास, ती सध्याच्या खर्चापेक्षा नियमितपणे अद्यतन करणं व अद्यतन खर्च देणं उत्तम विचार नाही. आपल्या सबग्राफचं स्थिर व तुटणार नाहीये असंतुलनीय आणि संप्रेषणशी संविधानिक आवृत्ती धारणे आवश्यक आहे, नित्यपणे खर्चाच्या परिप्रेक्ष्याच्या पार्श्वभूमित नाही पण इंडेक्सर्सला त्यांच्या समक्रमण कालांमध्ये विश्वास असावं हे सांगण्यात येतं. अद्यतन साठी तुम्ही योजना बनवित असल्यास, इंडेक्सर्सला संकेत द्यायला आवश्यक आहे, त्यामुळे इंडेक्सर्सच्या समक्रमण कालांना परिणाम होणार नाही. आपल्या सबग्राफसचं आवृत्तीचं नियंत्रण करण्यासाठी Discord[#Indexers channel](https://discord.gg/JexvtHa7dq) वापरा, इंडेक्सर्सला जाणवता येतेय की आपल्याला सबग्राफचं आवृत्तीकरण करण्यात आलंय. +If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. -सबग्राफ बाह्यिक डेव्हलपर्सचं वापर करणारे खुले एपीआयचं आहे. खुले एपीआयसाठी नियमितपणे पालन करणं आवश्यक आहे जेणेकरून ते बाह्यिक डेव्हलपर्सचं अनुप्रयोग विचलित नाहीत. The Graph नेटवर्कमध्ये सबग्राफ डेव्हलपरला नवीन सबग्राफ इंडेक्सर्सला विचार करायला गरजेचं आहे आणि त्याच्या सोबत इतर डेव्हलपर्सचं सोबत आपलं सबग्राफचं वापरणं पण विचारावं लागतं. +Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. ### सबग्राफचा मेटाडेटा अद्यतनित करणे तुम्ही नवीन आवृत्ती प्रकाशित न करता तुमच्या सबग्राफचा मेटाडेटा अपडेट करू शकता. मेटाडेटामध्ये सबग्राफ नाव, प्रतिमा, वर्णन, वेबसाइट URL, स्त्रोत कोड URL आणि श्रेणी समाविष्ट आहेत. विकासक हे सबग्राफ स्टुडिओमध्ये त्यांचे सबग्राफ तपशील अपडेट करून करू शकतात जिथे तुम्ही सर्व लागू फील्ड संपादित करू शकता. -सुनिश्चित करा की एक्सप्लोररमध्ये सबग्राफची तपशील अद्यतनित केली आहे हे तपासा आणि सेव्ह वर क्लिक करा. जर तपशील अद्यतनित करण्यासाठी हे चेक केलं असेल तर, एक ऑन-चेन ट्रांझेक्शन तयार केला जाईल, ज्याने नवीन विचारलेल्या तपशीलांचं एक्सप्लोररमध्ये अद्यतनित केलं जातं आणि नवीन डिप्लॉयमेंटसह नवीन आवृत्ती प्रकाशित करण्याची आवश्यकता नाही. +Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. ## ग्राफ नेटवर्कवर सबग्राफ तैनात करण्यासाठी सर्वोत्तम पद्धती 1. सबग्राफ डेव्हलपमेंटसाठी ईएनएस नावाचा लाभ घेणे: - Set up your ENS [here](https://app.ens.domains/) -- तुमचे ENS नाव तुमच्या सेटिंग्जमध्ये जोडा [here](https://thegraph.com/explorer/settings?view=display-name). +- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). 2. 
तुमचे प्रोफाईल जितके अधिक भरले जातील, तुमचे सबग्राफ अनुक्रमित आणि क्युरेट होण्याची शक्यता तितकी चांगली. ## द ग्राफ नेटवर्कवरील सबग्राफ अपवाद करणे -तुमचा सबग्राफ नापसंत करण्यासाठी [here](/managing/deprecating-a-subgraph) पायऱ्या फॉलो करा आणि तो ग्राफ नेटवर्कमधून काढून टाका. +Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. ## ग्राफ नेटवर्कवर सबग्राफ + बिलिंगची चौकशी करणे -विकासकांना त्यांचे सबग्राफ कोणत्याही निर्बंधांशिवाय उपयोजित करण्याची परवानगी देण्यासाठी होस्ट केलेली सेवा सेट केली गेली. +The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. -ग्राफ नेटवर्क खऱ्या अर्थाने विकेंद्रित होण्यासाठी, प्रोटोकॉलच्या प्रोत्साहनांचा मुख्य भाग म्हणून क्वेरी फी भरावी लागेल. API चे सदस्यत्व घेणे आणि क्वेरी फी भरण्याबद्दल अधिक माहितीसाठी, बिलिंग दस्तऐवज [here](/billing/) पहा. +In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). ### नेटवर्कवरील क्वेरी शुल्काचा अंदाज लावा उत्पादन UI मधील हे थेट वैशिष्ट्य नसले तरी, तुम्ही दर महिन्याला द्यायला तयार असलेली रक्कम घेऊन आणि तुमच्या अपेक्षित क्वेरी व्हॉल्यूमने विभाजित करून प्रति क्वेरी तुमचे कमाल बजेट सेट करू शकता. -तुमचं क्वेरी बजेट निर्धारित करण्याचं आपल्याचं अधिकार आहे, पण नकारारणी आहे की कोणत्याही इंडेक्सर तुमच्या दिलेल्या किंमतावर क्वेरीसाठी सेवा देता येईल. जर गेटवे तुमच्याच किंमतावर अथवा त्यापेक्षा कमी दरावर तुमचं क्वेरी इंडेक्सरसोबत सापडलं, तर तुम्ही तुमच्या बजेट आणि त्यांचं किंमताचं अंतर भरायला लागेल. या परिणामाने, कमी क्वेरी दराने तुमच्या उपलब्ध इंडेक्सर्सचं संख्या कमी होईल, ज्यामुळे तुम्हाला प्राप्य सेवा गुणवत्तेवर प्रभाव पडू शकतो. उच्च क्वेरी फीस असल्याचं फायदेशीर आहे, कारण ही तुमच्या सबग्राफला क्युरेशन आणि विख्यात इंडेक्सर्सकडे आकर्षित करू शकतं. +While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. लक्षात ठेवा की ही एक गतिमान आणि वाढणारी बाजारपेठ आहे, परंतु तुम्ही त्याच्याशी कसा संवाद साधता हे तुमच्या नियंत्रणात आहे. प्रोटोकॉल किंवा गेटवेमध्ये कमाल किंवा किमान किंमत निर्दिष्ट केलेली नाही. उदाहरणार्थ, तुम्ही खाली नेटवर्कवर (दर आठवड्याच्या आधारावर) काही डॅप्सने दिलेली किंमत पाहू शकता. शेवटचा स्तंभ पहा, जो GRT मध्ये क्वेरी शुल्क दर्शवितो. @@ -215,11 +215,11 @@ _सध्या ही यंत्रणा नेटवर्कवर ला ## अतिरिक्त संसाधने -तुम्ही अजूनही गोंधळलेले असाल तर घाबरू नका! खालील संसाधने पहा किंवा खालील विकेंद्रीकृत नेटवर्कवर सबग्राफ श्रेणीसुधारित करण्यासाठी आमचे व्हिडिओ मार्गदर्शक पहा: +If you're still confused, fear not! 
Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: -- [ग्राफ नेटवर्क कॉन्ट्रॅक्ट्स](https://github.com/graphprotocol/contracts) -- [क्युरेशन कॉन्ट्रॅक्ट](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - अंतर्निहित करार ज्याला GNS गुंडाळते +- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) +- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [सबग्राफ स्टुडिओ दस्तऐवजीकरण](/उपयोजन/सबग्राफ-स्टुडिओ) +- [Subgraph Studio documentation](/deploying/subgraph-studio) diff --git a/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx index 4c349be873be..affbbf51215f 100644 --- a/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/mr/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: सबग्राफ स्टुडिओमध्ये सबग्राफ तैनात करणे --- -> तुमचा सबग्राफ ज्या नेटवर्कवरून डेटा अनुक्रमित करत आहे ते विकेंद्रित नेटवर्कवर [समर्थित](/developing/supported-chains) असल्याची खात्री करा. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ उपयोजित करण्यासाठी या चरण आहेत: diff --git a/website/pages/mr/deploying/hosted-service.mdx b/website/pages/mr/deploying/hosted-service.mdx index 67f7b47fda30..d46269587b69 100644 --- a/website/pages/mr/deploying/hosted-service.mdx +++ b/website/pages/mr/deploying/hosted-service.mdx @@ -4,9 +4,9 @@ title: होस्ट केलेली सेवा काय आहे? > Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). -हा विभाग तुम्हाला [होस्ट केलेल्या सेवे](https://thegraph.com/hosted-service/) वर सबग्राफ उपयोजित करून घेऊन जाईल. +This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). -होस्टेड सेव्हिसवर आपलं खातं नसल्यास, आपण आपल्या GitHub खात्याने साइन अप करू शकता. एकदा प्रमाणित केल्यानंतर, आपण युआयचा वापर करून युआईद्वारे सबग्रॅफ तयार करू शकता आणि आपल्या टर्मिनलमधून ते डिप्लॉय करू शकता. होस्टेड सेव्हिसला कयंडा, पॉलिगॉन, ग्नोसिस चेन, बीएनबी चेन, ऑप्टिमिझम, अर्बिट्रम, आणि इतर अनेक नेटवर्कसमवेत समर्थन केलं आहे. +If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. सर्वसमावेशक सूचीसाठी, [समर्थित नेटवर्क](/developing/supported-networks/#hosted-service) पहा. @@ -16,7 +16,7 @@ title: होस्ट केलेली सेवा काय आहे? 
### विद्यमान करारातून -तुमच्‍या पसंतीच्या नेटवर्कवर तुमच्‍याकडे आधीपासून स्‍मार्ट कॉन्ट्रॅक्ट डिप्लॉय केले असल्‍यास, या कॉन्ट्रॅक्टमधून नवीन सबग्राफ बूटस्ट्रॅप करणे हा होस्ट केलेल्या सेवेला प्रारंभ करण्‍याचा एक चांगला मार्ग असू शकतो. +If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. तुम्ही ही कमांड सबग्राफ तयार करण्यासाठी वापरू शकता जे विद्यमान करारातील सर्व इव्हेंट्स अनुक्रमित करते. हे [Etherscan](https://etherscan.io/) वरून करार ABI आणण्याचा प्रयत्न करेल. @@ -46,6 +46,17 @@ graph init \ उदाहरण सबग्राफ हे Dani Grant च्या गुरुत्वाकर्षण करारावर आधारित आहे जे वापरकर्ता अवतार व्यवस्थापित करते आणि `NewGravatar` किंवा `UpdateGravatar` इव्हेंट जेव्हाही अवतार तयार किंवा अपडेट केले जातात. सबग्राफ ग्राफ नोड स्टोअरमध्ये `Gravatar` संस्था लिहून आणि हे इव्हेंटनुसार अपडेट केले जातील याची खात्री करून हे इव्हेंट हाताळते. तुमच्या स्मार्ट कॉन्ट्रॅक्टमधील कोणत्या इव्हेंटकडे लक्ष द्यावे, मॅपिंग आणि बरेच काही अधिक चांगल्या प्रकारे समजून घेण्यासाठी [सबग्राफ मॅनिफेस्ट](/developing/creating-a-subgraph#the-subgraph-manifest) वर सुरू ठेवा. -## होस्ट केलेल्या सेवेवर समर्थित नेटवर्क +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + +## Supported Networks on the hosted service तुम्ही समर्थित नेटवर्कची सूची [येथे](/developing/supported-networks) शोधू शकता. diff --git a/website/pages/mr/deploying/subgraph-studio.mdx b/website/pages/mr/deploying/subgraph-studio.mdx index 79ceb9fb0f82..a6f3467d1128 100644 --- a/website/pages/mr/deploying/subgraph-studio.mdx +++ b/website/pages/mr/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ title: सबग्राफ स्टुडिओ कसा वापराय 1. तुमच्या वॉलेटसह साइन इन करा - तुम्ही हे MetaMask किंवा WalletConnect द्वारे करू शकता 1. एकदा तुम्ही साइन इन केल्यानंतर, तुम्हाला तुमच्या खात्याच्या मुख्यपृष्ठावर तुमची अनन्य उपयोजन की दिसेल. हे तुम्हाला तुमचे सबग्राफ प्रकाशित करण्यास किंवा तुमच्या API की + बिलिंग व्यवस्थापित करण्यास अनुमती देईल. तुमच्याकडे एक अनन्य डिप्लॉय की असेल जी तुम्हाला वाटत असेल की ती तडजोड केली गेली असेल तर ती पुन्हा व्युत्पन्न केली जाऊ शकते. -## सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ कसा तयार करायचा +## How to Create a Subgraph in Subgraph Studio -सर्वोत्तम भाग! तुम्ही प्रथम सबग्राफ तयार करता तेव्हा, तुम्हाला ते भरण्यासाठी निर्देशित केले जाईल: - -- तुमचे सबग्राफ नाव -- प्रतिमा -- वर्णन -- श्रेणी (उदा. `DeFi`, `NFTs`, `Governance`) -- संकेतस्थळ + ## ग्राफ नेटवर्कसह सबग्राफ सुसंगतता diff --git a/website/pages/mr/developing/creating-a-subgraph.mdx b/website/pages/mr/developing/creating-a-subgraph.mdx index 691af692f47e..2b0aa0251ccc 100644 --- a/website/pages/mr/developing/creating-a-subgraph.mdx +++ b/website/pages/mr/developing/creating-a-subgraph.mdx @@ -2,23 +2,23 @@ title: सबग्राफ तयार करणे --- -सबग्राफ ब्लॉकचेनमधून डेटा काढतो, त्यावर प्रक्रिया करतो आणि तो संग्रहित करतो जेणेकरून GraphQL द्वारे सहजपणे क्वेरी करता येईल. +A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. 
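As a concrete illustration of "easily queried via GraphQL", a client can simply POST a query to a deployed subgraph's endpoint. The URL and the `gravatars` fields below are placeholders modelled on the Gravatar example used later in this guide; substitute your own subgraph's query URL and schema.

```typescript
// Hypothetical client-side query against a deployed subgraph.
// Replace the URL and fields with those of the subgraph you actually deploy.
const SUBGRAPH_URL = 'https://api.thegraph.com/subgraphs/name/<GITHUB_USER>/<SUBGRAPH_NAME>'

async function fetchGravatars(): Promise<void> {
  const response = await fetch(SUBGRAPH_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query: '{ gravatars(first: 5) { id displayName } }' }),
  })
  const { data, errors } = await response.json()
  if (errors) throw new Error(JSON.stringify(errors))
  console.log(data.gravatars)
}

fetchGravatars().catch(console.error)
```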
![सबग्राफ परिभाषित करणे](/img/defining-a-subgraph.png) -सबग्राफ व्याख्येमध्ये काही फाइल्स असतात: +The subgraph definition consists of a few files: -- `subgraph.yaml`: सबग्राफ मॅनिफेस्ट असलेली YAML फाइल +- `subgraph.yaml`: a YAML file containing the subgraph manifest - `schema.graphql`: एक GraphQL स्कीमा जो तुमच्या सबग्राफसाठी कोणता डेटा संग्रहित केला जातो आणि GraphQL द्वारे त्याची क्वेरी कशी करावी हे परिभाषित करते -- `AssemblyScript मॅपिंग`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) कोड जो इव्हेंट डेटामधून तुमच्या स्कीमामध्ये परिभाषित केलेल्या घटकांमध्ये अनुवादित करतो (उदा. `mapping.ts` या ट्युटोरियलमध्ये) +- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) -> ग्राफच्या विकेंद्रीकृत नेटवर्कवर तुमचा सबग्राफ वापरण्यासाठी, तुम्हाला [एपीआय की तयार करणे](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key) आवश्यक असेल. तुम्ही तुमच्या सबग्राफमध्ये किमान [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network) सह [सिग्नल जोडावे](/network/curating/#how-to-signal) अशी शिफारस केली जाते. +> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). तुम्ही मॅनिफेस्ट फाइलच्या सामग्रीबद्दल तपशीलात जाण्यापूर्वी, तुम्हाला [ग्राफ CLI](https://github.com/graphprotocol/graph-cli) स्थापित करणे आवश्यक आहे जे तुम्हाला तयार करण्यासाठी आवश्यक असेल. आणि सबग्राफ तैनात करा. -## ग्राफ CLI स्थापित करा +## Install the Graph CLI आलेख CLI JavaScript मध्ये लिहिलेले आहे, आणि ते वापरण्यासाठी तुम्हाला `यार्न` किंवा `npm` स्थापित करावे लागेल; असे गृहीत धरले जाते की तुमच्याकडे पुढील गोष्टींमध्ये सूत आहे. @@ -38,7 +38,7 @@ npm install -g @graphprotocol/graph-cli एकदा स्थापित केल्यानंतर, `graph init` कमांडचा वापर नवीन सबग्राफ प्रोजेक्ट सेट करण्यासाठी केला जाऊ शकतो, एकतर विद्यमान करारातून किंवा उदाहरण सबग्राफमधून. ही कमांड `graph init --product subgraph-studio` मध्ये पास करून सबग्राफ स्टुडिओवर सबग्राफ तयार करण्यासाठी वापरली जाऊ शकते. जर तुमच्याकडे आधीपासून तुमच्या पसंतीच्या नेटवर्कवर स्मार्ट कॉन्ट्रॅक्ट तैनात केले असेल, तर त्या कॉन्ट्रॅक्टमधून नवीन सबग्राफ बूटस्ट्रॅप करणे हा सुरू करण्याचा एक चांगला मार्ग असू शकतो. -## विद्यमान करारातून +## From An Existing Contract खालील कमांड एक सबग्राफ तयार करते जे विद्यमान कराराच्या सर्व घटनांना अनुक्रमित करते. ते इथरस्कॅन वरून ABI करार मिळवण्याचा प्रयत्न करते आणि स्थानिक फाइल मार्गाची विनंती करण्यासाठी परत येते. पर्यायी युक्तिवादांपैकी कोणतेही गहाळ असल्यास, ते तुम्हाला परस्परसंवादी फॉर्ममधून घेऊन जाते. @@ -51,9 +51,9 @@ graph init \ [] ``` -सबग्राफ स्टुडिओमध्ये `` हा तुमच्या सबग्राफचा आयडी आहे, तो तुमच्या सबग्राफ तपशील पेजवर आढळू शकतो. +The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. -## उदाहरणाच्या सबग्राफमधून +## From An Example Subgraph दुसरा मोड `graph init` सपोर्ट करतो तो उदाहरण सबग्राफमधून नवीन प्रोजेक्ट तयार करतो. 
खालील कमांड हे करते: @@ -63,9 +63,9 @@ graph init \ उदाहरण सबग्राफ हे Dani Grant च्या ग्रॅव्हिटी कॉन्ट्रॅक्टवर आधारित आहे जे वापरकर्ता अवतार व्यवस्थापित करते आणि `NewGravatar` किंवा `UpdateGravatar` इव्हेंट जेव्हाही अवतार तयार किंवा अपडेट केले जातात. सबग्राफ ग्राफ नोड स्टोअरमध्ये `Gravatar` संस्था लिहून आणि हे इव्हेंटनुसार अपडेट केले जातील याची खात्री करून हे इव्हेंट हाताळते. या उदाहरणासाठी सबग्राफ मॅनिफेस्ट बनवणार्‍या फाइल्सवर पुढील विभाग जातील. -## विद्यमान सबग्राफमध्ये नवीन डेटास्रोत जोडा +## Add New dataSources To An Existing Subgraph -`v0.31.0` पासून `graph-cli` `graph add` आदेशाद्वारे विद्यमान सबग्राफमध्ये नवीन डेटास्रोत जोडण्यास समर्थन देते. +Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. ```sh graph add
[] @@ -80,12 +80,12 @@ Options: `add` कमांड इथरस्कॅनमधून ABI आणेल (जोपर्यंत ABI पथ `--abi` पर्यायाने निर्दिष्ट केला जात नाही), आणि नवीन `डेटास्रोत` तयार करेल. > त्याच प्रकारे `graph init` कमांड `डेटास्रोत` `---करारातून` तयार करते, त्यानुसार स्कीमा आणि मॅपिंग अद्यतनित करते. -`--merge-entities` पर्याय विकसकाला `संस्था` आणि `इव्हेंट` नावातील विरोधाभास कसे हाताळायचे आहेत हे ओळखतो: +The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: -- `सत्य` असल्यास: नवीन `डेटास्रोत` ने विद्यमान `इव्हेंटहँडलर्स` वापरावे & `संस्था`. -- जर `असत्य`: एक नवीन अस्तित्व & इव्हेंट हँडलर `${dataSourceName}{EventName}` सह तयार केला पाहिजे. +- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. +- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. -कराराचा `पत्ता` संबंधित नेटवर्कसाठी `networks.json` वर लिहिला जाईल. +The contract `address` will be written to the `networks.json` for the relevant network. > **टीप:** परस्परसंवादी क्ली वापरताना, यशस्वीरित्या `ग्राफ इनिट` चालवल्यानंतर, तुम्हाला एक नवीन `डेटास्रोत` जोडण्यासाठी सूचित केले जाईल. @@ -93,7 +93,7 @@ Options: सबग्राफ मॅनिफेस्ट `subgraph.yaml` स्मार्ट कॉन्ट्रॅक्ट्स तुमच्या सबग्राफ इंडेक्सेस परिभाषित करतो, या कॉन्ट्रॅक्टमधील कोणत्या इव्हेंट्सकडे लक्ष द्यायचे आणि ग्राफ नोड स्टोअर करत असलेल्या आणि क्वेरी करण्याची परवानगी देणार्‍या घटकांसाठी इव्हेंट डेटा कसा मॅप करायचा. सबग्राफ मॅनिफेस्टसाठी संपूर्ण तपशील [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) आढळू शकतात. -सबग्राफच्या उदाहरणासाठी, `subgraph.yaml` आहे: +For the example subgraph, `subgraph.yaml` is: ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,9 +144,9 @@ dataSources: मॅनिफेस्टसाठी अद्यतनित करण्याच्या महत्त्वाच्या नोंदी आहेत: -- `वर्णन`: सबग्राफ काय आहे याचे मानवी वाचनीय वर्णन. जेव्हा सबग्राफ होस्ट केलेल्या सेवेवर तैनात केला जातो तेव्हा हे वर्णन ग्राफ एक्सप्लोररद्वारे प्रदर्शित केले जाते. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. -- `रेपॉजिटरी`: रिपॉझिटरीची URL जिथे सबग्राफ मॅनिफेस्ट आढळू शकते. हे ग्राफ एक्सप्लोररद्वारे देखील प्रदर्शित केले जाते. +- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. - `वैशिष्ट्ये`: सर्व वापरलेल्या [वैशिष्ट्य](#experimental-features) नावांची सूची. @@ -146,7 +154,11 @@ dataSources: - `dataSources.source.startBlock`: ब्लॉकची पर्यायी संख्या ज्यावरून डेटा स्रोत अनुक्रमणिका सुरू करतो. बहुतेक प्रकरणांमध्ये, आम्ही ज्या ब्लॉकमध्ये करार तयार केला होता तो वापरण्याचा सल्ला देतो. -- `dataSources.mapping.entities`: डेटा स्रोत स्टोअरमध्ये लिहित असलेल्या संस्था. प्रत्येक घटकासाठी स्कीमा schema.graphql फाइलमध्ये परिभाषित केले आहे. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. 
These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + +- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: स्त्रोत करारासाठी एक किंवा अधिक नावाच्या ABI फाइल्स तसेच तुम्ही मॅपिंगमधून परस्परसंवाद करता अशा इतर स्मार्ट करारांसाठी. @@ -158,19 +170,19 @@ dataSources: एकच सबग्राफ एकाधिक स्मार्ट कॉन्ट्रॅक्ट्समधील डेटा अनुक्रमित करू शकतो. प्रत्येक करारासाठी एक एंट्री जोडा ज्यामधून डेटा `डेटास्रोत` अॅरेमध्ये अनुक्रमित करणे आवश्यक आहे. -ब्लॉकमधील डेटा स्रोतासाठी ट्रिगर खालील प्रक्रिया वापरून क्रमबद्ध केले जातात: +The triggers for a data source within a block are ordered using the following process: -1. इव्हेंट आणि कॉल ट्रिगर प्रथम ब्लॉकमधील व्यवहार निर्देशांकानुसार ऑर्डर केले जातात. +1. Event and call triggers are first ordered by transaction index within the block. 2. समान व्यवहारामधील इव्हेंट आणि कॉल ट्रिगर्स एक नियम वापरून ऑर्डर केले जातात: प्रथम इव्हेंट ट्रिगर नंतर कॉल ट्रिगर, प्रत्येक प्रकार मॅनिफेस्टमध्ये परिभाषित केलेल्या क्रमाचा आदर करतो. -3. ब्लॉक ट्रिगर इव्हेंट आणि कॉल ट्रिगर्सनंतर चालवले जातात, ते मॅनिफेस्टमध्ये परिभाषित केलेल्या क्रमाने. +3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. -हे ऑर्डरिंग नियम बदलू शकतात. +These ordering rules are subject to change. ### ABIs मिळवणे -ABI फाइल(ल्या) तुमच्या कराराशी जुळल्या पाहिजेत. ABI फाइल्स मिळविण्याचे काही मार्ग आहेत: +The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: -- तुम्ही तुमचा स्वतःचा प्रकल्प तयार करत असल्यास, तुम्हाला तुमच्या सर्वात वर्तमान ABI मध्ये प्रवेश मिळण्याची शक्यता आहे. +- If you are building your own project, you will likely have access to your most current ABIs. - तुम्ही सार्वजनिक प्रकल्पासाठी सबग्राफ तयार करत असल्यास, तुम्ही तो प्रकल्प तुमच्या संगणकावर डाउनलोड करू शकता आणि [`ट्रफल कंपाइल`](https://truffleframework.com/docs/truffle/overview) वापरून किंवा कंपाइल करण्यासाठी सोल्क वापरून ABI मिळवू शकता. - तुम्ही [Etherscan](https://etherscan.io/) वर ABI देखील शोधू शकता, परंतु हे नेहमीच विश्वसनीय नसते, कारण तेथे अपलोड केलेला ABI कालबाह्य असू शकतो. तुमच्याकडे योग्य ABI असल्याची खात्री करा, अन्यथा तुमचा सबग्राफ चालवणे अयशस्वी होईल. @@ -186,7 +198,7 @@ The Graph सह, तुम्ही फक्त `schema.graphql` मध्य ### उत्तम उदाहरण -खाली दिलेला `Gravatar` घटक एका Gravatar ऑब्जेक्टभोवती संरचित आहे आणि एखाद्या घटकाची व्याख्या कशी करता येईल याचे उत्तम उदाहरण आहे. +The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. ```graphql type Gravatar @entity(immutable: true) { @@ -234,20 +246,21 @@ type GravatarDeclined @entity { #### ग्राफक्यूएल समर्थित स्केलर -आम्ही आमच्या GraphQL API मध्ये खालील स्केलरला समर्थन देतो: +We support the following scalars in our GraphQL API: | प्रकार | वर्णन | | --- | --- | -| `बाइट्स` | बाइट अॅरे, हेक्साडेसिमल स्ट्रिंग म्हणून प्रस्तुत केले जाते. सामान्यतः इथरियम हॅश आणि पत्त्यांसाठी वापरले जाते. | -| `स्ट्रिंग` | `स्ट्रिंग` मूल्यांसाठी स्केलर. शून्य वर्ण समर्थित नाहीत आणि आपोआप काढले जातात. | +| `बाइट्स` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `स्ट्रिंग` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `बुलियन` | `बूलियन` मूल्यांसाठी स्केलर. | | `इंट` | GraphQL spec `Int` ला ३२ बाइट्सचा आकार ठरवतो. 
| +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | मोठे पूर्णांक. इथरियमच्या `uint32`, `int64`, `uint64`, ..., `uint256` प्रकारांसाठी वापरले जाते. टीप: `uint32` खाली सर्व काही, जसे की `int32`, `uint24` किंवा `int8` `i32` म्हणून प्रस्तुत केले जाते 0>. | | `बिग डेसिमल` | `BigDecimal` उच्च सुस्पष्टता दशांश एक महत्त्वपूर्ण आणि घातांक म्हणून प्रस्तुत केले जाते. घातांक श्रेणी −6143 ते +6144 पर्यंत आहे. 34 लक्षणीय अंकांपर्यंत पूर्णांक. | #### एनम्स -तुम्ही स्कीमामध्ये enums देखील तयार करू शकता. Enums मध्ये खालील वाक्यरचना आहे: +You can also create enums within a schema. Enums have the following syntax: ```graphql enum TokenStatus { @@ -265,11 +278,11 @@ enum TokenStatus { एखाद्या घटकाचा तुमच्या स्कीमामधील एक किंवा अधिक इतर घटकांशी संबंध असू शकतो. हे नातेसंबंध तुमच्या प्रश्नांमध्ये असू शकतात. आलेखामधील संबंध दिशाहीन आहेत. नात्याच्या "शेवट" वर एकदिशात्मक संबंध परिभाषित करून द्विदिशात्मक संबंधांचे अनुकरण करणे शक्य आहे. -नातेसंबंध हे इतर कोणत्याही क्षेत्राप्रमाणेच घटकांवर परिभाषित केले जातात शिवाय निर्दिष्ट केलेला प्रकार दुसर्‍या घटकाचा आहे. +Relationships are defined on entities just like any other field except that the type specified is that of another entity. #### वन-टू-वन संबंध -`TransactionReceipt` घटक प्रकारासह पर्यायी वन-टू-वन संबंधांसह `व्यवहार` अस्तित्व प्रकार परिभाषित करा: +Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: ```graphql type Transaction @entity(immutable: true) { @@ -285,7 +298,7 @@ type TransactionReceipt @entity(immutable: true) { #### एक-ते-अनेक संबंध -टोकन अस्तित्व प्रकारासह आवश्यक एक-ते-अनेक संबंधांसह `टोकन बॅलन्स` अस्तित्व प्रकार परिभाषित करा: +Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: ```graphql type Token @entity(immutable: true) { @@ -307,7 +320,7 @@ type TokenBalance @entity { #### उदाहरण -आम्ही `tokenBalances` फील्ड मिळवून टोकनमधून टोकनसाठी शिल्लक उपलब्ध करू शकतो: +We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: ```graphql type Token @entity(immutable: true) { @@ -344,7 +357,7 @@ type User @entity { } ``` -हे नातेसंबंध संचयित करण्याचा अधिक कार्यक्षम मार्ग म्हणजे मॅपिंग टेबलद्वारे ज्यामध्ये प्रत्येक `वापरकर्ता` / `संस्था` जोडीसाठी एक एंट्री आहे जसे की स्कीमा +A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like ```graphql type Organization @entity { @@ -366,7 +379,7 @@ type UserOrganization @entity { } ``` -या दृष्टिकोनासाठी क्वेरी पुनर्प्राप्त करण्यासाठी एका अतिरिक्त स्तरावर उतरणे आवश्यक आहे, उदाहरणार्थ, वापरकर्त्यांसाठी संस्था: +This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: ```graphql query usersWithOrganizations { @@ -401,7 +414,7 @@ type MyFirstEntity @entity { पूर्ण मजकूर क्वेरी व्याख्येमध्ये क्वेरीचे नाव, मजकूर फील्डवर प्रक्रिया करण्यासाठी वापरला जाणारा भाषा शब्दकोष, परिणाम ऑर्डर करण्यासाठी वापरले जाणारे रँकिंग अल्गोरिदम आणि शोधामध्ये समाविष्ट फील्ड समाविष्ट असतात. प्रत्येक फुलटेक्स्ट क्वेरी एकाधिक फील्डमध्ये असू शकते, परंतु सर्व समाविष्ट फील्ड एकाच घटक प्रकारातील असणे आवश्यक आहे. -पूर्ण मजकूर क्वेरी जोडण्यासाठी, GraphQL स्कीमामध्ये फुलटेक्स्ट निर्देशासह `_Schema__` प्रकार समाविष्ट करा. 
+To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. ```graphql type _Schema_ @@ -424,7 +437,7 @@ type Band @entity { } ``` -उदाहरण `bandSearch` फील्ड `नाव`, `वर्णनबँड` घटक फिल्टर करण्यासाठी क्वेरींमध्ये वापरले जाऊ शकते >, आणि `जैव` फील्ड. पूर्ण मजकूर शोध API आणि अधिक उदाहरण वापराच्या वर्णनासाठी [GraphQL API - क्वेरी](/querying/graphql-api#queries) वर जा. +The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. ```graphql query { @@ -479,7 +492,7 @@ query { `mapping.eventHandlers` अंतर्गत `subgraph.yaml` मध्ये परिभाषित केलेल्या प्रत्येक इव्हेंट हँडलरसाठी, त्याच नावाचे निर्यात केलेले कार्य तयार करा. प्रत्येक हँडलरने हाताळल्या जात असलेल्या इव्हेंटच्या नावाशी संबंधित प्रकारासह `इव्हेंट` नावाचा एकच पॅरामीटर स्वीकारला पाहिजे. -उदाहरणाच्या सबग्राफमध्ये, `src/mapping.ts` मध्ये `NewGravatar` आणि `UpdatedGravatar` इव्हेंटसाठी हँडलर आहेत: +In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -595,7 +608,7 @@ dataSources: handler: handleNewExchange ``` -### डायनॅमिकली तयार केलेल्या करारांसाठी डेटा स्रोत टेम्पलेट्स +### Data Source Templates for Dynamically Created Contracts त्यानंतर, तुम्ही मॅनिफेस्टमध्ये _डेटा स्रोत टेम्पलेट्स_ जोडता. हे नियमित डेटा स्रोतांसारखेच आहेत, त्याशिवाय त्यांना `स्रोत` अंतर्गत पूर्व-परिभाषित करार पत्ता नाही. सामान्यत:, तुम्ही पालक कराराद्वारे व्यवस्थापित किंवा संदर्भित केलेल्या प्रत्येक प्रकारच्या उप-करारासाठी एक टेम्पलेट परिभाषित कराल. @@ -663,7 +676,7 @@ export function handleNewExchange(event: NewExchange): void { } ``` -`एक्सचेंज` टेम्प्लेटच्या मॅपिंगमध्ये, संदर्भ नंतर प्रवेश केला जाऊ शकतो: +Inside a mapping of the `Exchange` template, the context can then be accessed: ```typescript import { dataSource } from '@graphprotocol/graph-ts' @@ -672,7 +685,7 @@ let context = dataSource.context() let tradingPair = context.getString('tradingPair') ``` -सर्व मूल्य प्रकारांसाठी `setString` आणि `getString` सारखे सेटर आणि गेटर्स आहेत. +There are setters and getters like `setString` and `getString` for all value types. ## ब्लॉक सुरू करा @@ -702,11 +715,11 @@ dataSources: handler: handleNewEvent ``` -> **टीप:** इथरस्कॅनवर कॉन्ट्रॅक्ट क्रिएशन ब्लॉक त्वरीत शोधला जाऊ शकतो: +> **Note:** The contract creation block can be quickly looked up on Etherscan: > -> 1. शोध बारमध्ये त्याचा पत्ता प्रविष्ट करून करार शोधा. -> 2. `Contract Creator` विभागातील निर्मिती व्यवहार हॅशवर क्लिक करा. -> 3. व्यवहार तपशील पृष्ठ लोड करा जिथे तुम्हाला त्या करारासाठी प्रारंभ ब्लॉक मिळेल. +> 1. Search for the contract by entering its address in the search bar. +> 2. Click on the creation transaction hash in the `Contract Creator` section. +> 3. Load the transaction details page where you'll find the start block for that contract. ## हँडलर्सना कॉल करा @@ -718,7 +731,7 @@ dataSources: ### कॉल हँडलरची व्याख्या -तुमच्‍या मॅनिफेस्‍टमध्‍ये कॉल हँडलर परिभाषित करण्‍यासाठी, तुम्ही ज्या डेटा स्रोताची सदस्यता घेऊ इच्छिता त्याखाली फक्त `कॉल हँडलर` अॅरे जोडा. +To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. 
```yaml dataSources: @@ -770,14 +783,16 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { ### समर्थित फिल्टर +#### Call Filter + ```yaml filter: kind: call ``` -_परिभाषित हँडलरला प्रत्येक ब्लॉकसाठी एकदा कॉल केला जाईल ज्यात हँडलरने परिभाषित केलेल्या कॉन्ट्रॅक्टला (डेटा स्त्रोत) कॉल असेल._ +_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ -> **टीप:** `कॉल` फिल्टर सध्या पॅरिटी ट्रेसिंग API वर अवलंबून आहे. काही नेटवर्क, जसे की BNB चेन आणि आर्बिट्रम, या API चे समर्थन करत नाहीत. जर या नेटवर्कपैकी एक सबग्राफ अनुक्रमणिकामध्ये `कॉल` फिल्टरसह एक किंवा अधिक ब्लॉक हँडलर असतील, तर ते समक्रमण सुरू होणार नाही. +> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. ब्लॉक हँडलरसाठी फिल्टरची अनुपस्थिती हे सुनिश्चित करेल की हँडलरला प्रत्येक ब्लॉक म्हटले जाईल. डेटा स्त्रोतामध्ये प्रत्येक फिल्टर प्रकारासाठी फक्त एक ब्लॉक हँडलर असू शकतो. @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### मॅपिंग कार्य मॅपिंग फंक्शनला त्याचा एकमेव युक्तिवाद म्हणून `ethereum.Block` प्राप्त होईल. इव्हेंटसाठी मॅपिंग फंक्शन्सप्रमाणे, हे फंक्शन स्टोअरमधील विद्यमान सबग्राफ घटकांमध्ये प्रवेश करू शकते, स्मार्ट कॉन्ट्रॅक्ट कॉल करू शकते आणि संस्था तयार किंवा अद्यतनित करू शकते. @@ -833,11 +887,11 @@ eventHandlers: जेव्हा स्वाक्षरी आणि विषय 0 दोन्ही जुळतात तेव्हाच इव्हेंट ट्रिगर केला जाईल. डीफॉल्टनुसार, `topic0` इव्हेंट स्वाक्षरीच्या हॅशच्या समान आहे. -## इव्हेंट हँडलर्समधील व्यवहाराच्या पावत्या +## Transaction Receipts in Event Handlers `specVersion` `0.0.5` आणि `apiVersion` `0.0.7` पासून प्रारंभ करून, इव्हेंट हँडलर्सना पावतीवर प्रवेश असू शकतो व्यवहार ज्याने त्यांना उत्सर्जित केले. -असे करण्यासाठी, नवीन `पावती: सत्य` की सह सबग्राफ मॅनिफेस्टमध्ये इव्हेंट हँडलर घोषित करणे आवश्यक आहे, जे पर्यायी आहे आणि डीफॉल्ट असत्य आहे. +To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. 
```yaml eventHandlers: @@ -859,7 +913,7 @@ eventHandlers: | [कलम करणे](#grafting-onto-existing-subgraphs) | `कलम करणे` | | [इथरियम करारांवर आयपीएफएस](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | -उदाहरणार्थ, जर सबग्राफ **पूर्ण-मजकूर शोध** आणि **नॉन-फेटल एरर** वैशिष्ट्ये वापरत असेल, तर मॅनिफेस्टमधील `वैशिष्ट्ये` फील्ड हे असावे: +For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: ```yaml specVersion: 0.0.4 @@ -890,7 +944,7 @@ dataSources: ... > **टीप:** ग्राफ नेटवर्क अद्याप घातक नसलेल्या त्रुटींना समर्थन देत नाही आणि विकासकांनी स्टुडिओद्वारे नेटवर्कवर ती कार्यक्षमता वापरून सबग्राफ उपयोजित करू नये. -गैर-घातक त्रुटी सक्षम करण्यासाठी सबग्राफ मॅनिफेस्टवर खालील वैशिष्ट्य ध्वज सेट करणे आवश्यक आहे: +Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: ```yaml specVersion: 0.0.4 @@ -934,9 +988,11 @@ _meta { ### विद्यमान सबग्राफवर कलम करणे +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + जेव्हा सबग्राफ प्रथम उपयोजित केला जातो, तेव्हा तो संबंधित साखळीच्या उत्पत्ती ब्लॉकवर (किंवा प्रत्येक डेटा स्त्रोतासह परिभाषित केलेल्या `startBlock` वर) काही परिस्थितींमध्ये इव्हेंट्सचे अनुक्रमणिका सुरू करतो; विद्यमान सबग्राफमधील डेटा पुन्हा वापरणे आणि नंतरच्या ब्लॉकमध्ये अनुक्रमणिका सुरू करणे फायदेशीर आहे. अनुक्रमणिकेच्या या मोडला _ग्राफ्टिंग_ म्हणतात. उदाहरणार्थ, मॅपिंगमध्ये भूतकाळातील साध्या चुका लवकर मिळवण्यासाठी किंवा विद्यमान सबग्राफ अयशस्वी झाल्यानंतर तात्पुरते काम करण्यासाठी विकासादरम्यान ग्राफ्टिंग उपयुक्त आहे. -सबग्राफ बेस सबग्राफवर ग्राफ्ट केला जातो जेव्हा `subgraph.yaml` मधील सबग्राफ मॅनिफेस्टमध्ये शीर्ष-स्तरावर `ग्राफ्ट` ब्लॉक असतो: +A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: ```yaml description: ... @@ -959,11 +1015,11 @@ graft: - हे इंटरफेस जोडते किंवा काढून टाकते - कोणत्या घटकासाठी इंटरफेस लागू केला जातो ते बदलते -> **[वैशिष्ट्य व्यवस्थापन](#experimental-features):** `ग्राफ्टिंग` सबग्राफ मॅनिफेस्टमध्ये `वैशिष्ट्ये` अंतर्गत घोषित करणे आवश्यक आहे. +> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. ## फाइल डेटा स्रोत -फाईल डेटा स्रोत ही एक नवीन सबग्राफ कार्यक्षमता आहे जी आयपीएफएसपासून सुरू होणार्‍या, मजबूत, वाढवता येण्याजोग्या पद्धतीने अनुक्रमणिकेदरम्यान ऑफ-चेन डेटामध्ये प्रवेश करण्यासाठी आहे. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > हे ऑफ-चेन डेटाच्या निर्धारवादी अनुक्रमणिकेसाठी तसेच अनियंत्रित HTTP-स्रोत डेटाच्या संभाव्य परिचयासाठी देखील पाया घालते. @@ -975,17 +1031,17 @@ graft: > हे विद्यमान `ipfs.cat` API बदलते -### अपग्रेड मार्गदर्शक +### Upgrade guide #### `graph-ts` आणि `graph-cli` अपडेट करा -फाइल डेटा स्रोतांना ग्राफ-ts >=0.29.0 आणि ग्राफ-cli>=0.33.1 आवश्यक आहे +File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 -#### नवीन अस्तित्व प्रकार जोडा जो फाइल सापडल्यावर अपडेट केला जाईल +#### Add a new entity type which will be updated when files are found -फाइल डेटा स्रोत साखळी-आधारित घटकांमध्ये प्रवेश करू शकत नाहीत किंवा अद्यतनित करू शकत नाहीत, परंतु फाइल विशिष्ट घटक अद्यतनित करणे आवश्यक आहे. 
+File data sources cannot access or update chain-based entities, but must update file specific entities. -याचा अर्थ असा असू शकतो की विद्यमान संस्थांमधून फील्ड एकत्र जोडलेल्या वेगळ्या संस्थांमध्ये विभाजित करणे. +This may mean splitting out fields from existing entities into separate entities, linked together. मूळ एकत्रित अस्तित्व: @@ -1028,11 +1084,11 @@ type TokenMetadata @entity { जर मूळ घटक आणि परिणामी फाइल डेटा स्रोत घटक यांच्यातील संबंध 1:1 असेल तर, आयपीएफएस सीआयडी लुकअप म्हणून वापरून मूळ घटकाला परिणामी फाइल घटकाशी जोडणे हा सर्वात सोपा नमुना आहे. तुम्हाला तुमच्या नवीन फाइल-आधारित घटकांचे मॉडेलिंग करण्यात अडचण येत असल्यास Discord वर संपर्क साधा! -> तुम्ही या नेस्टेड घटकांच्या आधारे मूळ घटक फिल्टर करण्यासाठी [नेस्टेड फिल्टर](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) वापरू शकता. +> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### `kind: file/ipfs` सह एक नवीन टेम्पलेट डेटा स्रोत जोडा +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` -हा डेटा स्त्रोत आहे जो जेव्हा स्वारस्य असलेली फाइल ओळखली जाईल तेव्हा तयार होईल. +This is the data source which will be spawned when a file of interest is identified. ```yaml templates: @@ -1050,15 +1106,15 @@ templates: file: ./abis/Token.json ``` -> सध्या `abis` आवश्यक आहेत, जरी फाइल डेटा स्रोतांमधून करार कॉल करणे शक्य नाही +> Currently `abis` are required, though it is not possible to call contracts from within file data sources फाइल डेटा स्रोताने विशेषत: सर्व घटक प्रकारांचा उल्लेख करणे आवश्यक आहे ज्यांच्याशी तो `संस्था` अंतर्गत संवाद साधेल. अधिक तपशीलांसाठी [मर्यादा](#Limitations) पहा. #### फाइल्सवर प्रक्रिया करण्यासाठी नवीन हँडलर तयार करा -या हँडलरने एक `बाइट्स` पॅरामीटर स्वीकारला पाहिजे, जो फाइल सापडल्यावर त्यातील सामग्री असेल, ज्यावर नंतर प्रक्रिया केली जाऊ शकते. ही बर्‍याचदा JSON फाइल असेल, ज्यावर `graph-ts` मदतनीस ([दस्तऐवजीकरण](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)) सह प्रक्रिया केली जाऊ शकते. +This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). -वाचनीय स्ट्रिंग म्हणून फाइलचा CID खालीलप्रमाणे `डेटास्रोत` द्वारे प्रवेश केला जाऊ शकतो: +The CID of the file as a readable string can be accessed via the `dataSource` as follows: ```typescript const cid = dataSource.stringParam() @@ -1093,12 +1149,14 @@ export function handleMetadata(content: Bytes): void { #### आवश्यकतेनुसार फाईल डेटा स्रोत तयार करा -साखळी-आधारित हँडलर्सच्या अंमलबजावणीदरम्यान तुम्ही आता फाइल डेटा स्रोत तयार करू शकता: +You can now create file data sources during execution of chain-based handlers: + +- Import the template from the auto-generated `templates` +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave -- स्वयं-निर्मित `टेम्पलेट` मधून टेम्पलेट आयात करा -- मॅपिंगमधून `TemplateName.create(cid: string)` वर कॉल करा, जेथे cid वैध IPFS सामग्री अभिज्ञापक आहे +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). 
-> सध्या ग्राफ नोड [v0 आणि v1 सामग्री अभिज्ञापकांना](https://docs.ipfs.tech/concepts/content-addressing/), आणि निर्देशिकांसह सामग्री अभिज्ञापकांना समर्थन देते (उदा. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). उदाहरण: @@ -1129,25 +1187,25 @@ export function handleTransfer(event: TransferEvent): void { } ``` -हे एक नवीन फाइल डेटा स्रोत तयार करेल, जे ग्राफ नोडच्या कॉन्फिगर केलेल्या IPFS एंडपॉईंटचे मतदान करेल, तो सापडला नाही तर पुन्हा प्रयत्न करेल. फाइल सापडल्यावर, फाइल डेटा स्रोत हँडलर कार्यान्वित केला जाईल. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. -हे उदाहरण पालक `टोकन` घटक आणि परिणामी `टोकनमेटाडेटा` घटक यांच्यातील लुकअप म्हणून CID वापरत आहे. +This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. -> पूर्वी, फाईल आणण्यासाठी सबग्राफ डेव्हलपरने `ipfs.cat(CID)` ला कॉल केला असता +> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file -अभिनंदन, तुम्ही फाइल डेटा स्रोत वापरत आहात! +Congratulations, you are using file data sources! #### तुमचे सबग्राफ उपयोजित करत आहे -तुम्ही आता कोणत्याही ग्राफ नोडवर तुमचा सबग्राफ `बिल्ड` आणि `डिप्लॉय` करू शकता >=v0.30.0-rc.0. +You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. -#### मर्यादा +#### Limitations फाइल डेटा स्रोत हँडलर आणि संस्था इतर सबग्राफ संस्थांपासून वेगळ्या केल्या जातात, ते कार्यान्वित केल्यावर ते निर्धारवादी आहेत याची खात्री करून आणि साखळी-आधारित डेटा स्रोतांचे दूषित होणार नाही याची खात्री करतात. विशिष्ट असणे: -- फाइल डेटा स्त्रोतांद्वारे तयार केलेल्या संस्था अपरिवर्तनीय आहेत आणि अद्यतनित केल्या जाऊ शकत नाहीत -- फाइल डेटा स्रोत हँडलर इतर फाइल डेटा स्रोतांमधून संस्थांमध्ये प्रवेश करू शकत नाहीत -- फाईल डेटा स्रोतांशी संबंधित संस्थांमध्ये साखळी-आधारित हँडलर्सद्वारे प्रवेश केला जाऊ शकत नाही +- Entities created by File Data Sources are immutable, and cannot be updated +- File Data Source handlers cannot access entities from other file data sources +- Entities associated with File Data Sources cannot be accessed by chain-based handlers > बहुतेक वापर-प्रकरणांसाठी ही मर्यादा समस्याप्रधान नसावी, परंतु काहींसाठी ते जटिलता आणू शकते. सबग्राफमध्‍ये तुमच्‍या फाईल-आधारित डेटाचे मॉडेल बनवण्‍यात तुम्‍हाला समस्या येत असल्‍यास कृपया डिस्‍कॉर्ड द्वारे संपर्क साधा! @@ -1161,11 +1219,11 @@ export function handleTransfer(event: TransferEvent): void { तुमच्याकडे अनेक वेळा रीफ्रेश केलेल्या संस्था असल्यास, IPFS हॅश वापरून अद्वितीय फाइल-आधारित संस्था तयार करा & एंटिटी आयडी आणि साखळी-आधारित घटकामध्ये व्युत्पन्न फील्ड वापरून त्यांचा संदर्भ द्या. 
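A minimal sketch of the pattern described above, assuming the `Token` / `TokenMetadata` entities and the auto-generated `TokenMetadata` template referenced elsewhere in this guide; the import paths, field name, and helper signature are placeholders to adapt to your own schema. A chain-based handler records the CID (optionally combined with the entity ID when metadata can be refreshed) and spawns the file data source:

```typescript
import { Token } from '../generated/schema'
import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'

// Helper called from a chain-based handler (e.g. a Transfer handler).
export function linkTokenMetadata(tokenId: string, ipfsHash: string): void {
  let token = Token.load(tokenId)
  if (!token) {
    token = new Token(tokenId)
  }

  // The CID doubles as the lookup between the chain-based Token and the
  // file-based TokenMetadata entity. If the metadata can be refreshed, use
  // ipfsHash + '-' + tokenId instead, so each refresh produces its own
  // immutable file-based entity, as recommended above.
  token.ipfsURI = ipfsHash
  token.save()

  // Spawns the file data source; its handler receives the file contents and
  // writes the TokenMetadata entity once the file is retrieved.
  TokenMetadataTemplate.create(ipfsHash)
}
```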
-> आम्ही वरील शिफारसी सुधारण्यासाठी कार्य करत आहोत, त्यामुळे क्वेरी फक्त "सर्वात अलीकडील" आवृत्ती परत करतात +> We are working to improve the above recommendation, so queries only return the "most recent" version #### ओळखलेले समस्या -फाइल डेटा स्रोतांना सध्या ABI ची आवश्यकता आहे, जरी ABIs वापरले जात नसले तरी ([समस्या](https://github.com/graphprotocol/graph-cli/issues/961)). वर्कअराउंड म्हणजे कोणताही ABI जोडणे. +File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. फाइल डेटा स्रोतांसाठी हँडलर फाईल्समध्ये असू शकत नाहीत जे `eth_call` कॉन्ट्रॅक्ट बाइंडिंग आयात करतात, "अज्ञात आयात: `ethereum::ethereum.call` परिभाषित केले गेले नाहीत" ([ समस्या](https://github.com/graphprotocol/graph-cli/issues/4309)). वर्कअराउंड म्हणजे समर्पित फाइलमध्ये फाइल डेटा स्रोत हँडलर तयार करणे. diff --git a/website/pages/mr/developing/developer-faqs.mdx b/website/pages/mr/developing/developer-faqs.mdx index cf8e604586e5..0e7e5208490e 100644 --- a/website/pages/mr/developing/developer-faqs.mdx +++ b/website/pages/mr/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"or सध्या, dapp साठी शिफारस केलेला दृष्टीकोन म्हणजे फ्रंटएंडमध्ये की जोडणे आणि अंतिम वापरकर्त्यांसमोर ते उघड करणे. ते म्हणाले, तुम्ही ती की होस्टनावावर मर्यादित करू शकता, जसे की _yourdapp.io_ आणि सबग्राफ. गेटवे सध्या एज द्वारे चालवले जात आहे & नोड. गेटवेच्या जबाबदारीचा एक भाग म्हणजे अपमानास्पद वर्तनासाठी निरीक्षण करणे आणि दुर्भावनापूर्ण क्लायंटकडून रहदारी अवरोधित करणे. -## 25. होस्ट केलेल्या सेवेवर माझा वर्तमान सबग्राफ शोधण्यासाठी मी कुठे जाऊ? +## 25. Where do I go to find my current subgraph on the hosted service? -तुम्ही किंवा इतरांनी होस्ट केलेल्या सेवेवर तैनात केलेले सबग्राफ शोधण्यासाठी होस्ट केलेल्या सेवेकडे जा. तुम्ही ते [येथे](https://thegraph.com/hosted-service) शोधू शकता. +Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. होस्ट केलेली सेवा क्वेरी शुल्क आकारण्यास प्रारंभ करेल का? +## 26. Will the hosted service start charging query fees? -The Graph होस्टेड सेव्हिससाठी कधीही शुल्क घेतला जाणार नाही. The Graph हे एक डिसेंट्रलाइझ्ड प्रोटोकॉल आहे, आणि केंद्रीकृत सेवेसाठी शुल्क घेणे The Graph च्या मूल्यांसह सामंजस्यपूर्वक नाही. होस्टेड सेव्हिस हे डिसेंट्रलाइझ्ड नेटवर्कला पोहोचविण्यासाठी केलेलं एक तात्काळ कदम होतं. डेव्हलपर्सला आपल्याला सोबत सुगम आहेत तसेच त्यांच्या सबग्रॅफ डिसेंट्रलाइझ्ड नेटवर्कला अपग्रेड करण्यासाठी पुर्याद्वितीय काळ मिळविण्यात आलेलं आहे. +The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. होस्ट केलेली सेवा कधी बंद केली जाईल? +## 27. How do I update a subgraph on mainnet? -होस्ट केलेली सेवा 2023 मध्ये बंद होईल. घोषणा ब्लॉग पोस्ट [येथे](https://thegraph.com/blog/sunsetting-hosted-service) वाचा. होस्ट केलेल्या सेवेचा वापर करणार्‍या सर्व डॅप्सना विकेंद्रित नेटवर्कमध्ये अपग्रेड करण्यासाठी प्रोत्साहित केले जाते. विकासकांना त्यांचे सबग्राफ द ग्राफ नेटवर्कमध्ये अपग्रेड करण्यात मदत करण्यासाठी नेटवर्क अनुदान उपलब्ध आहे. 
तुमचे dapp सबग्राफ अपग्रेड करत असल्यास तुम्ही [येथे](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com) अर्ज करू शकता. - -## 28. मी मेननेटवर सबग्राफ कसा अपडेट करू? - -जर आपल्याला सबग्राफ डेव्हलपर आहात तर, आपण CLI वापरून सबग्राफ स्टूडिओमध्ये आपल्या सबग्राफचा एक नवीन आवृत्ती डिप्लॉय करू शकता. ह्या वेळेस ती खाजगी होईल, पण जर आपल्याला ती संतुष्टीसाठी आहे, तर आपण डिसेंट्रलायझ्ड ग्राफ एक्सप्लोररवर प्रकाशित करू शकता. ह्यामुळे, आपल्या सबग्राफच्या एक नवीन आवृत्ती तयार होईल, ज्यामध्ये क्युरेटर्स संकेतस्थळावर संकेतसंकेत सुरू करू शकतील. +If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/mr/developing/graph-ts/api.mdx b/website/pages/mr/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..e92d7cbf0bc3 --- /dev/null +++ b/website/pages/mr/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: असेंबलीस्क्रिप्ट API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +सबग्राफ मॅपिंग लिहिताना कोणते अंगभूत API वापरले जाऊ शकतात हे हे पृष्ठ दस्तऐवजीकरण करते. बॉक्सच्या बाहेर दोन प्रकारचे API उपलब्ध आहेत: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API संदर्भ + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Ethereum, JSON, GraphQL आणि असेंबलीस्क्रिप्ट सारख्या विविध प्रकारच्या सिस्टीममध्ये भाषांतर करण्यासाठी निम्न-स्तरीय आदिम. + +### आवृत्त्या + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| आवृत्ती | रिलीझ नोट्स | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
`etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### अंगभूत प्रकार + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +'@graphprotocol/graph-ts' वरून { ByteArray } आयात करा +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### बिग डेसिमल + +```typescript +'@graphprotocol/graph-ts' वरून { BigDecimal } आयात करा +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +'@graphprotocol/graph-ts' वरून { BigInt } आयात करा +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +'@graphprotocol/graph-ts' वरून { TypedMap } आयात करा +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### बाइट्स + +```typescript +'@graphprotocol/graph-ts' वरून { Bytes } आयात करा +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### पत्ता + +```typescript +'@graphprotocol/graph-ts' वरून { Address } आयात करा +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### स्टोअर API + +```typescript +'@graphprotocol/graph-ts' वरून { store } आयात करा +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### अंदाज निर्मिती करणे + +इथरियम इव्हेंटमधून संस्था तयार करण्यासाठी खालील एक सामान्य नमुना आहे. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +इतर घटकांशी टक्कर टाळण्यासाठी प्रत्येक घटकाकडे एक अद्वितीय आयडी असणे आवश्यक आहे. 
इव्हेंट पॅरामीटर्समध्ये वापरला जाऊ शकणारा एक अद्वितीय अभिज्ञापक समाविष्ट करणे सामान्य आहे. टीप: आयडी म्हणून ट्रान्झॅक्शन हॅश वापरणे हे गृहित धरते की समान व्यवहारातील इतर कोणत्याही इव्हेंटमध्ये या हॅशसह आयडी म्हणून अस्तित्व निर्माण होत नाही. + +#### स्टोअरमधून घटक लोड करत आहे + +एखादी संस्था आधीपासून अस्तित्वात असल्यास, ती खालील गोष्टींसह स्टोअरमधून लोड केली जाऊ शकते: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### ब्लॉकसह तयार केलेल्या संस्था शोधत आहे + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### विद्यमान घटक अद्यतनित करत आहे + +विद्यमान घटक अद्यतनित करण्याचे दोन मार्ग आहेत: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +व्युत्पन्न केलेल्या प्रॉपर्टी सेटरला धन्यवाद, बहुतेक प्रकरणांमध्ये गुणधर्म बदलणे सरळ पुढे आहे: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
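+// Save the entity back to the store once the properties are set
+transfer.save()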
+``` + +खालील दोन सूचनांपैकी एकासह गुणधर्म अनसेट करणे देखील शक्य आहे: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### स्टोअरमधून एंटिटीस काढून टाकणे + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### इथरियम API + +Ethereum API स्मार्ट कॉन्ट्रॅक्ट्स, पब्लिक स्टेट व्हेरिएबल्स, कॉन्ट्रॅक्ट फंक्शन्स, इव्हेंट्स, व्यवहार, ब्लॉक्स आणि एन्कोडिंग/डिकोडिंग इथरियम डेटामध्ये प्रवेश प्रदान करते. + +#### इथरियम प्रकारांसाठी समर्थन + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +पुढील उदाहरण हे स्पष्ट करते. सारखी सबग्राफ स्कीमा दिली + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### इव्हेंट आणि ब्लॉक/व्यवहार डेटा + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### स्मार्ट कॉन्ट्रॅक्ट स्टेटमध्ये प्रवेश + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +कॉन्ट्रॅक्टमध्ये प्रवेश करणे हा एक सामान्य पॅटर्न आहे ज्यातून इव्हेंटची उत्पत्ती होते. हे खालील कोडसह साध्य केले आहे: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +सबग्राफचा भाग असलेला इतर कोणताही करार व्युत्पन्न केलेल्या कोडमधून आयात केला जाऊ शकतो आणि वैध पत्त्यावर बांधला जाऊ शकतो. + +#### रिव्हर्ट केलेले कॉल हाताळणे + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +लक्षात ठेवा की गेथ किंवा इन्फुरा क्लायंटशी कनेक्ट केलेला ग्राफ नोड सर्व रिव्हर्ट्स शोधू शकत नाही, जर तुम्ही यावर अवलंबून असाल तर आम्ही पॅरिटी क्लायंटशी कनेक्ट केलेला ग्राफ नोड वापरण्याची शिफारस करतो. 
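+
+As a minimal sketch (reusing the `Gravity` binding and the `gravatar` ID from the example above, and assuming the zero address is an acceptable fallback owner), a handler can substitute a default value instead of skipping the update when the call reverts:
+
+```typescript
+import { Address, log } from '@graphprotocol/graph-ts'
+
+let gravity = Gravity.bind(event.address)
+let callResult = gravity.try_gravatarToOwner(gravatar)
+// Fall back to the zero address when the call reverts, so the entity can still be updated
+let owner = callResult.reverted
+  ? Address.fromString('0x0000000000000000000000000000000000000000')
+  : callResult.value
+if (callResult.reverted) {
+  log.warning('gravatarToOwner reverted for contract {}', [event.address.toHexString()])
+}
+```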
+ +#### एन्कोडिंग/डिकोडिंग ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +अधिक माहितीसाठी: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### लॉगिंग API + +```typescript +'@graphprotocol/graph-ts' वरून { log } आयात करा +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('प्रदर्शनासाठी संदेश: {}, {}, {}', [value.toString(), anotherValue.toString(), 'आधीपासूनच एक स्ट्रिंग']) +``` + +#### एक किंवा अधिक मूल्ये लॉग करणे + +##### एकल मूल्य लॉग करत आहे + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### विद्यमान अॅरेमधून एकल एंट्री लॉग करत आहे + +खालील उदाहरणामध्ये, तीन मूल्यांचा समावेश असूनही, केवळ वितर्क अॅरेचे पहिले मूल्य लॉग केले आहे. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### विद्यमान अॅरेमधून एकाधिक नोंदी लॉग करणे + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### विद्यमान अॅरेमधून विशिष्ट एंट्री लॉग करणे + +अॅरेमध्ये विशिष्ट मूल्य प्रदर्शित करण्यासाठी, अनुक्रमित मूल्य प्रदान करणे आवश्यक आहे. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### कार्यक्रम माहिती लॉगिंग + +खालील उदाहरण इव्हेंटमधील ब्लॉक नंबर, ब्लॉक हॅश आणि ट्रान्झॅक्शन हॅश लॉग करते: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +'@graphprotocol/graph-ts' वरून { ipfs } आयात करा +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +आयपीएफएस हॅश किंवा पथ दिल्यास, आयपीएफएस मधून फाइल वाचणे खालीलप्रमाणे केले जाते: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +'@graphprotocol/graph-ts' वरून { JSONValue, Value } आयात करा + +निर्यात कार्य प्रक्रिया आयटम(मूल्य: JSONValue, userData: मूल्य): void { + // व्यवहाराच्या तपशीलांसाठी JSONValue दस्तऐवजीकरण पहा + // JSON मूल्यांसह + द्या obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + जर (!id || !title) { + परत + } + + // कॉलबॅक देखील संस्था तयार करू शकतात + let newItem = newItem(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // पालकांना "parentId" वर सेट करा + newitem.save() +} + +// हे मॅपिंगमध्ये इव्हेंट हँडलरमध्ये ठेवा +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// वैकल्पिकरित्या, `ipfs.mapJSON` वापरा +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### क्रिप्टो API + +```typescript +'@graphprotocol/graph-ts' वरून { crypto } आयात करा +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +'@graphprotocol/graph-ts' वरून { json, JSONValueKind } आयात करा +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### प्रकार रूपांतरण संदर्भ + +| Source(s) | Destination | Conversion function | +| ----------------------- | ----------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | स्ट्रिंग (हेक्साडेसिमल) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | स्ट्रिंग (हेक्साडेसिमल) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | 
BigDecimal.fromString(s) | +| स्ट्रिंग (हेक्साडेसिमल) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### डेटा स्रोत मेटाडेटा + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### संस्था आणि डेटास्रोत संदर्भ + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/mr/developing/graph-ts/common-issues.mdx b/website/pages/mr/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..1f9a2b687cc4 --- /dev/null +++ b/website/pages/mr/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: सामान्य असेंब्लीस्क्रिप्ट समस्या +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/mr/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/mr/developing/substreams-powered-subgraphs-faq.mdx index cb5bfe8719ab..02592fd21457 100644 --- a/website/pages/mr/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/mr/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: सबस्ट्रीम-संचालित सबग्राफ FAQ +title: Substreams-powered subgraphs FAQ --- -## सबस्ट्रीम्स म्हणजे काय? +## What are Substreams? -[StreamingFast](https://www.streamingfast.io/) द्वारे विकसित केलेले, सबस्ट्रीम हे एक अपवादात्मक शक्तिशाली प्रोसेसिंग इंजिन आहे जे ब्लॉकचेन डेटाच्या समृद्ध प्रवाहाचा वापर करण्यास सक्षम आहे. सबस्ट्रीम्स तुम्हाला एंड-यूजर अॅप्लिकेशन्सद्वारे जलद आणि अखंड पचनासाठी ब्लॉकचेन डेटा परिष्कृत आणि आकार देण्यास अनुमती देतात. अधिक विशिष्‍टपणे, सबस्ट्रीम हे ब्लॉकचेन-अज्ञेयवादी, समांतर आणि स्‍ट्रीमिंग-प्रथम इंजिन आहे, जे ब्लॉकचेन डेटा ट्रान्स्फॉर्मेशन लेयर म्हणून काम करते. [Firehose](https://firehose.streamingfast.io/) द्वारे समर्थित, हे विकासकांना रस्ट मॉड्यूल्स लिहिण्यास, समुदाय मॉड्यूल तयार करण्यास, अत्यंत उच्च-कार्यक्षमता अनुक्रमणिका प्रदान करण्यास आणि [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) anywhere-substreams. +Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. -सबस्ट्रीम्सबद्दल अधिक जाणून घेण्यासाठी [Substreams Documentation](/substreams) वर जा. +Go to the [Substreams Documentation](/substreams) to learn more about Substreams. -## सबस्ट्रीम-सक्षम सबग्राफ काय आहेत? +## What are Substreams-powered subgraphs? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) सबग्राफच्या क्वेरीयोग्यतेसह सबस्ट्रीम्सची शक्ती एकत्र करतात. सबस्ट्रीम-संचालित सबग्राफ प्रकाशित करताना, सबस्ट्रीम्स ट्रान्सफॉर्मेशनद्वारे उत्पादित केलेला डेटा, [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), जे सबग्राफ घटकांशी सुसंगत आहेत. +[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. -जर तुम्ही सबग्राफ डेव्हलपमेंटशी आधीच परिचित असाल, तर लक्षात घ्या की सबस्ट्रीम-संचालित सबग्राफ नंतर विचारले जाऊ शकतात, जसे की ते असेंबलीस्क्रिप्ट ट्रान्सफॉर्मेशन लेयरद्वारे तयार केले गेले होते, जसे की डायनॅमिक आणि लवचिक GraphQL API प्रदान करणे. 
+If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. -## सबस्ट्रीम-सक्षम सबग्राफ सबग्राफपेक्षा वेगळे कसे आहेत? +## How are Substreams-powered subgraphs different from subgraphs? -सबग्राफ डेटासोर्सचे बनलेले असतात जे ऑन-चेन इव्हेंट निर्दिष्ट करतात आणि ते इव्हेंट असेंब्लीस्क्रिप्टमध्ये लिहिलेल्या हँडलर्सद्वारे कसे बदलले जावेत. या इव्हेंट्सवर क्रमाक्रमाने प्रक्रिया केली जाते, ज्या क्रमाने घटना साखळीवर घडतात त्यानुसार. +Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. -याउलट, सबस्ट्रीम-संचालित सबग्राफमध्ये एकच डेटासोर्स असतो जो सबस्ट्रीम पॅकेजचा संदर्भ देतो, ज्यावर ग्राफ नोडद्वारे प्रक्रिया केली जाते. उपप्रवाहांमध्ये पारंपारिक सबग्राफच्या तुलनेत अतिरिक्त ग्रॅन्युलर ऑन-चेन डेटामध्ये प्रवेश आहे आणि मोठ्या प्रमाणात समांतर प्रक्रियेचा फायदा देखील होऊ शकतो, ज्याचा अर्थ खूप वेगवान प्रक्रिया वेळ असू शकतो. +By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. -## सबस्ट्रीम-सक्षम सबग्राफ वापरण्याचे फायदे काय आहेत? +## What are the benefits of using Substreams-powered subgraphs? -सबस्ट्रीम-संचालित सबग्राफ्स सबग्राफच्या क्वेरीयोग्यतेसह सबस्ट्रीमचे सर्व फायदे एकत्र करतात. ते द ग्राफमध्ये अधिक संयोजनक्षमता आणि उच्च-कार्यक्षमता अनुक्रमणिका आणतात. ते नवीन डेटा वापर प्रकरणे देखील सक्षम करतात; उदाहरणार्थ, एकदा तुम्ही तुमचा सबस्ट्रीम-संचालित सबग्राफ तयार केल्यावर, तुम्ही तुमचे [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) वेगवेगळ्या मध्ये आउटपुट करण्यासाठी पुन्हा वापरू शकता. [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) जसे की PostgreSQL, MongoDB आणि Kafka. +Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. -## सबस्ट्रीमचे फायदे काय आहेत? +## What are the benefits of Substreams? -सबस्ट्रीम वापरण्याचे अनेक फायदे आहेत, यासह: +There are many benefits to using Substreams, including: -- कम्पोजेबल: तुम्ही लेगो ब्लॉक्स सारखे सबस्ट्रीम मॉड्यूल स्टॅक करू शकता आणि सार्वजनिक डेटा अधिक परिष्कृत करून समुदाय मॉड्यूल तयार करू शकता. +- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. -- उच्च-कार्यक्षमता अनुक्रमणिका: समांतर ऑपरेशन्सच्या मोठ्या प्रमाणावरील क्लस्टरद्वारे परिमाण जलद अनुक्रमणिकेचे ऑर्डर (बिगक्वेरीचा विचार करा). +- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). 
-- कुठेही सिंक करा: तुमचा डेटा तुम्हाला पाहिजे त्या ठिकाणी सिंक करा: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. -- प्रोग्राम करण्यायोग्य: एक्स्ट्रॅक्शन कस्टमाइझ करण्यासाठी कोड वापरा, ट्रान्सफॉर्मेशन-टाइम एग्रीगेशन करा आणि एकाधिक सिंकसाठी तुमचे आउटपुट मॉडेल करा. +- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. -- JSON RPC चा भाग म्हणून उपलब्ध नसलेल्या अतिरिक्त डेटामध्ये प्रवेश +- Access to additional data which is not available as part of the JSON RPC -- फायरहोसचे सर्व फायदे. +- All the benefits of the Firehose. -## फायरहोस म्हणजे काय? +## What is the Firehose? -[StreamingFast](https://www.streamingfast.io/) द्वारे विकसित, फायरहोस हा ब्लॉकचेन डेटा एक्स्ट्रॅक्शन लेयर आहे जो आधी न पाहिलेल्या गतीने ब्लॉकचेनच्या संपूर्ण इतिहासावर प्रक्रिया करण्यासाठी सुरवातीपासून डिझाइन केलेला आहे. फाइल-आधारित आणि स्ट्रीमिंग-प्रथम दृष्टीकोन प्रदान करणे, हे ओपन-सोर्स तंत्रज्ञानाच्या StreamingFast च्या संचचा मुख्य घटक आहे आणि सबस्ट्रीमचा पाया आहे. +Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. -फायरहोसबद्दल अधिक जाणून घेण्यासाठी [documentation](https://firehose.streamingfast.io/) वर जा. +Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. -## फायरहोसचे फायदे काय आहेत? +## What are the benefits of the Firehose? -फायरहोस वापरण्याचे अनेक फायदे आहेत, यासह: +There are many benefits to using Firehose, including: -- सर्वात कमी विलंब आणि मतदान नाही: स्ट्रीमिंग-फर्स्ट फॅशनमध्ये, फायरहोस नोड्स प्रथम ब्लॉक डेटा बाहेर ढकलण्यासाठी शर्यतीसाठी डिझाइन केलेले आहेत. +- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. -- डाउनटाइम प्रतिबंधित करा: उच्च उपलब्धतेसाठी जमिनीपासून डिझाइन केलेले. +- Prevents downtimes: Designed from the ground up for High Availability. -- कधीही बीट चुकवू नका: फायरहोस स्ट्रीम कर्सर काटे हाताळण्यासाठी आणि कोणत्याही स्थितीत तुम्ही जिथे सोडले होते तेथून सुरू ठेवण्यासाठी डिझाइन केले आहे. +- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. -- सर्वात श्रीमंत डेटा मॉडेल: सर्वोत्तम डेटा मॉडेल ज्यामध्ये शिल्लक बदल, संपूर्ण कॉल ट्री, अंतर्गत व्यवहार, लॉग, स्टोरेज बदल, गॅस खर्च आणि बरेच काही समाविष्ट आहे. +- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. -- फ्लॅट फाइल्सचा लाभ घेते: ब्लॉकचेन डेटा फ्लॅट फाइल्समध्ये काढला जातो, उपलब्ध सर्वात स्वस्त आणि सर्वात ऑप्टिमाइझ केलेले संगणकीय संसाधन. +- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. -## डेव्हलपर सबस्ट्रीम-संचालित सबग्राफ आणि सबस्ट्रीम बद्दल अधिक माहिती कोठे प्रवेश करू शकतात? +## Where can developers access more information about Substreams-powered subgraphs and Substreams? -[Substreams documentation](/substreams) तुम्हाला सबस्ट्रीम मॉड्यूल्स कसे तयार करायचे ते शिकवतील. 
+The [Substreams documentation](/substreams) will teach you how to build Substreams modules. -[Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) तुम्हाला ते ग्राफवर तैनात करण्यासाठी पॅकेज कसे करायचे ते दर्शवेल. +The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. -## सबस्ट्रीममध्ये रस्ट मॉड्यूलची भूमिका काय आहे? +## What is the role of Rust modules in Substreams? -रस्ट मॉड्यूल हे सबग्राफमधील असेंबलीस्क्रिप्ट मॅपर्सचे समतुल्य आहेत. ते WASM मध्ये अशाच प्रकारे संकलित केले जातात, परंतु प्रोग्रामिंग मॉडेल समांतर अंमलबजावणीसाठी परवानगी देते. ते आपण कच्च्या ब्लॉकचेन डेटावर लागू करू इच्छित असलेले परिवर्तन आणि एकत्रीकरण परिभाषित करतात. +Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. -## सबस्ट्रीम कंपोजेबल कशामुळे बनते? +## What makes Substreams composable? -सबस्ट्रीम वापरताना, रचना ट्रान्सफॉर्मेशन लेयरवर होते ज्यामुळे कॅशे केलेले मॉड्यूल्स पुन्हा वापरता येतात. +When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. -उदाहरण म्हणून, अॅलिस एक DEX किंमत मॉड्यूल तयार करू शकते, बॉब त्याच्या आवडीच्या काही टोकन्ससाठी व्हॉल्यूम एग्रीगेटर तयार करण्यासाठी त्याचा वापर करू शकतो आणि लिसा किंमत ओरॅकल तयार करण्यासाठी चार वैयक्तिक DEX किंमत मॉड्यूल एकत्र करू शकते. एकल सबस्ट्रीम विनंती या सर्व व्यक्तींचे मॉड्यूल पॅकेज करेल, त्यांना एकत्र जोडेल, डेटाचा अधिक शुद्ध प्रवाह ऑफर करेल. तो प्रवाह नंतर सबग्राफ तयार करण्यासाठी वापरला जाऊ शकतो आणि ग्राहकांकडून विचारला जाऊ शकतो. +As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. -## तुम्ही सबस्ट्रीम-संचालित सबग्राफ कसा तयार आणि तैनात करू शकता? +## How can you build and deploy a Substreams-powered Subgraph? -Substreams-powered Subgraph [defining](/cookbook/substreams-powered-subgraphs/) केल्यानंतर, तुम्ही ग्राफ CLI वापरू शकता तो [Subgraph Studio](https://thegraph.com/studio/) मध्ये उपयोजित करण्यासाठी. +After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). -## मला सबस्ट्रीम्स आणि सबस्ट्रीम-सक्षम सबग्राफची उदाहरणे कोठे मिळतील? +## Where can I find examples of Substreams and Substreams-powered subgraphs? -सबस्ट्रीम आणि सबस्ट्रीम-सक्षम सबग्राफची उदाहरणे शोधण्यासाठी तुम्ही [this Github repo](https://github.com/pinax-network/awesome-substreams) ला भेट देऊ शकता. +You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. -## ग्राफ नेटवर्कसाठी सबस्ट्रीम्स आणि सबस्ट्रीम-सक्षम सबग्राफचा अर्थ काय आहे? +## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? 
-एकात्मता अनेक फायद्यांचे आश्वासन देते, ज्यामध्ये अत्यंत उच्च-कार्यक्षमता अनुक्रमणिका आणि समुदाय मॉड्यूल्सचा फायदा घेऊन आणि त्यावर निर्माण करून अधिक संयोजनक्षमता समाविष्ट आहे. +The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. diff --git a/website/pages/mr/developing/supported-networks.mdx b/website/pages/mr/developing/supported-networks.mdx index a875053e6353..d033eeecc766 100644 --- a/website/pages/mr/developing/supported-networks.mdx +++ b/website/pages/mr/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## आलेख नोड -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. ग्राफ नोड फायरहोस एकत्रीकरणाद्वारे इतर प्रोटोकॉल देखील अनुक्रमित करू शकतो. NEAR, Arweave आणि Cosmos-आधारित नेटवर्कसाठी फायरहॉस इंटिग्रेशन तयार केले गेले आहेत. diff --git a/website/pages/mr/developing/unit-testing-framework.mdx b/website/pages/mr/developing/unit-testing-framework.mdx index 888ae93bf271..85fdc6571ca1 100644 --- a/website/pages/mr/developing/unit-testing-framework.mdx +++ b/website/pages/mr/developing/unit-testing-framework.mdx @@ -103,13 +103,13 @@ graph test path/to/file.test.ts **पर्याय:** ```sh --c, --coverage कव्हरेज मोडमध्ये चाचण्या चालवा --d, --docker डॉकर कंटेनरमध्ये चाचण्या चालवा (टीप: कृपया सबग्राफच्या रूट फोल्डरमधून कार्यान्वित करा) --f --force बायनरी: बायनरी पुन्हा डाउनलोड करते. डॉकर: डॉकरफाइल पुन्हा डाउनलोड करते आणि डॉकर प्रतिमा पुन्हा तयार करते. 
--h, --help वापर माहिती दाखवा --l, --logs OS, CPU मॉडेल आणि डाउनलोड url बद्दल कन्सोल माहितीवर लॉग (डीबगिंग हेतू) --r, --recompile फोर्सेस चाचण्या पुन्हा कंपाइल केल्या जातील --v, --version तुम्हाला डाउनलोड/वापरायची असलेली रस्ट बायनरीची आवृत्ती निवडा +-c, --coverage Run the tests in coverage mode +-d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) +-f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. +-h, --help Show usage information +-l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) +-r, --recompile Forces tests to be recompiled +-v, --version Choose the version of the rust binary that you want to be downloaded/used ``` ### डॉकर @@ -990,9 +990,9 @@ test('Data source simple mocking example', () => { ## चाचणी कव्हरेज -**मॅचस्टिक** वापरून, सबग्राफ डेव्हलपर एक स्क्रिप्ट चालवण्यास सक्षम आहेत जी लिखित युनिट चाचण्यांच्या चाचणी कव्हरेजची गणना करेल. +Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. -चाचणी कव्हरेज टूल संकलित चाचणी `wasm` बायनरी घेते आणि त्यांना `wat` फायलींमध्ये रूपांतरित करते, जे नंतर `सबग्राफमध्ये परिभाषित केलेले हँडलर्स आहेत की नाही हे पाहण्यासाठी सहजपणे तपासले जाऊ शकते..yaml` ला कॉल केले आहे. कोड कव्हरेज (आणि संपूर्ण चाचणी) असेंबलीस्क्रिप्ट आणि WebAssembly मध्ये अगदी सुरुवातीच्या टप्प्यात असल्याने, **Matchstick** शाखा कव्हरेज तपासू शकत नाही. त्याऐवजी, दिलेल्या हँडलरला कॉल केल्यास, त्याच्यासाठी इव्हेंट/फंक्शनची योग्य प्रकारे खिल्ली उडवली गेली आहे, या प्रतिपादनावर आम्ही अवलंबून आहोत. +The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. 
### पूर्वतयारी @@ -1029,7 +1029,7 @@ test('Data source simple mocking example', () => { }, ``` -ते कव्हरेज टूल कार्यान्वित करेल आणि तुम्हाला टर्मिनलमध्ये असे काहीतरी दिसेल: +That will execute the coverage tool and you should see something like this in the terminal: ```sh $ आलेख चाचणी -c diff --git a/website/pages/mr/docsearch.json b/website/pages/mr/docsearch.json index 33d92c16b13d..df938f810065 100644 --- a/website/pages/mr/docsearch.json +++ b/website/pages/mr/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "शोधा", - "buttonAriaLabel": "शोधा" + "buttonText": "Search", + "buttonAriaLabel": "Search" }, "modal": { "searchBox": { - "resetButtonTitle": "क्वेरी साफ करा", - "resetButtonAriaLabel": "क्वेरी साफ करा", - "cancelButtonText": "रद्द करा", - "cancelButtonAriaLabel": "रद्द करा" + "resetButtonTitle": "Clear the query", + "resetButtonAriaLabel": "Clear the query", + "cancelButtonText": "Cancel", + "cancelButtonAriaLabel": "Cancel" }, "startScreen": { - "recentSearchesTitle": "अलीकडील", - "noRecentSearchesText": "कोणतेही अलीकडील शोध नाहीत", - "saveRecentSearchButtonTitle": "हा शोध जतन करा", - "removeRecentSearchButtonTitle": "हा शोध इतिहासातून काढून टाका", - "favoriteSearchesTitle": "आवडते", - "removeFavoriteSearchButtonTitle": "हा शोध इतिहासातून काढून टाका" + "recentSearchesTitle": "Recent", + "noRecentSearchesText": "No recent searches", + "saveRecentSearchButtonTitle": "Save this search", + "removeRecentSearchButtonTitle": "Remove this search from history", + "favoriteSearchesTitle": "Favorite", + "removeFavoriteSearchButtonTitle": "Remove this search from favorites" }, "errorScreen": { - "titleText": "परिणाम आणण्यात अक्षम", - "helpText": "तुम्हाला तुमचे नेटवर्क कनेक्शन तपासायचे असेल." + "titleText": "Unable to fetch results", + "helpText": "You might want to check your network connection." }, "footer": { - "selectText": "निवडण्यासाठी", - "selectKeyAriaLabel": "की प्रविष्ट करा", - "navigateText": "नेव्हिगेट करण्यासाठी", - "navigateUpKeyAriaLabel": "वर बाण", - "navigateDownKeyAriaLabel": "खाली बाण", - "closeText": "बंद", + "selectText": "to select", + "selectKeyAriaLabel": "Enter key", + "navigateText": "to navigate", + "navigateUpKeyAriaLabel": "Arrow up", + "navigateDownKeyAriaLabel": "Arrow down", + "closeText": "to close", "closeKeyAriaLabel": "Escape की", "searchByText": "द्वारे शोधा" }, "noResultsScreen": { - "noResultsText": "साठी कोणतेही परिणाम नाहीत", - "suggestedQueryText": "शोधण्याचा प्रयत्न करा", - "reportMissingResultsText": "या क्वेरीने निकाल दिला पाहिजे यावर विश्वास आहे?", - "reportMissingResultsLinkText": "आम्हाला कळू द्या." + "noResultsText": "No results for", + "suggestedQueryText": "Try searching for", + "reportMissingResultsText": "Believe this query should return results?", + "reportMissingResultsLinkText": "Let us know." } } } diff --git a/website/pages/mr/firehose.mdx b/website/pages/mr/firehose.mdx index 5e2b37ee4bb6..ee293eed5e96 100644 --- a/website/pages/mr/firehose.mdx +++ b/website/pages/mr/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. 
-Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### प्रारंभ करणे + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/mr/glossary.mdx b/website/pages/mr/glossary.mdx index 5d923b89cdec..92efb9cc0a2a 100644 --- a/website/pages/mr/glossary.mdx +++ b/website/pages/mr/glossary.mdx @@ -1,29 +1,31 @@ --- -title: शब्दकोष +title: Glossary --- -- **द ग्राफ**: डेटा इंडेक्सिंग आणि क्वेरी करण्यासाठी विकेंद्रित प्रोटोकॉल. +- **The Graph**: A decentralized protocol for indexing and querying data. - **क्वेरी**: डेटासाठी विनंती. द ग्राफच्या बाबतीत, क्वेरी ही सबग्राफमधील डेटाची विनंती आहे ज्याचे उत्तर इंडेक्सरद्वारे दिले जाईल. - **GraphQL**: API साठी क्वेरी भाषा आणि आपल्या विद्यमान डेटासह त्या क्वेरी पूर्ण करण्यासाठी रनटाइम. आलेख सबग्राफ क्वेरी करण्यासाठी GraphQL वापरतो. -- **अंतिमबिंदू**: एक URL जी सबग्राफ क्वेरी करण्यासाठी वापरली जाऊ शकते. सबग्राफ स्टुडिओसाठी चाचणी एंडपॉइंट आहे `https://api.studio.thegraph.com/query///` आणि ग्राफ एक्सप्लोरर एंडपॉइंट `https: //gateway.thegraph.com/api//subgraphs/id/`. ग्राफ एक्सप्लोरर एंडपॉईंटचा वापर ग्राफच्या विकेंद्रीकृत नेटवर्कवरील सबग्राफ्सची क्वेरी करण्यासाठी केला जातो. +- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. - **सबग्राफ**: ब्लॉकचेन डेटावर तयार केलेला सानुकूल API जो [GraphQL](https://graphql.org/) वापरून विचारला जाऊ शकतो. डेव्हलपर द ग्राफच्या विकेंद्रित नेटवर्कवर सबग्राफ तयार, तैनात आणि प्रकाशित करू शकतात. त्यानंतर, इंडेक्सर्स सबग्राफचे अनुक्रमणिका सुरू करू शकतात जेणेकरून ते सबग्राफ ग्राहकांकडून विचारले जातील. -- **होस्ट केलेली सेवा**: ग्राफचे विकेंद्रीकृत नेटवर्क त्याच्या सेवेची किंमत, सेवेची गुणवत्ता आणि विकासकाचा अनुभव परिपक्व करत असल्याने सबग्राफ तयार करण्यासाठी आणि क्वेरी करण्यासाठी तात्पुरती स्कॅफोल्ड सेवा. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **इंडेक्सर्स**: नेटवर्क सहभागी जे ब्लॉकचेनमधील डेटा इंडेक्स करण्यासाठी इंडेक्सिंग नोड्स चालवतात आणि GraphQL क्वेरी सर्व्ह करतात. 
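As a concrete illustration of the **Endpoint** glossary entry above, the sketch below POSTs a GraphQL query to a Graph Explorer gateway URL. `API_KEY` and `SUBGRAPH_ID` are placeholders for the parts elided in that entry, the `_meta` query is simply a request every subgraph can answer, and a runtime with a global `fetch` (Node 18+ or a browser) is assumed.

```typescript
// Minimal sketch of querying a subgraph through the gateway endpoint.
const API_KEY = "<your-api-key>"    // placeholder
const SUBGRAPH_ID = "<subgraph-id>" // placeholder
const endpoint = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`

async function querySubgraph(query: string): Promise<unknown> {
  const response = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  })
  const json = await response.json()
  return json.data
}

// Ask for the latest block the subgraph has indexed.
querySubgraph("{ _meta { block { number } } }").then((data) => console.log(data))
```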
- **इंडेक्सर रेव्हेन्यू स्ट्रीम्स**: GRT मध्ये इंडेक्सर्सना दोन घटकांसह पुरस्कृत केले जाते: क्वेरी फी रिबेट्स आणि इंडेक्सिंग रिवॉर्ड्स. - 1. **क्वेरी फी रिबेट्स**: नेटवर्कवर क्वेरी सर्व्ह करण्यासाठी सबग्राफ ग्राहकांकडून पेमेंट. + 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. 2. **इंडेक्सिंग रिवॉर्ड्स**: इंडेक्सर्सना अनुक्रमणिका सबग्राफसाठी प्राप्त होणारे पुरस्कार. इंडेक्सिंग रिवॉर्ड्स वार्षिक 3% GRT च्या नवीन जारी करून व्युत्पन्न केले जातात. - **इंडेक्सरचा सेल्फ स्टेक**: विकेंद्रीकृत नेटवर्कमध्ये भाग घेण्यासाठी इंडेक्सर्सची जीआरटीची रक्कम. किमान 100,000 GRT आहे आणि कोणतीही उच्च मर्यादा नाही. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **प्रतिनिधी**: नेटवर्क सहभागी जे GRT चे मालक आहेत आणि त्यांचे GRT इंडेक्सर्सना सोपवतात. हे इंडेक्सर्सना नेटवर्कवरील सबग्राफमध्ये त्यांची भागीदारी वाढविण्यास अनुमती देते. त्या बदल्यात, प्रतिनिधींना अनुक्रमणिका बक्षिसेचा एक भाग प्राप्त होतो जो इंडेक्सर्सना सबग्राफवर प्रक्रिया करण्यासाठी प्राप्त होतो. - **प्रतिनिधी कर**: प्रतिनिधींनी इंडेक्सर्सना GRT सोपवल्यावर 0.5% शुल्क. फी भरण्यासाठी वापरण्यात आलेला जीआरटी जळाला आहे. @@ -38,27 +40,21 @@ title: शब्दकोष - **सबग्राफ मॅनिफेस्ट**: एक JSON फाइल जी सबग्राफच्या GraphQL स्कीमा, डेटा स्रोत आणि इतर मेटाडेटाचे वर्णन करते. [येथे](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) एक उदाहरण आहे. -- **सवलत पूल**: एक आर्थिक सुरक्षितता उपाय ज्यामध्ये सबग्राफ ग्राहकांनी दिलेले क्वेरी शुल्क इंडेक्सर्सद्वारे क्वेरी शुल्क सवलत म्हणून दावा केला जात नाही तोपर्यंत धारण करतो. अवशिष्ट GRT जाळले आहे. - -- **युग**: नेटवर्कमधील वेळेचे एकक. एक युग सध्या 6,646 ब्लॉक किंवा अंदाजे 1 दिवस आहे. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **वाटप**: एक इंडेक्सर त्यांचा एकूण GRT स्टेक (प्रतिनिधींच्या स्टेकसह) ग्राफच्या विकेंद्रीकृत नेटवर्कवर प्रकाशित झालेल्या सबग्राफसाठी वाटप करू शकतो. वाटप चार टप्प्यांपैकी एका टप्प्यात अस्तित्वात आहे. 1. **सक्रिय**: ऑन-चेन तयार केल्यावर वाटप सक्रिय मानले जाते. याला वाटप उघडणे म्हणतात, आणि नेटवर्कला सूचित करते की इंडेक्सर सक्रियपणे अनुक्रमित करत आहे आणि विशिष्ट सबग्राफसाठी क्वेरी सर्व्ह करत आहे. सक्रिय वाटप सबग्राफवरील सिग्नल आणि वाटप केलेल्या GRT रकमेच्या प्रमाणात अनुक्रमणिका बक्षिसे जमा करतात. - 2. **बंद**: एक इंडेक्सर अलीकडील, आणि वैध, अनुक्रमणिकेचा पुरावा (POI) सबमिट करून दिलेल्या सबग्राफवर जमा झालेल्या अनुक्रमणिका पुरस्कारांवर दावा करू शकतो. हे वाटप बंद करणे म्हणून ओळखले जाते. वाटप बंद होण्यापूर्वी किमान एका युगासाठी खुले असले पाहिजे. जास्तीत जास्त वाटप कालावधी 28 युग आहे. जर इंडेक्सरने 28 युगांपलीकडे वाटप उघडले तर ते शिळे वाटप म्हणून ओळखले जाते. जेव्हा वाटप **बंद** स्थितीत असते, तेव्हा मच्छीमार खोटा डेटा सादर करण्यासाठी इंडेक्सरला आव्हान देण्यासाठी विवाद उघडू शकतो. - - 3. **अंतिम झाले**: विवाद कालावधी संपला आहे, आणि क्वेरी शुल्क सवलत अनुक्रमणिकांद्वारे दावा करण्यासाठी उपलब्ध आहेत. - - 4. **दावा केला**: वाटपाचा अंतिम टप्पा, सर्व पात्र बक्षिसे वितरीत केली गेली आहेत आणि त्याच्या क्वेरी शुल्क सवलतीचा दावा केला गेला आहे. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). 
This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an Indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **सबग्राफ स्टुडिओ**: सबग्राफ तयार करणे, उपयोजित करणे आणि प्रकाशित करणे यासाठी एक शक्तिशाली डॅप. -- **मच्छिमार**: नेटवर्क सहभागी इंडेक्सर्सच्या क्वेरी प्रतिसाद आणि POI वर विवाद करू शकतात. याला मच्छिमार असणे म्हणतात. फिशरमनच्या बाजूने निकाली निघालेल्या वादाचा परिणाम इंडेक्सरला आर्थिक दंड आणि फिशरमनला पुरस्कारासह, अशा प्रकारे इंडेक्सर्सद्वारे नेटवर्कमध्ये केलेल्या इंडेक्सिंग आणि क्वेरी कार्याच्या अखंडतेला प्रोत्साहन देते. दंड (स्लॅशिंग) सध्या इंडेक्सरच्या स्व-स्टेकच्या 2.5% वर सेट केला आहे, कमी केलेल्या GRT पैकी 50% मच्छिमारांना जाईल आणि इतर 50% जाळले जाईल. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **मध्यस्थ**: मध्यस्थ हे गव्हर्नन्सद्वारे सेट केलेले नेटवर्क सहभागी असतात. लवादाची भूमिका अनुक्रमणिका आणि क्वेरी विवादांचे परिणाम ठरवणे आहे. ग्राफ नेटवर्कची उपयुक्तता आणि विश्वासार्हता वाढवणे हे त्यांचे ध्येय आहे. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **स्लॅशिंग**: इंडेक्सिंगचा चुकीचा पुरावा (POI) प्रदान केल्याबद्दल किंवा चुकीचा डेटा प्रदान केल्याबद्दल इंडेक्सर्स त्यांच्या स्टेक्ड GRT कमी करू शकतात. स्लॅशिंग टक्केवारी एक प्रोटोकॉल पॅरामीटर आहे जो सध्या इंडेक्सरच्या सेल्फ स्टेकच्या 2.5% वर सेट केला आहे. कमी केलेल्या GRT पैकी 50% मच्छिमारांना जाते ज्याने चुकीचा डेटा किंवा चुकीचा POI विवादित केला. उर्वरित 50% जळाले आहे. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **इंडेक्सिंग रिवॉर्ड्स**: इंडेक्सर्सना अनुक्रमणिका सबग्राफसाठी प्राप्त होणारे पुरस्कार. इंडेक्सिंग रिवॉर्ड्स GRT मध्ये वितरीत केले जातात. @@ -66,7 +62,7 @@ title: शब्दकोष - **GRT**: आलेखाचे कार्य उपयुक्तता टोकन. GRT नेटवर्क सहभागींना नेटवर्कमध्ये योगदान देण्यासाठी आर्थिक प्रोत्साहन देते. -- **POI किंवा इंडेक्सिंगचा पुरावा**: जेव्हा एखादा इंडेक्सर त्यांचे वाटप बंद करतो आणि दिलेल्या सबग्राफवर त्यांच्या जमा झालेल्या इंडेक्सर रिवॉर्ड्सवर दावा करू इच्छितो, तेव्हा त्यांनी इंडेक्सिंगचा वैध आणि अलीकडील पुरावा प्रदान करणे आवश्यक आहे ( POI). मच्छिमार इंडेक्सरद्वारे प्रदान केलेल्या पीओआयवर विवाद करू शकतात.
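A worked version of the arithmetic in the **Slashing** and **Fishermen** entries above: the 2.5% slash and the 50/50 split between the Fisherman and burning come from the text, while the 100,000 GRT self-stake (the minimum mentioned earlier in the glossary) is only an illustrative input.

```typescript
const SLASH_PERCENTAGE = 0.025 // 2.5% of the Indexer's self-stake
const FISHERMAN_SHARE = 0.5    // half of the slashed GRT goes to the disputing Fisherman

function slashingBreakdown(selfStakeGrt: number) {
  const slashed = selfStakeGrt * SLASH_PERCENTAGE
  const toFisherman = slashed * FISHERMAN_SHARE
  const burned = slashed - toFisherman
  return { slashed, toFisherman, burned }
}

// An Indexer self-staking the 100,000 GRT minimum would lose 2,500 GRT:
// 1,250 GRT to the Fisherman and 1,250 GRT burned.
console.log(slashingBreakdown(100_000)) // { slashed: 2500, toFisherman: 1250, burned: 1250 }
```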
मच्छिमारांच्या बाजूने विवाद सोडवला गेल्यास इंडेक्सर कमी होईल. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **ग्राफ नोड**: ग्राफ नोड हा घटक आहे जो सबग्राफ अनुक्रमित करतो आणि परिणामी डेटा GraphQL API द्वारे क्वेरीसाठी उपलब्ध करतो. हे इंडेक्सर स्टॅकसाठी मध्यवर्ती आहे आणि यशस्वी इंडेक्सर चालवण्यासाठी ग्राफ नोडचे योग्य ऑपरेशन महत्वाचे आहे. @@ -80,10 +76,10 @@ title: शब्दकोष - **कूलडाउन कालावधी**: इंडेक्सर ज्याने त्यांचे प्रतिनिधीत्व पॅरामीटर्स बदलले आहेत तोपर्यंत तो पुन्हा करू शकतो. -- **L2 हस्तांतरण साधने**: स्मार्ट करार आणि UI जे नेटवर्क सहभागींना Ethereum mainnet वरून Arbitrum One वर हस्तांतरित करण्यास सक्षम करतात. नेटवर्क सहभागी प्रतिनिधी जीआरटी, सबग्राफ, क्युरेशन शेअर्स आणि इंडेक्सरचा सेल्फ स्टेक हस्तांतरित करू शकतात. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. -- ग्राफ नेटवर्कवर सबग्राफ **_अपग्रेड करणे_**: होस्ट केलेल्या सेवेमधून ग्राफ नेटवर्कवर सबग्राफ हलवण्याची प्रक्रिया. +- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. -- सबग्राफ **_अपडेट करत आहे_**: सबग्राफ मॅनिफेस्ट, स्कीमा किंवा अपडेट्ससह नवीन सबग्राफ आवृत्ती रिलीझ करण्याची प्रक्रिया मॅपिंग. +- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **स्थलांतर**: क्युरेशन शेअर्सची प्रक्रिया सबग्राफच्या जुन्या आवृत्तीवरून सबग्राफच्या नवीन आवृत्तीवर हलते (म्हणजे, क्युरेशन शेअर्स नवीनतम आवृत्तीवर जातात जेव्हा v0.0.1 v0.0.2 वर अद्यतनित केले आहे). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/mr/graphcast.mdx b/website/pages/mr/graphcast.mdx index 251f7e78b06a..53dcf8cd25c7 100644 --- a/website/pages/mr/graphcast.mdx +++ b/website/pages/mr/graphcast.mdx @@ -1,21 +1,21 @@ --- -title: ग्राफकास्ट +title: Graphcast --- ## परिचय -तुम्ही तुमच्या सहकारी इंडेक्सर्सकडून स्वयंचलित पद्धतीने काही शिकू इच्छिता किंवा शेअर करू इच्छिता, परंतु ते खूप त्रासदायक आहे किंवा खूप गॅस खर्च होतो? +Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? सध्या, इतर नेटवर्क सहभागींना माहिती प्रसारित करण्याची किंमत इथरियम ब्लॉकचेनवरील गॅस फीद्वारे निर्धारित केली जाते. ग्राफकास्ट पर्यायी विकेंद्रित, वितरित पीअर-टू-पीअर (P2P) संप्रेषण साधन म्हणून कार्य करून या समस्येचे निराकरण करते जे संपूर्ण नेटवर्कवरील इंडेक्सर्सना रिअल टाइममध्ये माहितीची देवाणघेवाण करण्यास अनुमती देते. P2P संदेशांची देवाणघेवाण करण्याची किंमत शून्याच्या जवळ आहे, कोणत्याही डेटा अखंडतेची हमी नसतानाही. तरीसुद्धा, ग्राफकास्टचे उद्दिष्ट आहे की मेसेज वैधता हमी प्रदान करणे (म्हणजे संदेश वैध आहे आणि एखाद्या ज्ञात प्रोटोकॉल सहभागीने स्वाक्षरी केलेला आहे) प्रतिष्ठा मॉडेल्सच्या खुल्या डिझाइन स्पेससह. ग्राफकास्ट SDK (सॉफ्टवेअर डेव्हलपमेंट किट) विकसकांना रेडिओ तयार करण्यास अनुमती देते, जे गॉसिप-शक्तीवर चालणारे अनुप्रयोग आहेत जे निर्देशांक दिलेल्या उद्देशासाठी चालवू शकतात. 
खालील वापराच्या प्रकरणांसाठी काही रेडिओ तयार करण्याचा आमचा मानस आहे (किंवा रेडिओ तयार करू इच्छिणाऱ्या इतर विकासकांना/संघांना समर्थन पुरवणे): -- सबग्राफ डेटा अखंडतेचे रिअल-टाइम क्रॉस-चेकिंग ([POI रेडिओ](https://docs.graphops.xyz/graphcast/radios/poi-radio)). -- इतर इंडेक्सर्सकडून वार्प सिंक सबग्राफ, सबस्ट्रीम आणि फायरहोस डेटासाठी लिलाव आणि समन्वय आयोजित करणे. -- सबग्राफ रिक्वेस्ट व्हॉल्यूम, फी व्हॉल्यूम इ. सह सक्रिय क्वेरी विश्लेषणावर स्वयं-अहवाल. -- सबग्राफ इंडेक्सिंग वेळ, हँडलर गॅस कॉस्ट, इंडेक्सिंग एरर आल्या इत्यादींसह इंडेक्सिंग अॅनालिटिक्सवर स्व-रिपोर्टिंग. -- आलेख-नोड आवृत्ती, पोस्टग्रेस आवृत्ती, इथरियम क्लायंट आवृत्ती इ. सह स्टॅक माहितीवर स्वयं-अहवाल. +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). +- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. +- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. +- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. +- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. -### अधिक जाणून घ्या +### Learn More तुम्हाला ग्राफकास्टबद्दल अधिक जाणून घ्यायचे असल्यास, [येथे दस्तऐवजीकरण पहा.](https://docs.graphops.xyz/graphcast/intro) diff --git a/website/pages/mr/index.json b/website/pages/mr/index.json index 1cf2283bf21d..56bb0de7aa67 100644 --- a/website/pages/mr/index.json +++ b/website/pages/mr/index.json @@ -23,8 +23,8 @@ "description": "सबग्राफ तयार करण्यासाठी स्टुडिओ वापरा" }, "migrateFromHostedService": { - "title": "होस्ट केलेल्या सेवेमधून स्थलांतर करा", - "description": "ग्राफ नेटवर्कवर सबग्राफ स्थलांतरित करणे" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "होस्ट केलेली सेवा", - "description": "होस्ट केलेल्या सेवेवर सबग्राफ तयार करा आणि एक्सप्लोर करा" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "समर्थित नेटवर्क", - "description": "ग्राफ नेटवर्क आणि होस्ट केलेल्या सेवेवर आलेख खालील नेटवर्कना समर्थन देतो.", - "graphNetworkAndHostedService": "आलेख नेटवर्क आणि होस्ट केलेली सेवा", - "hostedService": "होस्ट केलेली सेवा", - "betaWarning": "बीटा मध्ये." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/mr/mips-faqs.mdx b/website/pages/mr/mips-faqs.mdx index 694f624e1c7e..b15170173b84 100644 --- a/website/pages/mr/mips-faqs.mdx +++ b/website/pages/mr/mips-faqs.mdx @@ -4,122 +4,124 @@ title: MIPs FAQs ## परिचय -ग्राफ इकोसिस्टममध्ये सहभागी होण्याची ही एक रोमांचक वेळ आहे! [ग्राफ डे 2022](https://thegraph.com/graph-day/2022/) दरम्यान यानिव ताल यांनी [होस्ट केलेल्या सेवेच्या सूर्यास्ताची](https://thegraph.com/blog/sunsetting-hosted-service/) घोषणा केली), एक क्षण ज्यासाठी ग्राफ इकोसिस्टम अनेक वर्षांपासून काम करत आहे. +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! 
-होस्ट केलेल्या सेवेच्या सूर्यास्तासाठी आणि त्यातील सर्व क्रियाकलाप विकेंद्रित नेटवर्कमध्ये स्थलांतरित करण्यासाठी, ग्राफ फाउंडेशनने [मायग्रेशन इन्फ्रास्ट्रक्चर प्रोव्हायडर्स (MIPs) कार्यक्रम] \(https://thegraph.com/blog/mips-multi) जाहीर केला आहे -चेन-इंडेक्सिंग-प्रोत्साहन-कार्यक्रम). +It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. -MIPs प्रोग्राम हा इंडेक्सर्ससाठी एक प्रोत्साहन कार्यक्रम आहे ज्यामुळे त्यांना इथरियम मेननेटच्या पलीकडे अनुक्रमणिका साखळ्यांसाठी संसाधनांसह समर्थन मिळते आणि ग्राफ प्रोटोकॉलला विकेंद्रीकृत नेटवर्क मल्टी-चेन इन्फ्रास्ट्रक्चर लेयरमध्ये विस्तृत करण्यात मदत होते. +To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -MIPs कार्यक्रमाने 0.75% च्या (75M GRT) भागात GRT आवंटित केले आहे, ज्यामध्ये 0.5% ने नेटवर्कला बूटस्ट्रॅप करण्यास सहाय्य करणार्या इंडेक्सर्सला प्रतिफळ देण्यात आले आहे, आणि 0.25% ने मल्टी-चेन सबग्रॅफ वापरणार्या सबग्रॅफ विकसकांसाठी नेटवर्क ग्रँट्समध्ये आवंटित केले आहे. +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. -### उपयुक्त संसाधने +The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. -- [Vincent (Victor) Taglia कडून Indexer 2ools](https://indexer-2ools.vincenttaglia.com/#/) -- [ग्राफ नेटवर्कवर प्रभावी इंडेक्सर कसे व्हावे](https://thegraph.com/blog/how-to-become-indexer/) -- [इंडेक्सर नॉलेज हब](https://thegraph.academy/indexers/) -- [अलोकेशन ऑप्टिमाइझर](https://github.com/graphprotocol/allocationopt.jl) -- [अलोकेशन ऑप्टिमायझेशन टूलिंग](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +### Useful Resources -### 1. सबग्राफ अयशस्वी झाला तरीही इंडेक्सिंगचा वैध पुरावा (POI) तयार करणे शक्य आहे का? +- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) +- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) +- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -होय, ते खरंच आहे. +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -संदर्भासाठी, लवाद चार्टर, [येथे चार्टरबद्दल अधिक जाणून घ्या](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), अयशस्वी सबग्राफसाठी POI जनरेट करण्याची पद्धत निर्दिष्ट करते. +Yes, it is indeed. -एका समुदाय सदस्याने, [SunTzu](https://github.com/suntzu93), लवादाच्या चार्टरच्या पद्धतीनुसार ही प्रक्रिया स्वयंचलित करण्यासाठी स्क्रिप्ट तयार केली आहे. रेपो पहा [here](https://github.com/suntzu93/get_valid_poi_subgraph). 
+For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -### 2. MIPs कार्यक्रम प्रथम कोणत्या साखळीला प्रोत्साहन देईल? +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). -विकेंद्रित नेटवर्कवर समर्थित असणारी पहिली साखळी म्हणजे Gnosis चेन! पूर्वी xDAI म्हणून ओळखली जाणारी, Gnosis चेन ही EVM-आधारित साखळी आहे. Gnosis चेनची प्रथम वापरकर्ता-अनुकूलता, चालणारे नोड्स, इंडेक्सर रेडिनेस, The Graph सह संरेखन आणि web3 मध्ये दत्तक घेतल्याने निवड करण्यात आली. +### 2. Which chain will the MIPs program incentivise first? -### 3. MIPs कार्यक्रमात नवीन साखळी कशा जोडल्या जातील? +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. -इंडेक्सरची तयारी, मागणी आणि सामुदायिक भावना यावर आधारित संपूर्ण MIP कार्यक्रमात नवीन साखळी जाहीर केल्या जातील. साखळ्यांना प्रथम टेस्टनेटवर समर्थन दिले जाईल आणि त्यानंतर, मेननेटवर त्या साखळीला समर्थन देण्यासाठी एक GIP पास केला जाईल. MIPs कार्यक्रमात सहभागी होणारे इंडेक्सर्स त्यांना कोणत्या साखळ्यांना समर्थन देण्यास स्वारस्य आहे ते निवडतील आणि प्रत्येक साखळीसाठी बक्षिसे मिळवतील, क्वेरी फी आणि सबग्राफ सर्व्ह करण्यासाठी नेटवर्कवर इंडेक्सिंग बक्षिसे मिळवण्याव्यतिरिक्त. MIPs सहभागींना त्यांची कामगिरी, नेटवर्क गरजा पूर्ण करण्याची क्षमता आणि समुदाय समर्थन यावर आधारित गुण दिले जातील. +### 3. How will new chains be added to the MIPs program? -### 4. नेटवर्क नवीन साखळीसाठी तयार आहे तेव्हा आम्हाला कसे कळेल? +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. -तत्परतेचे सर्वोत्तम मूल्यांकन करण्यासाठी ग्राफ फाउंडेशन QoS कार्यप्रदर्शन मेट्रिक्स, नेटवर्क कार्यप्रदर्शन आणि समुदाय चॅनेलचे निरीक्षण करेल. नेटवर्क त्या मल्टी-चेन डॅप्सना त्यांचे सबग्राफ स्थलांतरित करण्यात सक्षम होण्यासाठी कार्यक्षमतेच्या गरजा पूर्ण करते याची खात्री करणे हे प्राधान्य आहे. +### 4. How will we know when the network is ready for a new chain? -### 5. प्रति शृंखला बक्षिसे कशी विभागली जातात? +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. -साखळी नोड्स समक्रमित करण्यासाठी त्यांच्या आवश्यकतांमध्ये भिन्न असतात आणि ते क्वेरी व्हॉल्यूम आणि दत्तक घेण्यामध्ये भिन्न असतात हे लक्षात घेता, सर्व अभिप्राय आणि शिकणे कॅप्चर केले जातील याची खात्री करण्यासाठी त्या साखळीच्या चक्राच्या शेवटी प्रति साखळी बक्षिसे निश्चित केली जातील. 
तथापि, एकदा नेटवर्कवर साखळी समर्थित झाल्यावर अनुक्रमणिका कधीही क्वेरी शुल्क आणि अनुक्रमणिका बक्षिसे मिळविण्यास सक्षम असतील. +### 5. How are rewards divided per chain? -### 6. आपल्याला MIPs प्रोग्राममधील सर्व साखळी अनुक्रमित करण्याची आवश्यकता आहे किंवा आपण फक्त एक साखळी निवडू शकतो आणि ती अनुक्रमित करू शकतो? +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. -तुम्हाला हवी असलेली कोणतीही साखळी अनुक्रमित करण्यासाठी तुमचे स्वागत आहे! MIPs कार्यक्रमाचे उद्दिष्ट इंडेक्सर्सना त्यांना हव्या असलेल्या साखळ्यांना अनुक्रमित करण्यासाठी साधने आणि ज्ञानाने सुसज्ज करणे आणि त्यांना स्वारस्य असलेल्या वेब3 इकोसिस्टमला समर्थन देणे हे आहे. तथापि, प्रत्येक साखळीसाठी, टेस्टनेटपासून मेननेटपर्यंतचे टप्पे आहेत. तुम्ही अनुक्रमित करत असलेल्या साखळ्यांचे सर्व टप्पे पूर्ण केल्याचे सुनिश्चित करा. टप्प्यांबद्दल अधिक जाणून घेण्यासाठी [MIPs कल्पना पृष्ठ](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) पहा. +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? -### 7. पुरस्कार कधी वितरित केले जातील? +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. -परफॉर्मन्स मेट्रिक्स पूर्ण झाल्यावर आणि त्या इंडेक्सर्सद्वारे स्थलांतरित सबग्राफ समर्थित झाल्यावर MIPs बक्षिसे प्रत्येक साखळीत वितरित केली जातील. त्या साखळी चक्राच्या मध्यभागी प्रति साखळी एकूण पुरस्कारांबद्दल माहिती पहा. +### 7. When will rewards be distributed? -### 8. स्कोअरिंग कसे कार्य करते? +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. -इंडेक्सर्स लीडरबोर्डवरील संपूर्ण कार्यक्रमात स्कोअरिंगवर आधारित पुरस्कारांसाठी स्पर्धा करतील. प्रोग्राम स्कोअरिंग यावर आधारित असेल: +### 8. How does scoring work? -**सबग्राफ कव्हरेज** +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: -- तुम्ही प्रति साखळी सबग्राफसाठी जास्तीत जास्त समर्थन देत आहात? +**Subgraph Coverage** -- MIPs दरम्यान, मोठ्या इंडेक्सर्सनी त्यांना समर्थन देत असलेल्या प्रति साखळीतील 50%+ सबग्राफ्स भाग घेणे अपेक्षित आहे. +- Are you providing maximal support for subgraphs per chain? -**सेवेची गुणवत्ता** +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. -- इंडेक्सर सेवेच्या चांगल्या गुणवत्तेसह (विलंबता, नवीन डेटा, अपटाइम इ.) साखळी सेवा देत आहे का? +**Quality Of Service** -- इंडेक्सर सपोर्ट करणारे dapp डेव्हलपर त्यांच्या गरजा पूर्ण करत आहेत का? +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? -इंडेक्सर कार्यक्षमतेने वाटप करत आहे, नेटवर्कच्या एकूण आरोग्यासाठी योगदान देत आहे? 
+- Is the Indexer supporting dapp developers being reactive to their needs? -**समुदाय समर्थन** +Is Indexer allocating efficiently, contributing to the overall health of the network? -- इंडेक्सर सहकारी इंडेक्सर्सना मल्टी-चेनसाठी सेट करण्यात मदत करण्यासाठी सहयोग करत आहे का? +**Community Support** -- इंडेक्सर संपूर्ण कार्यक्रमात कोर डेव्हसना फीडबॅक देत आहे किंवा फोरममध्ये इंडेक्सर्ससोबत माहिती शेअर करत आहे? +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? -### 9. डिसकॉर्ड भूमिका कशी नियुक्त केली जाईल? +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? -नियंत्रक पुढील काही दिवसात भूमिका नियुक्त करतील. +### 9. How will the Discord role be assigned? -### 10. टेस्टनेटवर प्रोग्राम सुरू करणे आणि नंतर मेननेटवर स्विच करणे योग्य आहे का? तुम्‍ही माझा नोड ओळखण्‍यात आणि बक्षिसे वितरीत करताना ते लक्षात घेण्‍यात सक्षम असाल का? +Moderators will assign the roles in the next few days. -होय, तुमच्याकडून तसे करणे अपेक्षित आहे. अनेक टप्पे Görli वर आहेत आणि एक mainnet वर आहे. +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? -### 11. कोणत्या टप्प्यावर सहभागींनी मेननेट उपयोजन जोडावे अशी तुमची अपेक्षा आहे? +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. -फेज 3 दरम्यान मेननेट इंडेक्सर असणे आवश्यक असेल. याबद्दल अधिक माहिती [लवकरच या कल्पना पृष्ठावर सामायिक केली जाईल.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +### 11. At what point do you expect participants to add a mainnet deployment? -### 12. बक्षिसे वेस्टिंगच्या अधीन असतील का? +There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -कार्यक्रमाच्या शेवटी वाटप करण्यात येणारी टक्केवारी वेस्टिंगच्या अधीन असेल. याविषयी अधिक माहिती इंडेक्सर करारामध्ये सामायिक केली जाईल. +### 12. Will rewards be subject to vesting? -### 13. एकापेक्षा जास्त सदस्य असलेल्या संघांसाठी, सर्व कार्यसंघ सदस्यांना MIPs Discord भूमिका दिली जाईल का? +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. -होय +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -### 14. MIPs testnet मध्ये सहभागी होण्यासाठी ग्राफ क्युरेटर प्रोग्राममधील लॉक केलेले टोकन वापरणे शक्य आहे का? +Yes -होय +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -### 15. MIPs कार्यक्रमादरम्यान, अवैध POI वर विवाद करण्यासाठी कालावधी असेल का? +Yes -अजून ठरवायचे आहे. यावरील अधिक तपशिलांसाठी कृपया वेळोवेळी या पृष्ठावर परत या किंवा तुमची विनंती तातडीची असल्यास, कृपया info@thegraph.foundation वर ईमेल करा +### 15. During the MIPs program, will there be a period to dispute invalid POI? -### 17. आपण दोन वेस्टिंग कॉन्ट्रॅक्ट्स एकत्र करू शकतो का? +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation -नाही. पर्याय आहेत: तुम्ही एकाला दुसऱ्याला सोपवू शकता किंवा दोन स्वतंत्र अनुक्रमणिका चालवू शकता. +### 17. Can we combine two vesting contracts? -### 18. केवायसी प्रश्न? +No. The options are: you can delegate one to the other one or run two separate indexers. 
-कृपया info@thegraph.foundation वर ईमेल करा +### 18. KYC Questions? -### 19. मी Gnosis चेन इंडेक्स करण्यास तयार नाही, मी तयार झाल्यावर दुसर्‍या साखळीतून इंडेक्सिंग सुरू करू शकतो का? +Please email info@thegraph.foundation -होय +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -### 20. सर्व्हर चालवण्यासाठी शिफारस केलेले प्रदेश आहेत का? +Yes -आम्ही प्रदेशांवर शिफारसी देत नाही. स्थाने निवडताना तुम्हाला क्रिप्टोकरन्सीसाठी प्रमुख बाजारपेठ कोठे आहेत याचा विचार करावा लागेल. +### 20. Are there recommended regions to run the servers? -### 21. "हँडलर गॅस कॉस्ट" म्हणजे काय? +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. -हँडलर कार्यान्वित करण्याच्या खर्चाचे हे निर्धारक उपाय आहे. नाव काय सुचवू शकते याच्या उलट, ते ब्लॉकचेनवरील गॅसच्या किंमतीशी संबंधित नाही. +### 21. What is “handler gas cost”? + +It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/mr/network/benefits.mdx b/website/pages/mr/network/benefits.mdx index e8ac3ad65af5..c3322af4ad95 100644 --- a/website/pages/mr/network/benefits.mdx +++ b/website/pages/mr/network/benefits.mdx @@ -14,7 +14,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg - 60-98% कमी मासिक खर्च - $0 पायाभूत सुविधा सेटअप खर्च - उत्कृष्ट अपटाइम -- 438 इंडेक्सर्समध्ये प्रवेश (आणि मोजणी) +- Access to hundreds of independent Indexers around the world - जागतिक समुदायाद्वारे 24/7 तांत्रिक समर्थन ## फायदे स्पष्ट केले @@ -79,9 +79,9 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg सबग्राफवर क्युरेटिंग सिग्नल हा पर्यायी एक-वेळचा, निव्वळ-शून्य खर्च आहे (उदा., $1k सिग्नल सबग्राफवर क्युरेट केला जाऊ शकतो आणि नंतर मागे घेतला जाऊ शकतो—प्रक्रियेत परतावा मिळविण्याच्या संभाव्यतेसह). -काही वापरकर्त्यांना त्यांचे सबग्राफ नवीन आवृत्तीवर अद्यतनित करण्याची आवश्यकता असू शकते. इथरियम गॅस शुल्कामुळे, लेखनाच्या वेळी अद्यतनाची किंमत ~$50 आहे. +Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. -लक्षात ठेवा की [आर्बिट्रम](/arbitrum/arbitrum-faq) वरील गॅस फी इथरियम मेननेटपेक्षा लक्षणीयरीत्या कमी आहेत. +Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. ## कोणतेही सेटअप खर्च नाही & ग्रेटर ऑपरेशनल कार्यक्षमता @@ -89,8 +89,8 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## विश्वसनीयता & लवचिकता -ग्राफचे विकेंद्रित नेटवर्क वापरकर्त्यांना `ग्राफ-नोड` स्वयं-होस्ट करताना अस्तित्वात नसलेल्या भौगोलिक रिडंडंसीमध्ये प्रवेश देते. 99.9%+ अपटाइममुळे क्वेरी विश्वसनीयपणे दिल्या जातात, जागतिक स्तरावर नेटवर्क सुरक्षित करणाऱ्या 168 इंडेक्सर्सने (आणि मोजणी) मिळवले. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. तळ ओळ: ग्राफ नेटवर्क कमी खर्चिक आहे, वापरण्यास सोपे आहे आणि स्थानिक पातळीवर `ग्राफ-नोड` चालवण्याच्या तुलनेत उत्कृष्ट परिणाम देते. -आजच The Graph Network वापरणे सुरू करा आणि [तुमचा सबग्राफ The Graph च्या विकेंद्रित नेटवर्कवर कसा श्रेणीसुधारित करायचा ते शिका](/cookbook/upgrading-a-subgraph). +Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). 
diff --git a/website/pages/mr/network/curating.mdx b/website/pages/mr/network/curating.mdx index 96927bd86f58..d83502c32fdf 100644 --- a/website/pages/mr/network/curating.mdx +++ b/website/pages/mr/network/curating.mdx @@ -4,7 +4,7 @@ title: क्युरेटिंग आलेख विकेंद्रित अर्थव्यवस्थेसाठी क्युरेटर महत्त्वपूर्ण आहेत. ते त्यांचे वेब3 इकोसिस्टमचे ज्ञान वापरतात आणि ते ग्राफ नेटवर्कद्वारे अनुक्रमित केले जावेत अशा सबग्राफचे मूल्यांकन आणि सिग्नल करतात. एक्सप्लोररद्वारे, क्युरेटर सिग्नलिंग निर्णय घेण्यासाठी नेटवर्क डेटा पाहण्यास सक्षम आहेत. ग्राफ नेटवर्क क्युरेटर्सना बक्षीस देते जे चांगल्या गुणवत्तेच्या सबग्राफवर संकेत देतात आणि सबग्राफ तयार करतात त्या क्वेरी फीच्या वाटा. क्युरेटरला लवकर सिग्नल देण्यासाठी आर्थिकदृष्ट्या प्रोत्साहन दिले जाते. क्युरेटर्सचे हे संकेत इंडेक्सर्ससाठी महत्त्वाचे आहेत, जे नंतर या सिग्नल केलेल्या सबग्राफमधील डेटावर प्रक्रिया किंवा अनुक्रमित करू शकतात. -संकेतसंकेत करतांना, क्युरेटर्सला सबग्राफच्या एक निश्चित आवृत्तीवर संकेतसंकेत करायचं किंवा ऑटो-मायग्रेट वापरता येईल. ऑटो-मायग्रेट वापरता संकेतसंकेत केल्यास, क्युरेटर्सचं विभाजित केलेलं शेअर डेव्हलपरांनी प्रकाशित केलेल्या नवीनतम आवृत्तीवर होईल. जर आपण तुमचं संकेतसंकेत एक निश्चित आवृत्तीवर करण्याचं निर्णय घेतलं तर, तुमचं शेअर केवळ त्या निश्चित आवृत्तीवरचं ठरेल. +When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. लक्षात ठेवा की क्युरेशन धोकादायक आहे. तुम्‍हाला विश्‍वास असलेल्‍या सबग्राफवर तुम्‍ही क्युरेट केल्‍याची खात्री करा. सबग्राफ तयार करणे अनुज्ञेय आहे, त्यामुळे लोक सबग्राफ तयार करू शकतात आणि त्यांना हवे ते नाव देऊ शकतात. क्युरेशन जोखमींबद्दल अधिक मार्गदर्शनासाठी, [द ग्राफ अकादमीचे क्युरेशन मार्गदर्शक](https://thegraph.academy/curators/) पहा @@ -60,7 +60,7 @@ The Graph च्या बाबतीत, [बँकोरची बाँड ## जोखीम 1. द ग्राफमध्ये क्वेरी मार्केट मूळतः तरुण आहे आणि नवीन मार्केट डायनॅमिक्समुळे तुमचा %APY तुमच्या अपेक्षेपेक्षा कमी असण्याचा धोका आहे. -2. क्युरेशन फी - क्युरेटर्स जीआरटी सबग्राफवर संकेतसंकेत करतात, त्यांनी १% क्युरेशन कर व्यय करणारं असतं. ह्या फीने बर्न केली जाते आणि बाकीचं भाग बॉन्डिंग कर्वच्या रिझर्व सप्लायमध्ये जमा केलं जातं. +2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. 3. जेव्हा क्युरेटर GRT काढण्यासाठी त्यांचे शेअर्स बर्न करतात, तेव्हा उर्वरित शेअर्सचे GRT मूल्यांकन कमी केले जाईल. लक्षात ठेवा की काही प्रकरणांमध्ये, क्युरेटर त्यांचे शेअर्स **सर्व एकाच वेळी बर्न करण्याचा निर्णय घेऊ शकतात**. dApp डेव्हलपरने त्यांच्या सबग्राफची आवृत्ती तयार करणे/सुधारणा करणे आणि क्वेरी करणे थांबवल्यास किंवा सबग्राफ अयशस्वी झाल्यास ही परिस्थिती सामान्य असू शकते. परिणामी, उर्वरित क्युरेटर्स त्यांच्या सुरुवातीच्या GRT चा काही अंश काढू शकतील. कमी जोखीम प्रोफाइल असलेल्या नेटवर्क भूमिकेसाठी, [प्रतिनिधी](/network/delegating) पहा. 4. बगमुळे सबग्राफ अयशस्वी होऊ शकतो. अयशस्वी सबग्राफ क्वेरी शुल्क जमा करत नाही. परिणामी, विकसक बगचे निराकरण करेपर्यंत आणि नवीन आवृत्ती तैनात करेपर्यंत तुम्हाला प्रतीक्षा करावी लागेल. - तुम्ही सबग्राफच्या नवीनतम आवृत्तीचे सदस्यत्व घेतले असल्यास, तुमचे शेअर्स त्या नवीन आवृत्तीमध्ये स्वयंचलितपणे स्थलांतरित होतील. यावर 0.5% क्युरेशन कर लागेल. 
@@ -79,13 +79,13 @@ The Graph च्या बाबतीत, [बँकोरची बाँड - क्युरेटर्स नेटवर्कबद्दलची त्यांची समज वापरून प्रयत्न करू शकतात आणि भविष्यात वैयक्तिक सबग्राफ अधिक किंवा कमी क्वेरी व्हॉल्यूम कसा निर्माण करू शकतो याचा अंदाज लावू शकतात - क्युरेटर्सने ग्राफ एक्सप्लोररद्वारे उपलब्ध असलेले मेट्रिक्स देखील समजून घेतले पाहिजेत. मागील क्वेरी व्हॉल्यूम आणि सबग्राफ डेव्हलपर कोण आहे यासारखे मेट्रिक्स सबग्राफ सिग्नल करणे योग्य आहे की नाही हे निर्धारित करण्यात मदत करू शकतात. -### 3. सबग्राफ अपडेट करण्याची किंमत किती आहे? +### 3. What’s the cost of updating a subgraph? -नवीन सबग्राफ आवृत्तीकडे आपल्या क्युरेशन शेअर्सचं स्थानांतरण करण्याने १% चा क्युरेशन कर व्यय होतो. क्युरेटर्सला सबग्राफच्या नवीनतम आवृत्तीच्या सदस्यत्वाचं निवड करायचं किंवा त्यास वापरण्याचं निवड करायचं करू शकता. क्युरेशन शेअर्स ऑटो-मायग्रेट करण्याने ते नवीन आवृत्तीकडे स्थानांतरित होतात, आणि त्यामुळे क्युरेटर्सला अर्धा क्युरेशन कर व्यय, अर्थात ०. ५% चा क्युरेशन कर व्यय पण होतो, कारण सबग्राफ अपडेट करणे एक ऑन-चेन क्रिया आहे ज्यामुळे गॅसचं खर्च असतं. +Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. -### 4. मी माझा सबग्राफ किती वेळा अपडेट करू शकतो? +### 4. How often can I update my subgraph? -आपल्या सबग्राफला अत्यंत वापरता जाऊ नये, असं सुचवलं जातं. अधिक माहितीसाठी वरील प्रश्नावर परत जा. +It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. ### 5. मी माझे क्युरेशन शेअर्स विकू शकतो का? diff --git a/website/pages/mr/network/delegating.mdx b/website/pages/mr/network/delegating.mdx index 408f9ddd0a8a..74e6bf0d8e10 100644 --- a/website/pages/mr/network/delegating.mdx +++ b/website/pages/mr/network/delegating.mdx @@ -1,24 +1,24 @@ --- -title: सोपविणे +title: Delegating --- प्रतिनिधी हे नेटवर्क सहभागी असतात जे एक किंवा अधिक इंडेक्सर्सना GRT सोपवतात (म्हणजे "भाग"). प्रतिनिधी स्वतः ग्राफ नोड न चालवता नेटवर्क सुरक्षित करण्यात योगदान देतात. इंडेक्सरला सोपवून, प्रतिनिधी इंडेक्सरच्या क्वेरी फी आणि रिवॉर्ड्सचा एक भाग कमावतात. इंडेक्सर किती प्रश्नांवर प्रक्रिया करू शकतो हे इंडेक्सरच्या स्वतःच्या (आणि नियुक्त केलेल्या) स्टेकवर आणि प्रत्येक क्वेरीसाठी इंडेक्सर किती किंमत आकारतो यावर अवलंबून असते, त्यामुळे इंडेक्सरला जितका अधिक स्टेक वाटप केला जाईल, तितक्या अधिक संभाव्य क्वेरींवर ते प्रक्रिया करू शकतात. -## प्रतिनिधी मार्गदर्शक +## Delegator Guide हे मार्गदर्शक ग्राफ नेटवर्कमध्ये प्रभावी प्रतिनिधी कसे असावे हे स्पष्ट करेल. प्रतिनिधी प्रोटोकॉलची कमाई सर्व इंडेक्सर्सच्या बरोबरीने त्यांच्या नियुक्त भागिदारीच्या आधारावर शेअर करतात. एका प्रतिनिधीने अनेक घटकांवर आधारित इंडेक्सर्स निवडण्यासाठी त्यांचा सर्वोत्तम निर्णय वापरला पाहिजे. कृपया लक्षात घ्या की हे मार्गदर्शक मेटामास्क योग्यरित्या सेट करण्यासारख्या चरणांवर जाणार नाही, कारण ती माहिती इंटरनेटवर मोठ्या प्रमाणात उपलब्ध आहे. या मार्गदर्शकामध्ये तीन विभाग आहेत: -- ग्राफ नेटवर्कमध्ये टोकन सोपवण्याचे धोके -- प्रतिनिधी म्हणून अपेक्षित परताव्याची गणना कशी करावी -- ग्राफ नेटवर्क UI मध्‍ये प्रतिनिधी करण्‍याच्‍या पायर्‍या दर्शविणारा व्हिडिओ मार्गदर्शक +- The risks of delegating tokens in The Graph Network +- How to calculate expected returns as a Delegator +- A video guide showing the steps to delegate in the Graph Network UI -## प्रतिनिधीत्व जोखीम +## Delegation Risks -प्रोटोकॉलमध्ये प्रतिनिधी असण्याचे मुख्य धोके खाली सूचीबद्ध केले आहेत. 
+Listed below are the main risks of being a Delegator in the protocol. -### शिष्टमंडळ कर +### The delegation tax वाईट वर्तनासाठी प्रतिनिधींना कमी केले जाऊ शकत नाही, परंतु नेटवर्कच्या अखंडतेला हानी पोहोचवणाऱ्या खराब निर्णयक्षमतेला प्रोत्साहन देण्यासाठी प्रतिनिधींवर कर लावला जातो. @@ -26,7 +26,7 @@ title: सोपविणे याचा अर्थ असा की सुरक्षित राहण्यासाठी, एखाद्या प्रतिनिधीने इंडेक्सरकडे सोपवून त्यांचा परतावा काय असेल याची गणना केली पाहिजे. उदाहरणार्थ, प्रतिनिधी त्यांच्या प्रतिनिधींवरील 0.5% कर परत मिळण्यापूर्वी किती दिवस लागतील याची गणना करू शकतात. -### शिष्टमंडळ अनबॉन्डिंग कालावधी +### The delegation unbonding period जेव्हा जेव्हा एखाद्या प्रतिनिधीला प्रतिनिधीत्व रद्द करायचे असते तेव्हा त्यांचे टोकन 28-दिवसांच्या अनबॉन्डिंग कालावधीच्या अधीन असतात. याचा अर्थ ते त्यांचे टोकन हस्तांतरित करू शकत नाहीत किंवा 28 दिवसांसाठी कोणतेही पुरस्कार मिळवू शकत नाहीत. @@ -37,7 +37,7 @@ title: सोपविणे अनबॉन्डिंग कालावधी._ -### प्रतिनिधींसाठी योग्य रिवॉर्ड पेआउटसह विश्वासार्ह इंडेक्सर निवडणे +### Choosing a trustworthy Indexer with a fair reward payout for Delegators समजून घेणे हा एक महत्त्वाचा भाग आहे. प्रथम तीन अत्यंत महत्त्वाच्या मूल्यांवर चर्चा करूया, जे डेलिगेशन पॅरामीटर्स आहेत. @@ -52,30 +52,30 @@ title: सोपविणे जसे तुम्ही बघू शकता, योग्य इंडेक्सर निवडण्यासाठी बरेच विचार करणे आवश्यक आहे. म्हणूनच आम्ही शिफारस करतो की, प्रतिनिधींना सातत्याने बक्षीस देण्यासाठी इंडेक्सर्स सर्वोत्तम सामाजिक प्रतिष्ठा आणि तांत्रिक प्रतिष्ठा असलेले कोण आहेत हे निर्धारित करण्यासाठी तुम्ही ग्राफ डिस्कॉर्ड एक्सप्लोर करा. अनेक इंडेक्सर्स Discord मध्ये खूप सक्रिय आहेत आणि तुमच्या प्रश्नांची उत्तरे देण्यात आनंद होईल. त्यापैकी बरेच जण टेस्टनेटमध्ये अनेक महिन्यांपासून इंडेक्स करत आहेत आणि डेलिगेटर्सना चांगला परतावा मिळविण्यात मदत करण्यासाठी त्यांचे सर्वोत्तम प्रयत्न करत आहेत, कारण यामुळे नेटवर्कचे आरोग्य आणि यश सुधारते. -### प्रतिनिधींना अपेक्षित परतावा मोजत आहे +### Calculating Delegators expected return -डेलिगेटरला परतावा ठरवताना अनेक बाबींचा विचार करावा लागतो. यात समाविष्ट: +A Delegator has to consider a lot of factors when determining the return. These include: - तांत्रिक प्रतिनिधी त्यांच्याकडे उपलब्ध असलेले प्रतिनिधी टोकन वापरण्याची इंडेक्सरची क्षमता देखील पाहू शकतो. जर इंडेक्सर उपलब्ध असलेल्या सर्व टोकन्सचे वाटप करत नसेल, तर ते स्वतःसाठी किंवा त्यांच्या प्रतिनिधींसाठी जास्तीत जास्त नफा मिळवत नाहीत. - सध्या नेटवर्कमध्ये इंडेक्सर 1 ते 28 दिवसांच्या दरम्यान कधीही वाटप बंद करणे आणि बक्षिसे गोळा करणे निवडू शकतो. त्यामुळे हे शक्य आहे की इंडेक्सरकडे भरपूर बक्षिसे आहेत जी त्यांनी अद्याप गोळा केली नाहीत आणि त्यामुळे त्यांची एकूण बक्षिसे कमी आहेत. हे सुरुवातीच्या काळात विचारात घेतले पाहिजे. -### क्वेरी फी कट आणि इंडेक्सिंग फी कपात लक्षात घेऊन +### Considering the query fee cut and indexing fee cut वरील विभागांमध्ये वर्णन केल्याप्रमाणे, तुम्ही एक इंडेक्सर निवडला पाहिजे जो त्यांच्या क्वेरी फी कट आणि इंडेक्सिंग फी कट सेट करण्याबद्दल पारदर्शक आणि प्रामाणिक असेल. त्यांच्याकडे किती वेळ बफर आहे हे पाहण्यासाठी प्रतिनिधीने पॅरामीटर्स कूलडाउन टाइम देखील पहावे. ते पूर्ण झाल्यानंतर, प्रतिनिधींना किती बक्षिसे मिळत आहेत याची गणना करणे अगदी सोपे आहे. सूत्र आहे: -![प्रतिनिधीत्व प्रतिमा 3](/img/Delegation-Reward-Formula.png) +![Delegation Image 3](/img/Delegation-Reward-Formula.png) -### इंडेक्सरच्या प्रतिनिधी मंडळाचा विचार करून +### Considering the Indexer's delegation pool प्रतिनिधीने विचारात घेण्याची आणखी एक गोष्ट म्हणजे त्यांच्या मालकीचे प्रतिनिधी पूल किती आहे. डेलिगेटरने पूलमध्ये जमा केलेल्या रकमेद्वारे निर्धारित पूलच्या साध्या पुनर्संतुलनासह, सर्व प्रतिनिधी पुरस्कार समान रीतीने सामायिक केले जातात. 
हे डेलिगेटरला पूलचा वाटा देते: -![सूत्र सामायिक करा](/img/Share-Forumla.png) +![Share formula](/img/Share-Forumla.png) या सूत्राचा वापर करून, आम्ही पाहू शकतो की इंडेक्सर जो प्रतिनिधींना केवळ 20% ऑफर करत आहे, प्रत्यक्षात प्रतिनिधींना 90% देणाऱ्या इंडेक्सरपेक्षा डेलिगेटर्सला अधिक चांगले बक्षीस देणे शक्य आहे. -त्यामुळे प्रतिनिधींना 20% ऑफर करणारा इंडेक्सर अधिक चांगला परतावा देत आहे हे निर्धारित करण्यासाठी एक प्रतिनिधी गणित करू शकतो. +A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. -### शिष्टमंडळाची क्षमता लक्षात घेता +### Considering the delegation capacity विचारात घेण्यासारखी दुसरी गोष्ट म्हणजे प्रतिनिधींची क्षमता. सध्या, डेलिगेशन रेशो 16 वर सेट केले आहे. याचा अर्थ असा की जर एखाद्या इंडेक्सरने 1,000,000 GRT स्टेक केले असेल, तर त्यांची डेलिगेशन क्षमता 16,000,000 GRT डेलिगेटेड टोकन्स आहे जी ते प्रोटोकॉलमध्ये वापरू शकतात. या रकमेवरील कोणतेही डेलिगेट केलेले टोकन सर्व डेलिगेटर रिवॉर्ड्स कमी करतील. @@ -83,15 +83,15 @@ title: सोपविणे त्यामुळे प्रतिनिधीने नेहमी इंडेक्सरच्या डेलिगेशन क्षमतेचा विचार केला पाहिजे आणि निर्णय घेताना त्याचा समावेश केला पाहिजे. -## प्रतिनिधी FAQ आणि बग +## Delegator FAQs and Bugs -### मेटामास्क "प्रलंबित व्यवहार" बग +### MetaMask "Pending Transaction" Bug **जेव्हा मी मेटामास्कमध्ये माझा व्यवहार सोपवण्याचा प्रयत्न करतो तेव्हा अपेक्षेपेक्षा जास्त काळ "प्रलंबित" किंवा "रांगेत" असे दिसते. मी काय करू?** काही वेळा, मेटामास्क द्वारे इंडेक्सर्सना सोपवण्याचे प्रयत्न अयशस्वी होऊ शकतात आणि परिणामी दीर्घकाळ "प्रलंबित" किंवा "रांगेत" व्यवहार प्रयत्न केले जातात. उदाहरणार्थ, वापरकर्ता सध्याच्या किमतींच्या तुलनेत अपुरे गॅस शुल्क सोपवण्याचा प्रयत्न करू शकतो, परिणामी व्यवहाराचा प्रयत्न त्यांच्या MetaMask वॉलेटमध्ये 15+ मिनिटांसाठी "प्रलंबित" म्हणून प्रदर्शित होईल. जेव्हा हे घडते तेव्हा, वापरकर्त्याद्वारे त्यानंतरच्या व्यवहारांचा प्रयत्न केला जाऊ शकतो, परंतु प्रारंभिक व्यवहार खनन होईपर्यंत त्यावर प्रक्रिया केली जाणार नाही, कारण पत्त्यावरील व्यवहारांची क्रमाने प्रक्रिया करणे आवश्यक आहे. अशा परिस्थितीत, हे व्यवहार MetaMask मध्ये रद्द केले जाऊ शकतात, परंतु त्यानंतरचे प्रयत्न यशस्वी होतील याची कोणतीही हमी न देता व्यवहाराच्या प्रयत्नांवर गॅस शुल्क जमा होईल. या बगचे सोपे निराकरण म्हणजे ब्राउझर रीस्टार्ट करणे (उदा. अॅड्रेस बारमध्ये "अ‍ॅबोर्ट:रीस्टार्ट" वापरणे), जे वॉलेटमधून गॅस वजा न करता मागील सर्व प्रयत्न रद्द करेल. अनेक वापरकर्ते ज्यांना ही समस्या आली आहे आणि त्यांनी त्यांचे ब्राउझर रीस्टार्ट केल्यानंतर आणि नियुक्त करण्याचा प्रयत्न केल्यानंतर यशस्वी व्यवहार नोंदवले आहेत. -## नेटवर्क UI साठी व्हिडिओ मार्गदर्शक +## Video guide for the network UI हे मार्गदर्शक या दस्तऐवजाचे संपूर्ण पुनरावलोकन प्रदान करते आणि UI सह संवाद साधताना या दस्तऐवजातील प्रत्येक गोष्टीचा विचार कसा करावा. diff --git a/website/pages/mr/network/developing.mdx b/website/pages/mr/network/developing.mdx index 5108ddac5370..6ada412cd3ab 100644 --- a/website/pages/mr/network/developing.mdx +++ b/website/pages/mr/network/developing.mdx @@ -34,11 +34,11 @@ title: विकसनशील डेव्हलपर गेटवेवर इंडेक्सर प्राधान्य व्यक्त करण्यास देखील सक्षम आहेत, उदाहरणार्थ ज्यांचे क्वेरी प्रतिसाद जलद आहे किंवा ज्यांचा डेटा सर्वात अद्ययावत आहे अशा इंडेक्सर्सना प्राधान्य देणे. ही नियंत्रणे सबग्राफ स्टुडिओमध्ये सेट केली जातात. -### सबग्राफ अपडेट करत आहे +### Updating Subgraphs काही वेळानंतर सबग्राफ डेव्हलपरला त्यांचा सबग्राफ अपडेट करायचा असेल, कदाचित बग फिक्स करायचा असेल किंवा नवीन कार्यक्षमता जोडायची असेल. सबग्राफ डेव्हलपर रेट-मर्यादित विकास आणि चाचणीसाठी त्यांच्या सबग्राफची नवीन आवृत्ती सबग्राफ स्टुडिओमध्ये तैनात करू शकतो. 
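The delegating guide above asks Delegators to do two pieces of arithmetic: how long it takes to earn back the 0.5% delegation tax, and how an Indexer's reward share interacts with the size of its delegation pool. The sketch below works through both, plus the delegation capacity at the current ratio of 16; the 10% annual return, the 1,000 GRT of per-epoch rewards, and the two pool sizes are illustrative assumptions, not protocol values.

```typescript
// 1. Days needed to earn back the 0.5% delegation tax at an assumed annual return.
const DELEGATION_TAX = 0.005
function daysToRecoverTax(expectedAnnualReturn: number): number {
  return DELEGATION_TAX / (expectedAnnualReturn / 365)
}
console.log(daysToRecoverTax(0.1)) // ≈ 18 days at an assumed 10% annual return

// 2. Reward per delegated GRT: the share offered to Delegators divided across the pool.
function rewardPerDelegatedGrt(epochRewards: number, delegatorShare: number, poolSize: number): number {
  return (epochRewards * delegatorShare) / poolSize
}
const indexerA = rewardPerDelegatedGrt(1_000, 0.2, 100_000)   // 0.002 GRT per delegated GRT
const indexerB = rewardPerDelegatedGrt(1_000, 0.9, 1_000_000) // 0.0009 GRT per delegated GRT
console.log(indexerA > indexerB) // true — the 20% Indexer still pays more per delegated token

// 3. Delegation capacity at the ratio of 16: delegation beyond this earns nothing extra.
const DELEGATION_RATIO = 16
console.log(1_000_000 * DELEGATION_RATIO) // 16,000,000 GRT for an Indexer self-staking 1,000,000 GRT
```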
-सबग्राफ डेव्हलपर सज्ज झाल्यानंतर, ते त्यांच्या सबग्राफला नवीन आवृत्तीकडे दिलेल्या प्रक्रियेने एक लेखपालन चालू करू शकतो. सबग्राफ अपडेट करण्याने किंवा संकेतलेल्या वापरकर्त्यांनी "ऑटो-मायग्रेट" निवडल्यास, त्याच्या संकेतांचा नवीन आवृत्तीकडे स्थानांतरित करण्यात आता होईल, ज्यामुळे ती मायग्रेशन करून घेणारा कर व्यय वाढतो. ह्या संकेत मायग्रेशनची नोंदणी इंडेक्सर्सला नवीन आवृत्तीचे इंडेक्स करण्याचे प्रेरित करावे लागते, असे केल्यास ते क्वेरीसाठी लवकरच उपलब्ध होईल. +Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. ### उपग्राफ नापसंत करत आहे @@ -50,4 +50,4 @@ title: विकसनशील ### विकसक आणि नेटवर्क इकॉनॉमिक्स -सबग्राफ डेव्हलपर्स नेटवर्कमध्ये मुख्य आर्थिक भागीदार आहेत, ज्यामुळे त्यांनी इंडेक्सिंगला प्रोत्साहन देण्यासाठी आणि महत्वाच्या क्वेरींग सबग्राफांसाठी, ज्याचं नेटवर्कचं प्राथमिक मूल्यांतर होतं, जी ग्राफ्ट विनिमय करतंय. सबग्राफ डेव्हलपर्स सबग्राफ अपडेट केल्यास ते GRT बर्न करतात. +Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. diff --git a/website/pages/mr/network/explorer.mdx b/website/pages/mr/network/explorer.mdx index 37f0479f48f4..b00da83c5ebf 100644 --- a/website/pages/mr/network/explorer.mdx +++ b/website/pages/mr/network/explorer.mdx @@ -6,45 +6,45 @@ title: आलेख एक्सप्लोरर -## सबग्राफ +## Subgraphs प्रथम गोष्टी, जर तुम्ही सबग्राफ स्टुडिओमध्ये तुमचा सबग्राफ डिप्लॉय करणे आणि प्रकाशित करणे पूर्ण केले असेल तर, नेव्हिगेशन बारच्या शीर्षस्थानी असलेले सबग्राफ टॅब हे विकेंद्रित नेटवर्कवर तुमचे स्वतःचे तयार झालेले सबग्राफ (आणि इतरांचे सबग्राफ) पाहण्याचे ठिकाण आहे. येथे, आपण तयार केलेली तारीख, सिग्नल रक्कम किंवा नावावर आधारित आपण शोधत असलेला अचूक सबग्राफ शोधण्यात सक्षम असाल. -![एक्सप्लोरर इमेज १](/img/Subgraphs-Explorer-Landing.png) +![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) जेव्हा तुम्ही सबग्राफवर क्लिक करता, तेव्हा तुम्ही खेळाच्या मैदानात प्रश्नांची चाचणी घेण्यास सक्षम व्हाल आणि माहितीपूर्ण निर्णय घेण्यासाठी नेटवर्क तपशीलांचा फायदा घेण्यास सक्षम असाल. इंडेक्सर्सना त्याचे महत्त्व आणि गुणवत्तेची जाणीव करून देण्यासाठी तुम्ही तुमच्या स्वतःच्या सबग्राफवर किंवा इतरांच्या सबग्राफवर GRT सिग्नल करण्यास सक्षम असाल. हे गंभीर आहे कारण सबग्राफवर सिग्नल केल्याने ते अनुक्रमित होण्यासाठी प्रोत्साहन मिळते, याचा अर्थ असा आहे की शेवटी क्वेरी सर्व्ह करण्यासाठी ते नेटवर्कवर येईल. -![एक्सप्लोरर इमेज 2](/img/Subgraph-Details.png) +![Explorer Image 2](/img/Subgraph-Details.png) -प्रत्येक सबग्राफच्या समर्पित पृष्ठावर, अनेक तपशील समोर आले आहेत. यात समाविष्ट: +On each subgraph’s dedicated page, several details are surfaced. 
These include: - Signal/Un-signal on subgraphs - चार्ट, वर्तमान उपयोजन आयडी आणि इतर मेटाडेटा यासारखे अधिक तपशील पहा -- सबग्राफच्या मागील पुनरावृत्ती एक्सप्लोर करण्यासाठी आवृत्त्या स्विच करा +- Switch versions to explore past iterations of the subgraph - GraphQL द्वारे सबग्राफ क्वेरी करा -- खेळाच्या मैदानात चाचणी उपग्राफ -- एका विशिष्ट सबग्राफवर अनुक्रमित करणारे इंडेक्सर्स पहा -- सबग्राफ आकडेवारी (वाटप, क्युरेटर इ -- सबग्राफ प्रकाशित करणारी संस्था पहा +- Test subgraphs in the playground +- View the Indexers that are indexing on a certain subgraph +- Subgraph stats (allocations, Curators, etc) +- View the entity who published the subgraph -![एक्सप्लोरर इमेज 3](/img/Explorer-Signal-Unsignal.png) +![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) -## सहभागी +## Participants या टॅबमध्‍ये, इंडेक्सर्स, डेलिगेटर्स आणि क्युरेटर्स यांसारख्या नेटवर्क अ‍ॅक्टिव्हिटीमध्ये भाग घेणाऱ्या सर्व लोकांचे विहंगम दृश्य तुम्हाला मिळेल. खाली, आम्ही प्रत्येक टॅबचा तुमच्यासाठी काय अर्थ होतो याचे सखोल पुनरावलोकन करू. -### 1. इंडेक्सर्स +### 1. Indexers -![एक्सप्लोरर इमेज 4](/img/Indexer-Pane.png) +![Explorer Image 4](/img/Indexer-Pane.png) चला इंडेक्सर्ससह प्रारंभ करूया. इंडेक्सर्स हे प्रोटोकॉलचा कणा आहेत, जे सबग्राफवर भाग घेतात, त्यांना अनुक्रमित करतात आणि सबग्राफ वापरणार्‍या कोणालाही प्रश्न देतात. इंडेक्सर्स टेबलमध्ये, तुम्ही इंडेक्सर्सचे डेलिगेशन पॅरामीटर्स, त्यांची हिस्सेदारी, त्यांनी प्रत्येक सबग्राफमध्ये किती भाग घेतला आहे आणि त्यांनी क्वेरी फी आणि इंडेक्सिंग रिवॉर्ड्समधून किती कमाई केली आहे हे पाहण्यास सक्षम असाल. खाली खोल गोतावळा: -- क्‍वेरी फी कट - डेलिगेटर्ससोबत स्‍प्लिट करताना इंडेक्सर ठेवत असलेल्‍या क्‍वेरी फीचा % +- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators - प्रभावी रिवॉर्ड कट - इंडेक्सिंग रिवॉर्ड कट डेलिगेशन पूलवर लागू केला जातो. जर ते नकारात्मक असेल, तर याचा अर्थ असा आहे की इंडेक्सर त्यांच्या पुरस्कारांचा काही भाग देत आहे. जर ते सकारात्मक असेल, तर याचा अर्थ असा की इंडेक्सर त्यांचे काही बक्षिसे ठेवत आहे - Cooldown Remaining - इंडेक्सर वरील डेलिगेशन पॅरामीटर्स बदलू शकत नाही तोपर्यंत उरलेला वेळ. इंडेक्सर्स जेव्हा त्यांचे डेलिगेशन पॅरामीटर्स अपडेट करतात तेव्हा कूलडाउन पीरियड्स सेट केले जातात -- मालकीचा - हा इंडेक्सरचा जमा केलेला हिस्सा आहे, जो दुर्भावनापूर्ण किंवा चुकीच्या वर्तनासाठी कमी केला जाऊ शकतो -- प्रतिनिधी - प्रतिनिधींकडून भागभांडवल जे इंडेक्सरद्वारे वाटप केले जाऊ शकते, परंतु कमी केले जाऊ शकत नाही -- वाटप केलेले - इंडेक्सर्स ते अनुक्रमित करत असलेल्या सबग्राफसाठी सक्रियपणे वाटप करत आहेत +- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior +- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed +- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing - उपलब्ध डेलिगेशन कॅपॅसिटी - इंडेक्सर्सना जास्त डेलिगेशन होण्याआधीही डेलिगेटेड स्टेकची रक्कम मिळू शकते - कमाल डेलिगेशन क्षमता - इंडेक्सर उत्पादकपणे स्वीकारू शकणारी जास्तीत जास्त डेलिगेटेड स्टेक. वाटप किंवा बक्षिसे गणनेसाठी जास्तीचा वाटप केला जाऊ शकत नाही. - क्वेरी फी - हे एकूण शुल्क आहे जे शेवटच्या वापरकर्त्यांनी इंडेक्सरकडून नेहमीच्या क्वेरींसाठी दिले आहे @@ -54,109 +54,109 @@ title: आलेख एक्सप्लोरर इंडेक्सर कसे व्हावे याबद्दल अधिक जाणून घेण्यासाठी, तुम्ही [अधिकृत दस्तऐवज](/network/indexing) किंवा [द ग्राफ अकादमी इंडेक्सर मार्गदर्शक](https://thegraph.academy/delegators/ पाहू शकता choosing-indexers/) -![अनुक्रमणिका तपशील उपखंड](/img/Indexing-Details-Pane.png) +![Indexing details pane](/img/Indexing-Details-Pane.png) -### 2. क्युरेटर +### 2. 
Curators कोणते सबग्राफ उच्च दर्जाचे आहेत हे ओळखण्यासाठी क्युरेटर सबग्राफचे विश्लेषण करतात. एकदा क्युरेटरला संभाव्य आकर्षक सबग्राफ सापडला की, ते त्याच्या बाँडिंग वक्र वर सिग्नल करून ते क्युरेट करू शकतात. असे केल्याने, क्युरेटर्स इंडेक्सर्सना कळवतात की कोणते सबग्राफ उच्च दर्जाचे आहेत आणि ते अनुक्रमित केले पाहिजेत. क्युरेटर समुदाय सदस्य, डेटा ग्राहक किंवा अगदी सबग्राफ डेव्हलपर असू शकतात जे GRT टोकन बाँडिंग वक्रमध्ये जमा करून त्यांच्या स्वतःच्या सबग्राफवर सिग्नल करतात. GRT जमा करून, क्युरेटर्स सबग्राफचे क्युरेशन शेअर्स मिंट करतात. परिणामी, क्युरेटर्स क्वेरी फीचा एक भाग मिळविण्यास पात्र आहेत ज्यावर त्यांनी संकेत दिलेला सबग्राफ व्युत्पन्न करतो. बाँडिंग वक्र क्युरेटर्सना उच्च गुणवत्तेचा डेटा स्रोत तयार करण्यासाठी प्रोत्साहन देते. या विभागातील क्युरेटर टेबल तुम्हाला हे पाहण्याची परवानगी देईल: -- क्युरेटरने क्युरेटिंग सुरू केल्याची तारीख -- जीआरटी जमा करण्यात आली -- क्युरेटरच्या मालकीच्या शेअर्सची संख्या +- The date the Curator started curating +- The number of GRT that was deposited +- The number of shares a Curator owns -![एक्सप्लोरर इमेज 6](/img/Curation-Overview.png) +![Explorer Image 6](/img/Curation-Overview.png) तुम्हाला क्युरेटरच्या भूमिकेबद्दल अधिक जाणून घ्यायचे असल्यास, तुम्ही [द ग्राफ अकादमी](https://thegraph.academy/curators/) किंवा [अधिकृत दस्तऐवज](/network/curating) च्या खालील लिंक्सला भेट देऊन तसे करू शकता -### 3. प्रतिनिधी +### 3. Delegators द ग्राफ नेटवर्कची सुरक्षा आणि विकेंद्रीकरण राखण्यात प्रतिनिधी महत्त्वाची भूमिका बजावतात. ते एक किंवा एकाधिक इंडेक्सर्सना GRT टोकन्स सोपवून (म्हणजे "स्टेकिंग") नेटवर्कमध्ये सहभागी होतात. प्रतिनिधींशिवाय, इंडेक्सर्सना लक्षणीय बक्षिसे आणि शुल्क मिळण्याची शक्यता कमी असते. म्हणून, इंडेक्सर्स डेलिगेटर्सना इंडेक्सिंग रिवॉर्ड्स आणि त्यांनी कमावलेल्या क्वेरी फीचा एक भाग ऑफर करून त्यांना आकर्षित करण्याचा प्रयत्न करतात. -प्रतिनिधी, यामधून, मागील कार्यप्रदर्शन, अनुक्रमणिका बक्षीस दर आणि क्वेरी शुल्क कपात यांसारख्या अनेक भिन्न व्हेरिएबल्सवर आधारित अनुक्रमणिका निवडा. समाजातील प्रतिष्ठा देखील यामध्ये एक घटक भूमिका बजावू शकते! [The Graph's Discord](https://discord.gg/graphprotocol) किंवा [ द्वारे निवडलेल्या इंडेक्सर्सशी कनेक्ट करण्याची शिफारस केली जाते आलेख मंच](https://forum.thegraph.com/)! +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! 
-![एक्सप्लोरर इमेज 7](/img/Delegation-Overview.png) +![Explorer Image 7](/img/Delegation-Overview.png) -प्रतिनिधी सारणी तुम्हाला समुदायातील सक्रिय प्रतिनिधी तसेच मेट्रिक्स पाहण्याची अनुमती देईल जसे की: +The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: -- इंडेक्सर्सची संख्या ज्याकडे प्रतिनिधी नियुक्त करत आहे -- प्रतिनिधीचे मूळ प्रतिनिधी मंडळ -- त्यांनी जमा केलेले बक्षिसे पण प्रोटोकॉलमधून मागे घेतलेली नाहीत -- मिळालेली बक्षिसे त्यांनी प्रोटोकॉलमधून काढून घेतली -- त्यांच्याकडे सध्या प्रोटोकॉलमध्ये असलेली GRT ची एकूण रक्कम -- ज्या दिवशी त्यांनी शेवटचे प्रतिनिधीत्व केले होते +- The number of Indexers a Delegator is delegating towards +- A Delegator’s original delegation +- The rewards they have accumulated but have not withdrawn from the protocol +- The realized rewards they withdrew from the protocol +- Total amount of GRT they have currently in the protocol +- The date they last delegated at तुम्हाला प्रतिनिधी कसे व्हायचे याबद्दल अधिक जाणून घ्यायचे असल्यास, पुढे पाहू नका! तुम्हाला फक्त वर जावे लागेल[official documentation](/network/delegating) किंवा[आलेख अकादमी](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). -## नेटवर्क्स +## Network नेटवर्क विभागात, तुम्हाला जागतिक KPIs तसेच प्रत्येक युगाच्या आधारावर स्विच करण्याची आणि नेटवर्क मेट्रिक्सचे अधिक तपशीलवार विश्लेषण करण्याची क्षमता दिसेल. हे तपशील तुम्हाला कालांतराने नेटवर्क कसे कार्य करत आहे याची जाणीव देईल. -### क्रियाकलाप +### Activity क्रियाकलाप विभागात सर्व वर्तमान नेटवर्क मेट्रिक्स तसेच कालांतराने काही संचयी मेट्रिक्स आहेत. येथे तुम्ही यासारख्या गोष्टी पाहू शकता: -- वर्तमान एकूण नेटवर्क स्टेक -- इंडेक्सर्स आणि त्यांचे प्रतिनिधी यांच्यात भागभांडवल विभागले -- नेटवर्क सुरू झाल्यापासून एकूण पुरवठा, मिंट केलेला आणि बर्न केलेला GRT -- प्रोटोकॉलच्या स्थापनेपासून एकूण अनुक्रमणिका पुरस्कार -- प्रोटोकॉल पॅरामीटर्स जसे की क्युरेशन रिवॉर्ड, चलनवाढीचा दर आणि बरेच काही -- वर्तमान युग पुरस्कार आणि शुल्क +- The current total network stake +- The stake split between the Indexers and their Delegators +- Total supply, minted, and burned GRT since the network inception +- Total Indexing rewards since the inception of the protocol +- Protocol parameters such as curation reward, inflation rate, and more +- Current epoch rewards and fees -उल्लेख करण्यासारखे काही प्रमुख तपशील: +A few key details that are worth mentioning: - **क्वेरी फी ग्राहकांद्वारे व्युत्पन्न केलेल्या फीचे प्रतिनिधित्व करतात**, आणि उपग्राफसाठी त्यांचे वाटप बंद झाल्यानंतर किमान 7 युगांच्या कालावधीनंतर (खाली पहा) इंडेक्सर्सद्वारे त्यावर दावा केला जाऊ शकतो (किंवा नाही). आणि त्यांनी दिलेला डेटा ग्राहकांनी प्रमाणित केला आहे. - **इंडेक्सिंग रिवॉर्ड्स युगादरम्यान नेटवर्क जारी करण्यापासून निर्देशांककर्त्यांनी दावा केलेल्या पुरस्कारांच्या रकमेचे प्रतिनिधित्व करतात.** जरी प्रोटोकॉल जारी करणे निश्चित केले असले तरी, इंडेक्सर्सने त्यांचे वाटप बंद केल्यावरच बक्षिसे दिली जातात ते अनुक्रमित करत असलेल्या उपग्राफकडे. अशा प्रकारे प्रति-युगातील पुरस्कारांची संख्या बदलते (म्हणजे काही युगांदरम्यान, इंडेक्सर्सने अनेक दिवसांपासून खुले असलेले वाटप एकत्रितपणे बंद केले असावे). 
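Both of these figures can also be pulled on a per-epoch basis from the network subgraph. The following is only a sketch — it assumes the `epoches` collection and field names used by the graph-network subgraph, which should be verified against the deployed schema:

```graphql
# Illustrative query: per-epoch query fees and indexing rewards,
# newest epochs first (field names assumed).
query epochTotals {
  epoches(first: 5, orderBy: startBlock, orderDirection: desc) {
    id
    startBlock
    endBlock
    totalQueryFees
    totalRewards
  }
}
```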
-![एक्सप्लोरर इमेज 8](/img/Network-Stats.png) +![Explorer Image 8](/img/Network-Stats.png) -### युग +### Epochs -Epochs विभागात, तुम्ही प्रत्येक युगाच्या आधारावर विश्लेषण करू शकता, मेट्रिक्स जसे की: +In the Epochs section, you can analyze on a per-epoch basis, metrics such as: -- युग प्रारंभ किंवा समाप्ती ब्लॉक -- व्युत्पन्न केलेली क्वेरी फी आणि एका विशिष्ट युगादरम्यान संकलित केलेली बक्षिसे अनुक्रमणिका -- Epoch स्थिती, जी क्वेरी शुल्क संकलन आणि वितरणाचा संदर्भ देते आणि भिन्न स्थिती असू शकतात: +- Epoch start or end block +- Query fees generated and indexing rewards collected during a specific epoch +- Epoch status, which refers to the query fee collection and distribution and can have different states: - सक्रिय युग असा आहे ज्यामध्ये इंडेक्सर्स सध्या स्टेक वाटप करत आहेत आणि क्वेरी फी गोळा करत आहेत - सेटलिंग युग हे असे आहेत ज्यामध्ये राज्य वाहिन्या सेटल होत आहेत. याचा अर्थ असा की जर ग्राहकांनी त्यांच्या विरुद्ध विवाद उघडले तर निर्देशांक कमी केले जातील. - वितरण युग हे असे युग आहेत ज्यामध्ये युगांसाठी राज्य चॅनेल सेटल केले जात आहेत आणि इंडेक्सर्स त्यांच्या क्वेरी फी सवलतीचा दावा करू शकतात. - अंतिम युग हे असे युग आहेत ज्यात अनुक्रमणिकांद्वारे दावा करण्यासाठी कोणतीही क्वेरी शुल्क सवलत शिल्लक नाही, अशा प्रकारे अंतिम रूप दिले जाते. -![एक्सप्लोरर इमेज 9](/img/Epoch-Stats.png) +![Explorer Image 9](/img/Epoch-Stats.png) -## तुमचा वापरकर्ता प्रोफाइल +## Your User Profile आता आम्ही नेटवर्क आकडेवारीबद्दल बोललो आहोत, चला तुमच्या वैयक्तिक प्रोफाइलकडे जाऊया. तुमची वैयक्तिक प्रोफाइल ही तुमची नेटवर्क गतिविधी पाहण्याचे ठिकाण आहे, तुम्ही नेटवर्कवर कसे भाग घेत आहात हे महत्त्वाचे नाही. तुमचे क्रिप्टो वॉलेट तुमचे वापरकर्ता प्रोफाइल म्हणून काम करेल आणि वापरकर्ता डॅशबोर्डसह तुम्ही हे पाहू शकाल: -### प्रोफाइल विहंगावलोकन +### Profile Overview येथे तुम्ही केलेल्या कोणत्याही वर्तमान क्रिया तुम्ही पाहू शकता. तुम्ही तुमची प्रोफाईल माहिती, वर्णन आणि वेबसाइट (तुम्ही जोडल्यास) येथे देखील शोधू शकता. -![एक्सप्लोरर इमेज 10](/img/Profile-Overview.png) +![Explorer Image 10](/img/Profile-Overview.png) -### सबग्राफ टॅब +### Subgraphs Tab तुम्ही सबग्राफ टॅबवर क्लिक केल्यास, तुम्हाला तुमचे प्रकाशित सबग्राफ दिसतील. यामध्ये चाचणीच्या उद्देशांसाठी CLI सोबत तैनात केलेले कोणतेही सबग्राफ समाविष्ट केले जाणार नाहीत - सबग्राफ केवळ विकेंद्रित नेटवर्कवर प्रकाशित केल्यावरच दिसून येतील. -![एक्सप्लोरर इमेज 11](/img/Subgraphs-Overview.png) +![Explorer Image 11](/img/Subgraphs-Overview.png) -### अनुक्रमणिका टॅब +### Indexing Tab तुम्ही इंडेक्सिंग टॅबवर क्लिक केल्यास, तुम्हाला सबग्राफसाठी सर्व सक्रिय आणि ऐतिहासिक वाटप असलेली एक टेबल मिळेल, तसेच तुम्ही इंडेक्सर म्हणून तुमच्या मागील कामगिरीचे विश्लेषण करू शकता आणि पाहू शकता. या विभागात तुमच्या निव्वळ इंडेक्सर रिवॉर्ड्स आणि नेट क्वेरी फीबद्दल तपशील देखील समाविष्ट असतील. 
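The totals shown on this tab can also be queried from the network subgraph before the per-metric breakdown below. This is a sketch only — the placeholder address and the field names are assumptions to verify against the deployed schema:

```graphql
# Illustrative query: one Indexer's lifetime totals as surfaced on the
# Indexing tab (the address is a placeholder, field names assumed).
query indexerTotals {
  indexer(id: "0x0000000000000000000000000000000000000000") {
    id
    stakedTokens
    delegatedTokens
    queryFeesCollected
    rewardsEarned
  }
}
```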
तुम्हाला खालील मेट्रिक्स दिसतील: -- डेलिगेटेड स्टेक - डेलिगेटर्सचा हिस्सा जो तुमच्याद्वारे वाटप केला जाऊ शकतो परंतु कमी केला जाऊ शकत नाही +- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed - एकूण क्वेरी शुल्क - वापरकर्त्यांनी वेळोवेळी तुमच्याद्वारे दिलेल्या क्वेरींसाठी भरलेले एकूण शुल्क -- इंडेक्सर रिवॉर्ड्स - GRT मध्ये तुम्हाला मिळालेल्या इंडेक्सर रिवॉर्ड्सची एकूण रक्कम +- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT - फी कट - तुम्ही डेलिगेटर्ससह विभक्त झाल्यावर तुम्ही ठेवू शकणार्‍या क्वेरी फी सवलतींचा % -- इंडेक्सर रिवॉर्ड्स - Grt मध्ये तुम्हाला मिळालेल्या इंडेक्सर रिवॉर्ड्सची एकूण रक्कम +- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators - मालकीचा - तुमचा जमा केलेला हिस्सा, जो दुर्भावनापूर्ण किंवा चुकीच्या वर्तनासाठी कमी केला जाऊ शकतो -![एक्सप्लोरर इमेज 12](/img/Indexer-Stats.png) +![Explorer Image 12](/img/Indexer-Stats.png) -### टॅब नियुक्त करत आहे +### Delegating Tab आलेख नेटवर्कसाठी प्रतिनिधी महत्वाचे आहेत. एखाद्या प्रतिनिधीने त्यांच्या ज्ञानाचा उपयोग असा इंडेक्सर निवडण्यासाठी केला पाहिजे जो पुरस्कारांवर निरोगी परतावा देईल. येथे तुम्ही तुमच्या सक्रिय आणि ऐतिहासिक प्रतिनिधी मंडळांचे तपशील, निर्देशांकांच्या मेट्रिक्ससह शोधू शकता ज्यांना तुम्ही नियुक्त केले आहे. @@ -165,8 +165,8 @@ Epochs विभागात, तुम्ही प्रत्येक यु या टॅबमध्ये तुम्हाला येथे दिसणार्‍या डेलिगेटर मेट्रिक्समध्ये हे समाविष्ट आहे: - एकूण प्रतिनिधीत्व बक्षिसे -- एकूण अवास्तव बक्षिसे -- एकूण मिळालेली बक्षिसे +- Total unrealized rewards +- Total realized rewards पृष्ठाच्या दुसऱ्या सहामाहीत, आपल्याकडे प्रतिनिधी टेबल आहे. येथे तुम्ही ज्या निर्देशांकांना तुम्ही नियुक्त केले आहे, तसेच त्यांचे तपशील (जसे की रिवॉर्ड कट, कूलडाउन इ.) पाहू शकता. @@ -174,30 +174,30 @@ Epochs विभागात, तुम्ही प्रत्येक यु लक्षात ठेवा की हा चार्ट क्षैतिजरित्या स्क्रोल करण्यायोग्य आहे, म्हणून तुम्ही उजवीकडे स्क्रोल केल्यास, तुम्ही तुमच्या प्रतिनिधीची स्थिती देखील पाहू शकता (प्रतिनिधी, अस्वीकृत, मागे घेण्यायोग्य). -![एक्सप्लोरर इमेज 13](/img/Delegation-Stats.png) +![Explorer Image 13](/img/Delegation-Stats.png) -### क्युरेटिंग टॅब +### Curating Tab क्युरेशन टॅबमध्ये, तुम्ही सिग्नल करत असलेले सर्व सबग्राफ तुम्हाला सापडतील (अशा प्रकारे तुम्हाला क्वेरी शुल्क प्राप्त करण्यास सक्षम करते). सिग्नलिंगमुळे क्युरेटर्स इंडेक्सर्सना कोणते सबग्राफ मौल्यवान आणि विश्वासार्ह आहेत हे ठळकपणे दाखवू देते, अशा प्रकारे ते इंडेक्स केले जाणे आवश्यक असल्याचे संकेत देते. या टॅबमध्ये, तुम्हाला याचे विहंगावलोकन मिळेल: -- तुम्ही सिग्नल तपशीलांसह क्युरेट करत असलेले सर्व सबग्राफ -- प्रति सबग्राफ शेअर करा -- प्रति सबग्राफ क्वेरी रिवॉर्ड -- तारीख तपशील अद्यतनित +- All the subgraphs you're curating on with signal details +- Share totals per subgraph +- Query rewards per subgraph +- Updated at date details -![एक्सप्लोरर इमेज 14](/img/Curation-Stats.png) +![Explorer Image 14](/img/Curation-Stats.png) -## तुमची प्रोफाइल सेटिंग्ज +## Your Profile Settings तुमच्या वापरकर्ता प्रोफाइलमध्ये, तुम्ही तुमचे वैयक्तिक प्रोफाइल तपशील व्यवस्थापित करण्यास सक्षम असाल (जसे की ENS नाव सेट करणे). तुम्ही इंडेक्सर असल्यास, तुमच्या बोटांच्या टोकावर असलेल्या सेटिंग्जमध्ये तुम्हाला आणखी प्रवेश आहे. तुमच्या वापरकर्ता प्रोफाइलमध्ये, तुम्ही तुमचे डेलिगेशन पॅरामीटर्स आणि ऑपरेटर सेट करू शकाल. - ऑपरेटर इंडेक्सरच्या वतीने प्रोटोकॉलमध्ये मर्यादित कृती करतात, जसे की वाटप उघडणे आणि बंद करणे. 
ऑपरेटर हे सामान्यत: इतर इथरियम पत्ते असतात, जे त्यांच्या स्टॅकिंग वॉलेटपासून वेगळे असतात, इंडेक्सर्स वैयक्तिकरित्या सेट करू शकणार्‍या नेटवर्कवर गेट केलेला प्रवेश असतो -- डेलिगेशन पॅरामीटर्स तुम्हाला तुमच्या आणि तुमच्या प्रतिनिधींमधील GRT चे वितरण नियंत्रित करण्याची परवानगी देतात. +- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. -![एक्सप्लोरर इमेज 15](/img/Profile-Settings.png) +![Explorer Image 15](/img/Profile-Settings.png) विकेंद्रित डेटाच्या जगात तुमचे अधिकृत पोर्टल म्हणून, ग्राफ एक्सप्लोरर तुम्हाला नेटवर्कमधील तुमची भूमिका काहीही असो, विविध क्रिया करण्याची परवानगी देतो. तुम्ही तुमच्या पत्त्याच्या पुढील ड्रॉपडाउन मेनू उघडून, नंतर सेटिंग्ज बटणावर क्लिक करून तुमच्या प्रोफाइल सेटिंग्जवर जाऊ शकता. -
![वॉलेट तपशील](/img/Wallet-Details.png)
+
![Wallet details](/img/Wallet-Details.png)
diff --git a/website/pages/mr/network/indexing.mdx b/website/pages/mr/network/indexing.mdx index 8b55767bf233..4ead6f7ae74b 100644 --- a/website/pages/mr/network/indexing.mdx +++ b/website/pages/mr/network/indexing.mdx @@ -1,8 +1,8 @@ --- -title: अनुक्रमणिका +title: Indexing --- -इंडेक्सर्स हे ग्राफ नेटवर्कमधील नोड ऑपरेटर आहेत जे अनुक्रमणिका आणि क्वेरी प्रक्रिया सेवा प्रदान करण्यासाठी ग्राफ टोकन (GRT) ची भागीदारी करतात. इंडेक्सर्स त्यांच्या सेवांसाठी क्वेरी फी आणि अनुक्रमणिका बक्षिसे मिळवतात. Cobb-Douglas Rebate Function चे अनुसरण करून त्यांच्या कामाच्या प्रमाणात सर्व नेटवर्क योगदानकर्त्यांसोबत शेअर केलेल्या रिबेट पूलमधूनही ते कमावतात. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. प्रोटोकॉलमध्ये स्टॅक केलेले GRT वितळण्याच्या कालावधीच्या अधीन आहे आणि जर इंडेक्सर्स दुर्भावनापूर्ण असतील आणि ऍप्लिकेशन्सना चुकीचा डेटा देत असतील किंवा ते चुकीच्या पद्धतीने इंडेक्स करत असतील तर ते कमी केले जाऊ शकतात. इंडेक्सर्स नेटवर्कमध्ये योगदान देण्यासाठी डेलिगेटर्सकडून डेलिगेटेड स्टेकसाठी बक्षिसे देखील मिळवतात. @@ -12,37 +12,37 @@ title: अनुक्रमणिका ## FAQ -### नेटवर्कवर इंडेक्सर होण्यासाठी किमान किती भागभांडवल आवश्यक आहे? +### What is the minimum stake required to be an Indexer on the network? -इंडेक्सरसाठी किमान स्टेक सध्या 100K GRT वर सेट केला आहे. +The minimum stake for an Indexer is currently set to 100K GRT. -### इंडेक्सरसाठी कमाईचे प्रवाह काय आहेत? +### What are the revenue streams for an Indexer? **क्वेरी फी रिबेट्स** - नेटवर्कवर क्वेरी सर्व्ह करण्यासाठी देयके. ही देयके इंडेक्सर आणि गेटवे दरम्यान राज्य चॅनेलद्वारे मध्यस्थी केली जातात. गेटवेवरील प्रत्येक क्वेरी विनंतीमध्ये पेमेंट आणि संबंधित प्रतिसाद क्वेरी परिणाम वैधतेचा पुरावा असतो. **इंडेक्सिंग रिवॉर्ड्स** - 3% वार्षिक प्रोटोकॉल वाइड इन्फ्लेशनद्वारे व्युत्पन्न केलेले, इंडेक्सिंग रिवॉर्ड्स इंडेक्सर्सना वितरित केले जातात जे नेटवर्कसाठी सबग्राफ डिप्लॉयमेंट अनुक्रमित करतात. -### इंडेक्सिंग रिवॉर्ड कसे वितरित केले जातात? +### How are indexing rewards distributed? इंडेक्सिंग रिवॉर्ड्स प्रोटोकॉल इन्फ्लेशनमधून येतात जे 3% वार्षिक जारी करण्यावर सेट केले जातात. ते प्रत्येकावरील सर्व क्युरेशन सिग्नलच्या प्रमाणावर आधारित सबग्राफमध्ये वितरीत केले जातात, नंतर त्या सबग्राफवरील त्यांच्या वाटप केलेल्या भागभांडवलांच्या आधारे अनुक्रमणिकेला प्रमाणात वितरीत केले जातात. **पुरस्कारांसाठी पात्र होण्यासाठी लवाद चार्टरने सेट केलेल्या मानकांची पूर्तता करणार्‍या अनुक्रमणिकेच्या वैध पुराव्यासह (POI) वाटप बंद करणे आवश्यक आहे.** -बक्षिसे मोजण्यासाठी समाजाने असंख्य साधने तयार केली आहेत; तुम्हाला त्यांचा संग्रह [समुदाय मार्गदर्शक संग्रह](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c) मध्ये सापडेल. तुम्ही [Discord सर्व्हर](https://discord.gg/graphprotocol) वर #Delegators आणि #Indexers चॅनेलमध्ये टूल्सची अद्ययावत सूची देखील शोधू शकता. येथे आम्ही इंडेक्सर सॉफ्टवेअर स्टॅकसह एकात्मिक [शिफारस केलेले ऍलोकेशन ऑप्टिमाइझर](https://github.com/graphprotocol/AllocationOpt.jl) लिंक करतो. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). 
Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. -### अनुक्रमणिका (POI) चा पुरावा काय आहे? +### What is a proof of indexing (POI)? इंडेक्सर त्यांनी वाटप केलेले सबग्राफ अनुक्रमित करत आहे हे सत्यापित करण्यासाठी नेटवर्कमध्ये POIs वापरले जातात. इंडेक्सिंग रिवॉर्ड्ससाठी पात्र होण्यासाठी त्या वाटपाचे वाटप बंद करताना वर्तमान युगाच्या पहिल्या ब्लॉकसाठी POI सबमिट करणे आवश्यक आहे. ब्लॉकसाठी POI हे सर्व एंटिटी स्टोअर व्यवहारांसाठी एक डायजेस्ट आहे जे विशिष्ट सबग्राफ तैनातीसाठी आणि त्या ब्लॉकपर्यंत समाविष्ट आहे. -### अनुक्रमणिका पुरस्कार कधी वितरित केले जातात? +### When are indexing rewards distributed? वाटप सक्रिय असताना आणि 28 युगांमध्ये वाटप करत असताना ते सतत बक्षिसे मिळवत असतात. इंडेक्सर्सद्वारे बक्षिसे गोळा केली जातात आणि जेव्हाही त्यांचे वाटप बंद होते तेव्हा ते वितरित केले जातात. हे एकतर मॅन्युअली घडते, जेव्हा जेव्हा इंडेक्सर त्यांना सक्तीने बंद करू इच्छितो, किंवा 28 युगांनंतर एक प्रतिनिधी इंडेक्सरसाठी वाटप बंद करू शकतो, परंतु यामुळे कोणतेही पुरस्कार मिळत नाहीत. 28 युग हे जास्तीत जास्त वाटप आजीवन आहे (सध्या, एक युग ~24 तास चालतो). -### प्रलंबित अनुक्रमणिका पुरस्कारांचे परीक्षण केले जाऊ शकते? +### Can pending indexing rewards be monitored? RewardsManager करारामध्ये केवळ वाचनीय [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) फंक्शन आहे जे वापरले जाऊ शकते. विशिष्ट वाटपासाठी प्रलंबित पुरस्कार तपासण्यासाठी. समुदायाने बनवलेल्या अनेक डॅशबोर्डमध्ये प्रलंबित पुरस्कार मूल्यांचा समावेश आहे आणि ते या चरणांचे अनुसरण करून सहजपणे व्यक्तिचलितपणे तपासले जाऊ शकतात: -1. सर्व सक्रिय वाटपांसाठी आयडी मिळविण्यासाठी [मेननेट सबग्राफ](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) वर क्वेरी करा: +1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: ```graphql query indexerAllocations { @@ -58,74 +58,74 @@ query indexerAllocations { } ``` -`getRewards()` ला कॉल करण्यासाठी इथरस्कॅन वापरा: +Use Etherscan to call `getRewards()`: -- [रिवॉर्ड्स करारासाठी इथरस्कॅन इंटरफेस](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) वर नेव्हिगेट करा +- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) * `getRewards()` ला कॉल करण्यासाठी: - - **10 विस्तृत करा. getRewards** ड्रॉपडाउन. - - इनपुटमध्ये **allocationID** प्रविष्ट करा. - - **क्वेरी** बटण क्लिक करा. + - Expand the **10. getRewards** dropdown. + - Enter the **allocationID** in the input. + - Click the **Query** button. ### विवाद काय आहेत आणि मी ते कोठे पाहू शकतो? इंडेक्सरच्या क्वेरी आणि वाटप या दोन्ही विवाद कालावधी दरम्यान ग्राफवर विवादित केले जाऊ शकतात. विवादाच्या प्रकारानुसार विवाद कालावधी बदलतो. क्वेरी/प्रमाणपत्रांमध्ये 7 युग विवाद विंडो आहे, तर वाटपांमध्ये 56 युगे आहेत. हा कालावधी संपल्यानंतर, वाटप किंवा क्वेरी यापैकी कोणतेही विवाद उघडले जाऊ शकत नाहीत. जेव्हा विवाद उघडला जातो, तेव्हा मच्छिमारांना किमान 10,000 GRT जमा करणे आवश्यक असते, जे विवाद अंतिम होईपर्यंत आणि ठराव दिले जाईपर्यंत लॉक केले जाईल. मच्छीमार हे कोणतेही नेटवर्क सहभागी आहेत जे विवाद उघडतात. -विवादांचे **तीन** संभाव्य परिणाम आहेत, तसेच मच्छिमारांच्या ठेवींवरही. +Disputes have **three** possible outcomes, so does the deposit of the Fishermen. - विवाद नाकारल्यास, मच्छिमारांनी जमा केलेला GRT जाळून टाकला जाईल आणि विवादित इंडेक्सर कमी केला जाणार नाही. 
- विवाद सोडतीप्रमाणे निकाली काढल्यास, मच्छिमारांची ठेव परत केली जाईल आणि विवादित इंडेक्सर कमी केला जाणार नाही. - विवाद स्वीकारल्यास, मच्छिमारांनी जमा केलेला GRT परत केला जाईल, विवादित इंडेक्सर कमी केला जाईल आणि मच्छिमारांना कमी केलेल्या GRT च्या 50% मिळतील. -`विवाद` टॅब अंतर्गत इंडेक्सरच्या प्रोफाइल पेजमधील UI मध्ये विवाद पाहिले जाऊ शकतात. +Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. -### क्वेरी फी सवलत काय आहेत आणि ते कधी वितरित केले जातात? +### What are query fee rebates and when are they distributed? -जेव्हाही वाटप बंद केले जाते आणि सबग्राफच्या क्वेरी फी रिबेट पूलमध्ये जमा केले जाते तेव्हा गेटवेद्वारे क्वेरी शुल्क गोळा केले जाते. रिबेट पूल हे इंडेक्सर्सना नेटवर्कसाठी मिळणाऱ्या क्वेरी फीच्या प्रमाणात भागभांडवल वाटप करण्यास प्रोत्साहित करण्यासाठी डिझाइन केले आहे. एका विशिष्ट इंडेक्सरला वाटप केलेल्या पूलमधील क्वेरी फीचा भाग कोब-डगलस प्रोडक्शन फंक्शन वापरून मोजला जातो; प्रति इंडेक्सर वितरित केलेली रक्कम ही त्यांच्या पूलमधील योगदानाचे आणि सबग्राफवरील भागभांडवलांचे कार्य आहे. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -एकदा वाटप बंद झाल्यानंतर आणि विवादाचा कालावधी निघून गेल्यावर इंडेक्सरकडून दावा करण्यासाठी सूट उपलब्ध होते. क्लेम केल्यावर, क्वेरी फी कट आणि डेलिगेशन पूल प्रमाणांच्या आधारे इंडेक्सर आणि त्यांच्या प्रतिनिधींना क्वेरी फी सवलत वितरीत केली जाते. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### क्वेरी फी कट आणि इंडेक्सिंग रिवॉर्ड कट म्हणजे काय? +### What is query fee cut and indexing reward cut? `queryFeeCut` आणि `indexingRewardCut` मूल्ये ही डेलिगेशन पॅरामीटर्स आहेत जी इंडेक्सर आणि त्यांच्या प्रतिनिधींमधील GRT चे वितरण नियंत्रित करण्यासाठी CooldownBlocks सोबत सेट करू शकतात. डेलिगेशन पॅरामीटर्स सेट करण्याच्या सूचनांसाठी [प्रोटोकॉलमध्ये स्टॅकिंग](/network/indexing#stake-in-the-protocol) मधील शेवटच्या पायऱ्या पहा. -- **queryFeeCut** - इंडेक्सरला वितरित केल्या जाणार्‍या सबग्राफवर जमा केलेल्या क्वेरी फी सवलतीचा %. हे 95% वर सेट केले असल्यास, इंडेक्सरला 95% क्वेरी फी रिबेट पूल प्राप्त होईल जेव्हा वाटपाचा दावा केला जाईल आणि इतर 5% प्रतिनिधींना जाईल. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - इंडेक्सरला वितरित केल्या जाणार्‍या सबग्राफवर जमा झालेल्या इंडेक्सिंग रिवॉर्डचा %. हे 95% वर सेट केले असल्यास, वाटप बंद झाल्यावर अनुक्रमणिकाला अनुक्रमणिका बक्षीस पूलच्या 95% प्राप्त होतील आणि प्रतिनिधी इतर 5% विभाजित करतील. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. -### कोणते सबग्राफ इंडेक्स करायचे हे इंडेक्सर्सना कसे कळेल? 
+### How do Indexers know which subgraphs to index? सबग्राफ इंडेक्सिंग निर्णय घेण्यासाठी प्रगत तंत्रे लागू करून इंडेक्सर्स स्वतःला वेगळे करू शकतात परंतु सामान्य कल्पना देण्यासाठी आम्ही नेटवर्कमधील सबग्राफचे मूल्यांकन करण्यासाठी वापरल्या जाणार्‍या अनेक मुख्य मेट्रिक्सवर चर्चा करू: - **क्युरेशन सिग्नल** - विशिष्ट सबग्राफवर लागू केलेले नेटवर्क क्युरेशन सिग्नलचे प्रमाण हे त्या सबग्राफमधील स्वारस्याचे एक चांगले सूचक आहे, विशेषत: जेव्हा क्वेरी व्हॉल्यूमिंग वाढत असते तेव्हा बूटस्ट्रॅप टप्प्यात. -- **संकलित केलेली क्वेरी फी** - विशिष्ट सबग्राफसाठी गोळा केलेल्या क्वेरी फीच्या व्हॉल्यूमचा ऐतिहासिक डेटा भविष्यातील मागणीचा चांगला सूचक आहे. +- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. - **स्टेक केलेली रक्कम** - इतर इंडेक्सर्सच्या वर्तनाचे निरीक्षण करणे किंवा विशिष्ट सबग्राफसाठी वाटप केलेल्या एकूण स्टेकचे प्रमाण पाहणे इंडेक्सरला सबग्राफ ओळखण्यासाठी सबग्राफ क्वेरीसाठी पुरवठा बाजूचे निरीक्षण करण्यास अनुमती देऊ शकते नेटवर्क अधिक पुरवठ्याची गरज दर्शवू शकणार्‍या सबग्राफवर विश्वास दाखवत आहे. - **कोणतेही अनुक्रमणिका रिवॉर्ड नसलेले सबग्राफ** - काही सबग्राफ्स इंडेक्सिंग रिवॉर्ड व्युत्पन्न करत नाहीत कारण ते IPFS सारखी असमर्थित वैशिष्ट्ये वापरत आहेत किंवा ते मेननेटच्या बाहेर दुसर्‍या नेटवर्कची चौकशी करत असल्यामुळे. सबग्राफ इंडेक्सिंग रिवॉर्ड्स व्युत्पन्न करत नसल्यास तुम्हाला एक मेसेज दिसेल. -### हार्डवेअर आवश्यकता काय आहेत? +### What are the hardware requirements? -- **लहान** - अनेक सबग्राफ अनुक्रमित करण्यास प्रारंभ करण्यासाठी पुरेसे आहे, कदाचित विस्तारित करणे आवश्यक आहे. +- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. - **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **मध्यम** - 100 सबग्राफ आणि 200-500 विनंत्यांना समर्थन देणारा उत्पादन निर्देशांक. -- **मोठा** - सध्या वापरलेले सर्व उपग्राफ अनुक्रमित करण्यासाठी आणि संबंधित रहदारीसाठी विनंत्या देण्यासाठी तयार. +- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. +- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. -| सेटअप | पोस्टग्रेस
(CPUs) | पोस्टग्रेस
(GBs मध्ये मेमरी) | पोस्टग्रेस
(टीबी मध्ये डिस्क) | VMs
(CPUs) | VMs
(GBs मध्ये मेमरी) | +| Setup | Postgres
(CPUs) | Postgres
(memory in GBs) | Postgres
(disk in TBs) | VMs
(CPUs) | VMs
(memory in GBs) | | --- | :-: | :-: | :-: | :-: | :-: | | Small | 4 | 8 | 1 | 4 | 16 | | मानक | 8 | 30 | 1 | 12 | 48 | | Medium | 16 | 64 | 2 | 32 | 64 | | Large | 72 | 468 | 3.5 | 48 | 184 | -### इंडेक्सरने काही मूलभूत सुरक्षा सावधगिरी बाळगल्या पाहिजेत? +### What are some basic security precautions an Indexer should take? - **ऑपरेटर वॉलेट** - ऑपरेटर वॉलेट सेट करणे ही एक महत्त्वाची खबरदारी आहे कारण ते इंडेक्सरला त्यांच्या स्टेक नियंत्रित करणार्‍या की आणि दैनंदिन कामकाजावर नियंत्रण ठेवणार्‍या की यांच्यात वेगळेपणा राखण्यास अनुमती देते. सूचनांसाठी [प्रोटोकॉलमध्ये भागीदारी](/network/indexing#stake-in-the-protocol) पहा. - **फायरवॉल** - फक्त इंडेक्सर सेवा सार्वजनिकपणे उघड करणे आवश्यक आहे आणि प्रशासक पोर्ट आणि डेटाबेस प्रवेश लॉक करण्यासाठी विशेष लक्ष दिले पाहिजे: ग्राफ नोड JSON-RPC एंडपॉइंट (डीफॉल्ट पोर्ट: 8030), इंडेक्सर मॅनेजमेंट API एंडपॉइंट (डीफॉल्ट पोर्ट: 18000), आणि पोस्टग्रेस डेटाबेस एंडपॉइंट (डीफॉल्ट पोर्ट: 5432) उघड करू नये. -## पायाभूत सुविधा +## Infrastructure इंडेक्सरच्या इन्फ्रास्ट्रक्चरच्या मध्यभागी आलेख नोड असतो जो अनुक्रमित नेटवर्कचे निरीक्षण करतो, सबग्राफ परिभाषानुसार डेटा काढतो आणि लोड करतो आणि [GraphQL API म्हणून काम करतो ](/about/#how-the-graph-works). ग्राफ नोडला प्रत्येक अनुक्रमित नेटवर्कमधील डेटा उघड करणाऱ्या एंडपॉईंटशी कनेक्ट करणे आवश्यक आहे; डेटा सोर्सिंगसाठी आयपीएफएस नोड; त्याच्या स्टोअरसाठी PostgreSQL डेटाबेस; आणि इंडेक्सर घटक जे त्याचे नेटवर्कशी परस्परसंवाद सुलभ करतात. @@ -143,54 +143,54 @@ query indexerAllocations { टीप: चपळ स्केलिंगला समर्थन देण्यासाठी, नोड्सच्या वेगवेगळ्या सेटमध्ये क्वेरी आणि इंडेक्सिंग चिंता विभक्त करण्याची शिफारस केली जाते: क्वेरी नोड्स आणि इंडेक्स नोड्स. -### पोर्ट विहंगावलोकन +### Ports overview > **महत्त्वाचे**: पोर्ट सार्वजनिकपणे उघड करण्याबाबत सावधगिरी बाळगा - **प्रशासन पोर्ट** लॉक डाउन ठेवले पाहिजेत. यामध्ये ग्राफ नोड JSON-RPC आणि खाली तपशीलवार निर्देशांक व्यवस्थापन एंडपॉइंट्स समाविष्ट आहेत. #### आलेख नोड -| बंदर | उद्देश | मार्ग | CLI युक्तिवाद | पर्यावरण परिवर्तनशील | +| बंदर | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP सर्व्हर
(सबग्राफ क्वेरीसाठी) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
(सबग्राफ सबस्क्रिप्शनसाठी) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
(उपयोजन व्यवस्थापित करण्यासाठी) | / | --admin-port | - | -| 8030 | सबग्राफ अनुक्रमणिका स्थिती API | /graphql | --index-node-port | - | -| 8040 | प्रोमिथियस मेट्रिक्स | /metrics | --metrics-port | - | +| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/...
/subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
(for subgraph subscriptions) | /subgraphs/id/...
/subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
(for managing deployments) | / | --admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | +| 8040 | Prometheus metrics | /metrics | --metrics-port | - | -#### इंडेक्सर सेवा +#### Indexer Service -| बंदर | उद्देश | मार्ग | CLI युक्तिवाद | पर्यावरण परिवर्तनशील | +| बंदर | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP सर्व्हर
(सशुल्क सबग्राफ क्वेरीसाठी) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | प्रोमिथियस मेट्रिक्स | /metrics | --metrics-port | - | +| 7600 | GraphQL HTTP server
(for paid subgraph queries) | /subgraphs/id/...
/status
/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrics | /metrics | --metrics-port | - | -#### इंडेक्सर एजंट +#### Indexer Agent -| बंदर | उद्देश | मार्ग | CLI युक्तिवाद | पर्यावरण परिवर्तनशील | -| ---- | ----------------------- | ----- | ------------------------- | --------------------------------------- | -| 8000 | इंडेक्सर व्यवस्थापन API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| बंदर | Purpose | Routes | CLI Argument | Environment Variable | +| ---- | ----------------------- | ------ | ------------------------- | --------------------------------------- | +| 8000 | इंडेक्सर व्यवस्थापन API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Google Cloud वर Terraform वापरून सर्व्हर पायाभूत सुविधा सेट करा +### Setup server infrastructure using Terraform on Google Cloud -> टीप: इंडेक्सर्स वैकल्पिकरित्या AWS, Microsoft Azure किंवा Alibaba वापरू शकतात. +> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### पूर्वस्थिती स्थापित करा +#### Install prerequisites - Google Cloud SDK - Kubectl command line tool - Terraform -#### Google क्लाउड प्रोजेक्ट तयार करा +#### Create a Google Cloud Project -- इंडेक्सर रेपॉजिटरी क्लोन करा किंवा नेव्हिगेट करा. +- Clone or navigate to the Indexer repository. -- ./terraform निर्देशिकेवर नेव्हिगेट करा, येथेच सर्व कमांड कार्यान्वित केल्या पाहिजेत. +- Navigate to the ./terraform directory, this is where all commands should be executed. ```sh cd terraform ``` -- Google Cloud सह प्रमाणीकृत करा आणि एक नवीन प्रकल्प तयार करा. +- Authenticate with Google Cloud and create a new project. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- नवीन प्रकल्पासाठी बिलिंग सक्षम करण्यासाठी Google Cloud Console चे बिलिंग पृष्ठ वापरा. +- Use the Google Cloud Console's billing page to enable billing for the new project. -- Google क्लाउड कॉन्फिगरेशन तयार करा. +- Create a Google Cloud configuration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- आवश्यक Google क्लाउड API सक्षम करा. +- Enable required Google Cloud APIs. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- सेवा खाते तयार करा. +- Create a service account. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- डेटाबेस आणि कुबर्नेट्स क्लस्टर दरम्यान पीअरिंग सक्षम करा जे पुढील चरणात तयार केले जाईल. +- Enable peering between database and Kubernetes cluster that will be created in the next step. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- किमान टेराफॉर्म कॉन्फिगरेशन फाइल तयार करा (आवश्यकतेनुसार अपडेट करा). +- Create minimal terraform configuration file (update as needed). ```sh indexer= @@ -262,11 +262,11 @@ database_password = "" EOF ``` -#### पायाभूत सुविधा निर्माण करण्यासाठी टेराफॉर्म वापरा +#### Use Terraform to create infrastructure कोणतीही आज्ञा चालवण्यापूर्वी, [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) वाचा आणि `terraform फाइल तयार करा.tfvars` या निर्देशिकेत (किंवा आम्ही शेवटच्या टप्प्यात तयार केलेल्यामध्ये सुधारणा करा). 
प्रत्येक व्हेरिएबलसाठी जिथे तुम्हाला डीफॉल्ट ओव्हरराइड करायचे आहे किंवा जिथे तुम्हाला मूल्य सेट करायचे आहे, तेथे `terraform.tfvars` मध्ये सेटिंग टाका. -- पायाभूत सुविधा तयार करण्यासाठी खालील आदेश चालवा. +- Run the following commands to create the infrastructure. ```sh # Install required plugins @@ -279,7 +279,7 @@ terraform plan terraform apply ``` -नवीन क्लस्टरसाठी `~/.kube/config` मध्ये क्रेडेन्शियल डाउनलोड करा आणि ते तुमचा डीफॉल्ट संदर्भ म्हणून सेट करा. +Download credentials for the new cluster into `~/.kube/config` and set it as your default context. ```sh gcloud container clusters get-credentials $indexer @@ -287,21 +287,21 @@ kubectl config use-context $(kubectl config get-contexts --output='name' | grep $indexer) ``` -#### इंडेक्सरसाठी कुबर्नेट्स घटक तयार करणे +#### Creating the Kubernetes components for the Indexer - `$dir,` निर्देशिका `k8s/overlays` कॉपी करा आणि `$dir/kustomization.yaml< मधील बेस` एंट्री समायोजित करा /code> जेणेकरून ते निर्देशिकेकडे निर्देश करेल `k8s/base`. -- `$dir` मधील सर्व फायली वाचा आणि टिप्पण्यांमध्ये सूचित केल्यानुसार कोणतीही मूल्ये समायोजित करा. +- Read through all the files in `$dir` and adjust any values as indicated in the comments. -`kubectl apply -k $dir` सह सर्व संसाधने तैनात करा. +Deploy all resources with `kubectl apply -k $dir`. ### आलेख नोड [ग्राफ नोड](https://github.com/graphprotocol/graph-node) हे एक ओपन सोर्स रस्ट अंमलबजावणी आहे जे ग्राफक्यूएल एंडपॉइंट द्वारे विचारले जाऊ शकणारे डेटा स्टोअर निश्चितपणे अपडेट करण्यासाठी इथरियम ब्लॉकचेनला इव्हेंट स्रोत देते. विकासक त्यांचा स्कीमा परिभाषित करण्यासाठी सबग्राफ वापरतात, आणि ब्लॉक साखळीतून मिळवलेल्या डेटाचे रूपांतर करण्यासाठी मॅपिंगचा संच आणि ग्राफ नोड संपूर्ण साखळी समक्रमित करणे, नवीन ब्लॉक्सचे निरीक्षण करणे आणि ग्राफक्यूएल एंडपॉईंटद्वारे सर्व्ह करणे. -#### स्त्रोतापासून प्रारंभ करणे +#### Getting started from source -#### पूर्वस्थिती स्थापित करा +#### Install prerequisites - **Rust** @@ -309,15 +309,15 @@ kubectl config use-context $(kubectl config get-contexts --output='name' - **IPFS** -- **Ubuntu वापरकर्त्यांसाठी अतिरिक्त आवश्यकता** - Ubuntu वर ग्राफ नोड चालवण्यासाठी काही अतिरिक्त पॅकेजेसची आवश्यकता असू शकते. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### सेटअप +#### Setup -1. PostgreSQL डेटाबेस सर्व्हर सुरू करा +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [ग्राफ नोड](https://github.com/graphprotocol/graph-node) रेपो क्लोन करा आणि `कार्गो बिल्ड` चालवून स्त्रोत तयार करा +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. आता सर्व अवलंबन सेटअप झाले आहेत, आलेख नोड सुरू करा: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -336,15 +336,15 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### डॉकर वापरणे सुरू करणे +#### Getting started using Docker #### पूर्वतयारी - **Ethereum नोड** - डीफॉल्टनुसार, डॉकर कंपोझ सेटअप मेननेट वापरेल: [http:// host.docker.internal:8545](http://host.docker.internal:8545) तुमच्या होस्ट मशीनवरील इथरियम नोडशी कनेक्ट करण्यासाठी. तुम्ही `docker-compose.yaml` अपडेट करून हे नेटवर्क नाव आणि url बदलू शकता. -#### सेटअप +#### Setup -1. ग्राफ नोड क्लोन करा आणि डॉकर निर्देशिकेवर नेव्हिगेट करा: +1. 
Clone Graph Node and navigate to the Docker directory: ```sh git clone https://github.com/graphprotocol/graph-node @@ -357,13 +357,13 @@ cd graph-node/docker ./setup.sh ``` -3. स्थानिक ग्राफ नोड सुरू करा जो तुमच्या इथरियम एंडपॉइंटशी कनेक्ट होईल: +3. Start a local Graph Node that will connect to your Ethereum endpoint: ```sh docker-compose up ``` -### इंडेक्सर घटक +### Indexer components फक्त लिनक्स वापरण्यासाठी - स्क्रिप्ट वापरून `docker-compose. yaml` मध्ये `host. docker. internal` समाविष्ट करा होस्ट आयपी पत्ता वापरा: @@ -373,11 +373,11 @@ docker-compose up - **इंडेक्सर CLI** - इंडेक्सर एजंट व्यवस्थापित करण्यासाठी कमांड लाइन इंटरफेस. हे इंडेक्सर्सना खर्चाचे मॉडेल, मॅन्युअल वाटप, क्रिया रांग आणि अनुक्रमणिका नियम व्यवस्थापित करण्यास अनुमती देते. -#### सुरू करणे +#### Getting started -इंडेक्सर एजंट आणि इंडेक्सर सेवा तुमच्या ग्राफ नोड इन्फ्रास्ट्रक्चरसह सह-स्थित असावी. तुमच्या इंडेक्सर घटकांसाठी आभासी अंमलबजावणी वातावरण सेट करण्याचे अनेक मार्ग आहेत; NPM पॅकेजेस किंवा स्त्रोत वापरून किंवा Google Cloud Kubernetes Engine वर kubernetes आणि docker द्वारे त्यांना baremetal वर कसे चालवायचे ते येथे आम्ही समजावून घेऊ. जर ही सेटअप उदाहरणे तुमच्या इन्फ्रास्ट्रक्चरमध्ये नीट अनुवादित होत नसतील तर कदाचित संदर्भासाठी समुदाय मार्गदर्शक असेल, [Discord](https://discord.gg/graphprotocol) वर हाय म्हणा! तुमचे इंडेक्सर घटक सुरू करण्यापूर्वी [प्रोटोकॉलमधील भागीदारी](/network/indexing#stake-in-the-protocol) लक्षात ठेवा! +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! -#### NPM पॅकेजेसमधून +#### From NPM packages ```sh npm install -g @graphprotocol/indexer-service @@ -400,7 +400,7 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### स्रोत पासून +#### From source ```sh # From Repo root directory @@ -420,16 +420,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### डॉकर वापरणे +#### Using docker -- रेजिस्ट्रीमधून प्रतिमा काढा +- Pull images from the registry ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -किंवा स्त्रोतावरून स्थानिक पातळीवर प्रतिमा तयार करा +Or build images locally from source ```sh # Indexer service @@ -444,24 +444,24 @@ docker build \ -t indexer-agent:latest \ ``` -- घटक चालवा +- Run the components ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**सूचना**: कंटेनर सुरू केल्यानंतर, इंडेक्सर सेवा [http://localhost:7600](http://localhost:7600) येथे प्रवेशयोग्य असावी आणि इंडेक्सर एजंटने इंडेक्सर व्यवस्थापन API [ येथे उघड केले पाहिजे. http://localhost:18000/](http://localhost:18000/). +**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
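A quick way to confirm the agent is responding is to send a GraphQL query to the Indexer management API it exposes. This is only a sketch — it assumes the management API serves the `indexingRules` query used by the Indexer CLI, and the field names (`identifier`, `decisionBasis`, `allocationAmount`) are assumptions to verify against the indexer-agent version in use:

```graphql
# Illustrative health check against the Indexer management API
# (default http://localhost:18000/): list the configured indexing rules.
query currentIndexingRules {
  indexingRules(merged: true) {
    identifier
    decisionBasis
    allocationAmount
  }
}
```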
-#### K8s आणि Terraform वापरणे +#### Using K8s and Terraform -[Google क्लाउडवर टेराफॉर्म वापरून सर्व्हर इन्फ्रास्ट्रक्चर सेटअप करा](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) विभाग पहा +See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section #### वापर > **सूचना**: सर्व रनटाइम कॉन्फिगरेशन व्हेरिएबल्स एकतर स्टार्टअपवर कमांडवर पॅरामीटर्स म्हणून लागू केले जाऊ शकतात किंवा `COMPONENT_NAME_VARIABLE_NAME` फॉरमॅटचे पर्यावरण व्हेरिएबल्स वापरून (उदा. `INDEXER_AGENT_ETHEREUM`). -#### इंडेक्सर एजंट +#### Indexer agent ```sh graph-indexer-agent start \ @@ -490,7 +490,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### इंडेक्सर सेवा +#### Indexer service ```sh SERVER_HOST=localhost \ @@ -518,14 +518,14 @@ graph-indexer-service start \ #### Indexer CLI -इंडेक्सर CLI हे [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) साठी प्लगइन आहे जे `ग्राफ इंडेक्सर` वर टर्मिनलमध्ये प्रवेशयोग्य आहे. +The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### इंडेक्सर CLI वापरून इंडेक्सर व्यवस्थापन +#### Indexer management using Indexer CLI **इंडेक्सर मॅनेजमेंट API** सह संवाद साधण्यासाठी सुचवलेले साधन हे **इंडेक्सर CLI** आहे, जो **ग्राफ CLI** चा विस्तार आहे. इंडेक्सर एजंटला इंडेक्सरच्या वतीने नेटवर्कशी स्वायत्तपणे संवाद साधण्यासाठी इंडेक्सरकडून इनपुट आवश्यक आहे. इंडेक्सर एजंट वर्तन परिभाषित करण्याची यंत्रणा म्हणजे **अलोकेशन व्यवस्थापन** मोड आणि **इंडेक्सिंग नियम**. ऑटो मोड अंतर्गत, इंडेक्सर इंडेक्समध्ये सबग्राफ निवडण्यासाठी त्यांची विशिष्ट रणनीती लागू करण्यासाठी **इंडेक्सिंग नियम** वापरू शकतो आणि त्यांच्यासाठी क्वेरी देऊ शकतो. नियम एजंटद्वारे प्रदान केलेल्या GraphQL API द्वारे व्यवस्थापित केले जातात आणि इंडेक्सर व्यवस्थापन API म्हणून ओळखले जातात. मॅन्युअल मोड अंतर्गत, इंडेक्सर **क्रिया रांग** वापरून वाटप क्रिया तयार करू शकतो आणि ते कार्यान्वित होण्यापूर्वी त्यांना स्पष्टपणे मंजूर करू शकतो. ओव्हरसाइट मोड अंतर्गत, **अ‍ॅक्शन रांग** भरण्यासाठी **इंडेक्सिंग नियम** वापरले जातात आणि अंमलबजावणीसाठी स्पष्ट मंजूरी देखील आवश्यक असते. @@ -537,7 +537,7 @@ graph indexer status - `ग्राफ इंडेक्सर नियमांना [options] [ ...]` - सर्व नियम मिळविण्यासाठी `सर्व` वापरून एक किंवा अधिक अनुक्रमणिका नियम मिळवा `` म्हणून, किंवा `ग्लोबल< /code> जागतिक डीफॉल्ट मिळविण्यासाठी. एक अतिरिक्त युक्तिवाद --merged` हे निर्दिष्ट करण्यासाठी वापरला जाऊ शकतो की तैनाती विशिष्ट नियम जागतिक नियमात विलीन केले जातात. इंडेक्सर एजंटमध्ये ते अशा प्रकारे लागू केले जातात. -- `ग्राफ इंडेक्सर नियम सेट [पर्याय] ...` - एक किंवा अधिक अनुक्रमणिका नियम सेट करा. +- `graph indexer rules set [options] ...` - Set one or more indexing rules. - `ग्राफ इंडेक्सर नियम सुरू करतात [options] ` - उपलब्ध असल्यास सबग्राफ उपयोजन अनुक्रमित करणे सुरू करा आणि त्याचा `निर्णय आधार` `नेहमी` वर सेट करा, त्यामुळे इंडेक्सर एजंट नेहमी त्याची अनुक्रमणिका निवडेल. जर जागतिक नियम नेहमी वर सेट केला असेल तर नेटवर्कवरील सर्व उपलब्ध सबग्राफ अनुक्रमित केले जातील. @@ -555,19 +555,19 @@ graph indexer status - `ग्राफ इंडेक्सर क्रिया रद्द करा [ ...] 
` - आयडी निर्दिष्ट न केल्यास रांगेतील सर्व क्रिया रद्द करा, अन्यथा विभाजक म्हणून स्पेससह आयडीचा अॅरे रद्द करा -- `ग्राफ इंडेक्सर क्रिया मंजूर करतात [ ...]` - अंमलबजावणीसाठी एकाधिक क्रिया मंजूर करा +- `graph indexer actions approve [ ...]` - Approve multiple actions for execution -- `ग्राफ इंडेक्सर नियम सेट [पर्याय] <1> <2> <3>...` - एक किंवा अधिक अनुक्रमणिका नियम सेट करा +- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. -#### अनुक्रमणिका नियम +#### Indexing rules अनुक्रमणिका नियम एकतर जागतिक डीफॉल्ट म्हणून किंवा त्यांचे आयडी वापरून विशिष्ट सबग्राफ उपयोजनांसाठी लागू केले जाऊ शकतात. `डिप्लॉयमेंट` आणि `decisionBasis` फील्ड अनिवार्य आहेत, तर इतर सर्व फील्ड ऐच्छिक आहेत. जेव्हा इंडेक्सिंग नियमामध्ये `decisionBasis` म्हणून `नियम` असतात, तेव्हा इंडेक्सर एजंट त्या नियमावरील नॉन-नल थ्रेशोल्ड व्हॅल्यूजची तुलना संबंधित डिप्लॉयमेंटसाठी नेटवर्कमधून आणलेल्या मूल्यांशी करेल. जर सबग्राफ डिप्लॉयमेंटमध्ये कोणत्याही थ्रेशोल्डच्या वर (किंवा खाली) मूल्ये असतील तर ती अनुक्रमणिकेसाठी निवडली जाईल. उदाहरणार्थ, जागतिक नियमामध्ये **5** (GRT) चा `minStake` असल्यास, 5 (GRT) पेक्षा जास्त स्टेक असलेली कोणतीही सबग्राफ डिप्लॉयमेंट त्याला वाटप अनुक्रमित केले जाईल. थ्रेशोल्ड नियमांमध्ये `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` आणि `minAverageQueryFees` समाविष्ट आहेत. -डेटा मॉडेल: +Data model: ```graphql type IndexingRule { @@ -601,7 +601,7 @@ IndexingDecisionBasis { } ``` -अनुक्रमणिका नियमाचा वापर उदाहरणः: +Example usage of indexing rule: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,20 +613,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### क्रिया रांग CLI +#### Actions queue CLI इंडेक्सर-cli कृती रांगेसह मॅन्युअली कार्य करण्यासाठी `कृती` मॉड्यूल प्रदान करते. ते क्रिया रांगेशी संवाद साधण्यासाठी इंडेक्सर व्यवस्थापन सर्व्हरद्वारे होस्ट केलेले **Graphql API** वापरते. क्रिया अंमलबजावणी कार्यकर्ता रांगेतील आयटम फक्त अंमलात आणण्यासाठी पकडेल जर त्यांच्याकडे `ActionStatus = मंजूर` असेल. शिफारस केलेल्या पथात क्रिया ActionStatus = रांगेत रांगेत जोडल्या जातात, त्यामुळे ऑन-चेन अंमलात आणण्यासाठी त्या नंतर मंजूर केल्या पाहिजेत. सामान्य प्रवाह असे दिसेल: -- तृतीय पक्ष ऑप्टिमायझर टूल किंवा इंडेक्सर-क्ली वापरकर्त्याद्वारे रांगेत क्रिया जोडली -- इंडेक्सर सर्व रांगेतील क्रिया पाहण्यासाठी `indexer-cli` वापरू शकतो +- Action added to the queue by the 3rd party optimizer tool or indexer-cli user +- Indexer can use the `indexer-cli` to view all queued actions - इंडेक्सर (किंवा इतर सॉफ्टवेअर) `indexer-cli` वापरून रांगेतील क्रिया मंजूर किंवा रद्द करू शकतात. मंजूर आणि रद्द आदेश इनपुट म्हणून अॅक्शन आयडीचा अॅरे घेतात. - अंमलबजावणी कर्मचारी नियमितपणे मंजूर कृतींसाठी रांगेत मतदान करतात. ते रांगेतील `मंजूर` क्रिया पकडेल, त्या कार्यान्वित करण्याचा प्रयत्न करेल आणि अंमलबजावणीच्या स्थितीनुसार `यशस्वी` किंवा `अयशस्वी< वर db मधील मूल्ये अपडेट करेल. /code>.
  • एखादी कृती यशस्वी झाल्यास कार्यकर्ता खात्री करेल की एक अनुक्रमणिका नियम उपस्थित आहे जो एजंटला वाटप कसे व्यवस्थापित करावे हे सांगते, एजंट ऑटो` किंवा ` मध्ये असताना मॅन्युअल क्रिया करताना उपयुक्त oversight` मोड. - इंडेक्सर कारवाईच्या अंमलबजावणीचा इतिहास पाहण्यासाठी कृती रांगेचे निरीक्षण करू शकतो आणि आवश्यक असल्यास क्रिया आयटमची अंमलबजावणी अयशस्वी झाल्यास पुन्हा मंजूर आणि अद्यतनित करू शकतो. कृती रांग रांगेत लावलेल्या आणि केलेल्या सर्व क्रियांचा इतिहास प्रदान करते. -डेटा मॉडेल: +Data model: ```graphql Type ActionInput { @@ -659,54 +659,54 @@ ActionType { } ``` -स्त्रोताकडून वापराचे उदाहरणः: +Example usage from source: ```bash -indexer indexer क्रिया सर्व मिळवा +graph indexer actions get all -indexer indexer क्रिया --status queed मिळवतात +graph indexer actions get --status queued -इंडेक्सर इंडेक्सर क्रिया रांग वाटप QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -इंडेक्सर इंडेक्सर क्रिया रांग रीअलोकेट QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -इंडेक्सर इंडेक्सर क्रिया रांग अनअलोकेट QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -इंडेक्सर इंडेक्सर क्रिया रद्द करा +graph indexer actions cancel -इंडेक्सर इंडेक्सर क्रिया 1 3 5 मंजूर करतात +graph indexer actions approve 1 3 5 -इंडेक्सर इंडेक्सर क्रिया मंजूर करतात +graph indexer actions execute approve ``` -लक्षात ठेवा की वाटप व्यवस्थापनासाठी समर्थित क्रिया प्रकारांमध्ये भिन्न इनपुट आवश्यकता आहेत: +Note that supported action types for allocation management have different input requirements: -- `वाटप करा` - विशिष्ट सबग्राफ उपयोजनासाठी स्टेक वाटप करा +- `Allocate` - allocate stake to a specific subgraph deployment - - आवश्यक क्रिया मापदंड: + - required action params: - deploymentID - amount -- `अनलोकेट` - वाटप बंद करा, इतरत्र पुन्हा वाटप करण्यासाठी स्टेक मोकळा करा +- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere - - आवश्यक क्रिया मापदंड: - - allocationID + - required action params: - allocationID + - deploymentID - optional action params: - poi - - बल (ग्राफ-नोड प्रदान केलेल्या गोष्टींशी जुळत नसले तरीही प्रदान केलेला POI वापरणारी शक्ती) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -- `पुन्हा वाटप करा` - आण्विकरित्या वाटप बंद करा आणि त्याच सबग्राफ उपयोजनासाठी नवीन वाटप उघडा +- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment - - आवश्यक क्रिया मापदंड: + - required action params: - allocationID - deploymentID - amount - optional action params: - poi - - बल (ग्राफ-नोड प्रदान केलेल्या गोष्टींशी जुळत नसले तरीही प्रदान केलेला POI वापरणारी शक्ती) + - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### किंमत मॉडेल +#### Cost models किंमत मॉडेल बाजार आणि क्वेरी गुणधर्मांवर आधारित क्वेरीसाठी डायनॅमिक किंमत प्रदान करतात. इंडेक्सर सेवा प्रत्येक सबग्राफसाठी गेटवेसह किंमत मॉडेल सामायिक करते ज्यासाठी ते प्रश्नांना उत्तर देऊ इच्छितात. गेटवे, या बदल्यात, प्रति क्वेरी इंडेक्सर निवड निर्णय घेण्यासाठी आणि निवडलेल्या इंडेक्सर्ससह पेमेंटची वाटाघाटी करण्यासाठी किंमत मॉडेलचा वापर करतात. 
@@ -716,7 +716,7 @@ indexer indexer क्रिया --status queed मिळवतात स्टेटमेंटमध्ये प्रेडिकेटचा समावेश असतो, जो ग्राफक्यूएल क्वेरीशी जुळण्यासाठी वापरला जातो आणि किंमत एक्स्प्रेशन ज्याचे मूल्यमापन केल्यावर दशांश GRT मध्ये खर्च येतो. क्वेरीच्या नामित युक्तिवाद स्थितीतील मूल्ये प्रेडिकेटमध्ये कॅप्चर केली जाऊ शकतात आणि अभिव्यक्तीमध्ये वापरली जाऊ शकतात. अभिव्यक्तीमध्ये प्लेसहोल्डर्ससाठी ग्लोबल देखील सेट आणि बदलले जाऊ शकतात. -उदाहरण खर्च मॉडेल: +Example cost model: ``` # हे विधान स्किप व्हॅल्यू कॅप्चर करते, @@ -729,7 +729,7 @@ indexer indexer क्रिया --status queed मिळवतात डीफॉल्ट => 0.1 * $SYSTEM_LOAD; ``` -वरील मॉडेल वापरून उदाहरण क्वेरी खर्च: +Example query costing using the above model: | Query | Price | | ---------------------------------------------------------------------------- | ------- | @@ -737,7 +737,7 @@ indexer indexer क्रिया --status queed मिळवतात | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### खर्चाचे मॉडेल लागू करणे +#### Applying the cost model किमतीचे मॉडेल इंडेक्सर CLI द्वारे लागू केले जातात, जे त्यांना डेटाबेसमध्ये साठवण्यासाठी इंडेक्सर एजंटच्या इंडेक्सर मॅनेजमेंट API कडे पाठवतात. इंडेक्सर सेवा नंतर त्यांना उचलेल आणि गेटवेला जेव्हा ते मागतील तेव्हा किंमत मॉडेल सर्व्ह करेल. @@ -746,41 +746,41 @@ indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## नेटवर्कशी संवाद साधत आहे +## Interacting with the network -### प्रोटोकॉलमध्ये भाग घ्या +### Stake in the protocol इंडेक्सर म्‍हणून नेटवर्कमध्‍ये सहभागी होण्‍याची पहिली पायरी म्हणजे प्रोटोकॉल, स्‍टेक फंड आणि (वैकल्पिकपणे) दैनंदिन प्रोटोकॉल संवादांसाठी ऑपरेटर पत्ता सेट करणे. _ **टीप**: या सूचनांच्या उद्देशांसाठी रीमिक्सचा वापर कराराच्या परस्परसंवादासाठी केला जाईल, परंतु आपल्या आवडीचे साधन वापरण्यास मोकळ्या मनाने ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/) >, आणि [MyCrypto](https://www.mycrypto.com/account) ही काही इतर ज्ञात साधने आहेत)._ एकदा इंडेक्सरने प्रोटोकॉलमध्ये GRT स्टेक केल्यानंतर, [इंडेक्सर घटक](/network/indexing#indexer-components) सुरू केले जाऊ शकतात आणि त्यांचे नेटवर्कशी परस्परसंवाद सुरू करू शकतात. -#### टोकन मंजूर करा +#### Approve tokens -1. ब्राउझरमध्ये [रीमिक्स अॅप](https://remix.ethereum.org/) उघडा +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. `फाइल एक्सप्लोरर` मध्ये [टोकन ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json) सह **GraphToken.abi** नावाची फाइल तयार करा. +2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. `GraphToken.abi` निवडलेल्या आणि संपादकात उघडून, रीमिक्स इंटरफेसमधील उपयोजन आणि `व्यवहार चालवा` विभागात स्विच करा. +3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. -4. वातावरणात `इंजेक्ट केलेले वेब3` निवडा आणि `खाते` अंतर्गत तुमचा इंडेक्सर पत्ता निवडा. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. 5. ग्राफटोकन कॉन्ट्रॅक्ट अॅड्रेस सेट करा - ग्राफटोकन कॉन्ट्रॅक्ट अॅड्रेस पेस्ट करा (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) `At Address` च्या पुढे आणि लागू करण्यासाठी `At Address` बटणावर क्लिक करा. 6. स्टॅकिंग कॉन्ट्रॅक्टला मंजुरी देण्यासाठी `मंजूर(खर्च, रक्कम)` फंक्शनला कॉल करा. स्टॅकिंग कॉन्ट्रॅक्ट अॅड्रेससह `spender` भरा (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) आणि `रक्कम` शेअर करण्यासाठी टोकनसह (wei मध्ये). 
-#### स्टेक टोकन +#### Stake tokens -1. ब्राउझरमध्ये [रीमिक्स अॅप](https://remix.ethereum.org/) उघडा +1. Open the [Remix app](https://remix.ethereum.org/) in a browser -2. स्टॅकिंग`फाइल एक्सप्लोरर` मध्ये staking A सह **Staking.abi** नावाची फाइल तयार करा. +2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. -3. `Staking.abi` निवडलेल्या आणि संपादकात उघडून, रीमिक्स इंटरफेसमधील `डिप्लॉय` आणि `व्यवहार चालवा` विभागात स्विच करा. +3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. -4. वातावरणात `इंजेक्ट केलेले वेब3` निवडा आणि `खाते` अंतर्गत तुमचा इंडेक्सर पत्ता निवडा. +4. Under environment select `Injected Web3` and under `Account` select your Indexer address. 5. स्टॅकिंग कॉन्ट्रॅक्ट अॅड्रेस सेट करा - स्टॅकिंग कॉन्ट्रॅक्ट अॅड्रेस पेस्ट करा (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) `At Address` च्या पुढे आणि अर्ज करण्यासाठी `At Address` बटणावर क्लिक करा. -6. प्रोटोकॉलमध्‍ये GRT स्‍टेक करण्‍यासाठी `stake()` ला कॉल करा. +6. Call `stake()` to stake GRT in the protocol. 7. (पर्यायी) इंडेक्सर्स त्यांच्या इंडेक्सर इन्फ्रास्ट्रक्चरसाठी ऑपरेटर होण्यासाठी दुसर्‍या पत्त्याला मंजूरी देऊ शकतात जेणेकरुन निधी नियंत्रित करणार्‍या कीज उपग्राफवर वाटप करणे आणि (सशुल्क) क्वेरी देणे यासारख्या दैनंदिन क्रिया करत असलेल्यांपासून वेगळे करणे शक्य आहे. ऑपरेटर पत्त्यासह ऑपरेटर कॉल `setOperator()` सेट करण्यासाठी. @@ -792,14 +792,10 @@ setDelegationParameters(950000, 600000, 500) ### The life of an allocation -इंडेक्सरद्वारे तयार केल्यानंतर निरोगी वाटप चार राज्यांतून जाते. +After being created by an Indexer a healthy allocation goes through four states. - **सक्रिय** - एकदा वाटप ऑन-चेन ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) तयार झाल्यावर ते **सक्रिय** मानले जाते. इंडेक्सरच्या स्वतःच्या आणि/किंवा नियुक्त केलेल्या स्टेकचा एक भाग सबग्राफ डिप्लॉयमेंटसाठी वाटप केला जातो, ज्यामुळे त्यांना इंडेक्सिंग रिवॉर्ड्सचा दावा करता येतो आणि त्या सबग्राफ डिप्लॉयमेंटसाठी क्वेरी करता येतात. इंडेक्सर एजंट इंडेक्सर नियमांवर आधारित वाटप तयार करण्याचे व्यवस्थापन करतो. - **बंद** - एकदा 1 युग निघून गेल्यावर इंडेक्सर एलोकेशन बंद करण्यास मोकळे आहे ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master /contracts/staking/Staking.sol#L873)) किंवा त्यांचा इंडेक्सर एजंट **maxAllocationEpochs** (सध्या २८ दिवस) नंतर वाटप आपोआप बंद करेल). जेव्हा इंडेक्सिंग (POI) च्या वैध पुराव्यासह वाटप बंद केले जाते तेव्हा त्यांची अनुक्रमणिका पुरस्कार इंडेक्सर आणि त्याच्या प्रतिनिधींना वितरित केले जातात (अधिक जाणून घेण्यासाठी खाली "बक्षिसे कशी वितरित केली जातात?" पहा). -- **अंतिम झाले** - एकदा वाटप बंद झाल्यानंतर एक विवाद कालावधी असतो ज्यानंतर वाटप **अंतिम** मानले जाते आणि ते होते क्‍वेरी फी सवलत क्‍लेम करण्‍यासाठी उपलब्‍ध आहे (क्लेम()). इंडेक्सर एजंट **अंतिमीकृत** वाटप शोधण्यासाठी नेटवर्कचे निरीक्षण करतो आणि ते कॉन्फिगर करण्यायोग्य (आणि पर्यायी) थ्रेशोल्डच्या वर असल्यास, **- वर दावा करतो. -अलोकेशन-क्लेम-थ्रेशोल्ड**. - -- **दावा केला** - वाटपाची अंतिम स्थिती; त्याने सक्रिय वाटप म्हणून त्याचा मार्ग चालवला आहे, सर्व पात्र बक्षिसे वितरीत केली गेली आहेत आणि त्याच्या क्वेरी फी सवलतीचा दावा केला गेला आहे. - ऑन-चेन वाटप तयार करण्यापूर्वी चेनहेडमध्ये सबग्राफ उपयोजन समक्रमित करण्यासाठी ऑफचेन सिंकिंग कार्यक्षमता वापरण्याची शिफारस इंडेक्सर्सना केली जाते. हे वैशिष्ट्य विशेषतः उपग्राफसाठी उपयुक्त आहे ज्यांना समक्रमित करण्यासाठी 28 पेक्षा जास्त काळ लागू शकतो किंवा अनिश्चितपणे अयशस्वी होण्याची काही शक्यता असते. 
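These allocation states can also be tracked outside the UI by querying the network subgraph. The sketch below is illustrative only — the placeholder address, the `status` field, and the epoch field names are assumptions to verify against the deployed schema:

```graphql
# Illustrative query: where each of an Indexer's allocations sits in its
# lifecycle (address is a placeholder, field names assumed).
query allocationLifecycle {
  allocations(where: { indexer: "0x0000000000000000000000000000000000000000" }) {
    id
    status
    createdAtEpoch
    closedAtEpoch
    poi
  }
}
```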
diff --git a/website/pages/mr/new-chain-integration.mdx b/website/pages/mr/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/mr/new-chain-integration.mdx +++ b/website/pages/mr/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/mr/operating-graph-node.mdx b/website/pages/mr/operating-graph-node.mdx index de5bb03c9e30..fe9e6c5fb9f3 100644 --- a/website/pages/mr/operating-graph-node.mdx +++ b/website/pages/mr/operating-graph-node.mdx @@ -18,11 +18,11 @@ title: ऑपरेटिंग ग्राफ नोड ### नेटवर्क क्लायंट -नेटवर्क अनुक्रमित करण्यासाठी, ग्राफ नोडला EVM-सुसंगत JSON-RPC API द्वारे नेटवर्क क्लायंटमध्ये प्रवेश आवश्यक आहे. हे RPC एकाच क्लायंटशी कनेक्ट होऊ शकते किंवा ते अधिक जटिल सेटअप असू शकते जे अनेकांवर शिल्लक लोड करते. +In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. 
Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**आगामी: नेटवर्क फायरहोसेस** - फायरहोस ही एक जीआरपीसी सेवा आहे जी क्रमबद्ध, तरीही काटा-अवेअर, ब्लॉक्सचा प्रवाह प्रदान करते, जी ग्राफच्या कोर डेव्हलपर्सद्वारे विकसित केले गेले आहे. स्केल ही सध्या इंडेक्सरची आवश्यकता नाही, परंतु इंडेक्सर्सना संपूर्ण नेटवर्क समर्थनापूर्वी तंत्रज्ञानाशी परिचित होण्यासाठी प्रोत्साहित केले जाते. Firehose बद्दल [येथे](https://firehose.streamingfast.io/) अधिक जाणून घ्या. +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### आयपीएफएस नोड्स @@ -30,11 +30,11 @@ While some subgraphs may just require a full node, some may have indexing featur ### प्रोमिथियस मेट्रिक्स सर्व्हर -मॉनिटरिंग आणि रिपोर्टिंग सक्षम करण्यासाठी, ग्राफ नोड वैकल्पिकरित्या प्रोमिथियस मेट्रिक्स सर्व्हरवर मेट्रिक्स लॉग करू शकतो. +To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. -### स्त्रोतापासून प्रारंभ करणे +### Getting started from source -#### पूर्वस्थिती स्थापित करा +#### Install prerequisites - **गंज** @@ -42,15 +42,15 @@ While some subgraphs may just require a full node, some may have indexing featur - **IPFS** -- **Ubuntu वापरकर्त्यांसाठी अतिरिक्त आवश्यकता** - Ubuntu वर ग्राफ नोड चालवण्यासाठी काही अतिरिक्त पॅकेजेसची आवश्यकता असू शकते. +- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### सेटअप +#### Setup -1. PostgreSQL डेटाबेस सर्व्हर सुरू करा +1. Start a PostgreSQL database server ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. [ग्राफ नोड](https://github.com/graphprotocol/graph-node) रेपो क्लोन करा आणि `कार्गो बिल्ड` चालवून स्त्रोत तयार करा +2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` -3. आता सर्व अवलंबन सेटअप झाले आहेत, आलेख नोड सुरू करा: +3. Now that all the dependencies are setup, start the Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -71,19 +71,19 @@ cargo run -p graph-node --release -- \ ### Kubernetes सह प्रारंभ करणे -कुबर्नेट्सचे संपूर्ण उदाहरण कॉन्फिगरेशन [इंडेक्सर रिपॉझिटरी](https://github.com/graphprotocol/indexer/tree/main/k8s) मध्ये आढळू शकते. +A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). ### बंदरे -जेव्हा ते चालू असते तेव्हा आलेख नोड खालील पोर्ट्स उघड करतो: +When it is running Graph Node exposes the following ports: -| बंदर | उद्देश | मार्ग | CLI युक्तिवाद | पर्यावरण परिवर्तनशील | +| बंदर | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP सर्व्हर
    (सबग्राफ क्वेरीसाठी) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (सबग्राफ सबस्क्रिप्शनसाठी) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (उपयोजन व्यवस्थापित करण्यासाठी) | / | --admin-port | - | -| 8030 | सबग्राफ अनुक्रमणिका स्थिती API | /graphql | --index-node-port | - | -| 8040 | प्रोमिथियस मेट्रिक्स | /metrics | --metrics-port | - | +| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | +| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | +| 8040 | Prometheus metrics | /metrics | --metrics-port | - | > **महत्त्वाचे**: पोर्ट सार्वजनिकपणे उघड करण्याबाबत सावधगिरी बाळगा - **प्रशासन पोर्ट** लॉक डाउन ठेवले पाहिजेत. यामध्ये ग्राफ नोड JSON-RPC एंडपॉइंटचा समावेश आहे. @@ -97,9 +97,9 @@ cargo run -p graph-node --release -- \ [TOML](https://toml.io/en/) कॉन्फिगरेशन फाईल CLI मध्ये उघड केलेल्या कॉन्फिगरेशनपेक्षा अधिक जटिल कॉन्फिगरेशन सेट करण्यासाठी वापरली जाऊ शकते. फाइलचे स्थान --config कमांड लाइन स्विचसह पास केले जाते. -> कॉन्फिगरेशन फाइल वापरताना, --postgres-url, --postgres-secondary-hosts, आणि --postgres-host-weights पर्याय वापरणे शक्य नाही. +> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. -किमान `config.toml` फाइल प्रदान केली जाऊ शकते; खालील फाइल --postgres-url कमांड लाइन पर्याय वापरण्यासारखी आहे: +A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: ```toml [store] @@ -116,7 +116,7 @@ Full documentation of `config.toml` can be found in the [Graph Node docs](https: ग्राफ नोड इंडेक्सिंग क्षैतिजरित्या स्केल करू शकते, अनुक्रमणिका विभाजित करण्यासाठी ग्राफ नोडची अनेक उदाहरणे चालवते आणि वेगवेगळ्या नोड्समध्ये क्वेरी करणे. हे फक्त स्टार्टअपवर वेगळ्या `node_id` सह कॉन्फिगर केलेले आलेख नोड्स चालवून केले जाऊ शकते (उदा. डॉकर कंपोझ फाइलमध्ये), जे नंतर `config.toml` फाइलमध्ये वापरले जाऊ शकते. [समर्पित क्वेरी नोड्स](#dedicated-query-nodes) निर्दिष्ट करण्यासाठी, [इनजेस्टर अवरोधित करा](#dedicated-block-ingestor), आणि [उपयोजन नियम](#deployment-rules) सह नोड्सवर सबग्राफ विभाजित करा. -> लक्षात घ्या की एकाधिक ग्राफ नोड्स सर्व समान डेटाबेस वापरण्यासाठी कॉन्फिगर केले जाऊ शकतात, जे स्वतःच शार्डिंगद्वारे क्षैतिजरित्या मोजले जाऊ शकतात. +> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. #### डिप्लॉयमेंट नियम @@ -154,14 +154,14 @@ indexers = [ #### समर्पित क्वेरी नोड्स -कॉन्फिगरेशन फाइलमध्ये खालील समाविष्ट करून नोड्स स्पष्टपणे क्वेरी नोड्स म्हणून कॉन्फिगर केले जाऊ शकतात: +Nodes can be configured to explicitly be query nodes by including the following in the configuration file: ```toml [general] query = "" ``` -कोणताही नोड ज्याचा --node-id रेग्युलर एक्सप्रेशनशी जुळतो तो फक्त क्वेरीला प्रतिसाद देण्यासाठी सेट केला जाईल. +Any node whose --node-id matches the regular expression will be set up to only respond to queries. #### शार्डिंगद्वारे डेटाबेस स्केलिंग @@ -220,7 +220,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] प्रगत कॉन्फिगरेशनसह स्केल केलेले इंडेक्सिंग सेटअप चालवणारे वापरकर्ते त्यांचे ग्राफ नोड्स Kubernetes सोबत व्यवस्थापित करण्याचा फायदा घेऊ शकतात. -- इंडेक्सर रेपॉजिटरीमध्ये [कुबर्नेट्स संदर्भाचे उदाहरण](https://github.com/graphprotocol/indexer/tree/main/k8s) आहे +- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) - [Launchpad](https://docs.graphops.xyz/launchpad/intro) हे GraphOps द्वारे देखरेख केलेल्या Kubernetes वर ग्राफ प्रोटोकॉल इंडेक्सर चालवण्यासाठी टूलकिट आहे. हे ग्राफ नोड उपयोजन व्यवस्थापित करण्यासाठी हेल्म चार्ट आणि एक CLI प्रदान करते. ### ग्राफ नोडचे व्यवस्थापन @@ -237,7 +237,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] ग्राफ नोड डिफॉल्टनुसार 8040 पोर्टवर प्रोमिथियस एंडपॉइंटद्वारे मेट्रिक्स प्रदान करतो. 
या मेट्रिक्सची कल्पना करण्यासाठी ग्राफानाचा वापर केला जाऊ शकतो. -इंडेक्सर रेपॉजिटरी [उदाहरण ग्राफना कॉन्फिगरेशन](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) प्रदान करते. +The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### ग्राफमन @@ -255,9 +255,9 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] संपूर्ण स्कीमा [येथे](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) उपलब्ध आहे. -#### अनुक्रमणिका कामगिरी +#### अनुक्रमणिका कार्यप्रदर्शन -अनुक्रमणिका प्रक्रियेचे तीन स्वतंत्र भाग आहेत: +There are three separate parts of the indexing process: - प्रदात्याकडून स्वारस्यपूर्ण इव्हेंट आणत आहे - योग्य हँडलर्ससह इव्हेंट्सवर प्रक्रिया करणे (यामध्ये राज्यासाठी साखळी कॉल करणे आणि स्टोअरमधून डेटा आणणे समाविष्ट असू शकते) @@ -267,14 +267,14 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] अनुक्रमणिका मंद होण्याची सामान्य कारणे: -- साखळीतून संबंधित इव्हेंट शोधण्यासाठी लागणारा वेळ (`ट्रेस_फिल्टर` वर अवलंबून असल्यामुळे कॉल हँडलर धीमे असू शकतात) -- हँडलरचा भाग म्हणून मोठ्या संख्येने `eth_calls` बनवणे -- अंमलबजावणी दरम्यान मोठ्या प्रमाणात स्टोअर संवाद -- स्टोअरमध्ये जतन करण्यासाठी मोठ्या प्रमाणात डेटा +- साखळीतून संबंधित इव्हेंट शोधण्यासाठी लागणारा वेळ (`trace_filter` वर अवलंबून राहिल्यामुळे कॉल हँडलर धीमे असू शकतात) +- Making large numbers of `eth_calls` as part of handlers +- A large amount of store interaction during execution +- A large amount of data to save to the store - प्रक्रिया करण्यासाठी मोठ्या संख्येने इव्हेंट - गर्दीच्या नोड्ससाठी स्लो डेटाबेस कनेक्शन वेळ - प्रदाता स्वतः साखळी डोके मागे घसरण -- प्रदात्याकडून चेन हेडवर नवीन पावत्या आणण्यात मंदता +- Slowness in fetching new receipts at the chain head from the provider सबग्राफ इंडेक्सिंग मेट्रिक्स इंडेक्सिंग मंदतेच्या मूळ कारणाचे निदान करण्यात मदत करू शकतात. काही प्रकरणांमध्ये, समस्या उपग्राफमध्येच असते, परंतु इतरांमध्ये, सुधारित नेटवर्क प्रदाते, कमी डेटाबेस विवाद आणि इतर कॉन्फिगरेशन सुधारणा अनुक्रमणिका कार्यप्रदर्शनात लक्षणीय सुधारणा करू शकतात. @@ -282,7 +282,7 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] अनुक्रमणिका दरम्यान सबग्राफ अयशस्वी होऊ शकतात, त्यांना अनपेक्षित डेटा आढळल्यास, काही घटक अपेक्षेप्रमाणे कार्य करत नसल्यास किंवा इव्हेंट हँडलर किंवा कॉन्फिगरेशनमध्ये काही बग असल्यास. अपयशाचे दोन सामान्य प्रकार आहेत: -- निर्धारक अपयश: हे असे अपयश आहेत जे पुन्हा प्रयत्नांनी सोडवले जाणार नाहीत +- Deterministic failures: these are failures which will not be resolved with retries - नॉन-डिटरमिनिस्टिक अपयश: हे प्रदात्याशी संबंधित समस्या किंवा काही अनपेक्षित ग्राफ नोड त्रुटींमुळे असू शकतात. जेव्हा नॉन-डिटरमिनिस्टिक अपयश येते, तेव्हा ग्राफ नोड अयशस्वी हँडलरचा पुन्हा प्रयत्न करेल, कालांतराने बॅक ऑफ होईल. काही प्रकरणांमध्ये इंडेक्सरद्वारे बिघाडाचे निराकरण केले जाऊ शकते (उदाहरणार्थ त्रुटी योग्य प्रकारचा प्रदाता नसल्यामुळे, आवश्यक प्रदाता जोडल्याने अनुक्रमणिका सुरू ठेवता येईल). तथापि, इतरांमध्ये, सबग्राफ कोडमध्ये बदल आवश्यक आहे. @@ -293,22 +293,22 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] प्रदात्याकडून रीफेचिंग जतन करण्यासाठी ग्राफ नोड स्टोअरमधील काही डेटा कॅश करतो. `eth_calls` च्या परिणामांप्रमाणेच ब्लॉक्स कॅशे केले जातात (नंतरचे विशिष्ट ब्लॉक म्हणून कॅश केले जातात). हे कॅशिंग थोड्याशा बदललेल्या सबग्राफच्या "रीसिंकिंग" दरम्यान अनुक्रमणिकेची गती नाटकीयरित्या वाढवू शकते. 
-तथापि, काही उदाहरणांमध्ये, जर इथरियम नोडने काही कालावधीसाठी चुकीचा डेटा प्रदान केला असेल, तर तो कॅशेमध्ये प्रवेश करू शकतो, ज्यामुळे चुकीचा डेटा किंवा अयशस्वी सबग्राफ होऊ शकतात. या प्रकरणात इंडेक्सर्स विषयुक्त कॅशे साफ करण्यासाठी `ग्राफमन` वापरू शकतात आणि नंतर प्रभावित सबग्राफ रिवाइंड करू शकतात, जे नंतर (आशेने) निरोगी प्रदात्याकडून नवीन डेटा आणतील. +However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. -ब्लॉक कॅशे विसंगतीचा संशय असल्यास, जसे की tx पावती गहाळ इव्हेंट: +If a block cache inconsistency is suspected, such as a tx receipt missing event: -1. साखळीचे नाव शोधण्यासाठी `ग्राफमन साखळी सूची`. +1. `graphman chain list` to find the chain name. 2. `ग्राफमन चेन चेक-ब्लॉक बाय-नंबर ` कॅशे केलेला ब्लॉक प्रदात्याशी जुळतो की नाही हे तपासेल आणि तसे नसल्यास कॅशेमधून ब्लॉक हटवेल. - 1. फरक असल्यास, `ग्राफमन चेन ट्रंकेट ` सह संपूर्ण कॅशे ट्रंकेट करणे अधिक सुरक्षित असू शकते. - 2. जर ब्लॉक प्रदात्याशी जुळत असेल, तर समस्या थेट प्रदात्याशी डीबग केली जाऊ शकते. + 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. + 2. If the block matches the provider, then the issue can be debugged directly against the provider. #### समस्या आणि त्रुटींची चौकशी करणे एकदा सबग्राफ इंडेक्स केला गेला की, इंडेक्सर्स सबग्राफच्या समर्पित क्वेरी एंडपॉइंटद्वारे क्वेरी सर्व्ह करण्याची अपेक्षा करू शकतात. जर इंडेक्सर महत्त्वपूर्ण क्वेरी व्हॉल्यूम प्रदान करण्याची आशा करत असेल तर, समर्पित क्वेरी नोडची शिफारस केली जाते आणि खूप जास्त क्वेरी व्हॉल्यूम असल्यास, इंडेक्सर्स प्रतिकृती शार्ड्स कॉन्फिगर करू शकतात जेणेकरुन क्वेरींचा अनुक्रमणिका प्रक्रियेवर परिणाम होणार नाही. -तथापि, समर्पित क्वेरी नोड आणि प्रतिकृतींसह देखील, विशिष्ट क्वेरी कार्यान्वित होण्यास बराच वेळ लागू शकतो आणि काही प्रकरणांमध्ये मेमरी वापर वाढतो आणि इतर वापरकर्त्यांसाठी क्वेरी वेळेवर नकारात्मक परिणाम होतो. +However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. -एक "सिल्व्हर बुलेट" नाही, परंतु मंद प्रश्नांना प्रतिबंध करण्यासाठी, निदान करण्यासाठी आणि हाताळण्यासाठी साधनांची श्रेणी आहे. +There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. ##### क्वेरी कॅशिंग @@ -340,6 +340,6 @@ provider = [ { label = "kovan", url = "http://..", features = [] } ] #### सबग्राफ काढून टाकत आहे -> ही नवीन कार्यक्षमता आहे, जी ग्राफ नोड 0.29.x मध्ये उपलब्ध असेल +> This is new functionality, which will be available in Graph Node 0.29.x काही ठिकाणी इंडेक्सरला दिलेला सबग्राफ काढायचा असेल. हे `ग्राफमॅन ड्रॉप` द्वारे सहजपणे केले जाऊ शकते, जे उपयोजन आणि सर्व अनुक्रमित डेटा हटवते. उपयोजन एकतर सबग्राफ नाव, IPFS हॅश `Qm..` किंवा डेटाबेस नेमस्पेस `sgdNNN` म्हणून निर्दिष्ट केले जाऊ शकते. पुढील दस्तऐवजीकरण [येथे](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) उपलब्ध आहे. 
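The block-cache repair and subgraph removal workflow described above can be combined into a short shell session. This is a sketch only: the chain name (`mainnet`), block number, deployment namespace (`sgd42`) and `config.toml` path are placeholders, and exact subcommand spellings should be confirmed with `graphman --help` for your Graph Node version.

```sh
# Illustrative only — chain name, block number and deployment identifier are placeholders.
# Find the chain name as Graph Node knows it
graphman --config config.toml chain list

# Compare a suspect cached block against the provider (removes it from the cache if it differs)
graphman --config config.toml chain check-block mainnet by-number 15000000

# If many blocks look inconsistent, truncating the whole chain cache can be safer
graphman --config config.toml chain truncate mainnet

# Remove a deployment and all of its indexed data (by name, Qm.. hash, or sgdNNN namespace)
graphman --config config.toml drop sgd42
```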
diff --git a/website/pages/mr/publishing/publishing-a-subgraph.mdx b/website/pages/mr/publishing/publishing-a-subgraph.mdx index f1e16de71e82..8c097cf7c0d4 100644 --- a/website/pages/mr/publishing/publishing-a-subgraph.mdx +++ b/website/pages/mr/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ title: विकेंद्रीकृत नेटवर्कवर सब विकेंद्रीकृत नेटवर्कवर सबग्राफ प्रकाशित केल्याने ते [क्युरेटर्स](/network/curating) वर क्युरेटिंग सुरू करण्यासाठी आणि [इंडेक्सर्स](/network/indexing) साठी उपलब्ध होते. त्याची अनुक्रमणिका सुरू करण्यासाठी. -विकेंद्रीकृत नेटवर्कवर सबग्राफ कसा प्रकाशित करायचा याच्या वॉकथ्रूसाठी, [हा व्हिडिओ](https://youtu.be/HfDgC2oNnwo?t=580) पहा. + तुम्ही समर्थित नेटवर्कची सूची [येथे](/developing/supported-networks) शोधू शकता. diff --git a/website/pages/mr/querying/distributed-systems.mdx b/website/pages/mr/querying/distributed-systems.mdx index 18e252753438..d967c914134e 100644 --- a/website/pages/mr/querying/distributed-systems.mdx +++ b/website/pages/mr/querying/distributed-systems.mdx @@ -1,23 +1,23 @@ --- -title: वितरित प्रणाली +title: Distributed Systems --- -आलेख वितरित प्रणाली म्हणून लागू केलेला प्रोटोकॉल आहे. +The Graph is a protocol implemented as a distributed system. कनेक्शन अयशस्वी. विनंत्या क्रमाबाहेर येतात. आउट-ऑफ-सिंक घड्याळे असलेले भिन्न संगणक आणि राज्य संबंधित विनंत्यांवर प्रक्रिया करतात. सर्व्हर रीस्टार्ट. विनंत्यांदरम्यान पुनर्संचयित होतात. या समस्या सर्व वितरित प्रणालींमध्ये अंतर्भूत आहेत परंतु जागतिक स्तरावर कार्यरत असलेल्या प्रणालींमध्ये या समस्या अधिकच वाढल्या आहेत. -जर एखाद्या क्लायंटने री-ऑर्गनायझेशन दरम्यान नवीनतम डेटासाठी इंडेक्सरला मतदान केले तर काय होऊ शकते याचे हे उदाहरण विचारात घ्या. +Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. -1. इंडेक्सर ब्लॉक 8 अंतर्भूत करतो -2. ब्लॉक 8 साठी क्लायंटला विनंती केली -3. इंडेक्सर ब्लॉक 9 अंतर्भूत करतो -4. इंडेक्सर ब्लॉक 10A अंतर्भूत करतो -5. ब्लॉक 10A साठी क्लायंटला विनंती केली -6. इंडेक्सर 10B ची पुनर्रचना शोधतो आणि 10A परत करतो -7. ब्लॉक 9 साठी क्लायंटला विनंती केली -8. इंडेक्सर ब्लॉक 10B अंतर्भूत करतो -9. इंडेक्सर ब्लॉक 11 अंतर्भूत करतो -10. ब्लॉक 11 साठी क्लायंटला विनंती केली +1. Indexer ingests block 8 +2. Request served to the client for block 8 +3. Indexer ingests block 9 +4. Indexer ingests block 10A +5. Request served to the client for block 10A +6. Indexer detects reorg to 10B and rolls back 10A +7. Request served to the client for block 9 +8. Indexer ingests block 10B +9. Indexer ingests block 11 +10. Request served to the client for block 11 इंडेक्सरच्या दृष्टिकोनातून, गोष्टी तार्किकदृष्ट्या पुढे जात आहेत. वेळ पुढे सरकत आहे, जरी आम्हाला अंकल ब्लॉक मागे घ्यावा लागला आणि त्याच्या वरच्या बाजूने एकमताने ब्लॉक खेळला गेला. वाटेत, इंडेक्सर त्यावेळेस माहित असलेली नवीनतम स्थिती वापरून विनंती करतो. @@ -27,11 +27,11 @@ title: वितरित प्रणाली डिस्ट्रिब्युटेड सिस्टम्सच्या परिणामांद्वारे तर्क करणे कठीण आहे, परंतु निराकरण होऊ शकत नाही! तुम्हाला काही सामान्य वापर-केस नेव्हिगेट करण्यात मदत करण्यासाठी आम्ही API आणि नमुने स्थापित केले आहेत. खालील उदाहरणे ते नमुने स्पष्ट करतात परंतु तरीही मुख्य कल्पना अस्पष्ट न करण्यासाठी उत्पादन कोड (जसे की त्रुटी हाताळणे आणि रद्द करणे) आवश्यक असलेले तपशील स्पष्ट करतात. -## अद्ययावत डेटासाठी मतदान +## Polling for updated data आलेख `block: { number_gte: $minBlock }` API प्रदान करतो, जो प्रतिसाद `$minBlock` च्या समान किंवा उच्च ब्लॉकसाठी आहे याची खात्री करतो. विनंती `ग्राफ-नोड` उदाहरणासाठी केली असल्यास आणि किमान ब्लॉक अद्याप समक्रमित केला नसल्यास, `ग्राफ-नोड` त्रुटी दर्शवेल. 
जर `ग्राफ-नोड` ने किमान ब्लॉक समक्रमित केले असेल, तर ते नवीनतम ब्लॉकसाठी प्रतिसाद चालवेल. जर विनंती एजला केली असेल तर & नोड गेटवे, गेटवे कोणत्याही इंडेक्सर्सना फिल्टर करेल ज्यांनी अद्याप मिन ब्लॉक सिंक केलेला नाही आणि इंडेक्सरने सिंक केलेल्या नवीनतम ब्लॉकसाठी विनंती करेल. -लूपमध्ये डेटासाठी मतदान करताना वेळ कधीही मागे जाणार नाही याची खात्री करण्यासाठी आम्ही `number_gte` वापरू शकतो. येथे एक उदाहरण आहे: +We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: ```javascript /// Updates the protocol.paused variable to the latest @@ -74,11 +74,11 @@ async function updateProtocolPaused() { } ``` -## संबंधित वस्तूंचा संच आणत आहे +## Fetching a set of related items आणखी एक वापर-केस म्हणजे एक मोठा संच पुनर्प्राप्त करणे किंवा, अधिक सामान्यपणे, एकाधिक विनंत्यांमधून संबंधित आयटम पुनर्प्राप्त करणे. मतदान प्रकरणाच्या विपरीत (जेथे इच्छित सातत्य वेळेत पुढे जाणे होते), इच्छित सातत्य वेळेत एका बिंदूसाठी असते. -आमचे सर्व परिणाम एकाच ब्लॉकमध्ये पिन करण्यासाठी येथे आम्ही `block: { hash: $blockHash }` युक्तिवाद वापरू. +Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. ```javascript /// Gets a list of domain names from a single block using pagination diff --git a/website/pages/mr/querying/graphql-api.mdx b/website/pages/mr/querying/graphql-api.mdx index 5232b4e0a85c..b66f2673be24 100644 --- a/website/pages/mr/querying/graphql-api.mdx +++ b/website/pages/mr/querying/graphql-api.mdx @@ -2,15 +2,15 @@ title: GraphQL API --- -हे मार्गदर्शक GraphQL क्वेरी API चे स्पष्टीकरण देते जे आलेख प्रोटोकॉलसाठी वापरले जाते. +This guide explains the GraphQL Query API that is used for the Graph Protocol. -## प्रश्न +## Queries तुमच्या सबग्राफ स्कीमामध्ये तुम्ही `एंटिटीज` नावाचे प्रकार परिभाषित करता. प्रत्येक `संस्था` प्रकारासाठी, उच्च-स्तरीय `क्वेरी` प्रकारावर एक `संस्था` आणि `संस्था` फील्ड व्युत्पन्न केले जाईल. लक्षात ठेवा की ग्राफ वापरताना `क्वेरी` `graphql` क्वेरीच्या शीर्षस्थानी समाविष्ट करणे आवश्यक नाही. -### उदाहरणे +### Examples -तुमच्या स्कीमामध्ये परिभाषित केलेल्या एका `टोकन` घटकासाठी क्वेरी: +Query for a single `Token` entity defined in your schema: ```graphql { @@ -21,9 +21,9 @@ title: GraphQL API } ``` -> **टीप:** एका घटकासाठी क्वेरी करताना, `id` फील्ड आवश्यक आहे आणि ती एक स्ट्रिंग असणे आवश्यक आहे. +> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. -सर्व `टोकन` घटकांची चौकशी करा: +Query all `Token` entities: ```graphql { @@ -34,9 +34,9 @@ title: GraphQL API } ``` -### वर्गीकरण +### Sorting -संग्रहाची क्वेरी करताना, विशिष्ट गुणधर्मानुसार क्रमवारी लावण्यासाठी `orderBy` पॅरामीटर वापरला जाऊ शकतो. याव्यतिरिक्त, `ऑर्डर डायरेक्शन` चा वापर क्रमवारीची दिशा, चढत्यासाठी `asc` किंवा उतरत्यासाठी `desc` निर्दिष्ट करण्यासाठी केला जाऊ शकतो. +When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. #### उदाहरण @@ -49,11 +49,11 @@ title: GraphQL API } ``` -#### नेस्टेड एंटिटी सॉर्टिंगचे उदाहरण +#### Example for nested entity sorting -ग्राफ नोड [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) नुसार नेस्टेड घटकांच्या आधारावर घटकांची क्रमवारी लावली जाऊ शकते. +As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. 
-खालील उदाहरणामध्ये, आम्ही टोकन त्यांच्या मालकाच्या नावानुसार क्रमवारी लावतो: +In the following example, we sort the tokens by the name of their owner: ```graphql { @@ -66,19 +66,19 @@ title: GraphQL API } ``` -> सध्या, तुम्ही `@entity` आणि `@derivedFrom` फील्डवर एक-स्तरीय खोल `स्ट्रिंग` किंवा `आयडी` प्रकारांनुसार क्रमवारी लावू शकता. दुर्दैवाने, [एका स्तर-खोल घटकांवरील इंटरफेसनुसार क्रमवारी लावणे](https://github.com/graphprotocol/graph-node/pull/4058), अॅरे आणि नेस्टेड घटक असलेल्या फील्डनुसार क्रमवारी लावणे अद्याप समर्थित नाही. +> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. -### पृष्ठांकन +### Pagination -संग्रहाची क्वेरी करताना, संग्रहाच्या सुरुवातीपासून पृष्ठांकन करण्यासाठी `प्रथम` पॅरामीटर वापरला जाऊ शकतो. हे लक्षात घेण्यासारखे आहे की डीफॉल्ट क्रमवारी आयडी नुसार चढत्या अल्फान्यूमेरिक क्रमाने आहे, निर्मिती वेळेनुसार नाही. +When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. -पुढे, `वगळा` पॅरामीटर घटक वगळण्यासाठी आणि पृष्ठांकन करण्यासाठी वापरले जाऊ शकते. उदा. `first:100` पहिल्या 100 संस्था दाखवते आणि `first:100, skip:100` पुढील 100 संस्था दाखवते. +Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. -क्वेरींनी खूप मोठी `वगळा` मूल्ये वापरणे टाळले पाहिजे कारण ते सामान्यतः खराब कार्य करतात. मोठ्या संख्येने आयटम पुनर्प्राप्त करण्यासाठी, शेवटच्या उदाहरणात दर्शविल्याप्रमाणे एखाद्या विशेषतावर आधारित घटकांद्वारे पृष्ठ करणे अधिक चांगले आहे. +Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. -#### `प्रथम` वापरून उदाहरण +#### Example using `first` -पहिल्या 10 टोकनसाठी क्वेरी करा: +Query the first 10 tokens: ```graphql { @@ -89,11 +89,11 @@ title: GraphQL API } ``` -संकलनाच्या मध्यभागी असलेल्या घटकांच्या गटांसाठी क्वेरी करण्यासाठी, सुरुवातीपासून सुरू होणारी विशिष्ट संख्या वगळण्यासाठी `वगळा` पॅरामीटरचा वापर `प्रथम` पॅरामीटरसह केला जाऊ शकतो. संग्रहाचे. +To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. -#### `प्रथम` आणि `वगळा` वापरण्याचे उदाहरण +#### Example using `first` and `skip` -क्वेरी 10 `टोकन` संस्था, संकलनाच्या सुरुवातीपासून 10 ठिकाणांद्वारे ऑफसेट: +Query 10 `Token` entities, offset by 10 places from the beginning of the collection: ```graphql { @@ -104,9 +104,9 @@ title: GraphQL API } ``` -#### `first` आणि `id_ge` वापरण्याचे उदाहरण +#### Example using `first` and `id_ge` -जर एखाद्या क्लायंटला मोठ्या संख्येने संस्था पुनर्प्राप्त करण्याची आवश्यकता असेल, तर विशेषतावर आधारित क्वेरी करणे आणि त्या गुणधर्माद्वारे फिल्टर करणे अधिक कार्यक्षम आहे. उदाहरणार्थ, क्लायंट ही क्वेरी वापरून मोठ्या संख्येने टोकन पुनर्प्राप्त करेल: +If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. 
For example, a client would retrieve a large number of tokens using this query: ```graphql query manyTokens($lastID: String) { @@ -117,15 +117,15 @@ query manyTokens($lastID: String) { } ``` -प्रथमच, ते `lastID = ""` सह क्वेरी पाठवेल आणि त्यानंतरच्या विनंत्यांसाठी शेवटच्या `id` विशेषतावर `lastID` सेट करेल मागील विनंतीमध्ये अस्तित्व. वाढत्या `वगळा` मूल्यांचा वापर करण्यापेक्षा हा दृष्टीकोन लक्षणीयरीत्या चांगली कामगिरी करेल. +The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. -### फिल्टरिंग +### Filtering -वेगवेगळ्या गुणधर्मांसाठी फिल्टर करण्यासाठी तुम्ही तुमच्या क्वेरींमध्ये `where` पॅरामीटर वापरू शकता. तुम्ही `कुठे` पॅरामीटरमध्ये बहुविध मूल्यांवर फिल्टर करू शकता. +You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. -#### `कुठे` वापरण्याचे उदाहरण +#### Example using `where` -`अयशस्वी` परिणामासह क्वेरी आव्हाने: +Query challenges with `failed` outcome: ```graphql { @@ -139,9 +139,9 @@ query manyTokens($lastID: String) { } ``` -मूल्याच्या तुलनेसाठी तुम्ही `_gt`, `_lte` सारखे प्रत्यय वापरू शकता: +You can use suffixes like `_gt`, `_lte` for value comparison: -#### श्रेणी फिल्टरिंगचे उदाहरण +#### Example for range filtering ```graphql { @@ -153,11 +153,11 @@ query manyTokens($lastID: String) { } ``` -#### ब्लॉक फिल्टरिंगचे उदाहरण +#### Example for block filtering -तुम्ही `_change_block(number_gte: Int)` द्वारे देखील घटक फिल्टर करू शकता - हे निर्दिष्ट ब्लॉकमध्ये किंवा नंतर अपडेट केलेल्या घटकांना फिल्टर करते. +You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. -तुम्ही फक्त बदललेल्या घटकांना आणण्याचा विचार करत असाल तर हे उपयुक्त ठरू शकते, उदाहरणार्थ तुम्ही शेवटच्या वेळी मतदान केले तेव्हापासून. किंवा वैकल्पिकरित्या तुमच्या सबग्राफमध्ये संस्था कशा बदलत आहेत याची तपासणी करणे किंवा डीबग करणे उपयुक्त ठरू शकते (ब्लॉक फिल्टरसह एकत्रित केल्यास, तुम्ही विशिष्ट ब्लॉकमध्ये बदललेल्या घटकांना वेगळे करू शकता). +This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). ```graphql { @@ -169,11 +169,11 @@ query manyTokens($lastID: String) { } ``` -#### नेस्टेड एंटिटी फिल्टरिंगचे उदाहरण +#### Example for nested entity filtering -नेस्टेड घटकांच्या आधारे फिल्टरिंग `_` प्रत्यय असलेल्या फील्डमध्ये शक्य आहे. +Filtering on the basis of nested entities is possible in the fields with the `_` suffix. -तुम्ही फक्त अशाच संस्था आणण्याचा विचार करत असाल ज्यांच्या बाल-स्तरीय संस्था प्रदान केलेल्या अटी पूर्ण करतात. +This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. ```graphql { @@ -187,13 +187,13 @@ query manyTokens($lastID: String) { } ``` -#### तार्किक ऑपरेटर +#### Logical operators ग्राफ नोड नुसार [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) तुम्ही एकाधिक गटबद्ध करू शकता एकापेक्षा जास्त निकषांवर आधारित परिणाम फिल्टर करण्यासाठी `आणि` किंवा `किंवा` ऑपरेटर वापरून समान `जिथे` युक्तिवादात पॅरामीटर्स. 
-##### `आणि` ऑपरेटर +##### `AND` Operator -खालील उदाहरणामध्ये, आम्ही `परिणाम` `यशस्वी` आणि `संख्या` `100` पेक्षा जास्त किंवा त्यापेक्षा जास्त असलेल्या आव्हानांसाठी फिल्टर करत आहोत. +In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. ```graphql { @@ -207,7 +207,7 @@ query manyTokens($lastID: String) { } ``` -> **सिंटॅक्टिक शुगर:** तुम्ही स्वल्पविरामाने विभक्त केलेला उप-अभिव्यक्ती पास करून `आणि` ऑपरेटर काढून वरील क्वेरी सोपी करू शकता. +> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. > > ```graphql > { @@ -223,7 +223,7 @@ query manyTokens($lastID: String) { ##### `OR` Operator -खालील उदाहरणामध्ये, आम्ही `परिणाम` `यशस्वी` किंवा `संख्या` `100` पेक्षा जास्त किंवा त्यापेक्षा जास्त असलेल्या आव्हानांसाठी फिल्टर करत आहोत. +In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. ```graphql { @@ -237,11 +237,11 @@ query manyTokens($lastID: String) { } ``` -> **टीप**: क्वेरी तयार करताना, `किंवा` ऑपरेटर वापरण्याच्या कार्यप्रदर्शन प्रभावाचा विचार करणे महत्त्वाचे आहे. शोध परिणामांचा विस्तार करण्यासाठी `किंवा` हे एक उपयुक्त साधन असू शकते, परंतु त्याची महत्त्वपूर्ण किंमत देखील असू शकते. `किंवा` सह मुख्य समस्यांपैकी एक ही आहे की यामुळे क्वेरी मंद होऊ शकतात. याचे कारण असे की `किंवा` ला एकाधिक अनुक्रमणिकांद्वारे डेटाबेस स्कॅन करणे आवश्यक आहे, ही एक वेळ घेणारी प्रक्रिया असू शकते. या समस्या टाळण्यासाठी, विकासक वापरण्याची शिफारस केली जाते आणि त्याऐवजी किंवा जेव्हा शक्य असेल तेव्हा ऑपरेटर वापरतात. हे अधिक अचूक फिल्टरिंगसाठी अनुमती देते आणि जलद, अधिक अचूक क्वेरीस नेऊ शकते. +> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. -#### सर्व फिल्टर +#### All Filters -पॅरामीटर प्रत्ययांची संपूर्ण यादी: +Full list of parameter suffixes: ``` _ @@ -266,21 +266,21 @@ _not_ends_with _not_ends_with_nocase ``` -> कृपया लक्षात घ्या की काही प्रत्यय केवळ विशिष्ट प्रकारांसाठी समर्थित आहेत. उदाहरणार्थ, `बूलियन` फक्त `_not`, `_in` आणि `_not_in` चे समर्थन करते, परंतु `_` केवळ ऑब्जेक्ट आणि इंटरफेस प्रकारांसाठी उपलब्ध आहे. +> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. -याव्यतिरिक्त, खालील जागतिक फिल्टर `कुठे` युक्तिवादाचा भाग म्हणून उपलब्ध आहेत: +In addition, the following global filters are available as part of `where` argument: ```gr _change_block(number_gte: Int) ``` -### वेळ-प्रवास प्रश्न +### Time-travel queries -तुम्‍ही तुमच्‍या एंटिटीच्‍या स्‍थितीबद्दल क्‍वेरी करू शकता, जे डीफॉल्‍ट असलेल्‍या नवीनतम ब्लॉकसाठीच नाही, तर भूतकाळातील एका अनियंत्रित ब्लॉकसाठी देखील आहे. क्वेरी ज्या ब्लॉकवर व्हायला हवी तो ब्लॉक नंबर किंवा त्याच्या ब्लॉक हॅशद्वारे क्वेरीच्या टॉप लेव्हल फील्डमध्ये `ब्लॉक` युक्तिवाद समाविष्ट करून निर्दिष्ट केला जाऊ शकतो. 
+You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. -अशा क्वेरीचा परिणाम कालांतराने बदलणार नाही, म्हणजे, एखाद्या विशिष्ट भूतकाळातील ब्लॉकवर क्वेरी केल्याने तोच परिणाम मिळेल, जेव्हा तो कार्यान्वित केला जातो तेव्हा अपवाद वगळता, जर तुम्ही साखळीच्या अगदी जवळ असलेल्या ब्लॉकवर क्वेरी केली तर, जर तो ब्लॉक मुख्य साखळीवर नसला आणि साखळीची पुनर्रचना झाली तर परिणाम बदलू शकतो. एकदा ब्लॉकला अंतिम मानले जाऊ शकते, क्वेरीचा निकाल बदलणार नाही. +The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. -लक्षात ठेवा की सध्याची अंमलबजावणी अजूनही काही मर्यादांच्या अधीन आहे ज्यामुळे या हमींचे उल्लंघन होऊ शकते. अंमलबजावणी नेहमी सांगू शकत नाही की दिलेला ब्लॉक हॅश मुख्य शृंखलावर अजिबात नाही, किंवा ब्लॉकसाठी ब्लॉक हॅशद्वारे केलेल्या क्वेरीचा परिणाम ज्याला अंतिम मानले जाऊ शकत नाही अशा ब्लॉकच्या पुनर्रचनावर परिणाम होऊ शकतो. प्रश्न जेव्हा ब्लॉक अंतिम असतो आणि मुख्य शृंखलावर असल्याचे ओळखले जाते तेव्हा ते ब्लॉक हॅशद्वारे क्वेरीच्या परिणामांवर परिणाम करत नाहीत. [हा मुद्दा](https://github.com/graphprotocol/graph-node/issues/1405) या मर्यादा काय आहेत हे तपशीलवार स्पष्ट करते. +Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. #### उदाहरण @@ -296,7 +296,7 @@ _change_block(number_gte: Int) } ``` -ही क्वेरी `चॅलेंज` संस्था आणि त्यांच्याशी संबंधित `अॅप्लिकेशन` संस्था परत करेल, कारण ते ब्लॉक क्रमांक 8,000,000 वर प्रक्रिया केल्यानंतर थेट अस्तित्वात होते. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. #### उदाहरण @@ -312,26 +312,26 @@ _change_block(number_gte: Int) } ``` -ही क्वेरी `चॅलेंज` संस्था आणि त्यांच्याशी संबंधित `Application` घटक परत करेल, कारण ते दिलेल्या हॅशसह ब्लॉकवर प्रक्रिया केल्यानंतर थेट अस्तित्वात होते. +This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. -### पूर्ण मजकूर शोध क्वेरी +### Fulltext Search Queries -फुलटेक्स्ट शोध क्वेरी फील्ड एक अभिव्यक्त मजकूर शोध API प्रदान करतात जे सबग्राफ स्कीमामध्ये जोडले जाऊ शकतात आणि सानुकूलित केले जाऊ शकतात. तुमच्या सबग्राफमध्ये पूर्ण मजकूर शोध जोडण्यासाठी [पूर्ण मजकूर शोध फील्ड परिभाषित करणे](/developing/creating-a-subgraph#defining-fulltext-search-fields) पहा. +Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. 
Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. -पूर्ण मजकूर शोध क्वेरींमध्ये शोध संज्ञा पुरवण्यासाठी एक आवश्यक फील्ड आहे, `text`. या `text` शोध क्षेत्रात वापरण्यासाठी अनेक विशेष फुलटेक्स्ट ऑपरेटर उपलब्ध आहेत. +Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. -पूर्ण मजकूर शोध ऑपरेटर: +Fulltext search operators: -| चिन्ह | ऑपरेटर्स | वर्णन | +| Symbol | Operator | वर्णन | | --- | --- | --- | -| `&` | `आणि` | प्रदान केलेल्या सर्व संज्ञांचा समावेश असलेल्या घटकांसाठी फिल्टरमध्ये एकाधिक शोध संज्ञा एकत्र करण्यासाठी | -| | | `किंवा` | किंवा ऑपरेटरने विभक्त केलेल्या एकाधिक शोध संज्ञांसह क्वेरी प्रदान केलेल्या कोणत्याही अटींशी जुळणारे सर्व घटक परत करतील | -| `<->` | `द्वारे अनुसरण करा` | दोन शब्दांमधील अंतर निर्दिष्ट करा. | -| `:*` | `Prefix` | उपसर्ग जुळणारे शब्द शोधण्यासाठी उपसर्ग शोध संज्ञा वापरा (2 वर्ण आवश्यक.) | +| `&` | `आणि` | For combining multiple search terms into a filter for entities that include all of the provided terms | +| | | `किंवा` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | +| `<->` | `द्वारे अनुसरण करा` | Specify the distance between two words. | +| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | -#### उदाहरणे +#### Examples -`किंवा` ऑपरेटर वापरून, ही क्वेरी त्यांच्या फुलटेक्स्ट फील्डमध्ये "अराजकता" किंवा "क्रम्पेट" च्या भिन्नतेसह ब्लॉग घटकांना फिल्टर करेल. +Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. ```graphql { @@ -344,7 +344,7 @@ _change_block(number_gte: Int) } ``` -`फॉलो बाय` ऑपरेटर पूर्ण मजकूर दस्तऐवजांमध्ये विशिष्ट अंतराशिवाय शब्द निर्दिष्ट करतो. खालील क्वेरी "विकेंद्रित" आणि त्यानंतर "तत्वज्ञान" च्या भिन्नतेसह सर्व ब्लॉग परत करेल +The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" ```graphql { @@ -357,7 +357,7 @@ _change_block(number_gte: Int) } ``` -अधिक जटिल फिल्टर बनवण्यासाठी फुलटेक्स्ट ऑपरेटर एकत्र करा. सबब शोध ऑपरेटरसह, या उदाहरण क्वेरीच्या फॉलोसह एकत्रितपणे "lou" आणि त्यानंतर "music" ने सुरू होणार्‍या शब्दांसह सर्व ब्लॉग घटकांशी जुळेल. +Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". ```graphql { @@ -370,27 +370,27 @@ _change_block(number_gte: Int) } ``` -### प्रमाणीकरण +### Validation -ग्राफ नोड [स्पेसिफिकेशन-आधारित](https://spec.graphql.org/October2021/#sec-Validation) [graphql-tools-rs](https:// वापरून प्राप्त केलेल्या GraphQL क्वेरीचे प्रमाणीकरण लागू करते github.com/dotansimha/graphql-tools-rs#validation-rules), जे [graphql-js संदर्भ अंमलबजावणी](https://github.com/graphql/graphql-js वर आधारित आहे /tree/main/src/validation). प्रमाणीकरण नियमात अयशस्वी होणाऱ्या क्वेरी प्रमाणित त्रुटीसह करतात - अधिक जाणून घेण्यासाठी [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) ला भेट द्या. 
+Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. -## स्कीमा +## Schema -तुमच्या डेटा स्रोताचा स्कीमा--म्हणजेच, क्वेरीसाठी उपलब्ध असलेले अस्तित्व प्रकार, मूल्ये आणि संबंध--[GraphQL इंटरफेस व्याख्या भाषा (IDL)](https://facebook.github.io/graphql/draft/# द्वारे परिभाषित केले जातात. sec-Type-System). +The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL स्कीमा सामान्यतः `क्वेरी`, `सदस्यता` आणि `उत्परिवर्तन` साठी रूट प्रकार परिभाषित करतात. आलेख फक्त `क्वेरी` ला सपोर्ट करतो. तुमच्या सबग्राफसाठी मूळ `क्वेरी` प्रकार तुमच्या सबग्राफ मॅनिफेस्टमध्ये समाविष्ट असलेल्या GraphQL स्कीमामधून आपोआप व्युत्पन्न केला जातो. +GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. -> **टीप:** आमचे API उत्परिवर्तन उघड करत नाही कारण विकासकांनी त्यांच्या अनुप्रयोगांमधून अंतर्निहित ब्लॉकचेनवर थेट व्यवहार करणे अपेक्षित आहे. +> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. -### संस्था +### Entities -तुमच्या स्कीमामधील `@entity` निर्देशांसह सर्व GraphQL प्रकारांना संस्था म्हणून मानले जाईल आणि त्यांच्याकडे `ID` फील्ड असणे आवश्यक आहे. +All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. -> **टीप:** सध्या, तुमच्या स्कीमामधील सर्व प्रकारांमध्ये `@entity` निर्देश असणे आवश्यक आहे. भविष्यात, आम्ही `@entity` निर्देशांशिवाय प्रकारांना व्हॅल्यू ऑब्जेक्ट्स म्हणून हाताळू, परंतु हे अद्याप समर्थित नाही. +> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. -### सबग्राफ मेटाडेटा +### Subgraph Metadata -सर्व सबग्राफमध्ये स्वयं-व्युत्पन्न `_Meta__` ऑब्जेक्ट असतो, जो सबग्राफ मेटाडेटामध्ये प्रवेश प्रदान करतो. हे खालीलप्रमाणे विचारले जाऊ शकते: +All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: ```graphQL { @@ -406,14 +406,14 @@ GraphQL स्कीमा सामान्यतः `क्वेरी`, ` } ``` -जर ब्लॉक प्रदान केला असेल तर, मेटाडेटा त्या ब्लॉकचा असेल, जर नवीनतम अनुक्रमित ब्लॉक वापरला नसेल. प्रदान केल्यास, ब्लॉक सबग्राफच्या स्टार्ट ब्लॉक नंतर असणे आवश्यक आहे आणि सर्वात अलीकडील अनुक्रमित ब्लॉकपेक्षा कमी किंवा समान असणे आवश्यक आहे. +If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. -`डिप्लॉयमेंट` हा एक अद्वितीय आयडी आहे, जो `subgraph.yaml` फाइलच्या IPFS CID शी संबंधित आहे. 
+`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. `block` नवीनतम ब्लॉकबद्दल माहिती प्रदान करते (`_meta` ला पास केलेल्या कोणत्याही ब्लॉक मर्यादा लक्षात घेऊन): -- हॅश: ब्लॉकचा हॅश -- क्रमांक: ब्लॉक क्रमांक -- टाइमस्टॅम्प: ब्लॉकचा टाइमस्टॅम्प, उपलब्ध असल्यास (हे सध्या फक्त ईव्हीएम नेटवर्क्सच्या अनुक्रमणिकेच्या सबग्राफसाठी उपलब्ध आहे) +- hash: the hash of the block +- number: the block number +- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) -`hasIndexingErrors` हे बुलियन आहे जे काही मागील ब्लॉकमध्ये सबग्राफमध्ये अनुक्रमणिक त्रुटी आढळल्या आहेत की नाही हे ओळखण्यासाठी +`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block diff --git a/website/pages/mr/querying/managing-api-keys.mdx b/website/pages/mr/querying/managing-api-keys.mdx index d7d30eb50d80..17538865c184 100644 --- a/website/pages/mr/querying/managing-api-keys.mdx +++ b/website/pages/mr/querying/managing-api-keys.mdx @@ -1,26 +1,26 @@ --- -title: तुमच्या API की व्यवस्थापित करणे +title: Managing your API keys --- तुम्ही dapp डेव्हलपर किंवा सबग्राफ डेव्हलपर असलात तरीही, तुम्हाला तुमच्या API की व्यवस्थापित कराव्या लागतील. तुमच्यासाठी सबग्राफ्स क्वेरी करण्यास सक्षम असणे महत्त्वाचे आहे कारण API की अनुप्रयोग सेवांमधील कनेक्शन वैध आणि अधिकृत असल्याची खात्री करतात. यात अंतिम वापरकर्ता आणि अनुप्रयोग वापरून डिव्हाइस प्रमाणीकृत करणे समाविष्ट आहे. -स्टुडिओ विद्यमान API की सूचीबद्ध करेल, ज्यामुळे तुम्हाला त्या व्यवस्थापित करण्याची किंवा हटवण्याची क्षमता मिळेल. +The Studio will list out existing API keys, which will give you the ability to manage or delete them. -1. **विहंगावलोकन** विभाग तुम्हाला याची अनुमती देईल: - - तुमचे मुख्य नाव संपादित करा - - API की पुन्हा निर्माण करा - - आकडेवारीसह API की चा वर्तमान वापर पहा: - - प्रश्नांची संख्या - - GRT ची रक्कम खर्च केली +1. The **Overview** section will allow you to: + - Edit your key name + - Regenerate API keys + - View the current usage of the API key with stats: + - Number of queries + - Amount of GRT spent 2. **सुरक्षा** अंतर्गत, तुम्‍हाला तुमच्‍या API की वर असल्‍याच्‍या नियंत्रणच्‍या स्‍तरावर अवलंबून सुरक्षा सेटिंग्‍ज निवडण्‍यात सक्षम असाल. या विभागात, तुम्ही हे करू शकता: - - तुमची API की वापरण्यासाठी अधिकृत डोमेन नावे पहा आणि व्यवस्थापित करा - - तुमच्‍या API की सह क्‍वेरी करता येणारे सबग्राफ असाइन करा + - View and manage the domain names authorized to use your API key + - Assign subgraphs that can be queried with your API key 3. **इंडेक्सर प्राधान्य** अंतर्गत, तुमची API की वापरल्या जाणार्‍या सबग्राफ अनुक्रमित करणाऱ्या इंडेक्सर्ससाठी तुम्ही भिन्न प्राधान्ये सेट करू शकाल. तुम्ही यापैकी प्रत्येकासाठी 5 गुणांपर्यंत नियुक्त करू शकता: - **वेगवान गती**: क्वेरी आणि इंडेक्सरकडून मिळालेला प्रतिसाद यामधील वेळ. तुम्ही हे महत्त्वाचे म्हणून चिन्हांकित केल्यास आम्ही जलद इंडेक्सर्ससाठी ऑप्टिमाइझ करू. - **सर्वात कमी किंमत**: प्रति क्वेरी भरलेली रक्कम. तुम्ही हे महत्त्वाचे म्हणून चिन्हांकित केल्यास आम्ही कमी खर्चिक इंडेक्सर्ससाठी ऑप्टिमाइझ करू. - **डेटा फ्रेशनेस**: तुम्ही क्वेरी करत असलेल्या सबग्राफसाठी इंडेक्सरने किती अलीकडील ब्लॉकवर प्रक्रिया केली आहे. जर तुम्ही हे महत्त्वाचे म्हणून चिन्हांकित केले तर आम्ही सर्वात नवीन डेटासह अनुक्रमणिका शोधण्यासाठी ऑप्टिमाइझ करू. - **आर्थिक सुरक्षा**: इंडेक्सरने तुमच्या क्वेरीला चुकीचा प्रतिसाद दिल्यास GRT ची रक्कम गमावू शकते. तुम्ही हे महत्त्वाचे म्हणून चिन्हांकित केल्यास आम्ही मोठ्या भागभांडवलांसह इंडेक्सर्ससाठी ऑप्टिमाइझ करू. 4. **बजेट** अंतर्गत, तुम्ही प्रति क्वेरी कमाल किंमत अपडेट करू शकाल. 
लक्षात घ्या की आमच्याकडे त्यासाठी डायनॅमिक सेटिंग आहे जी व्हॉल्यूम डिस्काउंटिंग अल्गोरिदमवर आधारित आहे. **तुम्हाला विशिष्ट समस्या येत नाही तोपर्यंत आम्ही डीफॉल्ट सेटिंग्ज वापरण्याची जोरदार शिफारस करतो.** अन्यथा, तुम्ही "सानुकूल कमाल बजेट सेट करा" अंतर्गत ते अपडेट करू शकता. या पृष्ठावर तुम्ही भिन्न KPI (GRT आणि USD मध्ये) देखील पाहू शकता: - - प्रति क्वेरी सरासरी खर्च - - कमाल किमतीपेक्षा अयशस्वी क्वेरी - - सर्वात महाग प्रश्न + - Average cost per query + - Failed queries over max price + - Most expensive query diff --git a/website/pages/mr/querying/querying-best-practices.mdx b/website/pages/mr/querying/querying-best-practices.mdx index 625a1e28fb35..5932a55da27e 100644 --- a/website/pages/mr/querying/querying-best-practices.mdx +++ b/website/pages/mr/querying/querying-best-practices.mdx @@ -1,8 +1,8 @@ --- -title: सर्वोत्तम पद्धतींची चौकशी करणे +title: Querying Best Practices --- -आलेख ब्लॉकचेन वरून डेटा क्वेरी करण्यासाठी विकेंद्रित मार्ग प्रदान करतो. +The Graph provides a decentralized way to query data from blockchains. ग्राफ नेटवर्कचा डेटा GraphQL API द्वारे उघड केला जातो, ज्यामुळे GraphQL भाषेसह डेटाची क्वेरी करणे सोपे होते. @@ -10,21 +10,16 @@ title: सर्वोत्तम पद्धतींची चौकशी --- -## GraphQL API क्वेरी करत आहे +## Querying a GraphQL API -### ग्राफक्यूएल क्वेरीचे शरीरशास्त्र +### The anatomy of a GraphQL query REST API च्या विपरीत, GraphQL API एका स्कीमावर तयार केले जाते जे कोणत्या क्वेरी पूर्ण केल्या जाऊ शकतात हे परिभाषित करते. -उदाहरणार्थ, `टोकन` क्वेरी वापरून टोकन मिळवण्यासाठीची क्वेरी खालीलप्रमाणे दिसेल: +For example, a query to get a token using the `token` query will look as follows: ```graphql -qक्वेरी GetToken($id: ID!) { - टोकन (id: $id) { - आयडी - मालक - } -} uery GetToken($id: ID!) { +query GetToken($id: ID!) { token(id: $id) { id owner @@ -32,7 +27,7 @@ qक्वेरी GetToken($id: ID!) { } ``` -जे खालील अंदाजे JSON प्रतिसाद देईल (_योग्य `$id` व्हेरिएबल व्हॅल्यू_ पास करताना): +which will return the following predictable JSON response (_when passing the proper `$id` variable value_): ```json { @@ -43,9 +38,9 @@ qक्वेरी GetToken($id: ID!) { } ``` -GraphQL क्वेरी GraphQL भाषा वापरतात, जी [विशिष्टता](https://spec.graphql.org/) वर परिभाषित केली जाते. +GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). -वरील `GetToken` क्वेरी अनेक भाषा भागांनी बनलेली आहे (खाली `[...]` प्लेसहोल्डरने बदलली आहे): +The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): ```graphql query [operationName]([variableName]: [variableType]) { @@ -59,18 +54,18 @@ query [operationName]([variableName]: [variableType]) { सिंटॅक्टिक करा आणि करू नका याची यादी लांब असताना, ग्राफक्यूएल क्वेरी लिहिताना लक्षात ठेवण्याचे आवश्यक नियम येथे आहेत: -- प्रत्येक `queryName` प्रत्येक ऑपरेशनसाठी फक्त एकदाच वापरले जाणे आवश्यक आहे. -- प्रत्येक `फील्ड` निवडीमध्ये फक्त एकदाच वापरला जाणे आवश्यक आहे (आम्ही `टोकन` अंतर्गत दोनदा `आयडी` क्वेरी करू शकत नाही) +- Each `queryName` must only be used once per operation. +- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) - काही `फील्ड` किंवा क्वेरी (जसे की `टोकन्स`) जटिल प्रकार देतात ज्यांना उप-फील्डची निवड आवश्यक असते. अपेक्षित असताना निवड प्रदान न केल्याने (किंवा अपेक्षित नसताना एक प्रदान करणे - उदाहरणार्थ, `id` वर) त्रुटी वाढवेल. फील्ड प्रकार जाणून घेण्यासाठी, कृपया [द ग्राफ एक्सप्लोरर](/network/explorer) पहा. -- आर्ग्युमेंटला नियुक्त केलेले कोणतेही व्हेरिएबल त्याच्या प्रकाराशी जुळले पाहिजे. 
-- व्हेरिएबल्सच्या दिलेल्या सूचीमध्ये, त्यापैकी प्रत्येक अद्वितीय असणे आवश्यक आहे. -- सर्व परिभाषित व्हेरिएबल्स वापरणे आवश्यक आहे. +- Any variable assigned to an argument must match its type. +- In a given list of variables, each of them must be unique. +- All defined variables must be used. -वरील नियमांचे पालन करण्यात अयशस्वी झाल्यास ग्राफ API मधील त्रुटीसह समाप्त होईल. +Failing to follow the above rules will end with an error from the Graph API. -कोड उदाहरणांसह नियमांच्या संपूर्ण सूचीसाठी, कृपया आमचे GraphQL प्रमाणीकरण मार्गदर्शक पहा. +For a complete list of rules with code examples, please look at our GraphQL Validations guide. -### GraphQL API वर क्वेरी पाठवत आहे +### Sending a query to a GraphQL API GraphQL is a language and set of conventions that transport over HTTP. @@ -78,10 +73,10 @@ It means that you can query a GraphQL API using standard `fetch` (natively or vi However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: -- क्रॉस-चेक्रॉस-चेन सबग्राफ हँडलिंग: एकाच क्वेरीमध्ये एकाधिक सबग्राफमधून क्वेरी करणेन -- [स्वयंचलित ब्लॉक ट्रॅकिंग](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [स्वयंचलित पृष्ठांकन](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- पूर्ण टाईप केलेला निकाल +- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query +- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Fully typed result Here's how to query The Graph with `graph-client`: @@ -113,9 +108,9 @@ Now that we covered the basic rules of GraphQL queries syntax, let's now look at --- -## ग्राफक्यूएल क्वेरी लिहित आहे +## Writing GraphQL queries -### नेहमी स्थिर प्रश्न लिहा +### Always write static queries A common (bad) practice is to dynamically build query strings as follows: @@ -135,10 +130,10 @@ query GetToken { While the above snippet produces a valid GraphQL query, **it has many drawbacks**: -- त्यामुळे संपूर्ण क्वेरी **समजणे कठीण** होते -- विकसक **स्ट्रिंग इंटरपोलेशन सुरक्षितपणे निर्जंतुक करण्यासाठी जबाबदार आहेत** +- it makes it **harder to understand** the query as a whole +- developers are **responsible for safely sanitizing the string interpolation** - विनंती पॅरामीटर्सचा भाग म्हणून व्हेरिएबल्सची मूल्ये पाठवत नाही **सर्व्हर-साइडवर संभाव्य कॅशिंग प्रतिबंधित करा** -- ते **साधनांना क्वेरीचे स्थिर विश्लेषण करण्यापासून प्रतिबंधित करते** (उदा: लिंटर, किंवा जनरेशन टूल्स टाइप करा) +- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) For this reason, it is recommended to always write queries as static strings: @@ -164,10 +159,10 @@ const result = await execute(query, { Doing so brings **many advantages**: -- **वाचणे आणि देखरेख करणे सोपे** क्वेरी -- GraphQL **सर्व्हर व्हेरिएबल्स सॅनिटायझेशन हाताळतो** +- **Easy to read and maintain** queries +- The GraphQL **server handles variables sanitization** - **Variables can be cached** at server-level -- **प्रश्नांचे साधनांद्वारे स्थिर विश्लेषण केले जाऊ शकते** (याबद्दल पुढील विभागांमध्ये अधिक) +- **Queries can be statically analyzed by tools** (more on this in the following sections) **Note: How to include fields conditionally in static queries** @@ -198,7 +193,7 @@ const result = await execute(query, 
{ Note: The opposite directive is `@skip(if: ...)`. -### कामगिरी टिपा +### Performance tips **"Ask for what you want"** @@ -286,7 +281,7 @@ const { result: { tokens, counters } } = execute(query) This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. -### GraphQL फ्रॅगमेंट्सचा फायदा घ्या +### Leverage GraphQL Fragments A helpful feature to write GraphQL queries is GraphQL Fragment. @@ -312,7 +307,7 @@ query { Such repeated fields (`id`, `active`, `status`) bring many issues: -- अधिक विस्तृत प्रश्नांसाठी वाचणे कठीण +- harder to read for more extensive queries - प्रश्नांवर आधारित TypeScript प्रकार व्युत्पन्न करणारी साधने वापरताना (_त्यावर शेवटच्या विभागात अधिक_), `newDelegate` आणि `oldDelegate` या दोन वेगळ्या इनलाइनचा परिणाम होईल इंटरफेस. A refactored version of the query would be the following: @@ -343,7 +338,7 @@ Using GraphQL `fragment` will improve readability (especially at scale) but also When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). -### GraphQL फ्रॅगमेंट काय करावे आणि करू नये +### GraphQL Fragment do's and don'ts **Fragment base must be a type** @@ -394,8 +389,8 @@ For most use-case, defining one fragment per type (in the case of repeated field Here is a rule of thumb for using Fragment: -- जेव्हा एकाच प्रकारची फील्ड क्वेरीमध्ये पुनरावृत्ती होते, तेव्हा त्यांना एका तुकड्यात गटबद्ध करा -- जेव्हा समान परंतु समान फील्डची पुनरावृत्ती होत नाही, तेव्हा अनेक तुकडे तयार करा, उदा: +- when fields of the same type are repeated in a query, group them in a Fragment +- when similar but not the same fields are repeated, create multiple fragments, ex: ```graphql # base fragment (mostly used in listing) @@ -418,15 +413,15 @@ fragment VoteWithPoll on Vote { --- -## आवश्यक साधने +## The essential tools -### ग्राफक्यूएल वेब-आधारित एक्सप्लोरर +### GraphQL web-based explorers Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). -### ग्राफक्यूएल लिंटिंग +### GraphQL Linting In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. @@ -437,22 +432,22 @@ In order to keep up with the mentioned above best practices and syntactic rules, [Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: - `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type? -- `@graphql-eslint/no-unused variables`: दिलेले व्हेरिएबल वापरलेले नसावे का? -- आणि अधिक! +- `@graphql-eslint/no-unused variables`: should a given variable stay unused? +- and more! This will allow you to **catch errors without even testing queries** on the playground or running them in production! 
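As a sketch of what that setup can look like, the `.eslintrc.js` override below enables the `operations-recommended` config for `.graphql` files. Treat the exact keys as assumptions: option names and file globs can differ between `@graphql-eslint/eslint-plugin` versions, so check the plugin's README for the version you install.

```javascript
// .eslintrc.js — illustrative sketch; verify the options against the graphql-eslint README.
module.exports = {
  overrides: [
    {
      files: ['*.graphql'],
      parser: '@graphql-eslint/eslint-plugin',
      plugins: ['@graphql-eslint'],
      extends: ['plugin:@graphql-eslint/operations-recommended'],
      parserOptions: {
        // Point the plugin at your subgraph schema so operations are validated against it
        schema: './schema.graphql',
      },
    },
  ],
}
```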
-### IDE प्लगइन +### IDE plugins **VSCode and GraphQL** The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: -- वाक्यरचना हायलाइटिंग -- स्वयंपूर्ण सूचना -- स्कीमा विरुद्ध प्रमाणीकरण -- स्निपेट्स -- तुकड्या आणि इनपुट प्रकारांसाठी परिभाषा वर जा +- syntax highlighting +- autocomplete suggestions +- validation against schema +- snippets +- go to definition for fragments and input types If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. @@ -460,9 +455,9 @@ If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketp The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: -- वाक्यरचना हायलाइटिंग -- स्वयंपूर्ण सूचना -- स्कीमा विरुद्ध प्रमाणीकरण -- स्निपेट्स +- syntax highlighting +- autocomplete suggestions +- validation against schema +- snippets More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. diff --git a/website/pages/mr/querying/querying-from-an-application.mdx b/website/pages/mr/querying/querying-from-an-application.mdx index 31466e709182..406fdbce5b38 100644 --- a/website/pages/mr/querying/querying-from-an-application.mdx +++ b/website/pages/mr/querying/querying-from-an-application.mdx @@ -1,10 +1,10 @@ --- -title: अर्जावरून क्वेरी करत आहे +title: Querying from an Application --- एकदा सबग्राफ स्टुडिओ किंवा ग्राफ एक्सप्लोररमध्ये सबग्राफ उपयोजित केल्यावर, तुम्हाला तुमच्या ग्राफक्यूएल API साठी एंडपॉइंट दिला जाईल जो असे काहीतरी दिसला पाहिजे: -**सबग्राफ स्टुडिओ (चाचणी एंडपॉइंट)** +**Subgraph Studio (testing endpoint)** ```sh Queries (HTTP) @@ -22,16 +22,16 @@ GraphQL एंडपॉइंट वापरून, तुम्ही सब इकोसिस्टममधील आणखी काही लोकप्रिय ग्राफक्यूएल क्लायंट आणि ते कसे वापरायचे ते येथे आहेत: -## ग्राफक्यूएल क्लायंट +## GraphQL clients -### आलेख क्लायंट +### Graph client -ग्राफ स्वतःचे GraphQL क्लायंट, `graph-client` प्रदान करत आहे जे अद्वितीय वैशिष्ट्यांना समर्थन देते जसे की: +The Graph provides its own GraphQL client, `graph-client`, that supports unique features such as: -- क्रॉस-चेक्रॉस-चेन सबग्राफ हँडलिंग: एकाच क्वेरीमध्ये एकाधिक सबग्राफमधून क्वेरी करणेन -- [स्वयंचलित ब्लॉक ट्रॅकिंग](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [स्वयंचलित पृष्ठांकन](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- पूर्ण टाईप केलेला निकाल +- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query +- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Fully typed result Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. 
@@ -41,9 +41,7 @@ To get started, make sure to install The Graph Client CLI in your project: ```sh yarn add -D @graphprotocol/client-cli -# or, with NPM: यार्न अॅड -D @graphprotocol/client-cli -# किंवा, NPM सह: -npm स्थापित करा --save-dev @graphprotocol/client-cli +# or, with NPM: npm install --save-dev @graphprotocol/client-cli ``` @@ -142,17 +140,17 @@ export default App However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. -### अपोलो क्लायंट +### Apollo client [Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: -- प्रगत त्रुटी हाताळणी -- पृष्ठांकन -- डेटा प्रीफेचिंग -- आशावादी UI -- स्थानिक राज्य व्यवस्थापन +- advanced error handling +- pagination +- data prefetching +- optimistic UI +- local state management Let's look at how to fetch data from a subgraph with Apollo client in a web project. @@ -162,7 +160,7 @@ First, install `@apollo/client` and `graphql`: npm install @apollo/client graphql ``` -त्यानंतर तुम्ही खालील कोडसह API ची क्वेरी करू शकता: +Then you can query the API with the following code: ```javascript import { ApolloClient, InMemoryCache, gql } from '@apollo/client' @@ -230,10 +228,10 @@ client Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: -- लवचिक कॅशे प्रणाली -- एक्स्टेंसिबल डिझाइन (त्याच्या वर नवीन क्षमता जोडणे सोपे करणे) +- Flexible cache system +- Extensible design (easing adding new capabilities on top of it) - Lightweight bundle (~5x lighter than Apollo Client) -- फाइल अपलोड आणि ऑफलाइन मोडसाठी समर्थन +- Support for file uploads and offline mode Let's look at how to fetch data from a subgraph with URQL in a web project. @@ -243,7 +241,7 @@ First, install `urql` and `graphql`: npm install urql graphql ``` -त्यानंतर तुम्ही खालील कोडसह API ची क्वेरी करू शकता: +Then you can query the API with the following code: ```javascript import { createClient } from 'urql' diff --git a/website/pages/mr/querying/querying-the-graph.mdx b/website/pages/mr/querying/querying-the-graph.mdx index 060d722ab730..741141886dc8 100644 --- a/website/pages/mr/querying/querying-the-graph.mdx +++ b/website/pages/mr/querying/querying-the-graph.mdx @@ -1,5 +1,5 @@ --- -title: आलेख क्वेरी करत आहे +title: Querying The Graph --- सबग्राफ उपयोजित केल्यावर, [GraphQL](https://github.com/graphql/graphiql) इंटरफेस उघडण्यासाठी [Graph Explorer](https://thegraph.com/explorer) ला भेट द्या जिथे तुम्ही क्वेरी जारी करून आणि स्कीमा पाहून सबग्राफसाठी उपयोजित GraphQL API एक्सप्लोर करू शकता. @@ -8,7 +8,7 @@ title: आलेख क्वेरी करत आहे ## उदाहरण -ही क्वेरी आमच्या मॅपिंगने तयार केलेल्या सर्व काउंटरची सूची देते. आम्ही फक्त एक तयार केल्यामुळे, परिणामामध्ये फक्त आमचे `डीफॉल्ट-काउंटर` असेल: +This query lists all the counters our mapping has created. 
Since we only create one, the result will only contain our one `default-counter`: ```graphql { @@ -19,11 +19,11 @@ title: आलेख क्वेरी करत आहे } ``` -## ग्राफ एक्सप्लोरर वापरणे +## Using The Graph Explorer विकेंद्रीकृत ग्राफ एक्सप्लोररवर प्रकाशित केलेल्या प्रत्येक सबग्राफमध्ये एक अद्वितीय क्वेरी URL असते जी तुम्ही सबग्राफ तपशील पृष्ठावर नेव्हिगेट करून आणि वरच्या उजव्या कोपर्यात असलेल्या "क्वेरी" बटणावर क्लिक करून शोधू शकता. हे एक बाजूचे उपखंड उघडेल जे तुम्हाला सबग्राफची अनन्य क्वेरी URL तसेच ती कशी क्वेरी करावी याबद्दल काही सूचना देईल. -![क्वेरी सबग्राफ उपखंड](/img/query-subgraph-pane.png) +![Query Subgraph Pane](/img/query-subgraph-pane.png) तुमच्या लक्षात आल्याप्रमाणे, या क्वेरी URL ने एक अद्वितीय API की वापरणे आवश्यक आहे. तुम्ही "API की" विभागातील [सबग्राफ स्टुडिओ](https://thegraph.com/studio) मध्ये तुमच्या API की तयार आणि व्यवस्थापित करू शकता. सबग्राफ स्टुडिओ कसा वापरायचा याबद्दल अधिक जाणून घ्या [येथे](/deploying/subgraph-studio). diff --git a/website/pages/mr/querying/querying-the-hosted-service.mdx b/website/pages/mr/querying/querying-the-hosted-service.mdx index dee6190dd6fd..51735f3b2ba9 100644 --- a/website/pages/mr/querying/querying-the-hosted-service.mdx +++ b/website/pages/mr/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: होस्ट केलेल्या सेवेची चौकशी करत आहे --- -सबग्राफ उपयोजित करून, [ ला भेट द्या होस्ट केलेली सेवा](https://thegraph.com/hosted-service/) [GrafiQL](https://github.com/graphql/graphiql) इंटरफेस उघडण्यासाठी जिथे तुम्ही क्वेरी जारी करून आणि पाहण्याद्वारे सबग्राफसाठी उपयोजित GraphQL API एक्सप्लोर करू शकता योजना. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. खाली एक उदाहरण दिले आहे, परंतु कृपया सबग्राफच्या घटकांची चौकशी कशी करायची याच्या संपूर्ण संदर्भासाठी [Query API](/querying/graphql-api) पहा. @@ -19,9 +19,9 @@ title: होस्ट केलेल्या सेवेची चौकश } ``` -## होस्ट केलेली सेवा वापरणे +## Using the hosted service -ग्राफ एक्सप्लोरर आणि त्याचे GraphQL खेळाचे मैदान हे होस्ट केलेल्या सेवेवर उपयोजित सबग्राफ एक्सप्लोर करण्याचा आणि क्वेरी करण्याचा एक उपयुक्त मार्ग आहे. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. काही मुख्य वैशिष्ट्ये खाली तपशीलवार आहेत: diff --git a/website/pages/mr/querying/querying-with-python.mdx b/website/pages/mr/querying/querying-with-python.mdx new file mode 100644 index 000000000000..480343870613 --- /dev/null +++ b/website/pages/mr/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## प्रारंभ करणे + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. 
The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/mr/quick-start.mdx b/website/pages/mr/quick-start.mdx new file mode 100644 index 000000000000..98bcd8755b46 --- /dev/null +++ b/website/pages/mr/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: क्विक स्टार्ट +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +हे मार्गदर्शक तुमच्याकडे आहे असे गृहीत धरून लिहिले आहे: + +- तुमच्या पसंतीच्या नेटवर्कवर एक स्मार्ट करार पत्ता +- तुमचा सबग्राफ क्युरेट करण्यासाठी GRT +- एक क्रिप्टो वॉलेट + +## 1. सबग्राफ स्टुडिओवर सबग्राफ तयार करा + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +एकदा कनेक्ट झाल्यानंतर, तुम्ही "सबग्राफ तयार करा" वर क्लिक करून सुरुवात करू शकता. तुमच्या पसंतीचे नेटवर्क निवडा आणि सुरू ठेवा क्लिक करा. + +## 2. आलेख CLI स्थापित करा + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +तुमच्या स्थानिक मशीनवर, खालीलपैकी एक कमांड चालवा: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. तुमचा सबग्राफ सुरू करा + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +तुम्ही तुमचा सबग्राफ सुरू करता तेव्हा, CLI टूल तुम्हाला खालील माहितीसाठी विचारेल: + +- प्रोटोकॉल: तुमचा सबग्राफ 4 वरून डेटा अनुक्रमित करेल असा प्रोटोकॉल निवडा +- सबग्राफ स्लग: तुमच्या सबग्राफसाठी नाव तयार करा. तुमचा सबग्राफ स्लग तुमच्या सबग्राफसाठी एक ओळखकर्ता आहे. 
+- उपग्राफ तयार करण्यासाठी निर्देशिका: तुमची स्थानिक निर्देशिका निवडा +- इथरियम नेटवर्क (पर्यायी): तुमचा सबग्राफ कोणत्या EVM-सुसंगत नेटवर्कवरून डेटा अनुक्रमित करेल ते तुम्हाला निर्दिष्ट करावे लागेल +- कॉन्ट्रॅक्ट अॅड्रेस: ​​तुम्ही ज्यावरून डेटा क्वेरी करू इच्छिता तो स्मार्ट कॉन्ट्रॅक्ट अॅड्रेस शोधा +- ABI: ABI ऑटोपॉप्युलेट नसल्यास, तुम्हाला JSON फाइल म्हणून व्यक्तिचलितपणे इनपुट करावे लागेल +- स्टार्ट ब्लॉक: तुमचा सबग्राफ ब्लॉकचेन डेटा इंडेक्स करत असताना वेळ वाचवण्यासाठी तुम्ही स्टार्ट ब्लॉक इनपुट करा असे सुचवले जाते. तुमचा करार जिथे तैनात करण्यात आला होता तो ब्लॉक शोधून तुम्ही स्टार्ट ब्लॉक शोधू शकता. +- कराराचे नाव: तुमच्या कराराचे नाव प्रविष्ट करा +- इंडेक्स कॉन्ट्रॅक्ट इव्हेंट्स घटक म्हणून: असे सुचवले जाते की तुम्ही हे सत्य वर सेट करा कारण ते प्रत्येक उत्सर्जित इव्हेंटसाठी तुमच्या सबग्राफमध्ये स्वयंचलितपणे मॅपिंग जोडेल +- दुसरा करार जोडा(पर्यायी): तुम्ही दुसरा करार जोडू शकता + +खालील आदेश चालवून विद्यमान करारातून तुमचा सबग्राफ सुरू करा: + +```sh +graph init --studio +``` + +तुमचा सबग्राफ सुरू करताना काय अपेक्षा करावी याच्या उदाहरणासाठी खालील स्क्रीनशॉट पहा: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. तुमचा सबग्राफ लिहा + +मागील कमांड एक स्कॅफोल्ड सबग्राफ तयार करतात ज्याचा वापर तुम्ही तुमचा सबग्राफ तयार करण्यासाठी प्रारंभिक बिंदू म्हणून करू शकता. सबग्राफमध्ये बदल करताना, तुम्ही प्रामुख्याने तीन फाइल्ससह कार्य कराल: + +- मॅनिफेस्ट (subgraph.yaml) - मॅनिफेस्ट हे परिभाषित करते की तुमचे सबग्राफ कोणते डेटास्रोत अनुक्रमित करतील. +- स्कीमा (schema.graphql) - ग्राफक्यूएल स्कीमा तुम्हाला सबग्राफमधून कोणता डेटा मिळवायचा आहे ते परिभाषित करते. +- असेंबलीस्क्रिप्ट मॅपिंग (mapping.ts) - हा असा कोड आहे जो तुमच्या डेटास्रोतमधील डेटाचे स्कीमामध्ये परिभाषित केलेल्या घटकांमध्ये भाषांतर करतो. + +तुमचा सबग्राफ कसा लिहायचा याबद्दल अधिक माहितीसाठी, [एक सबग्राफ तयार करणे](/developing/creating-a-subgraph) पहा. + +## 5. सबग्राफ स्टुडिओमध्ये तैनात करा + +तुमचा सबग्राफ लिहिल्यानंतर, खालील आदेश चालवा: + +```sh +$ आलेख कोडजेन +$ आलेख बिल्ड +``` + +- तुमचा सबग्राफ प्रमाणित करा आणि उपयोजित करा. उपयोजन की सबग्राफ स्टुडिओमधील सबग्राफ पृष्ठावर आढळू शकते. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. तुमच्या सबग्राफची चाचणी घ्या + +तुम्ही खेळाच्या मैदानाच्या विभागात नमुना क्वेरी करून तुमच्या सबग्राफची चाचणी घेऊ शकता. + +तुमच्या सबग्राफमध्ये काही त्रुटी असल्यास नोंदी तुम्हाला सांगतील. ऑपरेशनल सबग्राफचे लॉग यासारखे दिसतील: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
ग्राफच्या विकेंद्रीकृत नेटवर्कवर तुमचा सबग्राफ प्रकाशित करा + +एकदा तुमचा सबग्राफ सबग्राफ स्टुडिओमध्ये तैनात केला गेला की, तुम्ही त्याची चाचणी घेतली आणि उत्पादनात ठेवण्यास तयार असाल, त्यानंतर तुम्ही ते विकेंद्रित नेटवर्कवर प्रकाशित करू शकता. + +सबग्राफ स्टुडिओमध्ये, तुमच्या सबग्राफवर क्लिक करा. सबग्राफच्या पृष्ठावर, आपण शीर्षस्थानी उजवीकडे प्रकाशित बटणावर क्लिक करण्यास सक्षम असाल. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +तुम्‍ही तुमच्‍या सबग्राफची क्‍वेरी करण्‍यापूर्वी, इंडेक्सर्सना त्यावर क्‍वेरी देणे सुरू करणे आवश्‍यक आहे. ही प्रक्रिया सुव्यवस्थित करण्यासाठी, तुम्ही GRT वापरून तुमचा स्वतःचा सबग्राफ क्युरेट करू शकता. + +लेखनाच्या वेळी, तो अनुक्रमित केला गेला आहे आणि शक्य तितक्या लवकर क्वेरीसाठी उपलब्ध आहे याची खात्री करण्यासाठी तुम्ही तुमचा स्वतःचा सबग्राफ 10,000 GRT सह क्युरेट करा अशी शिफारस केली जाते. + +गॅसच्या खर्चावर बचत करण्यासाठी, जेव्हा तुम्ही तुमचा सबग्राफ The Graph च्या विकेंद्रित नेटवर्कवर प्रकाशित करता तेव्हा हे बटण निवडून तुम्ही प्रकाशित केलेल्या व्यवहारात तुम्ही तुमचा सबग्राफ क्युरेट करू शकता: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. तुमचा सबग्राफ क्वेरी करा + +आता, तुम्ही तुमच्या सबग्राफच्या क्वेरी URL वर GraphQL क्वेरी पाठवून तुमच्या सबग्राफची क्वेरी करू शकता, जी तुम्ही क्वेरी बटणावर क्लिक करून शोधू शकता. + +तुम्‍ही तुमच्‍या dapp वरून क्‍वेरी करू शकता जर तुमच्‍याकडे तुमच्‍या API की नसल्‍यास मोफत, रेट-मर्यादित तात्पुरती क्‍वेरी URL द्वारे जे डेव्हलपमेंट आणि स्‍टेजिंगसाठी वापरले जाऊ शकते. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/mr/release-notes/assemblyscript-migration-guide.mdx b/website/pages/mr/release-notes/assemblyscript-migration-guide.mdx index aefa57a2c1ec..a170ebec8cda 100644 --- a/website/pages/mr/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/mr/release-notes/assemblyscript-migration-guide.mdx @@ -1,5 +1,5 @@ --- -title: असेंबलीस्क्रिप्ट स्थलांतर मार्गदर्शक +title: AssemblyScript Migration Guide --- आत्तापर्यंत, सबग्राफ [असेंबलीस्क्रिप्टच्या पहिल्या आवृत्त्यांपैकी एक वापरत आहेत](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). शेवटी आम्ही [नवीन उपलब्ध](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) साठी समर्थन जोडले आहे! 🎉 @@ -10,41 +10,41 @@ title: असेंबलीस्क्रिप्ट स्थलांतर > टीप: `0.24.0` नुसार, सबग्राफ मॅनिफेस्टमध्ये निर्दिष्ट केलेल्या `apiVersion` वर अवलंबून `graph-node` दोन्ही आवृत्त्यांना समर्थन देऊ शकते. 
-## वैशिष्ट्य +## Features -### नवीन कार्यक्षमता +### New functionality - `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) - New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) च्या x उदाहरणासाठी समर्थन जोडले -- GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) च्या x उदाहरणासाठी समर्थन जोडले -- `अॅरे#flat` जोडले ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- `Number#toString` ([v0 वर `radix` वितर्क लागू केले. 10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- फ्लोटिंग पॉइंट लिटरल्समध्ये विभाजकांसाठी समर्थन जोडले ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- प्रथम श्रेणी कार्यांसाठी समर्थन जोडले ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0) -- बिल्टइन जोडा: `i32/i64/f32/f64.add/sub/mul` ([ v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) लागू करा -- टेम्पलेट शाब्दिक स्ट्रिंगसाठी जोडलेले समर्थन ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- `encodeURI(घटक)` आणि `decodeURI(घटक)` जोडा ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- `toString`, `toDateString` आणि `toTimeString` ला `तारीख` ([v0.18.29](https://github.com/) मध्ये जोडा असेंबलीस्क्रिप्ट/assemblyscript/releases/tag/v0.18.29)) -- `तारीख` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) साठी `toUTCSstring` जोडा -- `nonnull/NonNullable` अंगभूत प्रकार जोडा ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Add `toString`, `toDateString` and 
`toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### ऑप्टिमायझेशन +### Optimizations -- `गणित` फंक्शन्स जसे की `exp`, `exp2`, `log`, `log2` आणि ` pow` हे वेगवान प्रकारांनी बदलले गेले आहे ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- थोडेसे ऑप्टिमाइझ करा `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Std नकाशा आणि सेट ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) मध्ये अधिक फील्ड प्रवेश कॅश करा -- `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) मध्ये दोनच्या शक्तींसाठी ऑप्टिमाइझ करा +- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### इतर +### Other -- अ‍ॅरे लिटरलचा प्रकार आता त्याच्या सामग्रीवरून अनुमान लावला जाऊ शकतो ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Stdlib युनिकोड 13.0.0 वर अपडेट केले ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## अपग्रेड कसे करावे? +## How to upgrade? -1. तुमची मॅपिंग `apiVersion` `subgraph.yaml` मधील `0.0.6` वर बदला: +1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: ```yaml ... @@ -56,7 +56,7 @@ dataSources: ... ``` -2. तुम्ही वापरत असलेल्या `graph-cli` वर `नवीनतम` आवृत्ती चालवून अपडेट करा: +2. Update the `graph-cli` you're using to the `latest` version by running: ```bash # if you have it globally installed @@ -66,20 +66,20 @@ npm install --global @graphprotocol/graph-cli@latest npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. `graph-ts` साठी तेच करा, परंतु जागतिक स्तरावर स्थापित करण्याऐवजी, ते तुमच्या मुख्य अवलंबनांमध्ये जतन करा: +3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. भाषेतील बदलांचे निराकरण करण्यासाठी उर्वरित मार्गदर्शकाचे अनुसरण करा. -5. `कोडजेन` चालवा आणि पुन्हा `उपयोजित करा`. +4. Follow the rest of the guide to fix the language breaking changes. +5. Run `codegen` and `deploy` again. -## ब्रेकिंग बदल +## Breaking changes -### शून्यता +### Nullability -असेंबलीस्क्रिप्टच्या जुन्या आवृत्तीवर, तुम्ही असा कोड तयार करू शकता: +On the older version of AssemblyScript, you could create code like this: ```typescript function load(): Value | null { ... 
} @@ -88,7 +88,7 @@ let maybeValue = load(); maybeValue.aMethod(); ``` -तथापि, नवीन आवृत्तीवर, कारण मूल्य रद्द करण्यायोग्य आहे, यासाठी आपण हे तपासणे आवश्यक आहे, जसे की: +However on the newer version, because the value is nullable, it requires you to check, like this: ```typescript let maybeValue = load() @@ -98,7 +98,7 @@ if (maybeValue) { } ``` -किंवा हे असे सक्ती करा: +Or force it like this: ```typescript let maybeValue = load()! // breaks in runtime if value is null @@ -108,9 +108,9 @@ maybeValue.aMethod() तुम्हाला कोणती निवड करायची याची खात्री नसल्यास, आम्ही नेहमी सुरक्षित आवृत्ती वापरण्याची शिफारस करतो. जर मूल्य अस्तित्वात नसेल तर तुम्ही तुमच्या सबग्राफ हँडलरमध्ये रिटर्नसह फक्त लवकर इफ स्टेटमेंट करू इच्छित असाल. -### व्हेरिएबल शॅडोइंग +### Variable Shadowing -तुम्ही [व्हेरिएबल शॅडोइंग](https://en.wikipedia.org/wiki/Variable_shadowing) करण्यापूर्वी आणि यासारखा कोड कार्य करेल: +Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: ```typescript let a = 10 @@ -118,7 +118,7 @@ let b = 20 let a = a + b ``` -तथापि आता हे आता शक्य नाही, आणि कंपाइलर ही त्रुटी परत करतो: +However now this isn't possible anymore, and the compiler returns this error: ```typescript त्रुटी TS2451: ब्लॉक-स्कोप केलेले व्हेरिएबल 'a' पुन्हा घोषित करू शकत नाही @@ -128,11 +128,11 @@ let a = a + b in assembly/index.ts(4,3) ``` -जर तुमच्याकडे व्हेरिएबल शेडिंग असेल तर तुम्हाला तुमच्या डुप्लिकेट व्हेरिएबल्सचे नाव बदलणे आवश्यक आहे. +You'll need to rename your duplicate variables if you had variable shadowing. -### शून्य तुलना +### Null Comparisons -तुमच्या सबग्राफवर अपग्रेड करून, काहीवेळा तुम्हाला यासारख्या एरर मिळू शकतात: +By doing the upgrade on your subgraph, sometimes you might get errors like these: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -सोडवण्यासाठी तुम्ही फक्त `if` स्टेटमेंट बदलू शकता: +To solve you can simply change the `if` statement to something like this: ```typescript if (!decimals) { @@ -151,21 +151,21 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i if (decimals === null) { ``` -तुम्ही == ऐवजी != करत असाल तर तेच लागू होते. +The same applies if you're doing != instead of ==. 
-### कास्टिंग +### Casting -आधी कास्ट करण्याचा सामान्य मार्ग म्हणजे फक्त `म्हणून` शब्द वापरणे, जसे की: +The common way to do casting before was to just use the `as` keyword, like this: ```typescript let byteArray = new ByteArray(10) let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` -तथापि, हे केवळ दोन परिस्थितींमध्ये कार्य करते: +However this only works in two scenarios: - Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- वर्ग वारसावर अपकास्टिंग (उपवर्ग → सुपरक्लास) +- Upcasting on class inheritance (subclass → superclass) उदाहरणे: @@ -184,10 +184,10 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -तुम्ही कास्ट करू शकता अशा दोन परिस्थिती आहेत, परंतु `म्हणून`/`var` **सुरक्षित नाही** वापरणे: +There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: -- वर्ग वारसा कमी करणे (सुपरक्लास → उपवर्ग) -- दोन प्रकारांमध्ये जे सुपरक्लास शेअर करतात +- Downcasting on class inheritance (superclass → subclass) +- Between two types that share a superclass ```typescript // downcasting on class inheritance @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -वर्ग वारसा कमी करणे (सुपरक्लास → उपवर्ग): +For those cases, you can use the `changetype` function: ```typescript // downcasting on class inheritance @@ -240,16 +240,16 @@ let newBalance = new AccountBalance(balanceId) शून्यता प्रकरणासाठी आम्ही [शून्यता तपासणी वैशिष्ट्य](https://www.assemblyscript.org/basics.html#nullability-checks) पाहण्याची शिफारस करतो, ते तुमचा कोड अधिक क्लीनर बनवेल 🙂 -तसेच कास्टिंग सुलभ करण्यासाठी आम्ही काही प्रकारांमध्ये आणखी काही स्थिर पद्धती जोडल्या आहेत, त्या आहेत: +Also we've added a few more static methods in some types to ease casting, they are: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### मालमत्ता प्रवेशासह शून्यता तपासा +### Nullability check with property access -[शून्यता तपासणी वैशिष्ट्य](https://www.assemblyscript.org/basics.html#nullability-checks) वापरण्यासाठी तुम्ही एकतर `if` विधाने किंवा टर्नरी ऑपरेटर (`?` आणि `:`) वापरू शकता. हे: +To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -तथापि ते केवळ तेव्हाच कार्य करते जेव्हा तुम्ही व्हेरिएबलवर `if` / ternary करत असाल, प्रॉपर्टी अॅक्सेसवर नाही, याप्रमाणे: +However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: ```typescript class Container { @@ -280,7 +280,7 @@ container.data = 'data' let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile ``` -ही त्रुटी कोणती आउटपुट करते: +Which outputs this error: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. @@ -304,7 +304,7 @@ let data = container.data let somethingOrElse: string = data ? data : 'else' // compiles just fine :) ``` -### मालमत्ता प्रवेशासह ऑपरेटर ओव्हरलोडिंग +### Operator overloading with property access जर तुम्ही (उदाहरणार्थ) रद्द करता येण्याजोगा प्रकार (प्रॉपर्टी ऍक्सेसमधून) नॉन-नलेबल असलेल्या प्रकाराची बेरीज करण्याचा प्रयत्न केला, तर असेंबलीस्क्रिप्ट कंपाइलर संकलित वेळेची त्रुटी चेतावणी देण्याऐवजी मूल्यांपैकी एक रद्द करण्यायोग्य आहे, तो संधी देऊन शांतपणे संकलित करतो. कोड रनटाइममध्ये खंडित होण्यासाठी. 
@@ -342,9 +342,9 @@ if (!wrapper.n) { wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt ``` -### मूल्य आरंभीकरण +### Value initialization -आपल्याकडे असा कोणताही कोड असल्यास: +If you have any code like this: ```typescript var value: Type // null @@ -352,7 +352,7 @@ value.x = 10 value.y = 'content' ``` -हे संकलित होईल परंतु रनटाइमच्या वेळी खंडित होईल, असे घडते कारण मूल्य प्रारंभ केले गेले नाही, म्हणून खात्री करा की आपल्या सबग्राफने त्यांची मूल्ये आरंभ केली आहेत, जसे की: +It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: ```typescript var value = new Type() // initialized @@ -360,7 +360,7 @@ value.x = 10 value.y = 'content' ``` -तसेच तुमच्याकडे GraphQL घटकामध्ये रद्द करण्यायोग्य गुणधर्म असल्यास, याप्रमाणे: +Also if you have nullable properties in a GraphQL entity, like this: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -आणि आपल्याकडे यासारखा कोड आहे: +And you have code similar to this: ```typescript let total = Total.load('latest') @@ -413,9 +413,9 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -### वर्ग मालमत्ता आरंभीकरण +### Class property initialization -तुम्ही इतर वर्ग (तुम्ही किंवा मानक लायब्ररीद्वारे घोषित केलेले) गुणधर्म असलेले कोणतेही वर्ग याप्रमाणे निर्यात केल्यास: +If you export any classes with properties that are other classes (declared by you or by the standard library) like this: ```typescript class Thing {} @@ -425,7 +425,7 @@ export class Something { } ``` -कंपाइलरमध्ये त्रुटी येईल कारण तुम्हाला एकतर वर्ग असलेल्या गुणधर्मांसाठी इनिशिएलायझर जोडणे आवश्यक आहे किंवा `!` ऑपरेटर जोडणे आवश्यक आहे: +The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: ```typescript export class Something { @@ -449,7 +449,7 @@ export class Something { } ``` -### अॅरे इनिशिएलायझेशन +### Array initialization `अॅरे` वर्ग अजूनही सूचीची लांबी सुरू करण्यासाठी संख्या स्वीकारतो, तथापि तुम्ही काळजी घेतली पाहिजे कारण `.push` सारखी ऑपरेशन्स सुरुवातीला जोडण्याऐवजी आकार वाढवतील., उदाहरणार्थ: @@ -459,13 +459,13 @@ let arr = new Array(5) // ["", "", "", "", ""] arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ``` -तुम्ही वापरत असलेल्या प्रकारांवर अवलंबून, उदा. 
रद्द करण्यायोग्य, आणि तुम्ही ते कसे ऍक्सेस करत आहात, तुम्हाला कदाचित यासारखी रनटाइम त्रुटी येऊ शकते: +Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: ``` ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -सुरुवातीला पुश करण्यासाठी तुम्ही एकतर, शून्य आकारासह `अॅरे` सुरू करा, याप्रमाणे: +To actually push at the beginning you should either, initialize the `Array` with size zero, like this: ```typescript let arr = new Array(0) // [] @@ -473,7 +473,7 @@ let arr = new Array(0) // [] arr.push('something') // ["something"] ``` -किंवा आपण ते अनुक्रमणिकेद्वारे बदलले पाहिजे: +Or you should mutate it via index: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -481,11 +481,11 @@ let arr = new Array(5) // ["", "", "", "", ""] arr[0] = 'something' // ["something", "", "", "", ""] ``` -### ग्राफक्यूएल स्कीमा +### GraphQL schema -हा थेट असेंबलीस्क्रिप्ट बदल नाही, परंतु तुम्हाला तुमची `schema.graphql` फाइल अपडेट करावी लागेल. +This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. -आता तुम्ही तुमच्या प्रकारांमध्ये नॉन-नलेबल लिस्ट असलेल्या फील्ड्स परिभाषित करू शकत नाही. तुमच्याकडे अशी स्कीमा असल्यास: +Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: ```graphql type Something @entity { @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -तुम्हाला सूची प्रकारातील सदस्याला एक `!` जोडावे लागेल, जसे की: +You'll have to add an `!` to the member of the List type, like this: ```graphql type Something @entity { @@ -513,12 +513,12 @@ type MyEntity @entity { असेंबलीस्क्रिप्ट आवृत्त्यांमधील शून्यता भिन्नतेमुळे हे बदलले आणि ते `src/generated/schema.ts` फाइलशी संबंधित आहे (डिफॉल्ट मार्ग, तुम्ही कदाचित हे बदलले असेल). -### इतर +### Other -- संरेखित `नकाशा#सेट` आणि `सेट#जोडा` स्पेकसह, `हे` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) परत करत आहे -- अ‍ॅरे यापुढे ArrayBufferView कडून वारसा मिळत नाहीत, परंतु ते आता वेगळे आहेत ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0) -- ऑब्जेक्ट लिटरलमधून सुरू केलेले वर्ग यापुढे कन्स्ट्रक्टर परिभाषित करू शकत नाहीत ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - दोन्ही ऑपरेंड पूर्णांक असल्यास `**` बायनरी ऑपरेशनचा परिणाम आता सामान्य भाजक पूर्णांक आहे. 
पूर्वी, परिणाम `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) वर कॉल केल्यासारखा फ्लोट होता -- `बूल` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag) वर कास्ट करताना NaN ला false करा /v0.14.9)) +- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) - `i8`/`u8` किंवा `i16`/`u16` प्रकाराचे लहान पूर्णांक मूल्य हलवताना, फक्त 3 अनुक्रमे 4 किमान RHS मूल्याचे महत्त्वपूर्ण बिट्स परिणामावर परिणाम करतात, `i32.shl` च्या परिणामाप्रमाणेच RHS मूल्याच्या 5 सर्वात कमी महत्त्वपूर्ण बिट्सवर परिणाम होतो. उदाहरण: `someI8 << 8` ने पूर्वी `0` मूल्य तयार केले होते, परंतु आता `someI8` तयार करते कारण RHS ला `8 & 7 = 0` (3 बिट) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) - जेव्हा आकार भिन्न असतात तेव्हा रिलेशनल स्ट्रिंग तुलनांचे दोष निराकरण ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/pages/mr/release-notes/graphql-validations-migration-guide.mdx b/website/pages/mr/release-notes/graphql-validations-migration-guide.mdx index 71084a8f5f6f..25586c408e48 100644 --- a/website/pages/mr/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/mr/release-notes/graphql-validations-migration-guide.mdx @@ -1,20 +1,20 @@ --- -title: GraphQL प्रमाणीकरण स्थलांतर मार्गदर्शक +title: GraphQL Validations migration guide --- -लवकरच `ग्राफ-नोड` [GraphQL प्रमाणीकरण तपशील](https://spec.graphql.org/June2018/#sec-Validation) च्या 100% कव्हरेजला समर्थन देईल. +Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). `graph-node` च्या मागील आवृत्त्यांनी सर्व प्रमाणीकरणांना समर्थन दिले नाही आणि अधिक सुंदर प्रतिसाद दिले - म्हणून, संदिग्धतेच्या बाबतीत, `graph-node` अवैध GraphQL ऑपरेशन घटकांकडे दुर्लक्ष करत आहे. -ग्राफक्‍युएल व्हॅलिडेशन सपोर्ट हा आगामी नवीन वैशिष्‍ट्ये आणि ग्राफ नेटवर्कच्‍या स्‍केलवरील कामगिरीचा आधारस्तंभ आहे. +GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. -हे क्वेरी प्रतिसादांचे निर्धारवाद देखील सुनिश्चित करेल, ग्राफ नेटवर्कवरील एक प्रमुख आवश्यकता. +It will also ensure determinism of query responses, a key requirement on The Graph Network. -**GraphQL प्रमाणीकरण सक्षम केल्याने काही विद्यमान क्वेरी खंडित होतील** ग्राफ API वर पाठवल्या जातात. +**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. -त्या प्रमाणीकरणांचे पालन करण्यासाठी, कृपया स्थलांतर मार्गदर्शकाचे अनुसरण करा. +To be compliant with those validations, please follow the migration guide. -> ⚠️ जर तुम्ही तुमच्या शंकांचे प्रमाणीकरण लागू होण्यापूर्वी स्थलांतरित केले नाही, तर ते त्रुटी परत करतील आणि शक्यतो तुमचे फ्रंटएंड/क्लायंट खंडित करतील. +> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. ## स्थलांतर मार्गदर्शक @@ -22,41 +22,41 @@ title: GraphQL प्रमाणीकरण स्थलांतर मार > तुम्ही [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) किंवा [GraphQL कोड जनरेटर](https://the-guild.dev) वापरत असल्यास, सर्व उपग्राफ स्थलांतरित करण्याची गरज नाही /graphql/codegen), ते तुमच्या क्वेरी वैध असल्याची खात्री करतात. 
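The note above also recommends GraphQL Code Generator as a way to catch invalid operations before the validations are enforced. As a rough, illustrative sketch (not taken from this guide), a JavaScript config passed to the generator via its `--config` flag might look like the following; the endpoint, document globs, and output path are placeholders:

```javascript
// codegen.js - illustrative sketch; the endpoint and paths are placeholders
module.exports = {
  // Every document is validated against this schema before types are generated
  schema: 'https://api.thegraph.com/subgraphs/name/<GITHUB_USER>/<SUBGRAPH_NAME>',
  // Operations and fragments to check (and to generate types for)
  documents: ['src/**/*.graphql'],
  generates: {
    './src/generated/graphql.ts': {
      // Emits TypeScript types for the schema and for each operation
      plugins: ['typescript', 'typescript-operations'],
    },
  },
}
```

Wired into a `codegen` npm script or a CI step, this makes the classes of errors listed below fail the build instead of surfacing at query time.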
-## स्थलांतर CLI साधन +## Migration CLI tool -**बहुतांश GraphQL ऑपरेशन एरर तुमच्या कोडबेसमध्ये वेळेपूर्वी आढळू शकतात.** +**Most of the GraphQL operations errors can be found in your codebase ahead of time.** -या कारणास्तव, आम्ही विकासादरम्यान किंवा CI मध्ये तुमचे GraphQL ऑपरेशन्स प्रमाणित करण्यासाठी एक सहज अनुभव प्रदान करतो. +For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. [`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) हे एक साधे CLI साधन आहे जे दिलेल्या स्कीमावर GraphQL ऑपरेशन्स प्रमाणित करण्यात मदत करते. -### **सुरुवात करणे** +### **Getting started** -आपण खालीलप्रमाणे साधन चालवू शकता: +You can run the tool as follows: ```bash npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql ``` -**नोट्स:** +**Notes:** - योग्य मूल्यांसह $GITHUB_USER, $SUBGRAPH_NAME सेट किंवा पुनर्स्थित करा. जसे: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) - प्रिव्ह्यू स्कीमा URL (https://api-next.thegraph.com/) प्रदान केली आहे हे खूप प्रमाणात मर्यादित आहे आणि सर्व वापरकर्ते नवीन आवृत्तीवर स्थलांतरित झाल्यावर सूर्यास्त होईल. **उत्पादनात वापरू नका.** - खालील विस्तार [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx सह फायलींमध्ये ऑपरेशन ओळखले जातात `, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` पर्याय). -### CLI आउटपुट +### CLI output -`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI टूल खालीलप्रमाणे कोणत्याही GraphQL ऑपरेशन एरर आउटपुट करेल: +The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: -![CLI वरून त्रुटी आउटपुट](https://i.imgur.com/x1cBdhq.png) +![Error output from CLI](https://i.imgur.com/x1cBdhq.png) -प्रत्येक त्रुटीसाठी, तुम्हाला वर्णन, फाईल मार्ग आणि स्थान आणि समाधानाच्या उदाहरणाची लिंक मिळेल (खालील विभाग पहा). +For each error, you will find a description, file path and position, and a link to a solution example (see the following section). -## प्रिव्ह्यू स्कीमा विरुद्ध तुमच्या स्थानिक क्वेरी चालवा +## Run your local queries against the preview schema -आम्ही एक एंडपॉइंट प्रदान करतो `https://api-next.thegraph.com/` जी `ग्राफ-नोड` आवृत्ती चालवते ज्याची प्रमाणीकरणे चालू आहेत. +We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. -तुम्ही त्यांना येथे पाठवून क्वेरी वापरून पाहू शकता: +You can try out queries by sending them to: - `https://api-next.thegraph.com/subgraphs/id/` @@ -68,19 +68,19 @@ npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHU ## समस्यांचे निराकरण कसे करावे -खाली, तुम्हाला तुमच्या विद्यमान GraphQL ऑपरेशन्समध्ये उद्भवू शकणाऱ्या सर्व GraphQL प्रमाणीकरण त्रुटी आढळतील. +Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. -### GraphQL व्हेरिएबल्स, ऑपरेशन्स, फ्रॅगमेंट्स किंवा वितर्क अद्वितीय असणे आवश्यक आहे +### GraphQL variables, operations, fragments, or arguments must be unique -ऑपरेशनमध्ये GraphQL व्हेरिएबल्स, ऑपरेशन्स, फ्रॅगमेंट्स आणि आर्ग्युमेंट्सचा एक अद्वितीय संच समाविष्ट आहे याची खात्री करण्यासाठी आम्ही नियम लागू केले. +We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. -GraphQL ऑपरेशन फक्त वैध आहे जर त्यात कोणतीही अस्पष्टता नसेल. 
+A GraphQL operation is only valid if it does not contain any ambiguity. -ते साध्य करण्यासाठी, तुमच्या GraphQL ऑपरेशनमधील काही घटक अद्वितीय असले पाहिजेत याची आम्हाला खात्री करणे आवश्यक आहे. +To achieve that, we need to ensure that some components in your GraphQL operation must be unique. -या नियमांचे उल्लंघन करणाऱ्या काही अवैध ऑपरेशन्सचे येथे उदाहरण आहे: +Here's an example of a few invalid operations that violates these rules: -**डुप्लिकेट क्वेरी नाव (#UniqueOperationNamesRule)** +**Duplicate Query name (#UniqueOperationNamesRule)** ```graphql # The following operation violated the UniqueOperationName @@ -95,7 +95,7 @@ query myData { } ``` -_उपाय:_ +_Solution:_ ```graphql query myData { @@ -108,7 +108,7 @@ query myData2 { } ``` -**डुप्लिकेट फ्रॅगमेंट नाव (#UniqueFragmentNamesRule)** +**Duplicate Fragment name (#UniqueFragmentNamesRule)** ```graphql # The following operation violated the UniqueFragmentName @@ -127,7 +127,7 @@ fragment MyFields { } ``` -_उपाय:_ +_Solution:_ ```graphql query myData { @@ -145,7 +145,7 @@ fragment MyFieldsName { # assign a unique name to fragment } ``` -**डुप्लिकेट व्हेरिएबल नाव (#UniqueVariableNamesRule)** +**Duplicate variable name (#UniqueVariableNamesRule)** ```graphql # The following operation violates the UniqueVariables @@ -155,7 +155,7 @@ query myData($id: String, $id: Int) { } ``` -_उपाय:_ +_Solution:_ ```graphql query myData($id: String) { @@ -165,7 +165,7 @@ query myData($id: String) { } ``` -**डुप्लिकेट युक्तिवाद नाव (#UniqueArgument)** +**Duplicate argument name (#UniqueArgument)** ```graphql # The following operation violated the UniqueArguments @@ -176,7 +176,7 @@ query myData($id: ID!) { } ``` -_उपाय:_ +_Solution:_ ```graphql query myData($id: ID!) { @@ -186,9 +186,9 @@ query myData($id: ID!) { } ``` -**नक्कल निनावी क्वेरी (#LoneAnonymousOperationRule)** +**Duplicate anonymous query (#LoneAnonymousOperationRule)** -तसेच, दोन निनावी ऑपरेशन्स वापरल्याने प्रतिसाद संरचनेतील विरोधामुळे `LoneAnonymousOperation` नियमाचे उल्लंघन होईल: +Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: ```graphql # This will fail if executed together in @@ -202,7 +202,7 @@ query { } ``` -_उपाय:_ +_Solution:_ ```graphql query { @@ -211,7 +211,7 @@ query { } ``` -किंवा दोन प्रश्नांची नावे द्या: +Or name the two queries: ```graphql query FirstQuery { @@ -223,15 +223,15 @@ query SecondQuery { } ``` -### ओव्हरलॅपिंग फील्ड +### Overlapping Fields -GraphQL निवड संच वैध मानला जातो जर तो अंतिम परिणाम संच योग्यरित्या निराकरण करतो. +A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. -विशिष्ट निवड संच, किंवा फील्ड, निवडलेल्या फील्डद्वारे किंवा वापरलेल्या वितर्कांद्वारे संदिग्धता निर्माण करत असल्यास, ग्राफक्यूएल सेवा ऑपरेशन प्रमाणित करण्यात अयशस्वी होईल. +If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. 
-या नियमाचे उल्लंघन करणाऱ्या अवैध ऑपरेशन्सची येथे काही उदाहरणे आहेत: +Here are a few examples of invalid operations that violate this rule: -**विरोधी फील्ड उपनाम (#OverlappingFieldsCanBeMergedRule)** +**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** ```graphql # Aliasing fields might cause conflicts, either with @@ -245,7 +245,7 @@ query { } ``` -_उपाय:_ +_Solution:_ ```graphql query { @@ -256,7 +256,7 @@ query { } ``` -**वितर्कांसह परस्परविरोधी फील्ड (#OverlappingFieldsCanBeMergedRule)** +**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** ```graphql # Different arguments might lead to different data, @@ -269,7 +269,7 @@ query { } ``` -_उपाय:_ +_Solution:_ ```graphql query { @@ -280,7 +280,7 @@ query { } ``` -तसेच, अधिक जटिल वापर-प्रकरणांमध्ये, तुम्ही दोन तुकड्यांचा वापर करून या नियमाचे उल्लंघन करू शकता ज्यामुळे शेवटी अपेक्षित सेटमध्ये संघर्ष होऊ शकतो: +Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: ```graphql query { @@ -299,7 +299,7 @@ fragment B on Type { } ``` -त्या व्यतिरिक्त, `@skip` आणि `@include` सारख्या क्लायंट-साइड GraphQL निर्देशांमुळे संदिग्धता येऊ शकते, उदाहरणार्थ: +In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: ```graphql fragment mergeSameFieldsWithSameDirectives on Dog { @@ -308,15 +308,15 @@ fragment mergeSameFieldsWithSameDirectives on Dog { } ``` -[तुम्ही येथे अल्गोरिदमबद्दल अधिक वाचू शकता.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) +[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) -### न वापरलेले चल किंवा तुकडे +### Unused Variables or Fragments -सर्व ऑपरेशन-परिभाषित घटक (व्हेरिएबल्स, फ्रॅगमेंट्स) वापरल्यास ग्राफक्यूएल ऑपरेशन देखील वैध मानले जाते. +A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. -या नियमांचे उल्लंघन करणाऱ्या ग्राफक्यूएल ऑपरेशन्ससाठी येथे काही उदाहरणे आहेत: +Here are a few examples for GraphQL operations that violates these rules: -**न वापरलेले चल** (#NoUnusedVariablesRule) +**Unused variable** (#NoUnusedVariablesRule) ```graphql # Invalid, because $someVar is never used. @@ -325,7 +325,7 @@ query something($someVar: String) { } ``` -_उपाय:_ +_Solution:_ ```graphql query something { @@ -333,7 +333,7 @@ query something { } ``` -**न वापरलेला तुकडा** (#NoUnusedFragmentsRule) +**Unused Fragment** (#NoUnusedFragmentsRule) ```graphql # Invalid, because fragment AllFields is never used. @@ -347,7 +347,7 @@ fragment AllFields { # unused :( } ``` -_उपाय:_ +_Solution:_ ```graphql # Invalid, because fragment AllFields is never used. @@ -360,12 +360,12 @@ query something { ### अवैध किंवा गहाळ निवड-सेट (#ScalarLeafsRule) -तसेच, GraphQL फील्ड निवड केवळ खालील प्रमाणीकृत असल्यास वैध आहे: +Also, a GraphQL field selection is only valid if the following is validated: -- ऑब्जेक्ट फील्डमध्ये निवड सेट असणे आवश्यक आहे. -- एज फील्ड (स्केलर, एनम) मध्ये निवड सेट निर्दिष्ट नसावा. +- An object field must-have selection set specified. +- An edge field (scalar, enum) must not have a selection set specified. 
-खालील स्कीमासह या नियमांच्या उल्लंघनाची काही उदाहरणे येथे आहेत: +Here are a few examples of violations of these rules with the following Schema: ```graphql schema { @@ -384,7 +384,7 @@ schema { } ``` -**अवैध निवड-सेट** +**Invalid Selection-Set** ```graphql query { @@ -396,7 +396,7 @@ query { } ``` -_उपाय:_ +_Solution:_ ```graphql query { @@ -406,7 +406,7 @@ query { } ``` -**अवैध निवड-सेट** +**Missing Selection-Set** ```graphql query { @@ -417,7 +417,7 @@ query { } ``` -_उपाय:_ +_Solution:_ ```graphql query { @@ -430,11 +430,11 @@ query { } ``` -### चुकीची वितर्क मूल्ये (#VariablesInAllowedPositionRule) +### Incorrect Arguments values (#VariablesInAllowedPositionRule) -GraphQL ऑपरेशन्स जी हार्ड-कोडेड व्हॅल्यू वितर्कांना पास करतात ती स्कीमामध्ये परिभाषित केलेल्या मूल्यावर आधारित वैध असणे आवश्यक आहे. +GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. -या नियमांचे उल्लंघन करणाऱ्या अवैध ऑपरेशन्सची येथे काही उदाहरणे आहेत: +Here are a few examples of invalid operations that violate these rules: ```graphql query purposes { @@ -457,22 +457,22 @@ query purposes($name: Int!) { } ``` -### अज्ञात प्रकार, व्हेरिएबल, फ्रॅगमेंट किंवा डायरेक्टिव्ह (#UnknownX) +### Unknown Type, Variable, Fragment, or Directive (#UnknownX) -कोणताही अज्ञात प्रकार, व्हेरिएबल, खंड किंवा निर्देश वापरल्यास GraphQL API त्रुटी वाढवेल. +The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. -ते अज्ञात संदर्भ निश्चित करणे आवश्यक आहे: +Those unknown references must be fixed: -- टायपिंग असल्यास नाव बदला -- अन्यथा, काढा +- rename if it was a typo +- otherwise, remove ### तुकडा: अवैध स्प्रेड किंवा परिभाषा -**अवैध फ्रॅगमेंट स्प्रेड (#PossibleFragmentSpreadsRule)** +**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** -एक तुकडा लागू नसलेल्या प्रकारावर पसरवला जाऊ शकत नाही. +A Fragment cannot be spread on a non-applicable type. -उदाहरण, आम्ही `कुत्रा` प्रकारावर `मांजर` तुकडा लागू करू शकत नाही: +Example, we cannot apply a `Cat` fragment to the `Dog` type: ```graphql query { @@ -486,11 +486,11 @@ fragment CatSimple on Cat { } ``` -**अवैध फ्रॅगमेंट व्याख्या (#FragmentsOnCompositeTypesRule)** +**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** -सर्व तुकड्यांची व्याख्या (`चालू ...` वापरून) संमिश्र प्रकारावर केली जाणे आवश्यक आहे, थोडक्यात: ऑब्जेक्ट, इंटरफेस किंवा युनियन. +All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. -खालील उदाहरणे अवैध आहेत, कारण स्केलरवर तुकड्यांची व्याख्या करणे अवैध आहे. +The following examples are invalid, since defining fragments on scalars is invalid. ```graphql fragment fragOnScalar on Int { @@ -506,13 +506,13 @@ fragment inlineFragOnScalar on Dog { } ``` -### निर्देशांचा वापर +### Directives usage -**निर्देशक या ठिकाणी वापरले जाऊ शकत नाहीत (#KnownDirectivesRule)** +**Directive cannot be used at this location (#KnownDirectivesRule)** -ग्राफ API द्वारे समर्थित फक्त GraphQL निर्देश (`@...`) वापरले जाऊ शकतात. +Only GraphQL directives (`@...`) supported by The Graph API can be used. 
-ग्राफक्यूएल समर्थित निर्देशांसह येथे एक उदाहरण आहे: +Here is an example with The GraphQL supported directives: ```graphql query { @@ -525,11 +525,11 @@ query { _Note: `@stream`, `@live`, `@defer` are not supported._ -**निर्देशक या स्थानावर फक्त एकदाच वापरले जाऊ शकतात (#UniqueDirectivesPerLocationRule)** +**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** -The Graph द्वारे समर्थित निर्देश प्रत्येक स्थानासाठी फक्त एकदाच वापरले जाऊ शकतात. +The directives supported by The Graph can only be used once per location. -खालील अवैध (आणि अनावश्यक): +The following is invalid (and redundant): ```graphql query { diff --git a/website/pages/mr/substreams.mdx b/website/pages/mr/substreams.mdx index d0354f06bab1..b8bb1f38017e 100644 --- a/website/pages/mr/substreams.mdx +++ b/website/pages/mr/substreams.mdx @@ -1,9 +1,44 @@ --- -title: Substreams +title: उपप्रवाह --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### प्रारंभ करणे + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). 
+ +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/mr/sunrise.mdx b/website/pages/mr/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/mr/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. 
The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). 
+ +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/mr/tokenomics.mdx b/website/pages/mr/tokenomics.mdx index 15171f4ab0a9..faeeb0770cee 100644 --- a/website/pages/mr/tokenomics.mdx +++ b/website/pages/mr/tokenomics.mdx @@ -11,7 +11,7 @@ description: ग्राफ नेटवर्क शक्तिशाली हे B2B2C मॉडेलसारखेच आहे, शिवाय ते सहभागींच्या विकेंद्रित नेटवर्कद्वारे समर्थित आहे. 
नेटवर्क सहभागी GRT रिवॉर्डच्या बदल्यात अंतिम वापरकर्त्यांना डेटा प्रदान करण्यासाठी एकत्र काम करतात. GRT हे कार्य उपयुक्तता टोकन आहे जे डेटा प्रदाते आणि ग्राहकांना समन्वयित करते. GRT नेटवर्कमधील डेटा प्रदाते आणि ग्राहकांमध्ये समन्वय साधण्यासाठी उपयुक्तता म्हणून काम करते आणि प्रोटोकॉल सहभागींना डेटा प्रभावीपणे व्यवस्थित करण्यासाठी प्रोत्साहन देते. -The Graph वापरून, वापरकर्ते ब्लॉकचेन वरून डेटा सहजपणे ऍक्सेस करू शकतात, फक्त त्यांना आवश्यक असलेल्या विशिष्ट माहितीसाठी पैसे देऊन. आज वेब3 इकोसिस्टममध्ये अनेक [लोकप्रिय अॅप्लिकेशन्स](https://thegraph.com/explorer) द्वारे आलेख वापरला जातो. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. Google वेब कसे अनुक्रमित करते त्याचप्रमाणे आलेख ब्लॉकचेन डेटा अनुक्रमित करतो. खरं तर, तुम्ही कदाचित आधीच The Graph वापरत असाल ते लक्षात न घेता. तुम्ही सबग्राफ वरून डेटा मिळवणाऱ्या dapp चे पुढचे टोक पाहिले असल्यास, तुम्ही सबग्राफ वरून डेटा विचारला आहे! @@ -75,7 +75,7 @@ Google वेब कसे अनुक्रमित करते त्या इंडेक्सर्स दोन प्रकारे GRT रिवॉर्ड मिळवू शकतात: -1. क्वेरी शुल्क: सबग्राफ डेटा क्वेरीसाठी विकासक किंवा वापरकर्त्यांद्वारे जीआरटी दिले जाते. क्वेरी फी एका रिबेट पूलमध्ये जमा केली जाते आणि इंडेक्सर्सना वितरित केली जाते. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. इंडेक्सिंग रिवॉर्ड्स: 3% वार्षिक जारी इंडेक्सर्सना ते अनुक्रमित करत असलेल्या सबग्राफच्या संख्येवर आधारित वितरीत केले जातात. हे रिवॉर्ड्स इंडेक्सर्सना अनुक्रमणिका सबग्राफसाठी प्रोत्साहन देतात, कधीकधी क्वेरी फी सुरू होण्यापूर्वी, त्यांनी डेटा अचूकपणे अनुक्रमित केला आहे याची पडताळणी करून प्रूफ ऑफ इंडेक्सिंग (POI) जमा करणे आणि सबमिट करणे. diff --git a/website/pages/nl/arbitrum/arbitrum-faq.mdx b/website/pages/nl/arbitrum/arbitrum-faq.mdx index b57414ede827..a9e47e65b6ba 100644 --- a/website/pages/nl/arbitrum/arbitrum-faq.mdx +++ b/website/pages/nl/arbitrum/arbitrum-faq.mdx @@ -39,9 +39,9 @@ Kern-ontwikkelingsteams werken eraan om L2 overdrachtstools te creëren die het Vanaf 10 april 2023 wordt 5% van alle indexing beloningen gemint op Arbitrum. Naarmate de netwerkparticipatie toeneemt en de Raad het goedkeurt, zullen de indexing rewards geleidelijk verschuiven van Ethereum naar Arbitrum en uiteindelijk volledig overgaan naar Arbitrum. -## Als ik wil deelnemen in het netwerk op L2, wat moet ik doen? +## If I would like to participate in the network on L2, what should I do? -Help alsjeblieft [het netwerk testen](https://testnet.thegraph.com/explorer) op L2 en geef feedback over je ervaring in [Discord](https://discord.gg/graphprotocol). +Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). ## Zijn er risico's verbonden met het schalen van het netwerk naar L2? diff --git a/website/pages/nl/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/nl/arbitrum/l2-transfer-tools-faq.mdx index 306cc7381b6b..32dfb5ca98a7 100644 --- a/website/pages/nl/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/nl/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools zijn nog niet gereleased. Het is verwacht dat deze in de zomer van 2023 beschikbaar zullen worden. 
+## General -## Wat zijn L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph heeft het 26 keer goedkoper gemaakt voor bijdragers om deel te nemen aan het netwerk door het protocol naar Arbitrum One over te dragen. De L2 Transfer Tools zijn gemaakt door kernontwikkelaars om de overstap naar L2 gemakkelijk te maken. Voor elke protocoldeelnemer zal er een set overdrachtshulpmiddelen worden gedeeld om de ervaring naadloos te maken bij de overstap naar L2, waarbij de behoefte aan ontdooiperiodes of handmatig opnemen en overzetten van GRT wordt vermeden. Deze tools vereisen dat je een specifieke reeks stappen volgt, afhankelijk van wat jouw rol is binnen The Graph en wat je naar L2 overdraagt. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Kan ik dezelfde wallet gebruiken als degene die ik gebruik op Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -Als je gebruik maakt van een [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet dan kan je hetzelfde adres gebruiken. Als je Ethereum mainnet wallet een contract is (bijv. een multisig) dan moet je een [Arbitrum wallet addres](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) specificeren waar de transfer naartoe gestuurd wordt. Controleer het adres met zorg aangezien elke overdracht naar een incorrect adres zal leiden naar een permanent verlies. Als je gebruik wilt maken van een multisig op L2, zorg dat je een multisig contract op Arbitrum One implementeert. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? + +If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. + +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### Wat gebeurt er als ik mijn overdracht niet binnen 7 dagen afrond? + +De L2 Transfer Tools gebruiken Arbitrum's eigen mechanismen op berichten te sturen van L1 naar L2. Dit mechanisme heet een "retryable ticket" en is gebruikt door alle eigen token bruggen, inclusief de Arbitrum GRT brug. 
Je kunt meer lezen over retryable tickets in de [Arbiturm docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +Wanneer je jouw activa (subgraph, inzet, delegatie of curatie) overdraagt naar L2, wordt er een bericht via de Arbitrum GRT-brug gestuurd dat een herhaalbaar ticket in L2 aanmaakt. De overdrachtstool bevat een bepaalde hoeveelheid ETH in de transactie, die gebruikt wordt om 1) te betalen voor de creatie van de ticket en 2) te betalen voor de gas voor de uitvoer van de ticket in L2. Omdat de gasprijzen kunnen variëren in de tijd tot het ticket gereed is om in L2 uit te voeren, is het mogelijk dat deze automatische uitvoerpoging mislukt. Als dat gebeurt, zal de Arbitrum-brug het herhaalbare ticket tot 7 dagen lang actief houden, en iedereen kan proberen het ticket te "inlossen" (wat een portemonnee met wat ETH dat naar Arbitrum is overgebracht, vereist). + +Dit is wat we de "Bevestigen"-stap noemen in alle overdrachtstools - deze zal in de meeste gevallen automatisch worden uitgevoerd, omdat de automatische uitvoering meestal succesvol is, maar het is belangrijk dat je terugkeert om te controleren of het is gelukt. Als het niet lukt en er zijn geen succesvolle herhaalpogingen in 7 dagen, zal de Arbitrum-brug het ticket verwerpen, en je activa (subgraph, inzet, delegatie of curatie) zullen verloren gaan en kunnen niet worden hersteld. De kernontwikkelaars van The Graph hebben een bewakingssysteem om deze situaties te detecteren en proberen de tickets in te lossen voordat het te laat is, maar uiteindelijk ben jij verantwoordelijk om ervoor te zorgen dat je overdracht op tijd is voltooid. Als je problemen hebt met het bevestigen van je transactie, neem dan contact op via [dit formulier](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) en de kernontwikkelaars zullen er zijn om je te helpen. + +### Ik ben mijn delegatie/inzet/curatie overdracht begonnen en ik ben niet zeker of deze door is gekomen naar L2, hoe kan ik bevestigen dat deze correct is overgedragen? + +Als je geen banner op je profiel ziet die vraagt om de overdracht af te ronden, dan is het waarschijnlijk dat de transactie het veilig heft gehaald naar L2 en geen acties meer vereist zijn. Als je twijfelt, kan je checken of Explorer jouw delegatie, inzet of curatie toont op Arbitrum One. + +Als je de L1 transactie-hash hebt (die je kunt vinden door naar de recente transacties in je portemonnee te kijken), kun je ook bevestigen of het "herhaalbare ticket" dat de boodschap naar L2 heeft gedragen hier is ingewisseld: https://retryable-dashboard.arbitrum.io/ - als de automatische inwisseling is mislukt, kun je ook daar je portemonnee verbinden en het inwisselen. Wees gerust dat de kernontwikkelaars ook berichten monitoren die vastlopen en zullen proberen ze in te wisselen voordat ze verlopen. ## Subgraph Overdracht -## Hoe verplaats ik mijn subgraphs? +### Hoe verplaats ik mijn subgraphs? + + Om je subgraph te verplaatsen, moet je de volgende stappen volgen: @@ -26,59 +50,151 @@ Om je subgraph te verplaatsen, moet je de volgende stappen volgen: 4. Maak het publiceren van subrgaph op Arbitrum af -5. Update Query URL (aanbeveling) +5. Update Query URL (aanbevolen) \*Let op dat je de overdracht binnen 7 dagen moet bevestigen, anders kan je subgraph verloren gaan. In de meeste gevallen zal deze stap automatisch verlopen, maar een handmatige bevestiging kan nodig zijn als er een gasprijsstijging is op Arbitrum. 
Als er tijdens dit proces problemen zijn, zijn er bronnen beschikbaar om te helpen: neem contact op met de ondersteuning via support@thegraph.com of op [Discord](https://discord.gg/graphprotocol). -## Waarvandaan moet ik mijn overdracht vanaf starten? +### Waarvandaan moet ik mijn overdracht vanaf starten? Je kan je overdracht starten vanaf de [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) of elke subgraph details pagina. Klik de "Transfer Subgraph" knop in de subgraph details pagina om de overdracht te starten. -## Hoe lang moet ik wachten to mijn subrgaph overgedragen is +### Hoe lang moet ik wachten to mijn subrgaph overgedragen is De overdracht duurt ongeveer 20 minuten. De Arbitrum brug werkt momenteel op de achtergrond om de brug overdracht automatisch te laten voltooien. In sommige gevallen kunnen gaskosten pieken en zul je de overdracht opnieuw moeten bevestigen. -## Is mijn subgraph nog te ontdekken nadat ik het naar L2 overgedragen heb? +### Is mijn subgraph nog te ontdekken nadat ik het naar L2 overgedragen heb? Jouw subgraph zal alleen te ontdekken zijn op het netwerk waarnaar deze gepubliceerd is. Bijvoorbeeld, als jouw subgraph gepubliceerd is op Arbitrum One, dan kan je deze alleen vinden via de Explorer op Arbitrum One en zul je deze niet kunnen vinden op Ethereum. Zorg ervoor dat je Arbitrum One hebt geselecteerd in de netwerkschakelaar bovenaan de pagina om er zeker van te zijn dat je op het juiste netwerk bent.  Na de overdracht zal de L1 subgraph als verouderd worden weergegeven. -## Moet mijn subgraph gepubliceerd zijn om deze te kunnen overdragen? +### Moet mijn subgraph gepubliceerd zijn om deze te kunnen overdragen? Om gebruik te maken van de subgraph transfer tool, moet jouw subgraph al gepubliceerd zijn op het Ethereum mainnet en moet het enige curatie-signalen hebben die eigendom zijn van de wallet die de subgraph bezit. Als jouw subgraph nog niet is gepubliceerd, wordt het aanbevolen om het direct op Arbitrum One te publiceren - de bijbehorende gas fees zullen aanzienlijk lager zijn. Als je een gepubliceerde subgraph wilt overdragen maar het eigenaarsaccount heeft nog geen enkel curatie-signalen, kun je een klein bedrag signaleren (bv.: 1 GRT) vanaf dat account; zorg ervoor dat je "auto-migrating" signalen kiest. -## Wat gebeurt er met de Ethereum mainnet versie van mijn subgraph nadat ik overdraag naar Arbitrum? +### Wat gebeurt er met de Ethereum mainnet versie van mijn subgraph nadat ik overdraag naar Arbitrum? Nadat je je subgraph naar Arbitrum hebt overgezet, zal de versie op het Ethereum mainnet als verouderd worden beschouwd. We raden aan om je query URL binnen 48 uur bij te werken. Er is echter een overgangsperiode waardoor je mainnet URL nog steeds werkt, zodat ondersteuning voor externe dapps kan worden bijgewerkt. -## Nadat ik overgedragen heb, moet ik opnieuw publiceren op Arbitrum? +### Nadat ik overgedragen heb, moet ik opnieuw publiceren op Arbitrum? Na de overdracht periode van 20 minuten, zul je de overdracht moeten bevestigen met transactie in de UI om de overdracht af te ronden, maar de transfer tool zal je hierdoor begeleiden. Jouw L1 endpoint zal worden ondersteund tijdens de overdracht periode en transitie periode hierna. Het wordt aangeraden om jouw endpoint bij te werken wanneer dit jou uitkomt. -## Zal er down-time zijn bij mijn endpoint tijdens het her-publiceren? +### Zal mijn eindpunt downtime ervaren tijdens het opnieuw publiceren? 
-Tijdens het gebruik van de overdracht tool zou er geen down-time moeten zijn tijdens het overdragen van jouw subgraph naar L2. Jouw L1 endpoint zal worden ondersteund tijdens de overdracht perioden en transitie periode hierna. Het wordt aangeraden om je endpoint bij te werken wanneer dit jou uitkomt. +Het is onwaarschijnlijk, maar mogelijk om een korte downtime te ervaren afhankelijk van welke Indexers de subgraph op L1 ondersteunen en of zij blijven indexen totdat de subgraph volledig ondersteund wordt op L2. -## Is het publiceren en versiebeheer hetzelfde op L2 als Ethereum mainnet? +### Is het publiceren en versiebeheer hetzelfde op L2 als Ethereum mainnet? -Ja. Zorg dat je Arbitrum One geselecteerd hebt als jou gepubliceerde netwerk tijdens het publiceren in Subrgaph Studio. In de studio, de laatste endpoint die beschikbaar is zal wijzen naar de meest recentelijk bijgewerkte versie van de subgraph. +Ja. Selecteer Arbiturm One als jou gepubliceerde netwerk tijdens het publiceren in Subrgaph Studio. In de studio, de laatste endpoint die beschikbaar is zal wijzen naar de meest recentelijk bijgewerkte versie van de subgraph. -## Zal mijn subgraphs curatie mee verplaatsen met mijn subgraph? +### Zal mijn subgraphs curatie mee verplaatsen met mijn subgraph? Als je gekozen hebt voor auto-migrating signal, dan zal 100% van je eigen curatie mee verplaatsen met jouw subgraph naar Arbitrum One. Alle curatie signalen van de subgraph zullen worden omgezet naar GRT tijdens de overdracht en alle GRT die corresponderen met jouw curatie signaal zullen worden gebruikt om signalen te minten op de L2 subgraph. Andere curators kunnen kiezen of ze hun deel van GRT kunnen opnemen, of overdragen naar L2 om signalen te minten op dezelfde subgraph. -## Kan ik nadat ik mijn subgraph overgedragen heb deze weer terug overdragen naar Ethereum mainnet? +### Kan ik nadat ik mijn subgraph overgedragen heb deze weer terug overdragen naar Ethereum mainnet? Wanneer overgedragen, zal jouw Ethereum mainnet versie van deze subgraph als verouderd worden beschouwd. Als je terug wilt gaan naar het mainnet, zul je deze opnieuw moeten implementeren en publiceren op het mainnet. Echter, het wordt sterk afgeraden om terug naar het Ethereum mainnet over te dragen gezien index beloningen uiteindelijk op Arbitrum One zullen worden verdeeld. -## Waarom heb ik gebrugd ETH nodig om mijn transactie te voltooien? +### Waarom heb ik gebrugd ETH nodig om mijn transactie te voltooien? Gas fees op Arbitrum One zijn betaald door middel van gebrugd ETH (bv. ETH dat naar Arbitrum One gebrugd is). Echter, gas fees zijn aanzienlijk lager vergeleken met Ethereum mainnet. +## Delegatie + +### Hoe draag ik mijn delegatie over? + + + +Om je delegatie over te dragen moet je de volgende stappen volgen: + +1. Start delegatie overdracht op Ethereum mainnet +2. Wacht 20 minuten op bevestiging +3. Bevestig delegatie overdracht op Arbitrum + +\*\*\*\*Je moet de transactie bevestigen om de delegatie overdracht te voltooien op Arbitrum. Deze stap moet binnen 7 dagen voltooid worden of de delegatie kan verloren gaan. In de meeste gevallen, zal deze stap automatisch verlopen, maar een handmatige bevestiging kan nodig zijn als er een gasprijsstijging is op Arbitrum. Als er problemen zijn tijdens dit process, zijn er hier hulpmiddelen: neem contact op met support via support@thegraph.com op op [Discord](https://discord.gg/graphprotocol). + +### Wat gebeurt er met mijn beloningen als ik een overdracht start met een open allocatie op Ethereum mainnet? 
+ +Als de Indexer waaraan je gedelegeerd hebt nog steeds actief is op L1, zul je bij de overdracht naar Arbitrum eventuele delegatiebeloningen van openstaande allocaties op het Ethereum-mainnet verliezen. Dit betekent dat je de beloningen van hoogstens de laatste 28-daagse periode zult verliezen. Als je de overdracht precies plant na het sluiten van de allocaties door de Indexer, kun je ervoor zorgen dat dit het minimale bedrag is. Als je een communicatiekanaal hebt met je Indexer(s), overweeg dan om met hen te bespreken wanneer het beste moment is om de overdracht uit te voeren. + +### Wat gebeurt er als de Indexer waaraan ik gedelegeerd heb niet actief is op Arbitrum One? + +De L2 transfer tool zal alleen worden ingeschakeld als de Indexer waaraan je hebt gedelegeerd hun eigen inzet heeft overgedragen naar Arbitrum. + +### Hebben Delegatoren de optie om een andere Indexer te delegeren? + +Als je wilt delegeren naar een andere Indexer, kun je overdragen naar dezelfde Indexer op Arbitrum, daarna delegeren ongedaan maken en wachten op de dooiperiode. Hierna, kun je een andere actieve delegator kiezen om naar te delegeren. + +### Wat als ik de Indexer niet kan vinden waarnaar ik delegeer op L2? + +De L2 transfer tool zal automatisch de Indexer detecteren naar wie je eerder gedelegeerd hebt. + +### Zal ik mijn delegatie kunnen verdelen of 'spreiden' over nieuw of meerdere Indexers in plaats van de voorgaande Indexer? + +De L2 transfer tool zal altijd jouw delegatie verplaatsen naar dezelfde Indexer aan wie jij eerder gedelegeerd hebt. Zodra je bent verplaatst naar L2 kan ja, onderdelegeren, wachten op de dooi-periode, en beslissen of jij je delegering wilt splitsen. + +### Ben ik gebonden aan de dooi-periode of kan ik direct opnemen na het gebruiken van de L2 transfer tool? + +De transfer tool staat jou toe direct naar te verplaatsen naar L2. Als je wilt onderdelegeren zul je moeten wachten op de dooi-periode. Alhoewel, als een Indexer al hun inzet heeft overgedragen naar L2 zul je direct kunnen opnemen op het Ethereum Mainnet. + +### Kunnen mijn beloningen negatief beïnvloedt worden als ik mijn delegering overdraag? + +Het is verwacht dat alle netwerk deelname zal verplaatsen naar Arbitrum One in de toekomst. + +### Hoe lang duurt het voordat de overdracht van mijn delegering naar L2 voldaan is? + +Een 20-minuten bevestiging is nodig voor een delegerings-overdracht. Houd er rekening dat na de periode van 20 minuten, je stap 3 van het overdracht proces moet voltooien binnen 7 dagen. Als je dit niet doet, zou je delegering verloren gegaan kunnen. Houd ook rekening met dat de tool, in de meeste gevallen, deze stap automatisch zal voltooien. In het geval van een gefaalde auto-attempt, zul je dit handmatig moeten voltooien. Mochten er problemen omhoog komen tijdens dit proces, geen zorgen, wij zijn hier om te helpen: bereik ons via support@thegraph.com of op [Discord](https://discord.gg/graphprotocol). + +### Kan ik mijn delegering overdragen als ik gebruik maak van een GRT toekenningscontract/token lock wallet? + +Ja! Het proces is een beetje anders omdat toekenningscontract de benodigde ETH voor de L2-gas niet kunnen doorsturen, dus je moet het van tevoren storten. Als je toekenningscontract nog niet volledig is gevestigd, moet je ook eerst een tegenhanger van het toekenningscontract op L2 initialiseren en kun je alleen de delegering overzetten naar dit L2-toekenningscontract. 
De UI op Explorer kan je door dit proces leiden wanneer je bent verbonden met Explorer met behulp van de vesting lock-wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is er een delegering belasting? + +Nee. Ontvangen tokens op L2 zijn gedelegeerd aan de specifieke Indexer namens de gespecificeerde Delegator zonder een delegering belasting. + +### Zullen mijn ongerealiseerde beloningen overgedragen worden als ik mijn delegering overdraag? + +Ja! De enige beloningen die niet overgedragen kunnen worden zijn degene voor open allocaties, omdat deze niet zullen bestaan totdat de Indexer de allocaties sluit (doorgaans elke 28 dagen). Als je voor een tijd al aan het delegeren bent geweest zal dit enkel een fractie van de beloningen zijn. + +Op het smart contract niveau, ongerealiseerde beloningen zijn al deel van je delegering balans, dus worden deze overgedragen wanneer je jouw delegering overdraagt naar L2. + +### Is verplaatsen van delegeringen naar L2 verplicht? Is er een deadline? + +Verplaatsen van delegeringen naar L2 is niet verplicht, maar index beloningen groeien op L2 volgend de tijdlijn omschreven in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Uiteindelijk, als de raad de toenames blijft goedkeuren, zullen alle beloningen in L2 worden verdeeld en zullen er geen index beloningen zijn voor Indexers en Delegators op L1. + +### Als ik aan het delegeren ben aan een Indexer die al zijn inzet overgedragen heeft naar L2, ontvang ik dan geen beloningen meer op L1? + +Vele Indexers zijn geleidelijk inzet aan het overdragen zodat Indexers op L1 nog steeds beloningen en vergoedingen ontvangen op L1, die vervolgens gedeeld worden met Delegators. Zodra een Indexer al zijn inzet overgedragen heeft, houden zij op met opereren op L1, dus Delegators zullen geen beloningen meer ontvangen tenzij ze overdragen naar L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. 
If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curatie Signaal -## Hoe draag ik mijn curatie over? +### Hoe draag ik mijn curatie over? Om je curatie over te dragen, moet je de volgende stappen volgen: @@ -90,25 +206,29 @@ Om je curatie over te dragen, moet je de volgende stappen volgen: \*indien nodig - bv. als je een contract adres gebruikt hebt. -## Hoe weet ik of de subgraph die ik cureer verplaatst is naar L2? +### Hoe weet ik of de subgraph die ik cureer verplaatst is naar L2? Bij het bekijken van de details pagina van de subgraph zal er een banner verschijnen om je te laten weten dat deze subgraph is overgedragen. Je kunt de instructies volgen om je curatie over te zetten. Deze informatie is ook te vinden op de detailspagina van elke subgraph die is overgezet. -## Wat als ik niet mijn curatie wil overdragen naar L2? +### Wat als ik niet mijn curatie wil overdragen naar L2? Wanneer een subgraph is verouderd, heb je de optie om je signaal terug te trekken. Op dezelfde manier, als een subgraph naar L2 is verhuisd, kun je ervoor kiezen om je signaal op het Ethereum-mainnet terug te trekken of het signaal naar L2 te sturen. -## Hoe weet ik of mijn curatie succesvol is overgedragen? +### Hoe weet ik of mijn curatie succesvol is overgedragen? Signaal details zullen toegankelijk zijn via Explorer ongeveer 20 minuten nadat de L2 transfer tool is gestart. -## Kan ik mijn curatie overdragen op meer dan een subgraph per keer? +### Kan ik mijn curatie overdragen op meer dan een subgraph per keer? Op dit moment is er geen bulk overdracht optie. ## Indexer Inzet -## Hoe draag ik mijn inzet over naar Arbitrum? +### Hoe draag ik mijn inzet over naar Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + Om je inzet over te dragen, moet je de volgende stappen volgen: @@ -118,9 +238,9 @@ Om je inzet over te dragen, moet je de volgende stappen volgen: 3. Bevestig inzet overdracht op Arbitrum -\*Let op dat je de overdracht binnen 7 dagen moet bevestigen, anders kan je inzet verloren gaan. In de meeste gevallen zal deze stap automatisch verlopen, maar een handmatige bevestiging kan nodig zijn als er een gasprijsstijging is op Arbitrum. Als er tijdens dit proces problemen zijn, zijn er bronnen beschikbaar om te helpen: neem contact op met de ondersteuning via support@thegraph.com of op [Discord](https://discord.gg/graphprotocol). +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Zal al mijn inzet overgedragen worden? +### Zal al mijn inzet overgedragen worden? Je kan kiezen hoeveel van inzet je wilt overdragen. Als je ervoor kiest om al je inzet in een keer over te dragen, zul je eerst openstaande allocaties moeten sluiten. 
@@ -128,188 +248,164 @@ Als je van plan bent on je inzet over te dragen in meerdere transacties, moet je Let op: De eerste keer dat je de overdracht tool gebruikt, moet je voldoen aan de minimale inzet vereisten op L2. Indexers moeten ten minste 100.000 GRT verzenden (bij het uitvoeren van deze functie voor de eerste keer). Als je een deel van je inzet op L1 achterlaat, moet dit ook boven het minimum van 100.000 GRT zijn en voldoende zijn (samen met je delegaties) om je openstaande allocaties te dekken. -## Hoe lang heb ik om mijn inzet overdracht te bevestigen naar Arbitrum? +### Hoe lang heb ik om mijn inzet overdracht te bevestigen naar Arbitrum? \*\*\* Je moet jouw transactie bevestigen om je inzet overdracht te voltooien op Arbitrum. Deze stap moet voltooid worden binnen 7 dagen of je inzet kan verloren gaan. -## Wat als ik allocaties open heb staan? +### Wat als ik allocaties open heb staan? Als je niet al je inzet verstuurt, zal de L2-overdrachtstool valideren dat er ten minste het minimum van 100k GRT achterblijft op het Ethereum mainnet en dat je resterende inzet en delegaties voldoende zijn om eventuele openstaande allocaties te dekken. Mogelijk moet je openstaande allocaties sluiten als je GRT-saldo de minimumvereisten + openstaande allocaties niet dekt. -## Gebruik makend van de transfer tools, is het noodzakelijk om 28 dagen te wachten om je inzet op heffen op Ethereum mainnet voor het overdragen? +### Gebruik makend van de transfer tools, is het noodzakelijk om 28 dagen te wachten om je inzet op heffen op Ethereum mainnet voor het overdragen? Nee, je kan je inzet direct naar L2 overdragen, het is niet nodig om je inzet op te heffen en wachten voordat je gebruik maakt van de transfer tool. De 28 dagen wachttijd is alleen van toepassing als je de inzet wilt opnemen naar je wallet, op Ethereum mainnet of L2. -## Hoe lang zal het duren om mijn inzet over te dragen? +### Hoe lang zal het duren om mijn inzet over te dragen? Het duurt ongeveer 20 minuten voordat de L2-overdrachtstool je inzet heeft overgezet. -## Moet ik indexeren op Arbitrum voordat ik mijn inzet overdraag? +### Moet ik indexeren op Arbitrum voordat ik mijn inzet overdraag? Je kunt je inzet effectief overdragen voordat je indexing opzet, maar je zult geen beloningen kunnen claimen op L2 totdat je toewijst aan subgraphs op L2, ze indexeert en POI's presenteert. -## Kunnen Delegators hun delegatie overdragen voordat ik mijn index inzet overdraag? - -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### Kunnen Delegators hun delegatie overdragen voordat ik mijn index inzet overdraag? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +Nee, om Delegators in staat te stellen hun gedelegeerde GRT naar Arbitrum over te zetten, moet de Indexer waaraan ze delegeren actief zijn op L2. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Kan ik mijn inzet overdragen als ik een GRT toekenningscontract / token lock-wallet gebruik? -## Delegation +Ja! 
Het proces is een beetje anders, omdat toekenningscontract de benodigde ETH voor de L2-gas niet kunnen doorsturen, dus je moet het van tevoren storten. Als je toekenningscontract nog niet volledig is toegekend, moet je ook eerst een tegenhanger van het toekenningscontract op L2 initialiseren en kun je alleen de inzet overzetten naar dit L2-toekenningscontract. De UI op Explorer kan je door dit proces leiden wanneer je bent verbonden met Explorer met behulp van de toekenning lock-wallet. -## How do I transfer my delegation? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -To transfer your delegation, you will need to complete the following steps: +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -1. Initiate delegation transfer on Ethereum mainnet +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -2. Wacht 20 minuten op bevestiging - -3. Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? - -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. - -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? - -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. - -## What if I can't find the Indexer I'm delegating to on L2? +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +## Toekenningscontract Overdracht -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +### Hoe draag ik mijn toekenningscontract over? -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. 
+Om je toekenning over te dragen, moet je de volgende stappen volgen: -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +1. Start de toekennings transfer op Ethereum mainnet -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? +2. Wacht 20 minuten op bevestiging -It is anticipated that all network participation will move to Arbitrum One in the future. +3. Bevestig toekenning transfer op Arbitrum -## How long does it take to complete the transfer of my delegation to L2? +### Hoe draag ik mijn toekenningscontract over als ik alleen deels toegekend ben? -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +1. Stort wat ETH in de transfer tool contract (UI kan helpen met het schatten van een redelijke hoeveelheid) -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +2. Stuur wat gesloten GRT via de transfer tool contract naar L3 om de L2 vesting lock te starten. Deze zal ook de begunstigde adres instellen. -## Is there any delegation tax? +3. Stuur de inzet/delegering naar L2 door de "gesloten" transfer tool functies in de L1Staking contract. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +4. Neem overgebleven ETH op van de transfer tool contract -## Vesting Contract Transfer +### Hoe draag ik mijn vesting contract over als ik volledig gevest ben? -## How do I transfer my vesting contract? + -To transfer your vesting, you will need to complete the following steps: +Voor die volledig toegekend zijn is het proces vergelijkbaar: -1. Initiate the vesting transfer on Ethereum mainnet +1. Stort wat ETH in de transfer tool contract (UI kan helpen met het schatten van een redelijke hoeveelheid) -2. Wacht 20 minuten op bevestiging +2. Stel jouw L2 adres in met een vraag aan de transfer tool contract -3. Confirm vesting transfer on Arbitrum +3. Stuur jouw inzet/delegering naar L2 door de "gesloten" transfer tool functies in de L1 Staking contract. -## How do I transfer my vesting contract if I am only partially vested? +4. Neem overgebleven ETH op van de transfer tool contract -1. 
Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### Kan ik mijn toekenningscontract overdragen naar Arbitrum? -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +Je kan je toekenninscontract's GRT-balans overdragen aan een toekenningscontract in L2. Dit is een vereiste voor inzet overdracht of delegering van jouw toekenningscontract naar L2. De toekennignscontract met een niet nul hoeveelheid aan GRT bevatten (je kunt kleine hoeveelheden zoals 1 GRT overdragen indien nodig). -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Wanneer jij GRT overdraagt van jouw L1 toekenningscontract naar L2, kan je de hoeveelheid om te sturen kiezen en dit doen zo vaak je wilt. De L2 toekenningscontract zal gestart worden de eerste keer dat je GRT overdraagt. -4. Withdraw any remaining ETH from the transfer tool contract +De overdrachten worden gedaan door middel van de Transfer Tool die zichtbaar zullen zijn op jouw Explorer profiel zodra je verbindt met de toekenninsgcontract account. -## How do I transfer my vesting contract if I am fully vested? +Houd rekening dat je niet GRT kunt opnemen/vrijlaten van de L2 toekenningscontract to het einde van je toekenningstijdlijn wanneer je contract volledig is toegekend. Als jij je GRT moet vrijlaten voor dat moment, kan jij je GRT weer terug overdragen naar de L1 toekenningscontract door middel van een andere tool die beschikbaar is voor deze toepassing. -For those that are fully vested, the process is similar: +Als je nog niet toekenningscontract balans overgedragen hebt naar L2, en je contract is volledig toegekend, moet je niet je toekenningscontract overdragen naar L2. In plaats daarvan kan je de transfer tool gebruiken om een L2 wallet adres in te stellen, en direct je inzet of delegering over te dragen naar deze reguliere wallet op L2. -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### Ik gebruik mijn toekenningscontract om in te zetten op mainnet. Kan ik mijn inzet overdragen naar Arbitrum? -2. Set your L2 address with a call to the transfer tool contract +Ja, maar als je contract nog aan het toekennen is, kan je alleen de inzet overdragen die van jouw toekenningscontract is. Je moet eerst deze L2 contract starten door GRT balans over te dragen door gebruik te maken van de toekenningscontract overdracht tool op Explorer. Als jouw contract volledig overgedragen is, kun je jouw inzeten overdragen naar elk adres in L2, maar je moet deze op voorhand instellen en ETH storten om de L2 tool te laten betalen voor L2 gas. -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +### Ik gebruik mijn toekenningscontract om te delegeren op mainnet. Kan ik mijn delegeringen overdragen naar Arbitrum? -4. Withdraw any remaining ETH from the transfer tool contract +Ja, maar als je contract nog aan het toekennen is, kan je alleen de delegering overdragen die van jouw toekenningscontract is. Je moet eerst deze L2 contract starten door GRT balans over te dragen door gebruik te maken van de toekenningscontract overdracht tool op Explorer. Als jouw contract volledig overgedragen is, kun je jouw delegering overdragen naar elk adres in L2, maar je moet deze op voorhand instellen en ETH storten om de L2 tool te laten betalen voor L2 gas. 
-## Can I transfer my vesting contract to Arbitrum? +### Kan ik een andere begunstigde specificeren voor mijn toekenninscontract op L2? -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +Ja, de eerste keer dat je balans overdraagt en je L2 toekennings contract opzet moet je een L2 begunstigde opgeven. Zorg dat de begunstigde een wallet is waarmee je transacties op Arbitrum One kunt uitvoeren, bijv. het moet een EOA of een multisig zijn ingezet op Arbitrum One. -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +Als je contract volledig toegekend is, zul je niet een toekenningscontract opzetten op L2; in plaats daarvan, zul je een L2 wallet adres instellen en zal deze de ontvangende wallet zijn voor jouw inzet of delegering op Arbitrum. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +### Mijn contract is volledig toegekend. Kan ik mijn inzet of delegering overdragen naar een ander adres dat niet een L2 toekenning contract is? -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Ja. Als je nog niet toekenningscontract balans overgedragen hebt naar L2, en je contract is volledig toegekend, moet je niet je toekenningscontract overdragen naar L2. In plaats daarvan kan je de transfer tool gebruiken om een L2 wallet adres in te stellen, en direct je inzet of delegering over te dragen naar deze reguliere wallet op L2. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Dit staat je toe om je inzet of delegering over te dragen naar elk L2 adres. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### Mij toekenning contract is nog steeds toe aan het kennen. Hoe draag ik mijn toekenning balans over naar L2? -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +Deze stappen zijn alleen van toepassing als je contract nog steeds toe aan het kennen is, of als je dit proces gebruikt hebt toen je contract nog toe aan het kennen was. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? 
+Om je toekenning contract over te dragen naar L2, zul je elke GRT-balans naar L2 sturen door middel van de transfer tools die je L2 toekenning contract zullen starten: -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +1. Stort wat ETH in de transfer tool contract (deze zal gebruikt worden om je L2 gas te financieren) -## Can I specify a different beneficiary for my vesting contract on L2? +2. Hef protocol toegang naar de toekenning contract op (benodigd voor de volgende stap) -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +3. Geef protocol toegang aan het toekenningscontract (laat jouw contract interactie te hebben met de overdracht tool) -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +4. Specifieer een L2 begunstigde adres\* en start de balans overdracht op Ethereum mainnet -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? - -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +5. Wacht 20 minuten op bevestiging -This allows you to transfer your stake or delegation to any L2 address. +6. Bevestig de balans overdracht op L2 -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +\*indien nodig - bv. als je een contract adres gebruikt hebt. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. -2. 
Revoke protocol access to the vesting contract (needed for the next step) +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. -5. Wacht 20 minuten op bevestiging +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ -6. Confirm the balance transfer on L2 +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? -\*indien nodig - bv. als je een contract adres gebruikt hebt. +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. -## Can I move my vesting contract back to L1? +### Kan ik mijn toekenning contract terug verplaatsen naar L1? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +Er is geen nood om dit te doen omdat je toekenning contract nog steeds in L1 is. Als jee gebruik maakt van de transfer tools dan creëer je een nieuw contract in L2 dat verbonden is met je L1 toekenning contract, en je kan GRT tussen deze twee sturen. -## Why do I need to move my vesting contract to begin with? +### Waarom moet ik überhaupt mijn toekenning contract verplaatsen om mee te beginnen? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +Je moet een L2 toekenning contract opzetten zodat deze account bezit kan nemen van jouw inzet of delegering op L2. Anders is er geen mogelijkheid voor jou om inzet/delegering over te dragen naar L2 zonder te "ontsnappen" van de toekenning contract. -## What happens if I try to cash out my contract when it is only partially vested? 
Is this possible? +### Wat gebeurt er als ik mijn contract uit probeer te betalen als deze gedeeltelijk toegekend is? Is dit mogelijk? -This is not a possibility. You can move funds back to L1 and withdraw them there. +Dit is geen mogelijkheid. Je kan fondsen terug naar L1 overdragen en van daar opnemen. -## What if I don't want to move my vesting contract to L2? +### Wat als ik niet mijn toekenning contract wil verplaatsen naar L2? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +Je kan blijven inzetten/delegeren op L1. Naar mate de tijd vordert zou je kunnen overwegen om over te dragen naar L2 om daar beloningen in te schakelen naarmate het protocol opschaalt op Arbitrum. Houd er rekening mee dat deze tools voor toekenning contracten zijn die in mogen zetten en delegeren in het protocol. Als jouw contract inzetten of delegeren niet toelaat, of herroproepbaar is, dan zijn er geen transfer tools beschikbaar. Je kan nog steeds GRT opnemen van L1 indien beschikbaar. diff --git a/website/pages/nl/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/nl/arbitrum/l2-transfer-tools-guide.mdx index fbd72c878899..11b9ba5a10ef 100644 --- a/website/pages/nl/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/nl/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools zijn nog niet gereleased. Het is verwacht dat deze in de zomer van 2023 beschikbaar zullen worden. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/nl/billing.mdx b/website/pages/nl/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/nl/billing.mdx +++ b/website/pages/nl/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. 
You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. 
+ - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/nl/chain-integration-overview.mdx b/website/pages/nl/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/nl/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. 
How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/nl/cookbook/arweave.mdx b/website/pages/nl/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/nl/cookbook/arweave.mdx +++ b/website/pages/nl/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. 
Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/nl/cookbook/grafting.mdx b/website/pages/nl/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/nl/cookbook/grafting.mdx +++ b/website/pages/nl/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/nl/cookbook/near.mdx b/website/pages/nl/cookbook/near.mdx index 879e8e5c15aa..304e1202e278 100644 --- a/website/pages/nl/cookbook/near.mdx +++ b/website/pages/nl/cookbook/near.mdx @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
+If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/nl/cookbook/upgrading-a-subgraph.mdx b/website/pages/nl/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/nl/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/nl/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/nl/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/nl/deploying/hosted-service.mdx b/website/pages/nl/deploying/hosted-service.mdx index 2e6093531110..3b65cfbccdf0 100644 --- a/website/pages/nl/deploying/hosted-service.mdx +++ b/website/pages/nl/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). 
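
As a concrete illustration of the Proxy contract flow above, a hosted-service `graph init` invocation could look like the sketch below. The contract address, GitHub user and subgraph name are hypothetical placeholders; substitute the implementation contract's address, and after initialization point the data source in `subgraph.yaml` at the Proxy contract's address as described above.

```sh
# Hypothetical values - replace the address and names with your own.
graph init \
  --product hosted-service \
  --from-contract 0x1234567890abcdef1234567890abcdef12345678 \
  my-github-user/my-proxy-subgraph
```
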
diff --git a/website/pages/nl/deploying/subgraph-studio.mdx b/website/pages/nl/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/nl/deploying/subgraph-studio.mdx +++ b/website/pages/nl/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/nl/developing/creating-a-subgraph.mdx b/website/pages/nl/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/nl/developing/creating-a-subgraph.mdx +++ b/website/pages/nl/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. 
| | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. 
+File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/nl/developing/developer-faqs.mdx b/website/pages/nl/developing/developer-faqs.mdx index 0b925a79dce2..053853897a41 100644 --- a/website/pages/nl/developing/developer-faqs.mdx +++ b/website/pages/nl/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. 
Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/nl/developing/graph-ts/api.mdx b/website/pages/nl/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/nl/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. 
+- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript.
+
+### Versions
+
+The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph.
+
+| Version | Release notes |
+| :-: | --- |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />`ethereum.call.address` renamed to `ethereum.call.to` |
+| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+
+### Built-in Types
+
+Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types).
+
+The following additional types are provided by `@graphprotocol/graph-ts`.
+
+#### ByteArray
+
+```typescript
+import { ByteArray } from '@graphprotocol/graph-ts'
+```
+
+`ByteArray` represents an array of `u8`.
+
+_Construction_
+
+- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes.
+- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional.
+
+_Type conversions_
+
+- `toHexString(): string` - Converts to a hex string prefixed with `0x`.
+- `toString(): string` - Interprets the bytes as a UTF-8 string.
+- `toBase58(): string` - Encodes the bytes into a base58 string.
+- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow.
+- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow.
+
+_Operators_
+
+- `equals(y: ByteArray): bool` – can be written as `x == y`.
+- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other`
+- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other`
+
+#### BigDecimal
+
+```typescript
+import { BigDecimal } from '@graphprotocol/graph-ts'
+```
+
+`BigDecimal` is used to represent arbitrary precision decimals.
+
+> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent.
+
+_Construction_
+
+- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from a `BigInt`.
+- `static fromString(s: string): BigDecimal` – parses from a decimal string.
+
+_Type conversions_
+
+- `toString(): string` – prints to a decimal string.
+
+_Math_
+
+- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`.
+- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`.
+- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`.
+- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`.
+- `equals(y: BigDecimal): bool` – can be written as `x == y`.
+- `notEqual(y: BigDecimal): bool` – can be written as `x != y`.
+- `lt(y: BigDecimal): bool` – can be written as `x < y`.
+- `le(y: BigDecimal): bool` – can be written as `x <= y`.
+- `gt(y: BigDecimal): bool` – can be written as `x > y`.
+- `ge(y: BigDecimal): bool` – can be written as `x >= y`.
+- `neg(): BigDecimal` - can be written as `-x`.
+
+#### BigInt
+
+```typescript
+import { BigInt } from '@graphprotocol/graph-ts'
+```
+
+`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`.
+
+The `BigInt` class has the following API:
+
+_Construction_
+
+- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`.
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
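// (The `...` values above stand for whatever the mapping computes for each field.)
// Calling transfer.save() after setting the fields writes the entity; per option 2
// above, if an entity with this ID already exists the changes are merged into it.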
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
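
Beyond the contract that emitted the event, any other contract whose ABI is listed in the manifest can be bound at an arbitrary address, and its read-only calls can be wrapped with `try_` in the same way. Below is a minimal sketch, assuming a hypothetical `PriceOracle` ABI declared under the `ERC20Contract` data source; the generated import path and the address are illustrative only:

```typescript
// Sketch only: `PriceOracle` is a hypothetical extra ABI declared under the
// ERC20Contract data source in the manifest; the address below is illustrative.
import { Address, log } from '@graphprotocol/graph-ts'
import { PriceOracle } from '../generated/ERC20Contract/PriceOracle'

export function readOraclePrice(): void {
  let oracle = PriceOracle.bind(Address.fromString('0x0000000000000000000000000000000000000042'))

  // Use the try_ variant so a revert does not abort the handler
  let result = oracle.try_latestPrice()
  if (result.reverted) {
    log.info('latestPrice reverted', [])
  } else {
    log.info('latestPrice: {}', [result.value.toString()])
  }
}
```

The same `try_` pattern applies to any read-only method generated from that ABI.
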
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is: C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id.toString()) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`; it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`; it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +|
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/nl/developing/graph-ts/common-issues.mdx b/website/pages/nl/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/nl/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/nl/developing/supported-networks.mdx b/website/pages/nl/developing/supported-networks.mdx index 58ce56345f7c..cd82305bfce2 100644 --- a/website/pages/nl/developing/supported-networks.mdx +++ b/website/pages/nl/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/nl/firehose.mdx b/website/pages/nl/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/nl/firehose.mdx +++ b/website/pages/nl/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. 
Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/nl/glossary.mdx b/website/pages/nl/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/nl/glossary.mdx +++ b/website/pages/nl/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. 
-- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). 
This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network.
- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/nl/graphcast.mdx b/website/pages/nl/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/nl/graphcast.mdx +++ b/website/pages/nl/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/nl/index.json b/website/pages/nl/index.json index 9e28e13d5001..ddbbb68445fe 100644 --- a/website/pages/nl/index.json +++ b/website/pages/nl/index.json @@ -23,8 +23,8 @@ "description": "Use Studio to create subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/nl/mips-faqs.mdx b/website/pages/nl/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/nl/mips-faqs.mdx +++ b/website/pages/nl/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! 
During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). diff --git a/website/pages/nl/network/benefits.mdx b/website/pages/nl/network/benefits.mdx index 839a0a7b9cf7..945586f14180 100644 --- a/website/pages/nl/network/benefits.mdx +++ b/website/pages/nl/network/benefits.mdx @@ -1,96 +1,97 @@ --- -title: The Graph Network vs. Self Hosting +title: Het Graph Netwerk vs. Zelf Hosten socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +Het gedecentraliseerde netwerk van The Graph is ontworpen en verfijnd om een robuuste ervaring te creëren bij het indexeren en opvragen van data. Het netwerk wordt iedere dag sterker door de duizenden bijdragers wereldwijd. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +De voordelen van dit gedecentraliseerde protocol is dat het niet gerepliceerd kan worden door een `graph-node` lokaal te laten werken. Het Graph Netwerk is betrouwbaarder, efficiënter en goedkoper. -Here is an analysis: +Hier is een analyse: -## Why You Should Use The Graph Network +## Waarom jij het Graph Network zou moeten gebruiken -- 60-98% lower monthly cost -- $0 infrastructure setup costs -- Superior uptime -- Access to 438 Indexers (and counting) -- 24/7 technical support by global community +- 60-98% lagere maandelijkse kosten +- Geen kosten voor het opzetten van infrastructuur +- Superieure beschikbaarheid +- Access to hundreds of independent Indexers around the world +- 24/7 technische ondersteuning door de wereldwijde Graph Netwerk gemeenschap -## The Benefits Explained +## De voordelen uitgelegd -### Lower & more Flexible Cost Structure +### Lagere & meer flexibele kostenstructuur -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. +Geen contracten. Geen maandelijkse kosten. Betaal alleen voor de queries die je gebruikt, met een gemiddelde prijs per query van $ 0.0002. Queries hebben een waarde in USD en worden betaald in de equivalente hoeveelheid GRT. -Query costs may vary; the quoted cost is the average at time of publication (December 2022). +Kosten per query kunnen variëren; de genoemde kosten zijn het gemiddelde op het moment van publicatie (december 2022). 
-## Low Volume User (less than 30,000 queries per month) +## Gebruiker met een laag volume (minder dan 30.000 queries per maand) -| Cost Comparison | Self Hosted | Graph Network | +| Kostenvergelijking | Zelf hosten | Graph Netwerk | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | ~$15 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | -| Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | ~$15 | - -## Medium Volume User (3,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $0+ | ~$15 per maand | +| Engineering time | $400 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 30000 (schaalt automatisch) | +| Kosten per query | $0 | $0,0005 | +| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $750+ per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $750+ | ~$15 | + +## Gebruiker met een gemiddeld volume (3.000.000+ queries per maand) + +| Kostenvergelijking | Zelf hosten | Graph Netwerk | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $750 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 3,000,000+ | -| Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $750 | - -## High Volume User (30,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Maandelijkse serverkosten | $350 per maand | $0 | +| Querykosten | $500 per maand | $750 per maand | +| Engineering time | $800 per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 3000000+ | +| Kosten per query | $0 | $0,00025 | +| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | +| Technische personeelskosten | $200 per uur | Inbegrepen | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $1650+ | $750 | + +## Gebruiker met een hoog volume (30.000.000+ query's per maand) + +| Kostenvergelijking | Zelf hosten | Graph Netwerk | | :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $4,500 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000,000+ | -| Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | 
Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $4,500 | - -\*including costs for backup: $50-$100 per month +| Maandelijkse serverkosten | $1100 per maand, per node | $0 | +| Querykosten | $4000 | $4500 per maand | +| Aantal benodigde nodes | 10 | Niet van toepassing | +| Engineering time | $6000 of meer per maand | Geen, deze kosten worden opgevangen door het wereldwijd gedistribueerde netwerk van indexeerders | +| Aantal queries per maand | Beperkt tot infrastructuurcapaciteiten | 30000000+ | +| Kosten per query | $0 | $0.00015 | +| Infrastructuur | Gecentraliseerd | Gedecentraliseerd | +| Geografische redundantie | $1200 totale kosten per extra node | Inbegrepen | +| Uptime | Wisselend | 99,9%+ | +| Totale maandelijkse kosten | $11000+ | $4500 | + +\*inclusief kosten voor een back-up: $50-$100 per maand Engineering time based on $200 per hour assumption -using the max query budget function in the budget billing tab, while maintaining high quality of service +gebruikmakend van de maximale querybudgetfunctie in het budgetfacturatietabblad, met behoud van hoge +kwaliteit van de service -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. +Geschatte kosten gelden alleen voor Ethereum Mainnet-subgraphs - de kosten zijn nog hoger bij zelfhosting van een `graph-node` op andere netwerken. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Signaal cureren op een subgraph is een optionele eenmalige, kostenneutrale actie (bijv. $1000 aan signaal kan worden gecureerd op een subgraph en later worden opgenomen - met het potentieel om rendementen te verdienen tijdens het proces). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Sommige gebruikers moeten mogelijk hun subgraph bijwerken naar een nieuwe versie. Vanwege Ethereum-gaskosten kost een update ~$50 op het moment van schrijven. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +Let op: gaskosten op [Arbitrum](/arbitrum/arbitrum-faq) zijn aanzienlijk lager dan op het Ethereum-mainnet. -## No Setup Costs & Greater Operational Efficiency +## Geen voorafgaande kosten & grotere operationele efficiëntie -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. +Geen voorafgaande kosten. Begin direct zonder voorafgaande- of overheadkosten. Geen hardwarevereisten. Geen uitval door gecentraliseerde infrastructuur, en meer tijd om je te concentreren op je kernproduct. Geen noodzaak voor back-up servers, probleemoplossing of dure engineeringtijd. -## Reliability & Resiliency +## Betrouwbaarheid & Veerkrachtigheid -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. 
Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. +Samenvattend: Het Graph Network is goedkoper, gemakkelijker te gebruiken en levert superieure resultaten in vergelijking met het lokaal hosten van een `graph-node`. -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). +Begin vandaag nog met het gebruik van The Graph Network en leer hoe je je subgraph kunt [upgraden naar het gedecentraliseerde netwerk van The Graph](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/nl/network/curating.mdx b/website/pages/nl/network/curating.mdx index 797d9b9dd896..8a12ceba709a 100644 --- a/website/pages/nl/network/curating.mdx +++ b/website/pages/nl/network/curating.mdx @@ -2,95 +2,95 @@ title: Curating --- -Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. +Curatoren spelen een cruciale rol in de gedecentraliseerde economie van The Graph. Ze gebruiken hun kennis van het web3-ecosysteem om signalen af te geven op de subgraphs die geïndexeerd moeten worden door The Graph Network. Via de Graph Explorer kunnen curatoren netwerkgegevens bekijken om signaleringsbeslissingen te nemen. Het Graph Network beloont curatoren die signalen afgeven op subgraphs van goede kwaliteit door een deel van de querykosten die door deze subgraphs worden gegenereerd te verdelen over de curatoren. Curatoren worden beloond als ze vroeg signaleren. Deze signaleringen van curatoren zijn belangrijk voor Indexeerders, die vervolgens de gegevens van deze gesignaleerde subgraphs kunnen verwerken of indexeren. -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +Bij het signaleren kunnen curatoren beslissen om een signaal af te geven voor een specifieke versie van de subgraph of om te signaleren via automatische migratie. Wanneer de curator kiest om te signaleren via automatische migratie worden de aandelen van de curator altijd gemigreerd naar de nieuwste versie van de subgraph. Wanneer de curator kiest om te signaleren op een specifieke versie van een subgraph, blijven de aandelen altijd op deze specifieke versie. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Onthoud dat curatie risico's met zich meebrengt. 
Doe alsjeblieft je eigen onderzoek om ervoor te zorgen dat je signaleert op subgraphs die je vertrouwt. Het maken van een subgraph is permissieloos, dus mensen kunnen subgraphs maken en ze elke gewenste naam geven. Voor meer begeleiding over risico's van curatie, bekijk de [Curatie Gids van The Graph Academy.](https://thegraph.academy/curators/) ## Bonding Curve 101 -First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. +Laten we aan het begin beginnen: Elke subgraph heeft een bonding curve, op die bonding curve worden curatie-aandelen gemunt wanneer een gebruiker een signaal toevoegt **aan** de curve. De bonding curve van elke subgraph is uniek. De bonding curves zijn ontworpen zodat de prijs om een curatie-aandeel op een subgraph te muntten lineair stijgt over het aantal gemunte aandelen. -![Price per shares](/img/price-per-share.png) +![Prijs per aandeel](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +Hierdoor neemt de prijs lineair toe, wat betekent dat het in de loop van de tijd duurder wordt om een curatie-aandeel te kopen. Voor een voorbeeld van wat we precies bedoelen, zie de bonding curve hieronder: -![Bonding curve](/img/bonding-curve.png) +![Bonding Curve](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Stel dat we twee curatoren hebben die curatie-aandelen voor een subgraph maken: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. -- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. +- Curator A is de eerste die signaleert op de subgraph. Door 120.000 GRT aan de curve toe te voegen, kan hij 2000 aandelen maken. +- Curator B signaleert op een later moment op de subgraph. Om dezelfde hoeveelheid aandelen als Curator A te krijgen, zou hij 360.000 GRT aan de curve moeten toevoegen. +- Aangezien beide curatoren de helft van het totale aantal curatie-aandelen hebben, zullen ze een gelijke hoeveelheid curator royalties ontvangen. +- Als een van de curatoren nu zijn 2000 curatie-aandelen zou "burnen", zou hij 360.000 GRT ontvangen. +- De overgebleven curator zou nu alle curator royalties voor die subgraph ontvangen. Als ze hun aandelen zouden "burnen" om GRT op te nemen, zouden ze 120.000 GRT ontvangen. 
+- **Samenvattend:** De GRT-waardering van curatie-aandelen wordt bepaald door de bonding curve en kan volatiel zijn. Er is een mogelijkheid om grote verliezen te leiden. Vroeg signaleren betekent dat je minder GRT inlegt voor elk aandeel. Als gevolg hiervan verdien je meer curator royalties per GRT dan latere curatoren voor dezelfde subgraph. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +In het algemeen is een bonding curve een wiskundige curve die de relatie tussen tokenaanbod en prijs definieert. In het specifieke geval van subgraphcuratie **stijgt de prijs van elk subgraphaandeel met elke geïnvesteerde token** en **daalt de prijs van elk aandeel met elke verkochte token.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +In het geval van The Graph wordt gebruik gemaakt van de implementatie van een [bonding curve-formule van Bancor.](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA). -## How to Signal +## Hoe werkt het Signaleren -Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) +Nu we de basis van de werking van de bonding curve hebben behandeld, is dit hoe je te werk gaat om een signaal af te geven op een subgraph. Binnen het Curator-tabblad op de Graph Explorer kunnen curatoren signalen afgeven en intrekken voor een bepaalde subgraph op basis van netwerkstatistieken. Voor een stapsgewijs overzicht over hoe dit werkt in de Explorer, [klik hier.](/network/explorer) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Een curator kan ervoor kiezen om een signaal af te geven voor een specifieke subgraph versie, of ze kunnen ervoor kiezen om hun signaal automatisch te laten migreren naar de nieuwste versie van de subgraph. Beide strategieën hebben voordelen en nadelen. -Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Signaleren voor een specifieke versie is vooral handig wanneer één subgraph door meerdere dApps wordt gebruikt. Sommige dApps moeten de subgraph mogelijk regelmatig bijwerken met nieuwe functies. Andere dApps hebben mogelijk de voorkeur om een oudere, goed geteste subgraphversie te gebruiken. Bij de initiële curatie wordt een standaardbelasting van 1% in rekening gebracht. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. 
You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +Automatische migratie van je signalering naar de nieuwste subgraphversie kan waardevol zijn om ervoor te zorgen dat je querykosten blijft ontvangen. Elke keer dat je signaleert, wordt een curatiebelasting van 1% in rekening gebracht. Je betaalt ook een curatiebelasting van 0,5% bij elke migratie. Subgraphontwikkelaars worden ontmoedigd om vaak nieuwe versies te publiceren - ze moeten een curatiebelasting van 0,5% betalen voor alle automatisch gemigreerde curatie-aandelen. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Opmerking**: Het eerste adres dat op een bepaalde subgraph signaleert, wordt beschouwd als de eerste curator en moet veel meer gasintensief werk doen dan de rest van de volgende curatoren, omdat de eerste curator de curatie-aandeeltokens initialiseert, de bonding curve initialiseert en ook tokens overdraagt naar de Graph-proxy. -## What does Signaling mean for The Graph Network? +## Wat betekent Signalering voor het Graph Netwerk? -For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +Voordat consumenten queries kunnen opvragen van een subgraph, moet de subgraph eerst worden geïndexeerd. Indexeren is een proces waarbij bestanden, gegevens en metadata worden bekeken, gecatalogiseerd en vervolgens worden geïndexeerd zodat resultaten sneller kunnen worden gevonden. Om de gegevens van een subgraph doorzoekbaar te maken, moet hij duidelijk georganiseerd worden. -And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. +En dus, als Indexeerders zouden moeten raden welke subgraphs ze moeten indexeren, zou de kans klein zijn dat ze substantiële querykosten zouden verdienen, omdat ze geen manier zouden hebben om te valideren welke subgraphs van goede kwaliteit zijn. Hier komt curatie goed van pas. -Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. 
Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! +Curatoren maken het Graph netwerk efficiënt en signalering is het proces dat curatoren gebruiken om Indexeerders te laten weten dat een subgraph goed is om te indexeren, waarbij GRT wordt toegevoegd aan een bonding curve voor een subgraph. Indexeerders kunnen het signaal van een curator vertrouwen omdat curatoren bij het signaleren een curatieaandeel voor de subgraph krijgen, waardoor ze een deel van de toekomstige querykosten die de subgraph genereert ontvangen. Het signaal van de curator wordt weergegeven als ERC20-tokens genaamd Graph Curation Shares (GCS). Curatoren die meer querykosten willen verdienen, moeten hun GRT signaleren naar subgraphs waarvan ze voorspellen dat ze een sterke stroom aan query's zullen genereren. Curatoren kunnen niet worden gestraft voor slecht gedrag, maar er is een stortingsbelasting voor curatoren om slechte besluitvorming te ontmoedigen die de integriteit van het netwerk zou kunnen schaden. Curatoren verdienen ook minder querykosten als ze ervoor kiezen om te cureren op een subgraphs van lage kwaliteit, omdat er minder query's zijn om te verwerken of minder Indexers om die vragen te verwerken. Zie het onderstaande diagram! -![Signaling diagram](/img/curator-signaling.png) +![Signaleringsafbeelding](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +Indexeerders kunnen subgraphs vinden om te indexeren op basis van curatiesignalen die ze zien in The Graph Explorer (zie onderstaande schermafbeelding). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Verken Subgraphs](/img/explorer-subgraphs.png) -## Risks +## Risico's -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. -3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. De querymarkt is nog jong bij het Graph Netwerk en er bestaat een risico dat je %APY lager kan zijn dan je verwacht door opkomende marktdynamiek. +2. 
Curatiekosten - wanneer een Curator GRT signaleert voor een subgraph, betalen ze een curatiebelasting van 1%. Deze vergoeding wordt verbrand en de rest wordt gedeponeerd in de reservevoorraad van de bonding curve. +3. Wanneer curators hun aandelen verbranden om GRT op te nemen, wordt de GRT-waardering van de overgebleven aandelen verminderd. Let op dat curatoren in sommige gevallen kunnen besluiten om hun aandelen **in één keer** te verbranden. Deze situatie kan voorkomen als een dApp-ontwikkelaar stopt met het verbeteren van een subgraph en geen queries meer naar de subgraph stuurt, of als een subgraph faalt. Als gevolg hiervan kunnen overgebleven curators mogelijk slechts een fractie van hun oorspronkelijke GRT opnemen. Voor een netwerkrol met een lager risicoprofiel, zie [Delegators](/network/delegating). +4. Een subgraph kan stuk gaan door een bug. Een subgraph die stuk is gegenereerd geen querykosten. Als gevolg hiervan moet je wachten tot de ontwikkelaar de bug repareert en een nieuwe versie implementeert. + - Als je bent geabonneerd op de nieuwste versie van een subgraph, worden je curatieaandelen automatisch gemigreerd naar die nieuwe versie. Er is een curatiebelasting van 0,5%. + - Als je hebt gesignaleerd op een specifieke subgraphversie en deze stuk gaat, moet je je curatieaandelen handmatig verbranden. Houd er rekening mee dat je mogelijk meer of minder GRT ontvangt dan je aanvankelijk hebt gestort in de curatiecurve, wat een risico is dat gepaard gaat met curatie. Je kunt vervolgens signaleren op de nieuwe subgraphversie, je zult hier een curatiebelasting van 1% betalen. -## Curation FAQs +## Veelgestelde Vragen over Curatie -### 1. What % of query fees do Curators earn? +### Welk percentage van de querykosten verdienen curatoren? -By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Door te signaleren op een subgraph, verdien je een deel van alle querykosten die deze subgraph genereert. 10% van alle querykosten gaat naar de curatoren pro rata naar hun hoeveelheid curatieaandelen. Deze 10% kan door governance veranderen de toekomst. -### 2. How do I decide which subgraphs are high quality to signal on? +### Hoe bepaal ik welke subgraphs van hoge kwaliteit zijn om op te signaleren? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Het vinden van subgraphs van hoge kwaliteit is een complexe taak, maar het kan op vele verschillende manieren worden benadert. Als curator wil je op zoek gaan naar betrouwbare subgraphs die veel queries genereren. Een betrouwbare subgraph kan waardevol zijn als deze compleet, nauwkeurig is en voldoet aan de gegevensbehoeften van een dApp. Een slecht geconstrueerde subgrafiek moet mogelijk worden herzien of opnieuw worden gepubliceerd en kan ook stuk gaan. Het is essentieel voor curatoren om de architectuur of code van een subgraph te beoordelen om te bepalen of een subgraph waardevol is. 
Als gevolg daarvan kunnen curatoren: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Hun begrip van een netwerk gebruiken om te proberen voorspellen hoe een individuele subgraph in de toekomst mogelijk een hoger of lager queryvolume zal genereren +- De metrics begrijpen die beschikbaar zijn via The Graph Explorer. Metrics zoals historisch queryvolume en wie de subgraphontwikkelaar is, kunnen helpen bepalen of een subgraph al dan niet de moeite waard is om op te signaleren. -### 3. What’s the cost of updating a subgraph? +### Wat zijn de kosten voor het updaten van een subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +Het migreren van je curatieaandelen naar een nieuwe subgraphversie brengt een curatiebelasting van 1% met zich mee. Curatoren kunnen ervoor kiezen om zich te abonneren op de nieuwste versie van een subgraph. Wanneer curatieaandelen automatisch worden gemigreerd naar een nieuwe versie, betalen curatoren ook een 0.5% curatiebelasting, omdat het updaten van subgraph een on-chain actie is die gas kost. -### 4. How often can I update my subgraph? +### Hoe vaak kan ik mijn subgraph updaten? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Het wordt aanbevolen om je subgraphs niet te vaak bij te werken. Zie de bovenstaande vraag voor meer details. -### 5. Can I sell my curation shares? +### Kan ik mijn curatieaandelen verkopen? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. +Curatieaandelen kunnen niet "gekocht" of "verkocht" worden zoals andere ERC20-tokens waar je mogelijk bekend mee bent. Ze kunnen alleen worden gemint (gemaakt) of verbrand (vernietigd) langs de bonding curve voor een specifieke subgraph. De hoeveelheid GRT die nodig is om een nieuw signaal te minten en de hoeveelheid GRT die je ontvangt wanneer je je bestaande signaal verbrandt, worden bepaald door die bonding curve. Als curator moet je weten dat wanneer je je curatieaandelen verbrandt om GRT op te nemen, je uiteindelijk meer of minder GRT kunt krijgen dan je aanvankelijk hebt gestort. -Still confused? Check out our Curation video guide below: +Nog in de war? 
Bekijk onze Curatie videogids hieronder: diff --git a/website/pages/nl/network/developing.mdx b/website/pages/nl/network/developing.mdx index d0758d7e94c4..7a9dd694b719 100644 --- a/website/pages/nl/network/developing.mdx +++ b/website/pages/nl/network/developing.mdx @@ -1,5 +1,5 @@ --- -title: Ontwikkelen +title: Developing --- Developers are the demand side of The Graph ecosystem. Developers build subgraphs and publish them to The Graph Network. Then, they query live subgraphs with GraphQL in order to power their applications. @@ -18,7 +18,7 @@ As with all subgraph development, it starts with local development and testing. Once defined, the subgraph can be built and deployed to the [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). The Subgraph Studio is a sandbox environment which will index the deployed subgraph and make it available for rate-limited development and testing. This gives developers an opportunity to verify that their subgraph does not encounter any indexing errors, and works as expected. -### Publiceer op het netwerk +### Publiceren op het netwerk When the developer is happy with their subgraph, they can publish it to The Graph Network. This is an on-chain action, which registers the subgraph so that it is discoverable by Indexers. Published subgraphs have a corresponding NFT, which is then easily transferable. The published subgraph has associated metadata, which provides other network participants with useful context and information. diff --git a/website/pages/nl/network/indexing.mdx b/website/pages/nl/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/nl/network/indexing.mdx +++ b/website/pages/nl/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. 
+Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? 
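Returning briefly to the `queryFeeCut` and `indexingRewardCut` parameters described above: the sketch below is a rough, illustrative approximation of the split they control, not the on-chain staking-contract implementation. The function names, the example amounts, and the pro-rata split of the Delegator portion by delegation-pool stake are assumptions for illustration only.

```python
# Illustrative sketch only: splits rewards using cut parameters like those
# described above. The real accounting happens in the protocol contracts.

def split_by_cut(amount_grt: float, cut_percent: float) -> tuple[float, float]:
    """Return (indexer_share, delegator_pool_share) for a cut given in percent."""
    indexer_share = amount_grt * cut_percent / 100.0
    return indexer_share, amount_grt - indexer_share

# Example: an allocation closes with 1,000 GRT of query fee rebates and
# 2,000 GRT of indexing rewards, with queryFeeCut = indexingRewardCut = 95%.
fee_indexer, fee_delegators = split_by_cut(1_000, 95)        # 950.0, 50.0
reward_indexer, reward_delegators = split_by_cut(2_000, 95)  # 1900.0, 100.0

# Each Delegator's share of the Delegator portion is assumed here to be
# pro-rata to their stake in the delegation pool.
delegator_stakes = {"alice": 60_000, "bob": 40_000}
total_delegated = sum(delegator_stakes.values())
per_delegator = {
    name: fee_delegators * stake / total_delegated
    for name, stake in delegator_stakes.items()
}
print(per_delegator)  # {'alice': 30.0, 'bob': 20.0}
```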
@@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/nl/new-chain-integration.mdx b/website/pages/nl/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/nl/new-chain-integration.mdx +++ b/website/pages/nl/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. 
Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/nl/operating-graph-node.mdx b/website/pages/nl/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/nl/operating-graph-node.mdx +++ b/website/pages/nl/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). 
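As a rough way to sanity-check whether an RPC endpoint exposes the capabilities mentioned above (EIP-1898 block-hash calls and the `trace` module), the sketch below sends two probe requests. It is not part of Graph Node; the endpoint URL and the pass/fail heuristics are assumptions for illustration only.

```python
# Rough sketch (not part of Graph Node): probe an EVM JSON-RPC endpoint for
# the capabilities discussed above. Heuristics are illustrative only.
import requests

RPC_URL = "http://localhost:8545"  # hypothetical archive node endpoint

def rpc(method: str, params: list) -> dict:
    payload = {"jsonrpc": "2.0", "id": 1, "method": method, "params": params}
    return requests.post(RPC_URL, json=payload, timeout=10).json()

# EIP-1898: state methods such as eth_call should accept a block *hash* object
# as the block parameter instead of a block number or tag.
block = rpc("eth_getBlockByNumber", ["0x1", False]).get("result") or {}
eip1898 = rpc("eth_call", [
    {"to": "0x0000000000000000000000000000000000000000", "data": "0x"},
    {"blockHash": block.get("hash")},
])
print("EIP-1898-style call accepted:", "error" not in eip1898)

# trace_filter: needed for callHandlers and call-filtered blockHandlers.
traces = rpc("trace_filter", [{"fromBlock": "0x1", "toBlock": "0x2"}])
print("trace_filter available:", "error" not in traces)
```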
### IPFS Nodes diff --git a/website/pages/nl/publishing/publishing-a-subgraph.mdx b/website/pages/nl/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/nl/publishing/publishing-a-subgraph.mdx +++ b/website/pages/nl/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/nl/querying/querying-the-hosted-service.mdx b/website/pages/nl/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/nl/querying/querying-the-hosted-service.mdx +++ b/website/pages/nl/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/nl/querying/querying-with-python.mdx b/website/pages/nl/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/nl/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. 
The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/nl/quick-start.mdx b/website/pages/nl/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/nl/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have your API key, you can query from your dapp via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/nl/substreams.mdx b/website/pages/nl/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/nl/substreams.mdx +++ b/website/pages/nl/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. 
**The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/nl/sunrise.mdx b/website/pages/nl/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/nl/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. 
+ +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? 
+ +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. 
+ +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/nl/tokenomics.mdx b/website/pages/nl/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/nl/tokenomics.mdx +++ b/website/pages/nl/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/pl/about.mdx b/website/pages/pl/about.mdx index c1f7c886900f..e2726962f0d0 100644 --- a/website/pages/pl/about.mdx +++ b/website/pages/pl/about.mdx @@ -1,47 +1,47 @@ --- -title: About The Graph +title: Więcej o The Graph --- -This page will explain what The Graph is and how you can get started. +Ta strona ma na celu wyjaśnienie czym jest The Graph i jak możesz zacząć go używać. -## What is The Graph? +## Co to jest The Graph? -The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. +The Graph jest zdecentralizowanym protokołem ideksującym dane na blockchainie i wysyłającym zapytania o te dane. The Graph umożliwia tworzenie zapytań o dane, które są bezpośrenio trudne do odpytania. 
-Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. +Projekty wykorzystujące kompleksowe smart kontrakty jak [Uniswap](https://uniswap.org/) i inicjatywy NFT jak [Bored Ape Yacht Club](https://boredapeyachtclub.com/) przechowują dane na blockchainie Ethereum, co sprawia, że bardzo trudno jest odczytać cokolwiek poza bardzo podstawowymi danymi bezpośrednio z danej sieci blockchain. -In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. +W przypadku inicjatywy Bored Ape Yacht Club, możemy wykonać podstawowe operacje odczytania przez [kontrakt](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) jak np. znalezienie posiadacza konkretnej małpy (Ape), adresu URI małpy z jej numeru ID, czy informację o całkowitej podaży, ponieważ te informacje są zaprogramowane bezpośrednio w smart kontrakcie, ale już bardziej zaawansowane zapytania real-world i operacje takie jak agregacja, wyszukiwanie, zależności i niebanalne filtrowanie nie będą możliwe. Np. jeśli chcielibyśmy stworzyć zapytanie o małpy, które należą do danego adresu i filtrować pojedynczo każdą ich cechę, nie bylibyśmy w stanie otrzymać tej informacji przez bezpośrednią interakcję z samym smart kontraktem. -To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. +Aby otrzymać te dane trzeba byłoby przeprocesować każde pojedyncze wydarzenie [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746), które kiedykolwiek miało miejsce, odczytać dane z protokołu IPFS używając Token ID i IPFS hash, by w końcu dane te zagregować. Uzyskanie odpowiedzi nawet na tego typu, relatywnie proste pytania, zajęłoby **wiele godzin, a nawet dni** dla zdecentralizowanej aplikacji (dapp) działającej w przeglądarce. -You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. +Możesz również zbudować własny serwer, przetwarzać na nim transakcje, zapisywać je w bazie danych i wykorzystywać punkt końcowy API w celu tworzenia zapytań o dane. 
Jednak ta opcja [wymaga dużych nakładów finansowych](/network/benefits/), regularnej konserwacji i utrzymania, a mimo to stanowi ona pojedyńczy punkt podatności na awarię i narusza warunki bezpieczeństwa wymagane w procesie decentralizacji. -**Indexing blockchain data is really, really hard.** +**Indeksowanie danych na blockchainie jest bardzo, bardzo trudne.** -Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. +Właściwości sieci blockchain jak ich nieodwołalność, reorganizacja łańcucha, czy tzw. "uncle blocks" komplikują ten proces i sprawiają, że jest on nie tylko sam w sobie długotrwały ale również odzyskiwanie poprawnych wyników zapytań o dane blockchainowe jest utrudnione. -The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). +The Graph rozwiązuje te problemy za pomocą zdecentralizowanego protokołu indeksującego i umożliwiającego wydajne i efektywne odpytywanie danych z sieci blockchain. Te APIs (indeksowane subgrafy) mogą być odpytywane przez standardowy GraphQL API. Dziś istnieje już serwis "hosted", jak również zdecentralizowany protokół z tymi samymi możliwościami. Oba te rozwiązania są zabezpieczone przez zaimplementowany [Graph Node](https://github.com/graphprotocol/graph-node), który jest rozwiązaniem dostępnym publicznie (tzw. open source). -## How The Graph Works +## Jak działa The Graph -The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. +The Graph uczy się co i jak należy indeksować spośród danych sieci Ethereum na podstawie opisów subgraphów, zwanych manifestami. Opis subgraphu definiuje smart kontrakty, które leżą w obszarze zainteresowania danego subgraphu, zdarzenia w tych kontraktach, na które należy zwracać uwagę, oraz sposób mapowania danych zdarzeń na dane przechowywane w bazie danych The Graph. -Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. +Po napisaniu `manifestu subgraphu` można użyć narzędzia Graph CLI, aby przechować definicję w protokole IPFS i poinformować dowolnego indeksera o możliwości rozpoczęcia indeksowania danych dla tego subgraphu. 
-This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: +Ten diagram przedstawia bardziej szczegółowo przepływ danych po wdrożeniu manifestu subgraphu, kiedy mamy do czynienia z transakcjami Ethereum: -![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) +![Grafika wyjaśniająca sposób w jaki protokół The Graph wykorzystuje węzeł Graph Node by obsługiwać zapytania dla konsumentów danych](/img/graph-dataflow.png) -The flow follows these steps: +Proces ten przebiega według poniższych kroków: -1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. -5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. +1. Aplikacja dApp dodaje dane do sieci Ethereum za pomocą transakcji w smart kontrakcie. +2. Inteligentny kontrakt emituje jedno lub więcej zdarzeń podczas przetwarzania transakcji. +3. Graph Node nieprzerwanie skanuje sieć Ethereum w poszukiwaniu nowych bloków i danych dla Twojego subgraphu, które mogą one zawierać. +4. Graph Node znajduje zdarzenia Ethereum dla Twojego subgraphu w tych blokach i uruchamia dostarczone przez Ciebie procedury mapowania. Mapowanie to moduł WASM, który tworzy lub aktualizuje jednostki danych przechowywane przez węzeł Graph Node w odpowiedzi na zdarzenia Ethereum. +5. Aplikacja dApp wysyła zapytanie do węzła Graph Node o dane zindeksowane na blockchainie, korzystając z [punktu końcowego GraphQL](https://graphql.org/learn/). Węzeł Graph Node przekształca zapytania GraphQL na zapytania do swojego podstawowego magazynu danych w celu pobrania tych danych, wykorzystując zdolności indeksowania magazynu. Aplikacja dApp wyświetla te dane w interfejsie użytkownika dla użytkowników końcowych, którzy używają go do tworzenia nowych transakcji w sieci Ethereum. Cykl się powtarza. -## Next Steps +## Kolejne kroki -In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. +W następnych częściach omówimy bardziej szczegółowo, jak definiować subgraphy, jak je wdrażać i jak tworzyć zapytania o dane z indeksów budowanych przez węzeł Graph Node. -Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. +Zanim zaczniesz pisać własny subgraph, warto sprawdzić narzędzie Graph Explorer i poznać niektóre już wdrożone subgraphy. Strona każdego subgraphu zawiera miejsce zwane "placem zabaw" (ang. 
playground), który umożliwia zapytanie o dane tego subgraphu za pomocą języka GraphQL. diff --git a/website/pages/pl/arbitrum/arbitrum-faq.mdx b/website/pages/pl/arbitrum/arbitrum-faq.mdx index 849d08c92b93..4381ac1286cd 100644 --- a/website/pages/pl/arbitrum/arbitrum-faq.mdx +++ b/website/pages/pl/arbitrum/arbitrum-faq.mdx @@ -1,16 +1,16 @@ --- -title: Arbitrum FAQ +title: Arbitrum - najczęściej zadawane pytania --- Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. -## Why is The Graph implementing an L2 Solution? +## Dlaczego The Graph implementuje rozwiązanie L2 (ang. Layer 2)? -By scaling The Graph on L2, network participants can expect: +Dzięki procesowi skalowania protokołu The Graph na L2, uczestnicy ekosystemu mogą liczyć na: - Upwards of 26x savings on gas fees -- Faster transaction speed +- Szybsza prędkość transakcji - Security inherited from Ethereum @@ -18,60 +18,60 @@ Scaling the protocol smart contracts onto L2 allows network participants to inte The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. -## What do I need to do to use The Graph on L2? +## Co należy zrobić by móc używać protokołu The Graph w L2? -Users bridge their GRT and ETH  using one of the following methods: +Użytkownicy muszą najpierw przenieść swoje tokeny GRT i ETH między sieciami L1 i L2 (ang. bridge tokens), używając do tego jednej z trzech poniższych metod: - [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +By w pełni wykorzystać wszystkie zalety używania protokołu The Graph w L2 warto używać rozwijanej listy aby przełączać się między łańcuchami. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## Co powinien wiedzieć na ten temat subgraf developer, konsument danych, indekser, kurator lub delegator? There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +Od 10 kwietnia 2023 roku 5% wszystkich nagród za indeksowanie jest emitowane w sieci Arbitrum. W miarę wzrostu udziału w sieci i zatwierdzenia przez Radę Fundacji The Graph, nagrody za indeksowanie stopniowo będą przenoszone z sieci Ethereum (L1) do sieci Arbitrum (L2), a ostatecznie całkowicie przeniosą się na Arbitrum. -## If I would like to participate in the network on L2, what should I do? +## Co trzeba zrobić by zacząć uczestniczyć w sieci Arbitrum (L2)? 
Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Czy ze skalowaniem sieci do L2 wiąże się jakieś ryzyko? All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Czy subgrafy, funkcjonujące już w sieci Ethereum, będą dalej działać? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Tak, kontrakty The Graph Network będą funkcjonować równolegle w sieciach Ethereum i Arbitrum, dopóki nie nastąpi całkowite przeniesienie do sieci Arbitrum w późniejszym etapie. -## Will GRT have a new smart contract deployed on Arbitrum? +## Czy GRT będzie miało nowy smart kontrakt wdrożony w sieci Arbitrum? Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. -## Billing on Arbitrum FAQs +## Najczęstsze pytania o płatności w sieci Arbitrum -## What do I need to do about the GRT in my billing balance? +## Co powinienem zrobić ze swoim saldem GRT w aktualnym rozliczeniu? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Nie jest wymagana żadna akcja. Tokeny GRT zostały bezpiecznie zmigrowane do sieci Arbitrum i są używane do płatności za zapytania już teraz. -## How do I know my funds have migrated securely to Arbitrum? +## Skąd mam wiedzieć, czy moje środki zostały bezpiecznie zmigrowane do sieci Arbitrum? All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure? +## Skąd mam wiedzieć, że używanie "Arbitrum bridge" jest bezpieczne? The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## O czym muszę pamiętać, dodając nowe tokeny GRT z mojego portfela Ethereum? Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. diff --git a/website/pages/pl/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/pl/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..b3d8571455ab 100644 --- a/website/pages/pl/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/pl/arbitrum/l2-transfer-tools-faq.mdx @@ -2,314 +2,410 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023.
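As a quick illustration of the arbitrum-faq answer above about the GRT token having an additional contract on Arbitrum One, a minimal read-only sketch — assuming ethers v6 and a public Arbitrum One RPC endpoint, neither of which is prescribed by these docs — for checking a wallet's GRT balance against the contract linked above:

```typescript
// Read-only sketch: check a GRT balance on Arbitrum One.
// Assumptions: ethers v6 is installed; the public RPC URL below is reachable.
import { Contract, JsonRpcProvider, formatUnits } from "ethers";

// GRT token on Arbitrum One, as linked from the FAQ above (arbiscan.io).
const GRT_ON_ARBITRUM = "0x9623063377ad1b27544c965ccd7342f7ea7e88c7";
const ERC20_ABI = [
  "function balanceOf(address owner) view returns (uint256)",
  "function decimals() view returns (uint8)",
];

async function grtBalanceOnArbitrum(wallet: string): Promise<string> {
  const provider = new JsonRpcProvider("https://arb1.arbitrum.io/rpc"); // assumed public endpoint
  const grt = new Contract(GRT_ON_ARBITRUM, ERC20_ABI, provider);
  const [raw, decimals] = await Promise.all([grt.balanceOf(wallet), grt.decimals()]);
  return formatUnits(raw, decimals); // human-readable GRT amount
}

// grtBalanceOnArbitrum("0xYourWalletAddress").then(console.log);
```

The same pattern works against the Ethereum mainnet GRT contract by swapping the RPC URL and token address.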
+## General -## What are L2 Transfer Tools? +### Czym są narzędzia przesyłania L2? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. -## Subgraph Transfer +### Czy mogę używać tego samego portfela, którego używam w mainnecie Ethereum? -## How do I transfer my subgraph? +Jeśli korzystasz z portfela [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), możesz użyć tego samego adresu. Jeśli portfel głównej sieci Ethereum jest kontraktem (np. multisig), musisz podać [adres portfela Arbitrum](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2), na który zostanie dokonany przelew. Prosimy o dokładne sprawdzenie adresu, ponieważ wszelkie przelewy na nieprawidłowy adres mogą spowodować ich trwałą utratę. Jeśli chcesz korzystać z multisig na L2, upewnij się, że wdrożyłeś kontrakt multisig na Arbitrum One. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Co się stanie, jeśli nie zrealizuję przesyłania w ciągu 7 dni? -3. Confirm subgraph transfer on Arbitrum\* +Narzędzia przesyłania L2 używają natywnego mechanizmu Arbitrum do wysyłania wiadomości z L1 do L2.
Mechanizm ten nazywany jest "biletem z możliwością ponownej próby" (ang. retryable ticket) i jest używany przez wszystkie natywne mosty tokenowe, w tym most Arbitrum GRT. Więcej informacji na ich temat można znaleźć w [dokumentacji Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +Kiedy przesyłasz swoje aktywa (subgraf, stake, delegowanie lub kuratorstwo) do L2, wiadomość jest wysyłana przez most Arbitrum GRT, który tworzy bilet z możliwością ponownej próby w L2. Narzędzie transferu zawiera pewną wartość ETH w transakcji, która jest wykorzystywana do 1) zapłaty za utworzenie biletu i 2) zapłaty za gaz do wykonania biletu w L2. Ponieważ jednak ceny gazu mogą się różnić w czasie do momentu, gdy bilet będzie gotowy do zrealizowania w L2, możliwe jest, że ta próba automatycznego wykonania zakończy się niepowodzeniem. Gdy tak się stanie, most Arbitrum utrzyma ten bilet aktywnym przez maksymalnie 7 dni i każdy może ponowić próbę "zrealizowania" biletu (co wymaga portfela z pewną ilością ETH przesłanego do Arbitrum). -5. Update Query URL (recommended) +Nazywamy to etapem "Potwierdzenia" we wszystkich narzędziach do przesyłania - w większości przypadków będzie on wykonywany automatycznie, ponieważ najczęściej kończy się sukcesem, ale ważne jest, aby sprawdzić i upewnić się, że się powiódł. Jeśli się nie powiedzie i w ciągu 7 dni nie będzie skutecznych ponownych prób, most Arbitrum odrzuci bilet, a Twoje zasoby (subgraf, stake, delegowanie lub kuratorstwo) zostaną utracone i nie będzie można ich odzyskać. Główni programiści Graph mają system monitorowania, który wykrywa takie sytuacje i próbuje zrealizować bilety, zanim będzie za późno, ale ostatecznie to Ty jesteś odpowiedzialny za zapewnienie, że przesyłanie zostanie zakończone na czas. Jeśli masz problemy z potwierdzeniem transakcji, skontaktuj się z nami za pomocą [tego formularza](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), a nasi deweloperzy udzielą Ci pomocy. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## Przesyłanie Subgrafu + +### Jak mogę przesłać swój subgraf? + + + +Aby przesłać swój subgraf, należy wykonać następujące kroki: + +1. Zainicjuj przesyłanie w sieci głównej Ethereum + +2. Poczekaj 20 minut na potwierdzenie + +3. Potwierdź przesyłanie subgrafu na Arbitrum\* + +4. Zakończ publikowanie subgrafu na Arbitrum + +5. Zaktualizuj adres URL zapytania (zalecane) \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum.
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Skąd powinienem zainicjować przesyłanie? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Przesyłanie można zainicjować ze strony [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) lub dowolnej strony zawierającej szczegóły subgrafu. Kliknij przycisk "Prześlij subgraf " na tej stronie, aby zainicjować proces przesyłania. -## How long do I need to wait until my subgraph is transferred +### Jak długo muszę czekać, aż mój subgraf zostanie przesłany -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +Przesyłanie trwa około 20 minut. Most Arbitrum działa w tle, automatycznie kończąc przesyłanie danych. W niektórych przypadkach koszty gazu mogą wzrosnąć i konieczne będzie ponowne potwierdzenie transakcji. -## Will my subgraph still be discoverable after I transfer it to L2? +### Czy mój subgraf będzie nadal wykrywalny po przesłaniu go do L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Twój subgraf będzie można znaleźć tylko w sieci, w której został opublikowany. Na przykład, jeśli subgraf znajduje się w Arbitrum One, można go znaleźć tylko w Eksploratorze w Arbitrum One i nie będzie można go znaleźć w Ethereum. Upewnij się, że wybrałeś Arbitrum One w przełączniku sieci u góry strony i że jesteś we właściwej sieci. Po przesłaniu subgraf L1 będzie oznaczony jako nieaktualny. -## Does my subgraph need to be published to transfer it? +### Czy mój subgraf musi zostać opublikowany, aby móc go przesłać? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +Aby skorzystać z narzędzia do przesyłania subgrafów, musi on być już opublikowany w sieci głównej Ethereum i musi mieć jakiś sygnał kuratorski należący do portfela, który jest właścicielem subgrafu. Jeśli subgraf nie został opublikowany, zaleca się po prostu opublikowanie go bezpośrednio na Arbitrum One - związane z tym opłaty za gaz będą znacznie niższe. Jeśli chcesz przesłać opublikowany subgraf, ale konto właściciela nie ma na nim żadnego sygnału, możesz zasygnalizować niewielką kwotę (np. 
1 GRT) z tego konta; upewnij się, że wybrałeś sygnał "automatycznej migracji". -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Co stanie się z wersją mojego subgrafu w sieci głównej Ethereum po przesłaniu go do Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +Po przesłaniu subgrafu do Arbitrum, wersja głównej sieci Ethereum zostanie wycofana. Zalecamy zaktualizowanie adresu URL zapytania w ciągu 48 godzin. Istnieje jednak okres prolongaty, dzięki któremu adres URL sieci głównej będzie dalej funkcjonował, tak aby można było zaktualizować obsługę innych aplikacji. -## After I transfer, do I also need to re-publish on Arbitrum? +### Czy po przesłaniu muszę również ponownie opublikować na Arbitrum? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Po upływie 20-minutowego okna przesyłania konieczne będzie jego potwierdzenie za pomocą transakcji w interfejsie użytkownika, aby zakończyć przesyłanie, przy czym narzędzie do przesyłania poprowadzi Cię przez ten proces. Twój endpoint L1 będzie nadal obsługiwany podczas okna przesyłania i okresu prolongaty po jego zakończeniu. Zachęcamy do aktualizacji swojego endpointa w dogodnym dla siebie momencie. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Czy publikowanie i wersjonowanie jest takie samo w L2 jak w sieci głównej Ethereum? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Czy kuratorstwo mojego subgrafu zostanie przeniesione wraz z moim subgrafem? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Jeśli wybrałeś automatyczną migrację sygnału, 100% twojego własnego kuratorstwa zostanie przeniesione wraz z subgrafem do Arbitrum One.
Cały sygnał kuratorski subgrafu zostanie przekonwertowany na GRT w momencie transferu, a GRT odpowiadający sygnałowi kuratorskiemu zostanie użyty do zmintowania sygnału na subgrafie L2. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Inni kuratorzy mogą zdecydować, czy wycofać swoją część GRT, czy też przesłać ją do L2 w celu zmintowania sygnału na tym samym subgrafie. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Czy mogę przenieść swój subgraf z powrotem do głównej sieci Ethereum po jego przesłaniu? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Po przesłaniu, wersja tego subgrafu w sieci głównej Ethereum zostanie wycofana. Jeśli chcesz ją przywrócić do sieci głównej, musisz ją ponownie wdrożyć i opublikować. Jednak przeniesienie z powrotem do sieci głównej Ethereum nie jest zalecane, ponieważ nagrody za indeksowanie zostaną całkowicie rozdzielone na Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Dlaczego potrzebuję bridgowanego ETH do przesłania? -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +Opłaty za gaz w Arbitrum One są uiszczane za pomocą bridgowanego ETH (tj. ETH, które zostało przeniesione do Arbitrum One). Opłaty za gaz są jednak znacznie niższe w porównaniu do głównej sieci Ethereum. -## Curation Signal +## Delegowanie -## How do I transfer my curation? +### Jak mogę przesłać swoją delegację? -To transfer your curation, you will need to complete the following steps: + -1. Initiate signal transfer on Ethereum mainnet +Aby przesłać delegację, należy wykonać następujące kroki: -2. Specify an L2 Curator address\* +1. Zainicjuj przesyłanie delegacji w sieci głównej Ethereum +2. Poczekaj 20 minut na potwierdzenie +3. Potwierdź przesyłanie delegacji na Arbitrum -3. Wait 20 minutes for confirmation +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*If necessary - i.e. you are using a contract address. +### Co stanie się z moimi nagrodami, jeśli zainicjuję przesył z otwartą alokacją w sieci głównej Ethereum? -## How will I know if the subgraph I curated has moved to L2? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. 
-When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Co się stanie, jeśli indeksator, do którego obecnie deleguję, nie jest dostępny w Arbitrum One? -## What if I do not wish to move my curation to L2? +Narzędzie przesyłania L2 aktywuje się tylko wtedy, gdy delegowany przez Ciebie indeksator prześle swój stake do Arbitrum. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Czy Delegaci mają możliwość delegowania do innego Indeksera? -## How do I know my curation successfully transferred? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +### Co jeśli nie mogę znaleźć Indeksera, do którego deleguję w L2? -## Can I transfer my curation on more than one subgraph at a time? +Narzędzie przesyłania L2 automatycznie wykryje indexera, do którego wcześniej delegowałeś. -There is no bulk transfer option at this time. +### Czy będę mógł łączyć i modyfikować lub "rozkładać" moje delegacje pomiędzy jednym nowym lub kilkoma Indekserami zamiast dotychczasowego? -## Indexer Stake +Narzędzie przesyłania L2 zawsze przenosi delegację do tego samego Indeksera, do którego delegowano wcześniej. Po przeniesieniu do L2 możesz cofnąć delegację, poczekać do momentu rozmrożenia i zdecydować, czy chcesz rozdzielić delegację. -## How do I transfer my stake to Arbitrum? +### Czy podlegam okresowi zawieszenia, czy też mogę wypłacić środki natychmiast po skorzystaniu z narzędzia przesyłania delegacji L2? -To transfer your stake, you will need to complete the following steps: +Narzędzie przesyłania umożliwia natychmiastowe przeniesienie do L2. Jeśli chcesz cofnąć delegację, będziesz musiał poczekać na okres rozmrażania. Jeśli jednak Indekser przesłał cały swój stake do L2, możesz natychmiast wypłacić środki w sieci głównej Ethereum. -1. Initiate stake transfer on Ethereum mainnet +### Czy może to mieć negatywny wpływ na moje nagrody, jeśli nie przekażę delegacji? -2. Wait 20 minutes for confirmation +Przewiduje się, że w przyszłości cały udział w sieci zostanie przeniesiony do Arbitrum One. -3. Confirm stake transfer on Arbitrum +### Jak długo trwa przesyłanie delegacji do L2? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. 
If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Czy mogę przesłać swoją delegację, jeśli korzystam z kontraktu GRT/portfela z zablokowanymi tokenami? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Tak! Proces jest nieco inny, ponieważ kontrakty vestingowe nie mogą przekazywać ETH niezbędnych do opłacenia gazu L2, więc musisz je wcześniej wpłacić. Jeśli twój kontrakt vestingowy nie jest w pełni vestowany, najpierw będziesz musiał zainicjować odpowiednik kontraktu vestingowego na L2 i dopiero wtedy będziesz mógł przenieść delegację do tego właśnie kontraktu. Interfejs użytkownika w Eksploratorze poprowadzi Cię przez ten proces, gdy połączysz się z Eksploratorem za pomocą portfela z blokadą vestingu. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## How much time do I have to confirm my stake transfer to Arbitrum? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### Czy istnieje podatek od delegacji? -## What if I have open allocations? +Nie. Otrzymane tokeny na L2 są delegowane do określonego Indeksera w imieniu określonego Delegatora bez naliczania podatku od delegacji. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +### Will my unrealized rewards be transferred when I transfer my delegation? -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. 
-No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## How long will it take to transfer my stake? +### Is moving delegations to L2 mandatory? Is there a deadline? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## Do I have to index on Arbitrum before I transfer my stake? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## Can Delegators move their delegation before I move my indexing stake? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### I don't see a button to transfer my delegation. Why is that? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -## How do I transfer my delegation? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. 
​ -To transfer your delegation, you will need to complete the following steps: +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -1. Initiate delegation transfer on Ethereum mainnet +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -2. Wait 20 minutes for confirmation +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -3. Confirm delegation transfer on Arbitrum +## Sygnał kuratorski -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Jak mogę przesłać swoje kuratorstwo? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Aby przesłać swoje kuratorstwo, należy wykonać następujące kroki: -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +1. Zainicjuj przesyłanie sygnału w sieci głównej Ethereum -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +2. Podaj adres kuratora L2\* -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +3. Poczekaj 20 minut na potwierdzenie -## Do Delegators have the option to delegate to another Indexer? +\*Jeżeli będzie wymagane - np. w przypadku korzystania z adresu kontraktu. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### Skąd będę wiedzieć, czy subgraf, którego jestem kuratorem, został przeniesiony do L2? -## What if I can't find the Indexer I'm delegating to on L2? +Podczas przeglądania strony ze szczegółami subgrafu pojawi się baner informujący, że subgraf został przeniesiony. Możesz postępować zgodnie z wyświetlanymi instrukcjami, aby przesłać swoje kuratorstwo. Informacje te można również znaleźć na stronie ze szczegółami subgrafu każdego z tych, które zostały przeniesione. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Co jeśli nie chcę przenosić swojego kuratorstwa do L2? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Gdy subgraf jest nieaktualny, masz możliwość wycofania swojego sygnału. Podobnie, jeśli subgraf został przeniesiony do L2, możesz wycofać swój sygnał w sieci głównej Ethereum lub wysłać sygnał do L2. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. 
Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### Skąd mam wiedzieć, że moje kuratorstwo zostało pomyślnie przesłane? -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +Szczegóły sygnału będą dostępne za pośrednictwem Eksploratora po upływie ok. 20 minut od uruchomienia narzędzia do przesyłania L2. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Czy mogę przesłać swoje kuratorstwo do więcej niż jednego subgrafu na raz? -## Can my rewards be negatively impacted if I do not transfer my delegation? +Obecnie nie ma opcji zbiorczego przesyłania. -It is anticipated that all network participation will move to Arbitrum One in the future. +## Stake Indeksera -## How long does it take to complete the transfer of my delegation to L2? +### Jak mogę przesłać swój stake do Arbitrum? -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +> Zastrzeżenie: Jeśli obecnie dokonujesz unstakingu jakiejkolwiek części GRT w swoim Indekserze, nie będziesz mógł korzystać z Narzędzi Przesyłania L2. + + + +Aby przesłać swój stake, należy wykonać następujące kroki: + +1. Zainicjuj przesłanie stake'u w sieci głównej Ethereum -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +2. Poczekaj 20 minut na potwierdzenie -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +3. Potwierdź przesyłanie stake'u na Arbitrum -## Is there any delegation tax? +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Czy wszystkie moje stake'i zostaną przesłane? + +Możesz wybrać, jaką część stake'u chcesz przesłać. Jeśli zdecydujesz się przenieść cały swój stake za jednym razem, będziesz musiał najpierw zamknąć wszystkie otwarte alokacje. + +Jeśli planujesz przesłać część swojego stake'u w kilku transakcjach, musisz zawsze podawać ten sam adres beneficjenta. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. 
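A simplified, illustrative sketch of the Indexer minimum-stake rule covered in this FAQ — the first transfer must send at least 100k GRT, any stake left on L1 must also stay above 100k GRT and, together with delegation, must cover open allocations. This is plain arithmetic for illustration only, not a protocol API, and the example amounts are hypothetical:

```typescript
// Illustrative check of the stake-transfer constraints described in this FAQ; not a protocol API.
// Amounts are whole GRT; names and numbers are hypothetical.
const MIN_FIRST_TRANSFER_GRT = 100_000; // minimum sent to L2 on the first use of the transfer tool
const MIN_L1_REMAINDER_GRT = 100_000;   // minimum that must remain if any stake stays on L1

function canTransferStake(input: {
  ownStakeOnL1: number;
  delegationOnL1: number;
  openAllocations: number;
  amountToTransfer: number;
}): boolean {
  const remainder = input.ownStakeOnL1 - input.amountToTransfer;
  if (input.amountToTransfer < MIN_FIRST_TRANSFER_GRT) return false;          // first transfer too small
  if (remainder === 0 && input.openAllocations > 0) return false;             // moving everything requires closing open allocations first
  if (remainder > 0 && remainder < MIN_L1_REMAINDER_GRT) return false;        // leftover L1 stake below the minimum
  if (remainder + input.delegationOnL1 < input.openAllocations) return false; // remaining stake + delegation must cover open allocations
  return true;
}

// Hypothetical Indexer: 250k GRT self-stake, 300k delegated, 400k in open allocations, moving 120k.
console.log(canTransferStake({ ownStakeOnL1: 250_000, delegationOnL1: 300_000, openAllocations: 400_000, amountToTransfer: 120_000 })); // true
```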
+Ważne: Przy pierwszym użyciu narzędzia do przesyłania musisz spełnić minimalne wymagania dotyczące stake'u na L2. Indeksatorzy muszą wysłać minimum 100 tys. GRT (przy pierwszym wywołaniu tej funkcji). W przypadku pozostawienia części stake'u na L1, musi ona również przekraczać minimum 100 tys. GRT i być wystarczająca (wraz z delegacjami) do pokrycia otwartych alokacji. -## Vesting Contract Transfer +### Ile mam czasu na potwierdzenie przesłania stake'u do Arbitrum? -## How do I transfer my vesting contract? +\*\*\* Aby zakończyć przesyłanie stake'u na Arbitrum, należy potwierdzić transakcję. Ten krok należy wykonać w ciągu 7 dni, w przeciwnym razie stake może zostać utracony. -To transfer your vesting, you will need to complete the following steps: +### Co jeśli mam otwarte alokacje? -1. Initiate the vesting transfer on Ethereum mainnet +Jeśli nie wysyłasz całego swojego stake'a, narzędzie do przesyłania L2 zweryfikuje, czy w sieci głównej Ethereum pozostało co najmniej 100 tys. GRT, a pozostała część stake'a i delegacji jest wystarczająca do pokrycia wszelkich otwartych alokacji. Może być konieczne zamknięcie otwartych alokacji, jeśli saldo GRT nie pokrywa minimum + otwartych alokacji. -2. Wait 20 minutes for confirmation +### Korzystając z narzędzi do przesyłania, czy konieczne jest czekanie 28 dni na odblokowanie w sieci głównej Ethereum przed dokonaniem przesłania? -3. Confirm vesting transfer on Arbitrum +Nie, możesz przesłać swój stake do L2 natychmiast, nie ma potrzeby unstake'owania i czekania przed użyciem narzędzia do przesyłania. 28-dniowy okres oczekiwania ma miejsce tylko wtedy, gdy chcesz wypłacić stake z powrotem do swojego portfela, w sieci głównej Ethereum lub L2. -## How do I transfer my vesting contract if I am only partially vested? +### Jak długo potrwa przesłanie mojego stake'u? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +Przesyłanie stake'a przez narzędzie do przesyłania L2 zajmie około 20 minut. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +### Czy muszę indeksować na Arbitrum, zanim przekażę swój stake? -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Możesz skutecznie przesłać swój stake przed skonfigurowaniem indeksowania, lecz nie będziesz w stanie odebrać żadnych nagród na L2, dopóki nie alokujesz do subgrafów na L2, nie zindeksujesz ich i nie podasz POI. + +### Czy delegaci mogą przenieść swoje delegacje, zanim ja przeniosę swój indeksujący stake? + +No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -4. Withdraw any remaining ETH from the transfer tool contract +### Czy mogę przesłać swój stake, jeśli korzystam z kontraktu GRT / portfela z blokadą tokena? -## How do I transfer my vesting contract if I am fully vested? +Tak! Proces jest nieco inny, ponieważ kontrakty vestingowe nie mogą przesyłać ETH potrzebnych do opłacenia gazu L2, więc musisz je wcześniej wpłacić. Jeśli Twój kontrakt vestingowy nie jest w pełni vestowany, będziesz musiał również najpierw zainicjować odpowiednik kontraktu vestingowego na L2 i będziesz mógł przesłać stake tylko do tego kontraktu vestingowego L2. Interfejs użytkownika w Eksploratorze poprowadzi Cię przez ten proces, gdy połączysz się z Eksploratorem za pomocą portfela vesting lock. 
-For those that are fully vested, the process is similar: +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -2. Set your L2 address with a call to the transfer tool contract +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -4. Withdraw any remaining ETH from the transfer tool contract +## Przesłanie Kontraktu Vestingowego -## Can I transfer my vesting contract to Arbitrum? +### Jak mogę przesłać kontrakt vestingowy? -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +Aby przesłać vesting, należy wykonać następujące kroki: -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +1. Zainicjuj przesyłanie vestingu w sieci głównej Ethereum -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +2. Poczekaj 20 minut na potwierdzenie -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +3. Potwierdź przesłanie vestingu na Arbitrum -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### W jaki sposób mogę przesłać swój kontrakt vestingowy, jeśli mam tylko część vestingu? -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? + -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +1. Wpłać trochę ETH do kontraktu narzędzia przesyłania (UI pomoże oszacować odpowiednią kwotę) -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +2. Wyślij zablokowane GRT za pośrednictwem kontraktu narzędzia przesyłania do L2, aby zainicjować blokadę vestingu L2. 
Ustawi to również adres beneficjenta L2. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +3. Wyślij swój stake/delegację do L2 poprzez "zablokowane" funkcje narzędzia przesyłania w kontrakcie stakingu L1. -## Can I specify a different beneficiary for my vesting contract on L2? +4. Wycofaj pozostałe ETH z kontraktu narzędzia przesyłania -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### W jaki sposób mogę przesłać kontrakt vestingowy, jeśli mam pełny vesting? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. + -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +Dla tych, którzy mają pełny vesting, proces wygląda podobnie: -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +1. Wpłać trochę ETH do kontraktu narzędzia przesyłania (UI pomoże oszacować odpowiednią kwotę) -This allows you to transfer your stake or delegation to any L2 address. +2. Ustaw adres L2 za pomocą wywołania kontraktu narzędzia przesyłania -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +3. Wyślij swój stake/delegację do L2 poprzez "zablokowane" funkcje narzędzia przesyłania w kontrakcie stakingu L1. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +4. Wycofaj pozostałe ETH z kontraktu narzędzia przesyłania -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +### Czy mogę przesłać swój kontrakt vestingowy do Arbitrum? -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +Możesz przesłać saldo GRT kontraktu vestingowego do kontraktu vestingowego w L2. Jest to warunek wstępny do przesłania stake'u lub delegacji z kontraktu vestingowego do L2. Kontrakt vestingowy musi posiadać jakąś ilość GRT (w razie potrzeby możesz przesłać do niego niewielką ilość, np. 1 GRT). -2. Revoke protocol access to the vesting contract (needed for the next step) +Przesyłając GRT z kontraktu vestingowego L1 do L2, możesz wybrać wysokość kwoty do wysłania i zrobić to tyle razy, ile chcesz. Kontrakt vestingowy L2 zostanie zainicjowany przy pierwszym przesłaniu GRT. -3. 
Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Przesyłanie odbywa się za pomocą narzędzia do przesyłania, które będzie widoczne na profilu Eksploratora po połączeniu się z kontem kontraktu vestingowego. -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +Należy pamiętać, że nie będzie można odblokować/wycofać GRT z kontraktu vestingowego L2 do końca okresu vestingu, gdy kontrakt zostanie w pełni vestowany. Jeśli musisz odblokować GRT przed tym terminem, możesz przesłać GRT z powrotem do kontraktu vestingowego L1 za pomocą innego narzędzia przesyłania, które jest przeznaczone właśnie dla tego celu. -5. Wait 20 minutes for confirmation +Jeśli nie przesłałeś żadnego salda kontraktu vestingowego do L2, a Twój kontrakt vestingowy jest w pełni vestowany, nie powinieneś przenosić swojego kontraktu vestingowego do L2. Zamiast tego możesz użyć narzędzi do przesyłania, aby ustawić adres portfela L2 i bezpośrednio przenieść swój stake lub delegację do tego zwykłego portfela na L2. -6. Confirm the balance transfer on L2 +### Używam kontraktu vestingowego do stakingu w mainnecie. Czy mogę przenieść swój stake do Arbitrum? -\*If necessary - i.e. you are using a contract address. +Tak, lecz jeżeli Twój kontrakt nadal się vestinguje, możesz przesłać stake tylko w taki sposób, aby był on własnością Twojego kontraktu vestingowego L2. Musisz najpierw zainicjować ten kontrakt L2, przesyłając saldo GRT za pomocą narzędzia do przesyłania kontraktów vestingowych w Eksploratorze. Jeśli Twój kontrakt jest w pełni vestowany, możesz przesłać swój stake na dowolny adres w L2, ale musisz go wcześniej ustawić i wpłacić trochę ETH, aby narzędzie do przesyłania L2 zapłaciło za gaz w L2. + +### Używam swojego kontraktu vestingowego do delegowania w mainnecie. Czy mogę przesłać swoje delegacje do Arbitrum? + +Tak, ale jeśli Twój kontrakt nadal się vestinguje, możesz przesłać delegację tylko w taki sposób, aby była własnością Twojego kontraktu vestingowego L2. Musisz najpierw zainicjować ten kontrakt L2, przesyłając saldo GRT za pomocą narzędzia do przesyłania kontraktów vestingowych w Eksploratorze. Jeśli Twój kontrakt jest w pełni vestowany, możesz przesłać swoją delegację na dowolny adres w L2, ale musisz go wcześniej ustawić i wpłacić trochę ETH do narzędzia do przesyłania L2, aby zapłacić za gaz w L2. + +### Czy mogę wybrać innego beneficjenta dla kontraktu vestingowego na L2? + +Tak, przy pierwszym przesłaniu salda i ustawieniu kontraktu vestingowego L2 można określić beneficjenta L2. Upewnij się, że ten beneficjent jest portfelem, który może wykonywać transakcje na Arbitrum One, tj. musi to być EOA lub multisig wdrożony w Arbitrum One. + +Jeśli Twój kontrakt jest w pełni vestowany, nie będziesz musiał ustawiać kontraktu vestingowego na L2; zamiast tego ustawisz adres portfela L2 i będzie to portfel odbiorczy dla Twojego stake'a lub delegacji na Arbitrum. + +### Mój kontrakt jest w pełni vestowany. Czy mogę przesłać swój stake lub delegację na inny adres, który nie jest kontraktem vestingowym L2? + +Tak. Jeśli nie przesłałeś żadnego salda kontraktu vestingowego do L2, a Twój kontrakt vestingowy jest w pełni vestowany, nie powinieneś przesyłać swojego kontraktu vestingowego do L2. Zamiast tego możesz użyć narzędzi do przesyłania, aby ustawić adres portfela L2 i bezpośrednio przesłać swój stake lub delegację do tego zwykłego portfela na L2. + +Pozwala to na przesłanie stake'a lub delegacji na dowolny adres L2.
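The FAQ above notes that the vesting contract must hold a nonzero GRT balance (even 1 GRT) before it can be transferred to L2, and that a small top-up can be sent to it from any other wallet. A minimal sketch of such a top-up — assuming ethers v6; the RPC URL, the private-key handling, and the vesting contract address are placeholders rather than values from these docs:

```typescript
// Sketch: top up a vesting contract with 1 GRT on Ethereum mainnet so it holds a nonzero balance.
// Assumptions: ethers v6; ETH_RPC_URL / PRIVATE_KEY env vars and the vesting contract address are placeholders.
import { Contract, JsonRpcProvider, Wallet, parseUnits } from "ethers";

// Mainnet GRT token, as linked from the arbitrum-faq above (etherscan.io).
const GRT_ON_MAINNET = "0xc944e90c64b2c07662a292be6244bdf05cda44a7";
const ERC20_ABI = ["function transfer(address to, uint256 amount) returns (bool)"];

async function sendOneGrt(vestingContract: string): Promise<void> {
  const provider = new JsonRpcProvider(process.env.ETH_RPC_URL);
  const signer = new Wallet(process.env.PRIVATE_KEY!, provider);
  const grt = new Contract(GRT_ON_MAINNET, ERC20_ABI, signer);
  const tx = await grt.transfer(vestingContract, parseUnits("1", 18)); // GRT uses 18 decimals
  await tx.wait();
  console.log(`Sent 1 GRT to ${vestingContract} in tx ${tx.hash}`);
}

// sendOneGrt("0xYourVestingContractAddress");
```

A plain 1 GRT withdrawal from an exchange to the vesting contract address achieves the same thing without any code.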
+ +### Mój kontrakt vestingowy jest nadal vestowany. Jak mogę przesłać saldo kontraktu vestingowego do L2? + +Kroki te mają zastosowanie tylko wtedy, gdy Twój kontrakt jest nadal vestowany lub jeśli wykonałeś te kroki, gdy Twój kontrakt był jeszcze vestowany. + +Aby przesłać kontrakt vestingowy do L2, należy przesłać saldo GRT do L2 za pomocą narzędzi przesyłania, co spowoduje zainicjowanie kontraktu vestingowego L2: + +1. Wpłać trochę ETH do kontraktu narzędzia przesyłania (zostanie to wykorzystane do zapłaty za gaz L2) + +2. Anuluj dostęp protokołu do kontraktu vestingowego (potrzebne do następnego kroku) + +3. Udziel protokołowi dostępu do kontraktu vestingowego (pozwoli to na interakcję kontraktu z narzędziem przesyłania) + +4. Określ adres beneficjenta L2\* i zainicjuj przesyłanie salda w sieci głównej Ethereum + +5. Poczekaj 20 minut na potwierdzenie + +6. Potwierdź przesłanie salda na L2 + +\*Jeżeli będzie wymagane - np. w przypadku korzystania z adresu kontraktu. \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Czy mogę przesłać swój kontrakt vestingowy z powrotem do L1? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. 
+Nie ma takiej potrzeby, ponieważ Twój kontrakt vestingowy nadal znajduje się w L1. Kiedy korzystasz z narzędzi przesyłania, po prostu tworzysz nowy kontrakt w L2, który jest połączony z kontraktem vestingowym w L1 i możesz wysyłać GRT pomiędzy nimi. -## Why do I need to move my vesting contract to begin with? +### Dlaczego muszę przenosić kontrakt vestingowy? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +Musisz ustawić kontrakt vestingowy L2, aby to konto mogło mieć Twój stake lub delegację na L2. W przeciwnym razie nie będzie możliwości przesłania stake'u/delegacji do L2 bez "uniknięcia" kontraktu vestingowego. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Co się stanie, jeśli spróbuję wypłacić środki z mojego kontraktu, gdy są one tylko częściowo vestowane? Czy jest to możliwe? -This is not a possibility. You can move funds back to L1 and withdraw them there. +Nie ma takiej możliwości. Możesz przenieść środki z powrotem do L1 i stamtąd je wypłacić. -## What if I don't want to move my vesting contract to L2? +### Co zrobić, jeśli nie chcę przenosić mojego kontraktu vestingowego do L2? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +Możesz nadal stake'ować/delegować na L1. Z czasem możesz rozważyć przeniesienie się do L2, aby umożliwić otrzymywanie nagród w miarę skalowania protokołu na Arbitrum. Należy pamiętać, że te narzędzia przesyłania są przeznaczone dla kontraktów vestingowych, które mogą stake'ować i delegować w protokole. Jeśli twój kontrakt nie pozwala na stake'owanie lub delegowanie, lub podlega unieważnieniu, wówczas nie ma dostępnego narzędzia do przesyłania. Nadal będziesz mógł wypłacić swój GRT z L1, gdy będzie dostępny. diff --git a/website/pages/pl/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/pl/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/pl/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/pl/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. 
Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/pl/billing.mdx b/website/pages/pl/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/pl/billing.mdx +++ b/website/pages/pl/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. 
Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/pl/chain-integration-overview.mdx b/website/pages/pl/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/pl/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). 
+- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. 
diff --git a/website/pages/pl/cookbook/arweave.mdx b/website/pages/pl/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/pl/cookbook/arweave.mdx +++ b/website/pages/pl/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/pl/cookbook/grafting.mdx b/website/pages/pl/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/pl/cookbook/grafting.mdx +++ b/website/pages/pl/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. 
Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/pl/cookbook/near.mdx b/website/pages/pl/cookbook/near.mdx index 879e8e5c15aa..304e1202e278 100644 --- a/website/pages/pl/cookbook/near.mdx +++ b/website/pages/pl/cookbook/near.mdx @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/pl/cookbook/upgrading-a-subgraph.mdx b/website/pages/pl/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/pl/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/pl/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/pl/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). 
These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/pl/deploying/hosted-service.mdx b/website/pages/pl/deploying/hosted-service.mdx index 2e6093531110..f46d0b235964 100644 --- a/website/pages/pl/deploying/hosted-service.mdx +++ b/website/pages/pl/deploying/hosted-service.mdx @@ -10,7 +10,7 @@ If you don't have an account on the hosted service, you can sign up with your Gi For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). -## Create a Subgraph +## Jak stworzyć subgraf First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/pl/deploying/subgraph-studio.mdx b/website/pages/pl/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/pl/deploying/subgraph-studio.mdx +++ b/website/pages/pl/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. 
`DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/pl/developing/creating-a-subgraph.mdx b/website/pages/pl/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/pl/developing/creating-a-subgraph.mdx +++ b/website/pages/pl/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. 
| @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. 
-#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/pl/developing/developer-faqs.mdx b/website/pages/pl/developing/developer-faqs.mdx index 0b925a79dce2..c3769743472b 100644 --- a/website/pages/pl/developing/developer-faqs.mdx +++ b/website/pages/pl/developing/developer-faqs.mdx @@ -1,5 +1,5 @@ --- -title: Developer FAQs +title: FAQs dla developerów --- ## 1. What is a subgraph? @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? 
The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/pl/developing/graph-ts/api.mdx b/website/pages/pl/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/pl/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
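+
+As a quick illustration of the constructors and conversions listed below, a minimal sketch (the hex string and the `42` are arbitrary example values):
+
+```typescript
+import { Bytes } from '@graphprotocol/graph-ts'
+
+// Build Bytes from a hex string (the `0x` prefix is optional)
+let data = Bytes.fromHexString('0xdeadbeef')
+
+// Convert back to a `0x`-prefixed hex string, e.g. for logging
+let hex = data.toHex()
+
+// Append the byte representation of an i32 (see `concatI32` below)
+let extended = data.concatI32(42)
+```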
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
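+
+// Finally, persist the updated entity back to the store, as described above
+transfer.save()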
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
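+
+Putting the two patterns above together, here is a minimal, illustrative sketch that binds a second generated contract at a fixed address and guards the call with `try_`. The `PriceOracle` binding, its `latestPrice()` method and the address are hypothetical placeholders rather than real generated code:
+
+```typescript
+import { Address, log } from '@graphprotocol/graph-ts'
+// Hypothetical class generated by `graph codegen` for a contract declared in the subgraph manifest
+import { PriceOracle } from '../generated/PriceOracle/PriceOracle'
+
+export function readOraclePrice(): void {
+  // Bind the generated class to a known deployment address (placeholder value)
+  let oracle = PriceOracle.bind(Address.fromString('0x0000000000000000000000000000000000000001'))
+
+  // Call the `try_` variant so that a revert does not abort the handler
+  let result = oracle.try_latestPrice()
+  if (result.reverted) {
+    log.warning('latestPrice reverted', [])
+  } else {
+    log.info('latest price: {}', [result.value.toString()])
+  }
+}
+```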
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Logging a specific entry from an existing array
+
+To display a specific value in the array, the indexed value must be provided.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My third value is C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Logging event information
+
+The example below logs the block number, block hash and transaction hash from an event:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+Given an IPFS hash or path, reading a file from IPFS is done as follows:
+
+```typescript
+// Put this inside an event handler in the mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// that include files in directories are also supported
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+let data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+// Item is an entity type defined in the subgraph's schema
+import { Item } from '../generated/schema'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited.
+
+On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed.
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:
+
+- `crypto.keccak256(input: ByteArray): ByteArray`
+
+### JSON API
+
+```typescript
+import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+```
+
+JSON data can be parsed using the `json` API:
+
+- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence
+- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed
+- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String`
+- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`, it returns an error variant if the parsing failed
+
+The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value:
+
+```typescript
+let value = json.fromBytes(...)
+if (value.kind == JSONValueKind.BOOL) {
+  ...
+}
+```
+
+In addition, there is a method to check if the value is `null`:
+
+- `value.isNull(): boolean`
+
+When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods:
+
+- `value.toBool(): boolean`
+- `value.toI64(): i64`
+- `value.toF64(): f64`
+- `value.toBigInt(): BigInt`
+- `value.toString(): string`
+- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above)
+
+### Type Conversions Reference
+
+| Source(s) | Destination | Conversion function |
+| -------------------- | -------------------- | ---------------------------- |
+| Address | Bytes | none |
+| Address | String | s.toHexString() |
+| BigDecimal | String | s.toString() |
+| BigInt | BigDecimal | s.toBigDecimal() |
+| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() |
+| BigInt | String (unicode) | s.toString() |
+| BigInt | i32 | s.toI32() |
+| Boolean | Boolean | none |
+| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) |
+| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) |
+| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() |
+| Bytes | String (unicode) | s.toString() |
+| Bytes | String (base58) | s.toBase58() |
+| Bytes | i32 | s.toI32() |
+| Bytes | u32 | s.toU32() |
+| Bytes | JSON | json.fromBytes(s) |
+| int8 | i32 | none |
+| int32 | i32 | none |
+| int32 | BigInt | BigInt.fromI32(s) |
+| uint24 | i32 | none |
+| int64 - int256 | BigInt | none |
+| uint32 - uint256 | BigInt | none |
+| JSON | boolean | s.toBool() |
+| JSON | i64 | s.toI64() |
+| JSON | u64 | s.toU64() |
+| JSON | f64 | s.toF64() |
+| JSON | BigInt | s.toBigInt() |
+| JSON | string | s.toString() |
+| JSON | Array | s.toArray() |
+| JSON | Object | s.toObject() |
+| String | Address | Address.fromString(s) |
+| Bytes | Address | Address.fromBytes(s) |
+| String | BigInt | BigInt.fromString(s) |
+| String | BigDecimal | BigDecimal.fromString(s) |
+| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) |
+| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) |
+
+### Data Source Metadata
+
+You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace:
+
+- `dataSource.address(): Address`
+- `dataSource.network(): string`
+- `dataSource.context(): DataSourceContext`
+
+### Entity and DataSourceContext
+
+The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields:
+
+- `setString(key: string, value: string): void`
+- `setI32(key: string, value: i32): void`
+- `setBigInt(key: string, value: BigInt): void`
+- `setBytes(key: string, value: Bytes): void`
+- `setBoolean(key: string, value: bool): void`
+- `setBigDecimal(key: string, value: BigDecimal): void`
+- `getString(key: string): string`
+- `getI32(key: string): i32`
+- `getBigInt(key: string): BigInt`
+- `getBytes(key: string): Bytes`
+- `getBoolean(key: string): boolean`
+- `getBigDecimal(key: string): BigDecimal`
+
+### DataSourceContext in Manifest
+
+The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`.
+
+Here is a YAML example illustrating the usage of various types in the `context` section:
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: ContractName
+    network: mainnet
+    context:
+      bool_example:
+        type: Bool
+        data: true
+      string_example:
+        type: String
+        data: 'hello'
+      int_example:
+        type: Int
+        data: 42
+      int8_example:
+        type: Int8
+        data: 127
+      big_decimal_example:
+        type: BigDecimal
+        data: '10.99'
+      bytes_example:
+        type: Bytes
+        data: '0x68656c6c6f'
+      list_example:
+        type: List
+        data:
+          - type: Int
+            data: 1
+          - type: Int
+            data: 2
+          - type: Int
+            data: 3
+      big_int_example:
+        type: BigInt
+        data: '1000000000000000000000000'
+```
+
+- `Bool`: Specifies a Boolean value (`true` or `false`).
+- `String`: Specifies a String value.
+- `Int`: Specifies a 32-bit integer.
+- `Int8`: Specifies an 8-bit integer.
+- `BigDecimal`: Specifies a decimal number. Must be quoted.
+- `Bytes`: Specifies a hexadecimal string.
+- `List`: Specifies a list of items. Each item needs to specify its type and data.
+- `BigInt`: Specifies a large integer value. Must be quoted due to its large size.
+
+This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs.
diff --git a/website/pages/pl/developing/graph-ts/common-issues.mdx b/website/pages/pl/developing/graph-ts/common-issues.mdx
new file mode 100644
index 000000000000..5b99efa8f493
--- /dev/null
+++ b/website/pages/pl/developing/graph-ts/common-issues.mdx
@@ -0,0 +1,8 @@
+---
+title: Common AssemblyScript Issues
+---
+
+There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty; however, being aware of them may help. The following is a non-exhaustive list of these issues:
+
+- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object.
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used.
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/pl/developing/supported-networks.mdx b/website/pages/pl/developing/supported-networks.mdx index 58ce56345f7c..7ebc9a3bf6cf 100644 --- a/website/pages/pl/developing/supported-networks.mdx +++ b/website/pages/pl/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Wspierane sieci --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. 
diff --git a/website/pages/pl/docsearch.json b/website/pages/pl/docsearch.json index 8cfff967936d..e1e129e0fcca 100644 --- a/website/pages/pl/docsearch.json +++ b/website/pages/pl/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Szukaj", + "buttonAriaLabel": "Szukaj" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", - "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "resetButtonTitle": "Usuń zapytanie", + "resetButtonAriaLabel": "Usuń zapytanie", + "cancelButtonText": "Anuluj", + "cancelButtonAriaLabel": "Anuluj" }, "startScreen": { - "recentSearchesTitle": "Recent", - "noRecentSearchesText": "No recent searches", - "saveRecentSearchButtonTitle": "Save this search", - "removeRecentSearchButtonTitle": "Remove this search from history", - "favoriteSearchesTitle": "Favorite", - "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + "recentSearchesTitle": "Ostatnie", + "noRecentSearchesText": "Brak wyszukiwań", + "saveRecentSearchButtonTitle": "Zapamiętaj to wyszukiwanie", + "removeRecentSearchButtonTitle": "Usuń to wyszukiwanie z historii", + "favoriteSearchesTitle": "Ulubione", + "removeFavoriteSearchButtonTitle": "Usuń to wyszukiwanie z ulubionych" }, "errorScreen": { - "titleText": "Unable to fetch results", - "helpText": "You might want to check your network connection." + "titleText": "Nie można wyświetlić wyników", + "helpText": "Sprawdź połączenie sieciowe." }, "footer": { - "selectText": "to select", - "selectKeyAriaLabel": "Enter key", - "navigateText": "to navigate", - "navigateUpKeyAriaLabel": "Arrow up", - "navigateDownKeyAriaLabel": "Arrow down", - "closeText": "to close", - "closeKeyAriaLabel": "Escape key", - "searchByText": "Search by" + "selectText": "Wybierz", + "selectKeyAriaLabel": "Przycisk enter", + "navigateText": "Nawiguj", + "navigateUpKeyAriaLabel": "Strzałka w górę", + "navigateDownKeyAriaLabel": "Strzałka w dół", + "closeText": "Zamknij", + "closeKeyAriaLabel": "Przycisk Escape", + "searchByText": "Wyszukiwane przez" }, "noResultsScreen": { - "noResultsText": "No results for", - "suggestedQueryText": "Try searching for", - "reportMissingResultsText": "Believe this query should return results?", - "reportMissingResultsLinkText": "Let us know." + "noResultsText": "Brak wyników dla", + "suggestedQueryText": "Spróbuj", + "reportMissingResultsText": "Uważasz, że to zapytanie powinno odnieść rezultat?", + "reportMissingResultsLinkText": "Daj nam znać." } } } diff --git a/website/pages/pl/firehose.mdx b/website/pages/pl/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/pl/firehose.mdx +++ b/website/pages/pl/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. 
Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/pl/global.json b/website/pages/pl/global.json index 6a3eb234bfce..3868b46a2392 100644 --- a/website/pages/pl/global.json +++ b/website/pages/pl/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", - "video": "Video" + "collapse": "Zwiń", + "expand": "Rozwiń", + "previous": "Poprzedni", + "next": "Następny", + "editPage": "Edytuj stronę", + "pageSections": "Sekcje strony", + "linkToThisSection": "Link do tej sekcji", + "technicalLevelRequired": "Wymagana wiedza techniczna", + "notFoundTitle": "Ups! Ta strona mogła zagubić się w kosmosie...", + "notFoundSubtitle": "Sprawdź, czy został wpisany poprawny adres strony lub odwiedź naszą stronę, używając linku poniżej.", + "goHome": "Wróć do strony glównej", + "video": "Wideo" } diff --git a/website/pages/pl/glossary.mdx b/website/pages/pl/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/pl/glossary.mdx +++ b/website/pages/pl/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. 
The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. 
-- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned.
+- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide.

-- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network.
+- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network.

-- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.
+- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned.

 - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT.

@@ -66,7 +62,7 @@ title: Glossary
 - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network.

-- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.
+- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer.
- **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/pl/graphcast.mdx b/website/pages/pl/graphcast.mdx index e397aad36e43..cbee7c5e11a3 100644 --- a/website/pages/pl/graphcast.mdx +++ b/website/pages/pl/graphcast.mdx @@ -2,20 +2,20 @@ title: Graphcast --- -## Introduction +## Wstęp -Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? +Czy jest coś, czego chciałbyś/chciałabyś się dowiedzieć lub czym chciałbyś/chciałabyś podzielić się z indekserami w bardziej zautomatyzowany sposób, ale wydaje Ci się to zawracaniem głowy lub kosztuje za dużo gazu? -Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models. +Obecnie koszt przekazywania informacji innym uczestnikom sieci jest uzależniony od opłat za gaz w sieci blockchain Ethereum. Graphcast rozwiązuje ten problem umożliwiając praktycznie bezpłatną wymianę wiadomości P2P (ang. peer-to-peer - równy z równym) między indekserami różnych sieci w czasie rzeczywistym. Choć koszt wymiany takich wiadomości P2P jest bliski zeru to jednak brakuje gwarancji integralności danych. Niemniej jednak, Graphcast dąży do zapewnienia wiarygodności wiadomości (tzn.gwarancji że wiadomość jest poprawna i podpisana przez znanego uczestnika protokołu) przy jednoczesnym zachowaniu otwartego modelu reputacji. 
-The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: +SDK Graphcast (Software Development Kit) umożliwia programistom budowanie "Radios", czyli aplikacji opartych na przekazywaniu plotek, które indekserzy mogą uruchamiać w celu spełnienia określonego zadania. Planujemy również stworzyć kilka takich aplikacji Radios (lub udzielać wsparcia innym programistom/zespołom, które chcą w ich budowaniu uczestniczyć) dla następujących przypadków użycia: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. -- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). +- Przeprowadzanie aukcji i koordynacja synchronizacji warp subgrafów, substreamów oraz danych Firehose od innych indekserów. +- Raportowanie na temat aktywnej analizy zapytań, w tym wolumenów zapytań do subgrafów, wolumenów opłat itp. +- Raportowanie na temat analizy indeksowania, w tym czasu indeksowania subgrafów, kosztów gazu dla osób obsługujących zapytanie, napotkanych błędów indeksowania itp. +- Raportowanie informacji na temat stosu, w tym wersji graph-node, wersji Postgres oraz wersji klienta Ethereum itp. 
-### Learn More +### Dowiedz się więcej -If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) +Jeśli chcesz dowiedzieć się więcej o Graphcast, [zapoznaj się z dokumentacją tutaj.](https://docs.graphops.xyz/graphcast/intro) diff --git a/website/pages/pl/index.json b/website/pages/pl/index.json index 9e28e13d5001..e7d99cdd5886 100644 --- a/website/pages/pl/index.json +++ b/website/pages/pl/index.json @@ -1,77 +1,76 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "title": "Jak zacząć?", + "intro": "Dowiedz się więcej o The Graph - zdecentralizowanym protokole indeksującym dane sieci blockchain i umożliwiającym tworzenie zapytań.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Więcej o The Graph", + "description": "Dowiedz się więcej o The Graph" }, "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "title": " Na start", + "description": "Wskakuj i zacznij z The Graph" }, "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "title": "FAQs dla developerów", + "description": "Najczęściej zadawane pytania" }, "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "title": "Zapytania z aplikacji", + "description": "Dowiedz się jak tworzyć zapytania z aplikacji" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Jak stworzyć subgraf", + "description": "Użyj aplikacji \"Studio\" by stworzyć subgraf" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", + "title": "Role w sieci", + "description": "Dowiedz się więcej o rolach w sieci The Graph.", "roles": { "developer": { "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "description": "Stwórz subgraf lub użyj istniejącego subgrafa w zdecentralizowanej aplikacji (dApp)" }, "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "title": "Indekser", + "description": "Indeksuj dane i obsługuj zapytania przez prowadzenie własnego node'a" }, "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "title": "Kurator", + "description": "Organizuj dane przez sygnalizowanie subgrafów" }, "delegator": { "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "description": "Zabezpiecz sieć przez delegowanie tokenu GRT do wybranych indekserów" } } }, - "readMore": "Read more", + "readMore": "Dowiedz się więcej", "products": { - "title": "Products", + "title": "Produkty", "products": { "subgraphStudio": { "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" + "description": "Twórz, zarządzaj i publikuj subgrafy i klucze API" }, "graphExplorer": { "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" + "description": "Eksploruj subgrafy i zacznij korzystać 
z protokołu" }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Wspierane sieci", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/pl/mips-faqs.mdx b/website/pages/pl/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/pl/mips-faqs.mdx +++ b/website/pages/pl/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). diff --git a/website/pages/pl/network/benefits.mdx b/website/pages/pl/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/pl/network/benefits.mdx +++ b/website/pages/pl/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. 
diff --git a/website/pages/pl/network/explorer.mdx b/website/pages/pl/network/explorer.mdx index b3a549900b83..a769912c6128 100644 --- a/website/pages/pl/network/explorer.mdx +++ b/website/pages/pl/network/explorer.mdx @@ -6,7 +6,7 @@ Welcome to the Graph Explorer, or as we like to call it, your decentralized port -## Subgraphs +## Subgrafy First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name. diff --git a/website/pages/pl/network/indexing.mdx b/website/pages/pl/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/pl/network/indexing.mdx +++ b/website/pages/pl/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. 
Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). 
-- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/pl/new-chain-integration.mdx b/website/pages/pl/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/pl/new-chain-integration.mdx +++ b/website/pages/pl/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. 
Some options are below: diff --git a/website/pages/pl/operating-graph-node.mdx b/website/pages/pl/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/pl/operating-graph-node.mdx +++ b/website/pages/pl/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes diff --git a/website/pages/pl/publishing/publishing-a-subgraph.mdx b/website/pages/pl/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/pl/publishing/publishing-a-subgraph.mdx +++ b/website/pages/pl/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/pl/querying/querying-the-hosted-service.mdx b/website/pages/pl/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/pl/querying/querying-the-hosted-service.mdx +++ b/website/pages/pl/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. 
An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/pl/querying/querying-with-python.mdx b/website/pages/pl/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/pl/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. 
diff --git a/website/pages/pl/quick-start.mdx b/website/pages/pl/quick-start.mdx new file mode 100644 index 000000000000..c16b047e0c75 --- /dev/null +++ b/website/pages/pl/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: ' Na start' +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. +- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. 
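+
+For illustration only, here is a minimal sketch of what a handler in `mapping.ts` might look like; the `Transfer` event, `ExampleEntity` entity, and import paths are placeholders rather than the exact scaffold output:
+
+```typescript
+import { Transfer as TransferEvent } from '../generated/Contract/Contract'
+import { ExampleEntity } from '../generated/schema'
+
+export function handleTransfer(event: TransferEvent): void {
+  // Build a unique id from the transaction hash and log index
+  let id = event.transaction.hash.toHexString() + '-' + event.logIndex.toString()
+  let entity = new ExampleEntity(id)
+  // Fields defined in schema.graphql would be set from event.params here
+  entity.save()
+}
+```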
+ +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction that you published it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +You can query from your dapp if you don't have your API key via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). 
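To make step 8 concrete, the sketch below sends a GraphQL query to a subgraph's Query URL over plain HTTP from Python. The URL is a placeholder to be replaced with the one shown behind the query button on your subgraph's page, and the query uses the built-in `_meta` field rather than any entity from a particular schema, so it should work against any deployed subgraph; swap in a query for your own entities as needed.

```python
import requests

# Placeholder: replace with the Query URL shown when you click
# the query button on your subgraph's page.
QUERY_URL = "https://<your-subgraph-query-url>"

# The built-in `_meta` field reports the latest block the subgraph
# has indexed; substitute a query for your own entities here.
query = """
{
  _meta {
    block {
      number
    }
  }
}
"""

response = requests.post(QUERY_URL, json={"query": query}, timeout=30)
response.raise_for_status()
print(response.json()["data"]["_meta"]["block"]["number"])
```

The same POST pattern works from a dapp backend or a notebook; only the query string and the Query URL change.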
diff --git a/website/pages/pl/substreams.mdx b/website/pages/pl/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/pl/substreams.mdx +++ b/website/pages/pl/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/pl/sunrise.mdx b/website/pages/pl/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/pl/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? 
+ +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. 
This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. 
This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/pl/tokenomics.mdx b/website/pages/pl/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/pl/tokenomics.mdx +++ b/website/pages/pl/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. 
The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/pt/arbitrum/arbitrum-faq.mdx b/website/pages/pt/arbitrum/arbitrum-faq.mdx index 6ddd8d73639b..6a518e9e356b 100644 --- a/website/pages/pt/arbitrum/arbitrum-faq.mdx +++ b/website/pages/pt/arbitrum/arbitrum-faq.mdx @@ -14,7 +14,7 @@ Ao escalar o The Graph na L2, os participantes da rede podem: - Herdar segurança do Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +A escala dos contratos inteligentes do protocolo à L2 permite que os participantes da rede interajam com mais frequência por menos custos em taxas de gás. Por exemplo, os Indexadores podem abrir e fechar alocações para indexar um número maior de subgraphs com mais frequência; os programadores podem lançar e atualizar subgraphs com mais facilidade; os Delegadores podem delegar GRT com mais frequência; e os Curadores podem adicionar ou retirar sinais de um número maior de subgraphs–ações que, antigamente, eram consideradas caras demais para realizar com frequência devido ao gas. A comunidade do The Graph prosseguiu com o Arbitrum no ano passado, após o resultado da discussão [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). diff --git a/website/pages/pt/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/pt/arbitrum/l2-transfer-tools-faq.mdx index 5edf59aa959e..d060247cbfad 100644 --- a/website/pages/pt/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/pt/arbitrum/l2-transfer-tools-faq.mdx @@ -2,23 +2,47 @@ title: Perguntas Frequentes sobre Ferramentas de Transferência para L2 --- -> As ferramentas de transferência para L2 ainda não foram lançadas. Elas devem ser disponibilizadas até o final de 2023. +## Geral -## O que são Ferramentas de Transferência para L2? +### O que são Ferramentas de Transferência para L2? -O The Graph reduziu em até 26 vezes o custo para a participação de contribuintes na rede lançando o protocolo no Arbitrum One. 
As Ferramentas de Transferência para L2 foram criadas por desenvolvedores importantes para facilitar a mudança para a L2. Para cada participante no protocolo, um conjunto de helpers de transferência será compartilhado para suavizar a experiência, evitando períodos de degelo ou a necessidade de ter que sacar e fazer bridge manualmente de GRT. Estas ferramentas exigirão que você siga um conjunto específico de passos dependendo do seu papel dentro do Graph, e do que você transferirá para a L2. +O The Graph diminuiu em 26x o custo para contribuintes participarem na rede através do lançamento do protocolo ao Arbitrum One. As Ferramentas de Transferência para L2 foram criadas por programadores centrais para facilitar a mudança à L2. -## Posso usar a mesma carteira que uso na mainnet do Ethereum? +Para cada participante na rede, um conjunto de Ferramentas de Transferência para L2 é disponibilizado para suavizar a experiência, de modo a evitar períodos de degelo ou ter que sacar GRT e colocá-lo em bridge manualmente. -Se usa uma carteira [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), você poderá usar o mesmo endereço. Se a sua carteira na mainnet do Ethereum for um contrato (uma multisig, por ex.), então você deve especificar um [endereço de carteira no Arbitrum](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) onde a sua transferência será enviada. Por favor, tenha cuidado ao conferir o endereço, pois transferências a um endereço errado pode resultar em perda permanente. Se você quiser usar uma multisig na L2, lance um contrato multisig no Arbitrum One. +Estas ferramentas exigem aderência a um conjunto específico de passos a depender do seu papel dentro do The Graph e o que será transferido para a L2. + +### Posso usar a mesma carteira que uso na mainnet do Ethereum? + +Se usa uma carteira [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), você poderá usar o mesmo endereço. Se a sua carteira na mainnet do Ethereum for um contrato (uma multisig, por ex.), então deve ser especificado um [endereço de carteira no Arbitrum](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) para onde a sua transferência será enviada. Por favor, tenha cuidado ao conferir o endereço, pois transferir a um endereço errado pode causar prejuízos permanentes. Se quiser usar uma multisig na L2, lance um contrato multisig no Arbitrum One. + +Carteiras em blockchains EVM como Ethereum e Arbitrum são um par de chaves (pública e privada), que você cria sem precisar interagir com a blockchain. Qualquer carteira criada para o Ethereum também funcionará no Arbitrum sem ter que fazer qualquer outra coisa. + +A exceção é com carteiras de contrato inteligente como multisigs: estas são contratos inteligentes publicados separadamente em cada chain, que buscam os endereços necessários quando lançados. Se uma multisig foi publicada no Ethereum, ela não existirá com o mesmo endereço no Arbitrum. Uma nova multisig deve ser criada primeiro no Arbitrum; esta tem chances de receber um endereço diferente. + +### O que acontece se a minha transferência não for finalizada em 7 dias? + +As Ferramentas de Transferência para L2 usam o mecanismo nativo do Arbitrum para enviar mensagens da L1 à L2. Este mecanismo é chamado de "retryable ticket" (bilhete retentável) e é usado por todos os bridges de tokens nativos, incluindo o bridge de GRT do Arbitrum. Leia mais na [documentação do Arbitrum](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). 
+ +Ao transferir os seus ativos (subgraph, stake, delegação ou curadoria) à L2, é enviada uma mensagem através do bridge de GRT do Arbitrum, que cria um retryable ticket na L2. A ferramenta de transferência inclui um valor de ETH na transação, que é usado para pagar 1) pela criação do ticket e 2) pelo gas da execução do ticket na L2. Porém, devido à possível variação dos preços de gas no tempo até a execução do ticket na L2, esta tentativa de execução automática pode falhar. Se isto acontecer, o bridge do Arbitrum tentará manter o retryable ticket ativo por até 7 dias; assim, qualquer pessoa pode tentar novamente o "resgate" do ticket (que requer uma carteira com algum ETH em bridge ao Arbitrum). + +Este é o passo de "Confirmação" em todas as ferramentas de transferência. Ele será executado automaticamente e com êxito na maioria dos casos, mas é importante verificar que ele foi executado. Se não tiver êxito na primeira execução e nem em quaisquer das novas tentativas dentro de 7 dias, o bridge do Arbitrum descartará o ticket, e os seus ativos (subgraph, stake, delegação ou curadoria) serão perdidos sem volta. Os programadores-núcleo do The Graph têm um sistema de monitoria para detectar estas situações e tentar resgatar os tickets antes que seja tarde, mas no final, a responsabilidade é sua de que a sua transferência complete a tempo. Caso haja problemas ao confirmar a sua transação, contacte-nos com [este formulário](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) e o núcleo tentará lhe ajudar. + +### Eu comecei a transferir a minha delegação/meu stake/minha curadoria e não tenho certeza se ela chegou à L2, como posso ter certeza de que a mesma foi transferida corretamente? + +Caso não veja um banner no seu perfil pedindo que termine a transferência, a transação pode ter chegado com segurança na L2 e você não precisa fazer mais nada. Caso haja dúvidas, é possível conferir se o Explorer mostra a sua delegação, seu stake ou a sua curadoria no Arbitrum One. + +Se tiver o hash de transação da L1 (confira as transações recentes na sua carteira), podes também confirmar se o "bilhete retentável" que entregou a mensagem à L2 foi resgatado aqui: https://retryable-dashboard.arbitrum.io/ - se o resgate automático falhar, podes também conectar a sua carteira lá e resgatá-lo. Os programadores-núcleo também procuram mensagens que se perdem no caminho, e tentarão resgatar os bilhetes antes que vençam. ## Transferência de Subgraph -## Como transfiro o meu subgraph? +### Como transfiro o meu subgraph? -Para transferir o seu subgraph, será necessário completar os seguintes passos: + -1. Inicie a transferência na mainnet do Ethereum +Para transferir o seu subgraph, complete os seguintes passos: + +1. Inicie a transferência na mainnet Ethereum 2. Espere 20 minutos pela confirmação @@ -28,267 +52,321 @@ Para transferir o seu subgraph, será necessário completar os seguintes passos: 5. Atualize o URL de Query (recomendado) -\*Note que você deve confirmar a transferência dentro de 7 dias, caso contrário, o seu subgraph poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas uma confirmação manual pode ser necessária caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, há recursos para ajuda: Contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). 
+\*Você deve confirmar a transferência dentro de 7 dias, ou o seu subgraph poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). -## De onde devo iniciar a minha transferência? +### De onde devo iniciar a minha transferência? -Você pode iniciar a sua transferência do [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) ou de qualquer página de detalhes de subgraph. Clique no botão "Transfer Subgraph" (Transferir Subgraph) na página de detalhes de subgraph para começar a transferência. +Do [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) ou de qualquer página de detalhes de subgraph. Clique no botão "Transfer Subgraph" (Transferir Subgraph) na página de detalhes de subgraph para começar a transferência. -## Quanto tempo devo esperar até que o meu subgraph seja transferido? +### Quanto tempo devo esperar até que o meu subgraph seja transferido -O tempo de transferência leva cerca de 20 minutos. O bridge do Arbitrum trabalha no fundo para completar a transferência automaticamente. Em alguns casos, os custos de gas podem entrar num pico e você deverá confirmar a transação novamente. +A transferência leva cerca de 20 minutos. O bridge do Arbitrum trabalha em segundo plano para completar a transferência automaticamente. Às vezes, os custos de gas podem subir demais e a transação deverá ser confirmada novamente. -## O meu subgraph ainda poderá ser descoberto após transferi-lo para a L2? +### O meu subgraph ainda poderá ser descoberto após ser transferido para a L2? -O seu subgraph só será descobrível na rede em qual ele foi editado. Por exemplo, se o seu subgraph estiver no Arbitrum One, então você só poderá encontrá-lo no Explorer do Arbitrum One e não no Ethereum. Garanta que tem o Arbitrum One selecionado no trocador de rede no topo da página para garantir que está na rede correta. Após a transferência, o subgraph na L1 aparecerá como depreciado. +O seu subgraph só será descobrível na rede em qual foi editado. Por exemplo, se o seu subgraph estiver no Arbitrum One, então só poderá encontrá-lo no Explorer do Arbitrum One e não no Ethereum. Garanta que o Arbitrum One está selecionado no seletor de rede no topo da página para garantir que está na rede correta.  Após a transferência, o subgraph na L1 aparecerá como depreciado. -## O meu subgraph precisa ser editado para poder ser transferido? +### O meu subgraph precisa ser editado para poder ser transferido? -Para tomar vantagem da ferramenta de transferência de subgraph, o seu subgraph já deve estar editado na mainnet Ethereum, e ter algum sinal de curadoria que esteja em posse da carteira titular do subgraph. Se o seu subgraph não estiver editado, é recomendado que o edite diretamente no Arbitrum One - as taxas de gas associadas serão consideravelmente menores. Se quiser transferir um subgraph editado, mas a conta titular não curou qualquer sinal nele, você pode sinalizar uma quantidade pequena (por ex. 1 GRT) daquela conta; garanta que escolheu o sinal "auto-migratório". +Para aproveitar a ferramenta de transferência de subgraph, o seu subgraph já deve estar editado na mainnet Ethereum e deve ter algum sinal de curadoria em posse da carteira titular do subgraph. 
Se o seu subgraph não estiver editado, edite-o diretamente no Arbitrum One - as taxas de gas associadas serão bem menores. Se quiser transferir um subgraph editado, mas a conta titular não curou qualquer sinal nele, você pode sinalizar uma quantidade pequena (por ex. 1 GRT) daquela conta; escolha o sinal "migração automática". -## O que acontece com a versão da mainnet Ethereum do meu subgraph após eu transferi-lo ao Arbitrum? +### O que acontece com a versão da mainnet Ethereum do meu subgraph após eu transferi-lo ao Arbitrum? -Após transferir o seu subgraph ao Arbitrum, a versão na mainnet do Ethereum será depreciada. Recomendamos que atualize o seu URL de query em dentro de 28 horas. Porém, há um período de tempo que mantém o seu URL na mainnet em funcionamento para que qualquer apoio de dapp de terceiros seja atualizado. +Após transferir o seu subgraph ao Arbitrum, a versão na mainnet Ethereum será depreciada. Recomendamos que atualize o seu URL de query em dentro de 28 horas. Porém, há um período que mantém o seu URL na mainnet em funcionamento, para que qualquer apoio de dapp de terceiros seja atualizado. -## Após a transferência, preciso reeditar no Arbitrum? +### Após a transferência, preciso reeditar no Arbitrum? -Após a janela de 20 minutos de transferência, você deverá confirmar a transferência com uma transação na interface para finalizar a transferência, mas a ferramenta de transferência te guiará através disto. O seu endpoint na L1 continuará a ser apoiado durante a janela de transferência e um por período de graça após isto. É recomendado atualizar o seu endpoint quando lhe for conveniente. +Após a janela de transferência de 20 minutos, confirme a transferência com uma transação na interface para finalizá-la; a ferramenta de transferência te guiará no processo. O seu endpoint na L1 continuará a ser apoiado durante a janela e por um período de graça após isto. Vale atualizar o seu endpoint quando lhe for conveniente. -## Haverá um período de inatividade no meu endpoint durante a reedição? +### O meu endpoint estará fora do ar durante a reedição? -Não deve haver inatividade durante o uso da ferramenta de transferência para mover o seu subgraph à L2. O seu endpoint na L1 continuará a ser apoiado durante a janela de transferência e um por período de graça após isto. É recomendado atualizar o seu endpoint quando lhe for conveniente. +É improvável, mas é possível passar por um breve desligamento a depender de quais Indexadores apoiam o subgraph na L1, e de se eles continuarão a indexá-lo até o subgraph ter apoio total na L2. -## Editar e versionar na L2 funciona da mesmam forma que na mainnet do Ethereum? +### Editar e versionar na L2 funcionam da mesma forma que na mainnet Ethereum? -Sim. Garanta que selecionou o Arbitrum One como a sua rede editada ao editar no Subgraph Studio. No Studio, o último endpoint disponível apontará à versão atualizada mais recente do subgraph. +Sim. Selcione o Arbitrum One como a sua rede editada ao editar no Subgraph Studio. No Studio, o último endpoint disponível apontará à versão atualizada mais recente do subgraph. -## A curadoria do meu subgraph se mudará junto com o meu subgraph? +### A curadoria do meu subgraph se mudará com o meu subgraph? Caso tenha escolhido o sinal automigratório, 100% da sua própria curadoria se mudará ao Arbitrum One junto com o seu subgraph. 
Todo o sinal de curadoria do subgraph será convertido em GRT na hora da transferência, e o GRT correspondente ao seu sinal de curadoria será usado para mintar sinais no subgraph na L2. -Outros Curadores podem escolher se querem sacar a sua fração de GRT, ou transferi-la à L2 para mintar sinais no mesmo subgraph. +Outros Curadores podem escolher se querem sacar a sua fração de GRT, ou também transferi-la à L2 para mintar sinais no mesmo subgraph. -## Posso mover o meu subgraph de volta à mainnet do Ethereum após a transferência? +### Posso devolver o meu subgraph à mainnet Ethereum após a transferência? -Depois da transferência, a versão da mainnet do Ethereum deste subgraph será depreciada. Se quiser mudá-lo de volta à mainnet, será necessário relançá-lo e publicá-lo de volta à mainnet. Porém, transferir de volta à mainnet do Ethereum é altamente desencorajado, já que recompensas de indexação logo serão distribuidas totalmente no Arbitrum One. +Após a transferência, a versão da mainnet Ethereum deste subgraph será depreciada. Se quiser devolvê-lo à mainnet, será necessário relançá-lo e editá-lo de volta à mainnet. Porém, transferir de volta à mainnet do Ethereum é muito arriscado, já que as recompensas de indexação logo serão distribuidas apenas no Arbitrum One. -## Por que preciso de ETH em bridge para completar a minha transferência? +### Por que preciso de ETH em bridge para completar a minha transferência? -Taxas de gás no Arbitrum One são pagas com ETH em bridge (por ex. ETH que foi conectado ao Arbitrum One). Porém, as taxas de gas são muito menores comparadas à mainnet do Ethereum. +As taxas de gás no Arbitrum One são pagas com ETH em bridge (por ex. ETH que foi conectado ao Arbitrum One). Porém, as taxassão muito menores comparadas à mainnet Ethereum. -## Sinal de Curadoria +## Delegação -## Como transfiro a minha curadoria? +### Como transfiro a minha delegação? -Para transferir a sua curadoria, será necessário completar os seguintes passos: + -1. Inicie a transferência de sinal na mainnet do Ethereum +Para transferir a sua delegação, complete os seguintes passos: -2. Especifique um endereço de Curador na L2\* +1. Inicie a transferência de delegação na mainnet Ethereum +2. Espere 20 minutos pela confirmação +3. Confirme a transferência da delegação no Arbitrum -3. Espere 20 minutos pela confirmação +\*\*\*\*Confirme a transferência dentro de 7 dias, ou o seu subgraph poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). -\*Se necessário - por ex. se você usar um endereço de contrato. +### O que acontece com as minhas recompensas se eu iniciar uma transferência com uma alocação aberta na mainnet Ethereum? -## Como saberei se o subgraph que eu curei foi movido para a L2? +Se o Indexador a qual você delega ainda está a operar no L1, ao transferir ao Arbitrum, você perderá quaisquer recompensas de delegação oriundas de alocações abertas na mainnet do Ethereum. Isto significa que perderá as recompensas do último período de 28 dias, no máximo. Se executar a transferência logo após o Indexador fechar as suas alocações, verifique se esta é a menor quantia possível. Se houver um canal de comunicação com o(s) seu(s) Indexador(es), considere discutir com eles o melhor tempo para executar a sua transferência. 
-Ao visualizar a página de detalhes do subgraph, um banner notificará-lhe que este subgraph foi transferido. Você pode seguir o prompt para transferir a sua curadoria. Esta informação também pode ser encontrada na página de detalhes de qualquer subgraph que foi movido. +### O que acontece se o Indexador ao qual eu atualmente delego não estiver no Arbitrum One? -## E se eu não quiser mover a minha curadoria à L2? +A ferramenta de transferência a L2 só será ativada se o Indexador ao qual delegaste transferiu o stake dele ao Arbitrum. -Quando um subgraph é depreciado, é disponibilizada a opção de retirar o seu sinal. De forma parecida, se um subgraph for movido à L2, você pode escolher retirar o seu sinal na mainnet do Ethereum ou enviar o sinal à L2. +### Os Delegadores têm a opção de delegar a outro Indexador? -## Como sei se a minha curadoria foi transferida com êxito? +Se desejar delegar a outro Indexador, dá para transferir ao mesmo Indexador no Arbitrum, para depois retirar a delegação e esperar o período de degelo. Após isto, pode selecionar outro Indexador ativo para quem delegar. -Os detalhes do sinal serão acessíveis através do Explorer em cerca de 20 minutos após o início da ferramenta de transferência à L2. +### E se eu não conseguir achar o Indexador ao qual delego na L2? -## Posso transferir a minha curadoria em mais de um subgraph de uma vez? +A ferramenta de transferência à L2 detectará automaticamente o Indexador ao qual delegou anteriormente. -Não há opção de transferência em conjunto no momento. +### Eu posso misturar ou 'dividir' a minha delegação entre novos (ou vários) Indexadores em vez do Indexador anterior? -## Stake de Indexador +A ferramenta de transferência à L2 sempre mudará a sua delegação ao mesmo Indexador ao qual delegaste anteriormente. Após você se mudar para a L2, pode retirar a delegação, esperar o período de degelo, e decidir se quer separar a sua delegação. -## Como transfiro o meu stake ao Arbitrum? +### Estou sujeito ao período de recarga ou posso retirar-me imediatamente após usar a ferramenta de transferência de delegação na L2? -Para transferir o seu stake, será necessário completar os seguintes passos: +A ferramenta de transferência permite-lhe uma mudança imediata à L2. Se quiser desdelegar, espere pelo período de degelo. Porém, se um Indexador transferir todo o stake dele à L2, dá para fazer saque na mainnet Ethereum imediatamente. -1. Inicie a transferência de stake na mainnet do Ethereum +### As minhas recompensas podem ser prejudicadas se eu não transferir a minha delegação? -2. Espere 20 minutos pela confirmação +Espera-se que toda a participação na rede seja transferida ao Arbitrum One no futuro. -3. Confirme a transferência do stake no Arbitrum +### Quanto tempo leva para completar a transferência da minha delegação à L2? -\*Note que você deve confirmar a transferência dentro de 7 dias, caso contrário, o seu stake poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas uma confirmação manual pode ser necessária caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, há recursos para ajuda: Contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). +Após este período, complete o terceiro passo do processo de transferência dentro de 7 dias, ou a sua delegação poderá ser perdida. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente caso este falhe. 
Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). -## Todo o meu stake será transferido? +### Posso transferir a minha delegação se eu usar um contrato de vesting / carteira de bloqueio de token de GRT? -Você pode escolher quanto do seu stake quer transferir. Se quiser escolher transferir todo o seu stake de uma vez, você deverá fechar quaisquer alocações abertas primeiro. +Sim! O processo é um pouco diferente, já que contratos de vesting não podem repassar o ETH necessário para pagar o gas da L2, então é necessário depositá-lo primeiro. Se o seu contrato de vesting não for totalmente vestido, inicie também um contrato de vesting na L2 — assim, a delegação só poderá ser transferida a este contrato na L2. A interface no Explorer pode te guiar através deste processo após conectar ao Explorer com a carteira de bloqueio de vesting. -Se planeja transferir partes do seu stake através de múltiplas transações, você sempre deve especificar o mesmo endereço beneficiário. +### O meu contrato de vesting no Arbitrum permite liberar GRT da mesma forma que na mainnet? -Nota: Você deve atender aos requerimentos mínimos de stake na L2 na primeira vez que usar a ferramenta de transferência. Indexadores devem enviar o mínimo de 100.000 GRT (ao chamar esta função pela primeira vez.). Se uma porção do stake for deixada na L1, ela deve ser maior que o mínimo de 100 mil, e ser suficiente (junto com as suas delegações) para cobrir as suas alocações abertas. +Não, o contrato de vesting criado no Arbitrum não permitirá liberar qualquer GRT até o fim da agenda de vesting, por ex., até o seu contrato for totalmente vestido. Isto serve para impedir gastos duplos, pois caso contrário, seria possível liberar as mesmas quantidades em ambas as camadas. -## Quanto tempo eu tenho para confirmar a minha transferência de stake ao Arbitrum? +Se quiser liberar GRT do contrato de vesting, é possível transferi-lo de volta ao contrato de vesting na L1 com o Explorer: no seu perfil no Arbitrum One, há um banner que diz que você pode transferir GRT de volta ao contrato de vesting na mainnet. Isto exige uma transação no Arbitrum One, uma espera de 7 dias, e uma transação final na mainnet, já que isto usa o mesmo mecanismo nativo de bridging da bridge de GRT. -\*\*\* A sua transação deve ser confirmada para completar a transferência de stake no Arbitrum. Este passo deve ser finalizado em dentro de 7 dias, caso contrário, o stake pode ser perdido. +### Há alguma taxa de delegação? -## E se eu tiver alocações abertas? +Não. Os tokens recebidos na L2 são delegados ao Indexador especificado, em nome do Delegador especificado, sem cobrar taxas pela delegação. -Caso você não envie todo o seu stake, a ferramenta de transferência para L2 validará que o mínimo de 100 mil GRT permaneça na mainnet do Ethereum, e que o seu stake e delegação restantes sejam o suficiente para cobrir quaisquer alocações abertas. Você pode precisar fechar alocações abertas se o seu saldo de GRT não cobrir os mínimos e as alocações abertas. +### As minhas recompensas não realizadas serão transferidas quando eu transferir a minha delegação? -## Com o uso das ferramentas de transferência, é necessário esperar 28 dias para um unstake na mainnet do Ethereum antes da transferência? +Sim! As únicas recompensas que não podem ser transferidas são por alocações abertas, já que essas não existirão até o Indexador fechar as alocações (normalmente a cada 28 dias). 
Se já delega há um bom tempo, isto deve compor apenas uma pequena fração de recompensas. -Não. Você pode transferir o seu stake à L2 imediatamente. Não há necessidade de fazer unstake e esperar antes de usar a ferramenta de transferência. A espera de 28 dias só é aplicada se quiser sacar o stake de volta à sua carteira, na mainnet do Ethereum ou na L2. +No nível do contrato inteligente, as recompensas não realizadas já fazem parte do seu saldo de delegação, então elas serão transferidas quando a sua delegação for transferida à L2. -## Quanto tempo leva para transferir o meu stake? +### É obrigatório mudar delegações para a L2? Há uma data limite? -Levará cerca de 20 minutos para que a ferramenta de transferência à L2 finalize a transferência do seu stake. +Não é obrigatório transferir delegações para a L2 — mas lá, as recompensas de indexação aumentam com base na linha do tempo descrita no [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventualmente, se o Conselho continuar a aprovar os aumentos, todas as recompensas serão distribuídas na L2 e não haverão mais recompensas de indexação para Indexadores e Delegantes na L1 -## Devo indexar no Arbitrum antes de transferir o meu stake? +### Se eu delegar a um Indexador que já transferiu stake à L2, eu paro de receber recompensas na L1? -Você pode transferir o seu stake antes de preparar a indexação, mas você não poderá resgatar quaisquer recompensas na L2 até alocar a subgraphs na L2, indexá-los, e apresentar POIs. +Muitos Indexadores transferem stakes gradualmente para que Indexadores na L1 continuem a ganhar recompensas e taxas na L1, que então, são compartilhadas com Delegantes. Quando um Indexador tiver transferido todo o seu stake, ele parará de operar na L1, então os Delegantes não receberão mais recompensas até se transferirem à L2. -## Delegadores podem mover a sua delegação antes que eu mova o meu stake de indexação? +Eventualmente, caso o Conselho continue a aprovar os aumentos de recompensas de indexação na L2, todas serão distribuídas na L2 e não haverão mais recompensas para Indexadores e Delegantes na L1. ​ -Não. Para que Delegadores transfiram o seu GRT delegado ao Arbitrum, o Indexador a quem eles delegam devem estar ativos na L2. +### Não vejo um botão para transferir a minha delegação. O que houve? -## Posso transferir o meu stake se eu usar um contrato de vesting / carteira de bloqueio de token de GRT? +O seu Indexador provavelmente ainda não transferiu stakes com as ferramentas de transferência para L2. -Sim! O processo é um pouco diferente, já que contratos de vesting não podem encaminhar o ETH necessário para o pagamento do gas da L2, então será necessário depositá-lo primeiro. Se o seu contrato de vesting não for totalmente vestido, você também deverá inicializar um contrato de vesting na L2 e só poderá transferir o stake a este contrato de vesting na L2. A interface no Explorer pode te guiar através deste processo ao conectar ao Explorer com a carteira de bloqueio de vesting. +Se puder contactar o Indexador, tente convencê-lo a usar as Ferramentas de Transferência para L2 para que os Delegantes possam transferir delegações ao próprio endereço de Indexador na L2 -## Delegação +### O meu Indexador também está no Arbitrum, mas eu não vejo nenhum botão para transferir a delegação no meu perfil. Porquê? + +O Indexador pode ter preparado operações na L2 sem ter transferido stake com as ferramentas de transferência à L2. 
Assim, os contratos inteligentes na L1 não saberão o endereço do Indexador na L2. Caso seja possível contatar o Indexador, convença-o a usar a ferramenta de transferência para que Delegantes possam transferir delegações ao seu endereço de Indexador na L2 + +### Posso transferir a minha delegação à L2 caso eu tenha começado o processo de cancelamento da delegação sem tê-la sacado? -## Como transfiro a minha delegação? +Não. Se a sua delegação estiver em degelo, espere os 28 dias para sacá-la. -Para transferir a sua delegação, será necessário completar os seguintes passos: +Os tokens a serem desdelegados estão "trancados", e assim, não podem ser transferidos à L2. -1. Inicie a transferência de delegação na mainnet do Ethereum +## Sinal de Curadoria + +### Como transfiro a minha curadoria? + +Para transferir a sua curadoria, complete os seguintes passos: + +1. Inicie a transferência de sinal na mainnet Ethereum + +2. Especifique um endereço de Curador na L2\* + +3. Espere 20 minutos pela confirmação + +\*Se necessário - por ex. se você usar um endereço de contrato. + +### Como saberei se o subgraph que eu curei foi transferido para a L2? + +Ao visualizar a página de detalhes do subgraph, um banner notificará-lhe que este subgraph foi transferido. Siga o prompt para transferir a sua curadoria. Esta informação também aparece na página de detalhes de qualquer subgraph transferido. + +### E se eu não quiser mudar a minha curadoria para a L2? + +Quando um subgraph é depreciado, há a opção de retirar o seu sinal. Desta forma, se um subgraph for movido à L2, dá para escolher retirar o seu sinal na mainnet Ethereum ou enviar o sinal à L2. + +### Como sei se a minha curadoria foi transferida com êxito? + +Os detalhes do sinal serão acessíveis através do Explorer cerca de 20 minutos após a ativação da ferramenta de transferência à L2. + +### Posso transferir a minha curadoria em vários subgraphs de uma vez? + +Não há opção de transferências em conjunto no momento. + +## Stake de Indexador + +### Como transfiro o meu stake ao Arbitrum? + +> Aviso: Caso esteja atualmente a desfazer o stake em qualquer porção do seu GRT no seu Indexador, não haverá como usar as Ferramentas de Transferência à L2. + + + +Para transferir o seu stake, complete os seguintes passos: + +1. Inicie a transferência de stake na mainnet Ethereum 2. Espere 20 minutos pela confirmação -3. Confirme a transferência da delegação no Arbitrum +3. Confirme a transferência do stake no Arbitrum + +\*Confirme a transferência dentro de 7 dias, ou o seu stake poderá ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente se houver um aumento repentino no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). -\*\*\*\*Você deve confirmar a transação para finalizar a transferência da delegação no Arbitrum. Este passo deve ser completado dentro de 7 dias, caso contrário, a delegação pode ser perdida. Na maioria dos casos, este passo será executado automaticamente, mas uma confirmação manual pode ser necessária caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, há recursos para ajuda: Contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). +### Todo o meu stake será transferido? 
-## O que acontece com as minhas recompensas se eu iniciar uma transferência com uma alocação aberta na mainnet do Ethereum? +Você pode escolher quanto quer transferir do seu stake. Se escolher transferir todo o seu stake de uma vez, feche quaisquer alocações abertas primeiro. -Se o indexador a qual você delega ainda está em operação no L1, ao transferir ao Arbitrum, você perderá quaisquer recompensas de delegação de alocações abertas na mainnet do Ethereum. Isto significa que você perderá as recompensas, no máximo, do último período de 28 dias. Se você executar a transferência logo após o indexador fechar as suas alocações, você pode garantir que esta seja a menor quantia possível. Se houver um canal de comunicação com o(s) seu(s) Indexador(es), considere uma discussão com eles para saber o melhor tempo para executar a sua transferência. +Se deseja transferir partes do seu stake através de múltiplas transações, sempre especifique o mesmo endereço beneficiário. -## O que acontece se o Indexador ao qual eu atualmente delego não estiver no Arbitrum One? +Nota: Você deve atender aos requerimentos mínimos de stake na L2 ao usar a ferramenta de transferência pela primeira vez. Os Indexadores devem enviar o mínimo de 100.000 GRT (ao chamar esta função pela primeira vez.). Se uma porção do stake for deixada na L1, ela deve ser maior que o mínimo de 100 mil e ser suficiente (junto com as suas delegações) para cobrir as suas alocações abertas. -A ferramenta de transferência a L2 só será habilitada se o Indexador ao qual você delegou transferiu o stake dele ao Arbitrum. +### Quanto tempo eu tenho para confirmar a minha transferência de stake ao Arbitrum? -## Delegadores têm a opção de delegar a outro Indexador? +\*\*\* A sua transação deve ser confirmada para completar a transferência de stake no Arbitrum. Confirme a transferência dentro de 7 dias, ou o seu stake poderá ser perdido. -Se desejar delegar a outro Indexador, você pode transferir ao mesmo indexador no Arbitrum, depois desdelegar e esperar o período de degelo. Após isto, você pode selecionar outro Indexador ativo para delegar. +### E se eu tiver alocações abertas? -## E se eu não conseguir achar o Indexador ao qual delego na L2? +Caso não envie todo o seu stake, a ferramenta de transferência para L2 validará que o mínimo de 100 mil GRT permaneça na mainnet do Ethereum, e que o seu stake e delegação restantes sejam o suficiente para cobrir quaisquer alocações abertas. Você pode precisar fechar alocações abertas se o seu saldo de GRT não cobrir os mínimos + alocações abertas. -A ferramenta de transferência à L2 detectará automaticamente o Indexador ao qual você delegou anteriormente. +### Com as ferramentas de transferência, devo esperar 28 dias para retirar um stake na mainnet Ethereum antes da transferência? -## Eu poderei misturar ou 'espalhar' a minha delegação por novos ou vários Indexadores em vez do Indexador anterior? +Não. Dá para transferir o seu stake à L2 imediatamente. Não há necessidade de retirar um stake e esperar antes de usar a ferramenta de transferência. A espera de 28 dias só vale se quiser sacar o stake de volta à sua carteira, na mainnet Ethereum ou na L2. -A ferramenta de transferência à L2 sempre moverá a sua delegação ao mesmo Indexador ao qual você delegou anteriormente. Após se mudar para a L2, você pode desdelegar, esperar o período de degelo, e decidir se você quer separar a sua delegação. +### Quanto tempo demora para trandferir o meu stake? 
-## Estou sujeito ao período de recarga ou posso retirar-me imediatamente após usar a ferramenta de transferência de delegação na L2? +A ferramenta de transferência à L2 finalizará a transferência do seu stake em aproximadamente 20 minutos. -A ferramenta de transferência lhe permite uma mudança imediata à L2. Se quiser desdelegar, você deverá esperar pelo período de degelo. Porém, se um Indexador transferir todo o stake dele à L2, você pode se retirar na mainnet do Ethereum imediatamente. +### Devo indexar no Arbitrum antes de transferir o meu stake? -## As minhas recompensas podem ser impactadas negativamente se eu não transferir a minha delegação? +Você pode transferir o seu stake antes de preparar a indexação, mas não terá como resgatar recompensas na L2 até alocar para subgraphs na L2, indexá-los, e apresentar POIs. -É antecipado que toda a participação na rede se mova ao Arbitrum One no futuro. +### Os Delegadores podem mudar a sua delegação antes que eu mude o meu stake de indexação? -## Quanto tempo leva para a finalização da transferência da minha delegação à L2? +Não. Para que os Delegadores transfiram os GRT que delegaram ao Arbitrum, o Indexador a quem eles delegam devem estar ativos na L2. -Uma confirmação de 20 minutos é necessária para a transferência da delegação. Note que após o período de 20 minutos, você deve voltar e completar o terceiro passo do processo de transferência dentro de 7 dias. Se você não fizer isto, a sua delegação pode ser perdida. Note que na maioria dos casos, a ferramenta de transferência completará este passo para você automaticamente. Em caso de uma ação automática sem êxito, você deverá completá-la manualmente. Caso surja algum problema durante este processo, não se preocupe, pois estaremos disposto a ajudar: contacte-nos no support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). +### Posso transferir o meu stake se eu usar um contrato de vesting / carteira de bloqueio de token de GRT? -## Posso transferir a minha delegação se eu usar um contrato de vesting / carteira de bloqueio de token de GRT? +Sim! O processo é um pouco diferente, já que contratos de vesting não podem repassar o ETH necessário para pagar o gas da L2, então ele deve ser depositado primeiro. Se o seu contrato de vesting não for totalmente vestido, inicie também um contrato de vesting na L2; o stake só poderá ser transferido a este mesmo contrato. A interface no Explorer pode te guiar através deste processo quando estiver conectado ao Explorer com a carteira de bloqueio de vesting. -Sim! O processo é um pouco diferente, já que contratos de vesting não podem encaminhar o ETH necessário para o pagamento do gas da L2, então será necessário depositá-lo primeiro. Se o seu contrato de vesting não for totalmente vestido, você também deverá inicializar um contrato de vesting na L2 e só poderá transferir a delegação a este contrato de vesting na L2. A interface no Explorer pode te guiar através deste processo ao conectar ao Explorer com a carteira de bloqueio de vesting. +### Já tenho stake na L2. Ainda devo enviar 100 mil GRT no primeiro uso das ferramentas de transferência? -## Há alguma taxa de delegação? +Sim. Os contratos inteligentes na L1 não terão conhecimento do seu stake na L2, então será necessário transferir no mínimo 100.000 GRT na sua primeira transferência -Não. Tokens recebidos na L2 são delegados ao Indexador especificado em nome do Delegador especificado, sem cobrar uma taxa de delegação. 
+### Posso transferir o meu stake à L2 enquanto retirar o stake do meu GRT? + +Não. Se qualquer fração do seu stake estiver em degelo, espere os 28 dias e a saque antes de poder transferir o seu stake. Os tokens a serem postos em stake são "trancados" e impedirão quaisquer transferências ou stake na L2. ## Transferência de Contrato de Vesting -## Como transfiro o meu contrato de vesting? +### Como transfiro o meu contrato de vesting? -Para transferir o seu vesting, será necessário completar os seguintes passos: +Para transferir o seu vesting, complete os seguintes passos: -1. Inicie a transferência de vesting na mainnet do Ethereum +1. Inicie a transferência de vesting na mainnet Ethereum 2. Espere 20 minutos pela confirmação 3. Confirme a transferência do vesting no Arbitrum -## Como transfiro o meu contrato de vesting se estiver vestido apenas parcialmente? +### Como transfiro o meu contrato de vesting se estiver vestido apenas parcialmente? + + -1. Deposite ETH no contrato da ferramenta de transferência (a interface pode ajudar a estimar uma quantidade razoável) +1. Deposite ETH no contrato da ferramenta de transferência (a interface pode ajudar a estimar uma quantia razoável) -2. Envie um pouco de GRT através do contrato da ferramenta de transferência para que a L2 inicialize o contrato de vesting na L2. Isto também configurará o endereço beneficiário na L2. +2. Envie um pouco de GRT através do contrato da ferramenta de transferência, para que a L2 inicialize o contrato de vesting na L2. Isto também configurará o endereço beneficiário na L2. 3. Envie o seu stake/a sua delegação à L2 através das funções "bloqueadas" da ferramenta de transferência no contrato de staking na L1. -4. Saque qualquer ETH restante do contrato da ferramenta de transferência +4. Saque qualquer quantia restante de ETH do contrato da ferramenta de transferência -## Como transfiro o meu contrato de vesting se estiver totalmente vestido? +### Como transfiro o meu contrato de vesting, se eu estiver totalmente vestido? -Para quem estiver totalmente vestido, o processo é parecido: + -1. Deposite ETH no contrato da ferramenta de transferência (a interface pode ajudar a estimar uma quantidade razoável) +Para quem estiver totalmente vestido, o processo não é muito diferente: + +1. Deposite ETH no contrato da ferramenta de transferência (a interface pode estimar uma quantidade razoável) 2. Determine o seu endereço na L2 com uma chamada ao contrato da ferramenta de transferência 3. Envie o seu stake/a sua delegação à L2 através das funções "bloqueadas" da ferramenta de transferência no contrato de staking na L1. -4. Saque qualquer ETH restante do contrato da ferramenta de transferência +4. Saque qualquer quantia de ETH que restar do contrato da ferramenta de transferência -## Posso transferir o meu contrato de vesting ao Arbitrum? +### Posso transferir o meu contrato de vesting ao Arbitrum? -Você pode transferir o saldo de GRT do seu contrato de vesting a um contrato de vesting na L2. Este é um pré-requisito para a transferência de stake ou delegação do seu contrato de vesting à L2. O contrato de vesting deve ter um saldo de GRT maior que zero (é possível transferir uma quantidade pequena, como 1 GRT, se necessário). +Você pode transferir o saldo de GRT do seu contrato de vesting a um contrato na L2. Isto é necessário para transferir stake ou delegações do seu contrato de vesting à L2. O contrato deve ter um saldo de GRT acima de zero (é possível transferir uma quantia pequena, como 1 GRT, se precisar). 
-Ao transferir GRT do seu contrato de vesting na L1 à L2, você pode escolher a quantidade a ser enviada e fazer isto quantas vezes quiser. O contrato de vesting na L2 será inicializado na primeira vez que você transferir GRT. +Ao transferir GRT do seu contrato de vesting na L1 à L2, dá para escolher a quantia a ser enviada e repetir quantas vezes quiser. O contrato de vesting na L2 será inicializado na sua primeira transferência de GRT. -As transferências são feitas com o uso de uma Ferramenta de Transferência que será visível no seu perfil do Explorer ao conectar com a conta do contrato de vesting. +As transferências são feitas com uma Ferramenta de Transferência, que aparecerá no seu perfil do Explorer ao conectar com a conta do contrato de vesting. -Por favor, note que você não poderá liberar ou sacar GRT do contrato de vesting na L2 até o fim da sua linha do tempo de vesting, quando o seu contrato estiver totalmente vestido. Se precisar liberar GRT até então, você pode transferir o GRT de volta ao contrato de vesting na L1 com o uso de outra ferramenta de transferência disponível para este propósito. +Por favor, lembre que você não poderá liberar ou sacar GRT do contrato de vesting na L2 até o fim da sua agenda de vesting, quando o seu contrato estiver totalmente vestido. Se precisar liberar GRT até então, dá para devolver o GRT ao contrato de vesting na L1, com outra ferramenta de transferência especializada. -Se não tiver transferido qualquer saldo de contrato de vesting à L2, e o seu contrato de vesting estiver totalmente vestido, você não deve transferir o seu contrato de vesting à L2. Em vez disto, você pode usar as ferramentas de transferência para determinar um endereço de carteira na L2, e transferir diretamente o seu stake ou a sua delegação a esta carteira regular na L2. +Se não tiver transferido qualquer saldo de contrato de vesting à L2, e o seu contrato de vesting estiver totalmente vestido, não transfira o seu contrato de vesting à L2. Em vez disto, use as ferramentas de transferência para firmar um endereço de carteira na L2, e transfira diretamente o seu stake/delegação a esta carteira regular na L2. -## Estou a usar o meu contrato de vesting para fazer stake na mainnet. Posso transferir o meu stake ao Arbitrum? +### Estou a usar o meu contrato de vesting para fazer stake na mainnet. Posso transferir o meu stake ao Arbitrum? -Sim, mas se o seu contrato ainda estiver vestindo, você só pode transferir o stake para que esteja em posse do seu contrato de vesting na L2. Você deve primeiro inicializar este contrato na L2 com a transferência de um saldo de GRT com o uso de uma ferramenta de transferência de contrato de vesting no Explorer. Se o seu contrato estiver totalmente vestido, você pode transferir o seu stake a qualquer endereço na L2, mas deve configurá-lo previamente e depositar um pouco de ETH para que a ferramenta de transferência à L2 pague por gas na L2. +Sim, mas se o seu contrato ainda estiver vestindo, você só pode transferir o stake para que ele esteja em posse do seu contrato de vesting na L2. Primeiro, inicie este contrato na L2 com a transferência de um saldo de GRT, com uma ferramenta de transferência de contrato de vesting no Explorer. Se o seu contrato estiver totalmente vestido, transfira o seu stake a qualquer endereço na L2 após configurá-lo previamente e deposite ETH para que a ferramenta de transferência pague o gás na L2. -## Estou a usar o meu contrato de vesting para delegar na mainnet. Posso transferir as minhas delegações ao Arbitrum? 
+### Estou a usar o meu contrato de vesting para delegar na mainnet. Posso transferir as minhas delegações ao Arbitrum? -Sim, mas se o seu contrato ainda estiver vestindo, você só pode transferir a delegação para que esteja em posse do seu contrato de vesting na L2. Você deve primeiro inicializar este contrato na L2 com a transferência de um saldo de GRT com o uso de uma ferramenta de transferência de contrato de vesting no Explorer. Se o seu contrato estiver totalmente vestido, você pode transferir a sua delegação a qualquer endereço na L2, mas deve configurá-lo previamente e depositar um pouco de ETH para que a ferramenta de transferência à L2 pague por gas na L2. +Sim, mas se o seu contrato ainda estiver no processo de vesting, você só pode transferir a delegação para que ela fique em posse do seu contrato de vesting na L2. Você deve primeiro inicializar este contrato na L2 com a transferência de um saldo de GRT com o uso de uma ferramenta especializada no Explorer. Se o seu contrato estiver totalmente vestido, você pode transferir a sua delegação a qualquer endereço na L2, mas deve configurá-lo previamente e depositar ETH para que a ferramenta pague o gás na L2. -## Posso especificar um beneficiário diferente para o meu contrato de vesting na L2? +### Posso especificar um beneficiário diferente para o meu contrato de vesting na L2? -Sim. Na primeira vez que você transferir um saldo e preparar o seu contrato de vesting na L2, você pode especificar um beneficiário na L2. Garanta que este beneficiário é uma carteira que possa realizar transações no Arbitrum One, por ex. deve ser uma EOA ou uma multisig editada no Arbitrum One. +Sim. Na primeira vez que transferir um saldo e preparar o seu contrato de vesting na L2, dá para especificar um beneficiário na L2. Veja se este beneficiário tem uma carteira que possa realizar transações no Arbitrum One, por ex. uma EOA ou uma multisig lançada ao Arbitrum One. -Se o seu contrato estiver totalmente vestido, você não poderá preparar um contrato de vesting na L2; em vez disto, você configurará um endereço de carteira na L2 e esta será a carteira destinatária para o seu stake ou sua delegação no Arbitrum. +Se o seu contrato estiver totalmente vestido, não será preparado um contrato de vesting na L2; em vez disto, será configurado um endereço de carteira na L2. Esta será a carteira destinatária para o seu stake ou a sua delegação no Arbitrum. -## O meu contrato está totalmente vestido. Posso transferir o meu stake ou a minha delegação para outro endereço que não seja um contrato de vesting na L2? +### O meu contrato está totalmente vestido. Posso transferir o meu stake ou a minha delegação para outro endereço que não seja um contrato de vesting na L2? -Sim. Se não tiver transferido qualquer saldo de contrato de vesting à L2, e o seu contrato de vesting estiver totalmente vestido, você não deve transferir o seu contrato de vesting à L2. Em vez disto, você pode usar as ferramentas de transferência para determinar um endereço de carteira na L2, e transferir diretamente o seu stake ou a sua delegação a esta carteira regular na L2. +Sim. Se não tiver transferido qualquer saldo de contrato de vesting à L2, e o seu contrato estiver totalmente vestido, não transfira o seu contrato à L2. Em vez disto, use as ferramentas de transferência para determinar um endereço de carteira na L2 e transfira diretamente o seu stake/delegação a esta carteira regular na L2. -Isto lhe permite transferir o seu stake ou sua delegação para qualquer endereço na L2. 
+Isto permite-lhe transferir o seu stake ou a sua delegação para qualquer endereço na L2. -## Meu contrato de vesting ainda está vestindo. Como transfiro o saldo do meu contrato de vesting à L2? +### O meu contrato de vesting ainda está no processo de vesting. Como transfiro o saldo do meu contrato à L2? -Estes passos só se aplicam se o seu contrato ainda estiver vestindo, ou se você já usou este processo antes, quando o seu contrato ainda estava vestindo. +Estes passos só se aplicam se o seu contrato ainda estiver em processo de vesting, ou se já usou este processo quando seu contrato ainda estava com vesting em andamento. -Para transferir o seu contrato de vesting à L2, você deverá enviar qualquer saldo de GRT à L2 com as ferramentas de transferência, que inicialização o seu contrato de vesting na L2: +Para transferir o seu contrato de vesting à L2, envie qualquer saldo de GRT à L2 com as ferramentas de transferência. Isto iniciará o seu contrato de vesting na L2: -1. Deposite um pouco de ETH no contrato da ferramenta de transferência (isto será usado para pagar por gas na L2) +1. Deposite ETH no contrato da ferramenta de transferência (isto será usado para pagar gás na L2) -2. Revogue o acesso do protocolo ao contrato de vesting (necessário para o próximo passo) +2. Revogue o acesso protocolar ao contrato de vesting (necessário para o próximo passo) -3. Conceda ao protocolo acesso ao contrato de vesting (isto permitirá que o seu contrato interaja com a ferramenta de transferência) +3. Conceda acesso protocolar ao contrato de vesting (isto permitirá que o seu contrato interaja com a ferramenta de transferência) -4. Especifique um endereço beneficiário na L2\* e inicie a transferência do saldo na mainnet do Ethereum +4. Especifique um endereço beneficiário na L2\* e inicie a transferência do saldo na mainnet Ethereum 5. Espere 20 minutos pela confirmação @@ -296,20 +374,38 @@ Para transferir o seu contrato de vesting à L2, você deverá enviar qualquer s \*Se necessário - por ex. se você usar um endereço de contrato. -\*\*\*\*Você deve confirmar a sua transação para finalizar a transferência do saldo no Arbitrum. Este passo deve ser completado dentro de 7 dias, caso contrário, o saldo pode ser perdido. Na maioria dos casos, este passo será executado automaticamente, mas uma confirmação manual pode ser necessária caso haja um surto no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, há recursos para ajuda: Contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). +\*\*\*\*Confirme a sua transação para finalizar a transferência do saldo no Arbitrum. Este passo deve ser completado dentro de 7 dias, ou o saldo será perdido. Na maioria dos casos, este passo será executado automaticamente, mas pode ser necessário confirmar manualmente se houver um aumento repentino no preço de gas no Arbitrum. Caso haja quaisquer dificuldades neste processo, contacte o suporte em support@thegraph.com ou no [Discord](https://discord.gg/graphprotocol). + +### O meu contrato de vesting mostra 0 GRT, assim, não posso transferi-lo. Porquê? O que fazer? + +Para inicializar o seu contrato de vesting na L2, transfira uma quantia de GRT maior que zero à L2. Isto é exigido pelo bridge de GRT no Arbitrum em uso pelas Ferramentas de Transferência na L2. O GRT deve originar do saldo do contrato, e não deve incluir GRT delegado ou em stake. 
+ +Se tiver delegado ou posto em stake todo o seu GRT do contrato de vesting, é possível enviar manualmente uma pequena quantia, como 1 GRT, ao endereço do contrato de vesting a partir de qualquer outro lugar (por ex., de outra carteira, ou de uma corretora). + +### Estou a usar um contrato de vesting para fazer stake ou delegar, mas não vejo um botão para transferir o meu stake ou a minha delegação à L2. O que fazer? + +Caso o seu contrato de vesting não esteja concluído, crie logo um contrato de vesting na L2 que receba o seu stake ou a sua delegação na L2. Este contrato não permitirá o lançamento de tokens na L2 até o fim da agenda de vesting, mas lhe permitirá transferir GRT de volta ao contrato de vesting na L1 para lá ser lançado. + +Ao se conectar com o contrato de vesting no Explorer, procure um botão para inicializar o seu contrato de vesting na L2. Siga esse processo primeiro, e então verá os botões para transferir o seu stake ou a sua delegação no seu perfil. + +### Caso eu inicialize o meu contrato de vesting na L2, isto também transferirá a minha delegação à L2 automaticamente? + +Não. Inicializar o seu contrato de vesting na L2 é necessário para a transferência de stakes ou delegações do contrato, mas estes ainda devem ser transferidos separadamente. + +Surgirá um banner no seu perfil que lhe pedirá para transferir o seu stake ou a sua delegação após ter inicializado o seu contrato de vesting na L2. -## Posso mover o meu contrato de vesting de volta à L1? +### Posso devolver o meu contrato de vesting à L1? -Não há necessidade de fazê-lo, pois o seu contrato de vesting ainda está na L1. Ao usar as ferramentas de transferência, basta criar um novo contrato na L2 conectado com o seu contrato de vesting na L1, e assim poderá enviar GRT entre os dois. +Não há necessidade, pois o seu contrato de vesting ainda está na L1. Ao usar as ferramentas de transferência, basta criar um contrato na L2 conectado com o seu contrato na L1; assim, poderá enviar GRT entre os dois. -## Por que tenho que mover o meu contrato de vesting? +### Por que tenho que mudar o local do meu contrato de vesting? -É necessário configurar um contrato de vesting na L2 para que esta conta tenha o seu stake ou delegação na L2. Caso contrário, não haveria como você transferir o stake/a delegação à L2 sem "escapar" do contrato de vesting. +É necessário configurar um contrato de vesting na L2 para que esta conta tenha o seu stake ou delegação na L2. Senão, não haveria como transferir o seu stake/delegação à L2 sem "escapar" do contrato de vesting. -## O que acontece se eu tentar dar sacar o meu contrato quando ele está parcialmente vestido. Isto é possível? +### O que acontece se eu tentar sacar o meu contrato quando estiver parcialmente vestido? Isto é possível? -Isto não é uma possibilidade. Você pode mover fundos de volta para a L1 e sacá-los lá. +Isto não é possível. Você pode devolver fundos para a L1 e sacá-los de lá. -## E se eu não quiser mover o meu contrato de vesting à L2? +### E se eu não quiser mudar o meu contrato de vesting para a L2? -Você pode continuar a fazer stake/delegar na L1. Com o tempo, você pode considerar mudar-se à L2 para ativar recompensas lá, uma vez que o protocolo escala no Arbitrum. Note que estas ferramentas de transferência são para contratos de vesting que podem fazer stake e delegar no protocolo. Se o seu contrato não permite staking ou delegação, ou é revogável, então não haverá uma ferramenta de transferência disponível. Você ainda poderá sacar o seu GRT da L1 quando disponível.
+Você pode continuar a fazer stake/delegar na L1. Com o tempo, considere mudar-se à L2 para ativar recompensas lá, à medida que o protocolo escala no Arbitrum. Perceba que estas ferramentas de transferência são para contratos de vesting que podem fazer stake e delegar no protocolo. Se o seu contrato não permite staking ou delegação, ou é revogável, então não haverá ferramentas de transferência disponíveis. Você ainda poderá sacar o seu GRT da L1 quando disponível. diff --git a/website/pages/pt/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/pt/arbitrum/l2-transfer-tools-guide.mdx index 32381145dd1d..f799681d02ba 100644 --- a/website/pages/pt/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/pt/arbitrum/l2-transfer-tools-guide.mdx @@ -2,19 +2,19 @@ title: Guia das Ferramentas de Transferência para L2 --- -> As ferramentas de transferência para L2 têm previsão de lançamento até o final de 2023. - O The Graph facilitou muito o processo de se mudar para a L2 no Arbitrum One. Para cada participante no protocolo, há um conjunto de Ferramentas de Transferência para L2 que suavizam o processo para todos os participantes na rede. Estas ferramentas exigem que você siga um conjunto específico de passos, dependente no que você transferir. Algumas perguntas frequentes sobre estas ferramentas são respondidas nas [Perguntas Frequentes das Ferramentas de Transferência para L2](/arbitrum/l2-transfer-tools-faq). As Perguntas Frequentes contém explicações profundas sobre como usar as ferramentas, como elas funcionam, e coisas a lembrar ao usá-las. ## Como transferir o seu subgraph ao Arbitrum (L2) + + ## Benefícios de transferir os seus subgraphs A comunidade e os programadores centrais do The Graph andaram [preparando](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) as suas mudanças ao Arbitrum ao longo do último ano. O Arbitrum, uma blockchain layer 2, ou "L2", herda a segurança do Ethereum, mas providencia taxas de gas muito menores. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Ao publicar ou atualizar o seu subgraph na Graph Network, você interage com contratos inteligentes no protocolo, e isto exige o pagamento de gas usando ETH. Ao mover os seus subgraphs ao Arbitrum, quaisquer atualizações futuras ao seu subgraph exigirão taxas de gas muito menores. As taxas menores, e o fato de que bonding curves de curadoria na L2 são planas, também facilitarão a curadoria no seu subgraph para outros Curadores, a fim de aumentar as recompensas para Indexadores no seu subgraph. Este ambiente de custo reduzido também barateia a indexação e o serviço de Indexadores no seu subgraph.
As recompensas de indexação também aumentarão no Arbitrum e decairão na mainnet do Ethereum nos próximos meses, então mais e mais Indexadores transferirão o seu stake e preparando as suas operações na L2. ## Como entender o que acontece com o sinal, o seu subgraph na L1 e URLs de query @@ -30,7 +30,7 @@ Queries no subgraph na L2 deverão ser feitas para uma URL diferente (or 'arbitr ## Como escolher a sua carteira na L2 -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +Ao publicar o seu subgraph na mainnet, você usou uma carteira conectada para criar o subgraph, e esta carteira é dona do NFT que representa este subgraph e lhe permite publicar atualizações. Ao transferir o subgraph ao Arbitrum, você pode escolher uma carteira diferente que será dona deste NFT de subgraph na L2. diff --git a/website/pages/pt/billing.mdx b/website/pages/pt/billing.mdx index cd24d46240ee..fba845ad154d 100644 --- a/website/pages/pt/billing.mdx +++ b/website/pages/pt/billing.mdx @@ -37,8 +37,12 @@ Enquanto o protocolo do The Graph opera na Mainnet do Ethereum, [o contrato de c ### Depósitos de GRT com uma carteira de cripto + + > Esta seção presume que já tens GRT na sua carteira de cripto, e que está na mainnet Ethereum. Caso não tenha GRT, aprenda como adquirir GRT [aqui](#getting-grt). +Para um guia em vídeo sobre como depositar GRT no seu saldo de cobrança com uma carteira de cripto, assista este [vídeo](https://youtu.be/4Bw2sh0FxCg). + 1. Vá para a [página de Cobrança do Subgraph Studio](https://thegraph.com/studio/billing/). 2. Clique no botão "Connect Wallet" (Conectar Carteira) no canto superior direito da página. Isto levará à página de seleção de carteira; lá, selecione a sua carteira e clique em "Connect". @@ -71,6 +75,8 @@ Enquanto o protocolo do The Graph opera na Mainnet do Ethereum, [o contrato de c ### Depósitos de GRT com uma carteira multisig + + 1. Vá para a [página de Cobrança do Subgraph Studio](https://thegraph.com/studio/billing/). 2. Clique no botão "Connect Wallet" no canto superior direito da página, selecione a sua carteira e clique em "Connect". Se usar o [Gnosis-Sa0fe](https://gnosis-safe.io/), poderá conectar a sua multisig além da sua carteira de assinatura. Depois, assine a mensagem associada — isto não custa gas. @@ -97,11 +103,11 @@ Enquanto o protocolo do The Graph opera na Mainnet do Ethereum, [o contrato de c ## Adquirir GRT -Esta seção lhe mostrará como adquirir GRT para pagar taxas de query. +Esta seção mostrará-lhe como adquirir GRT para pagar taxas de query. ### Coinbase -Este é um guia passo a passo para comprar GRT na Coinbase. +Este é um guia passo a passo sobre como comprar GRT na Coinbase. 1. Crie uma conta na [Coinbase](https://www.coinbase.com/). 2. Quando tiver criado uma conta, precisará verificar a sua identidade através do processo chamado KYC (sigla em inglês para "Conheça o Seu Cliente"). Este é um processo comum em todas as exchanges de cripto, centralizadas ou custodiais. @@ -153,6 +159,50 @@ Veja como comprar GRT no Uniswap. Saiba mais sobre como adquirir GRT no Uniswap [aqui](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Como adquirir Ethereum + +Esta seção mostrará-lhe como adquirir Ethereum (ETH) para pagar por taxas de transação ou gas. É necessário ter ETH para executar operações na rede Ethereum, como a transferência de tokens ou interação com contratos. 
+ +### Coinbase + +Este é um guia passo a passo sobre como comprar ETH na Coinbase. + +1. Crie uma conta na [Coinbase](https://www.coinbase.com/). +2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. +3. Após verificar a sua identidade, compre ETH no botão "Comprar/Vender", no canto superior direito da página. +4. Selecione a moeda que deseja comprar — no caso, ETH. +5. Selecione o seu método de pagamento preferido. +6. Insira a quantia de ETH que deseja comprar. +7. Reveja a sua compra e clique em "Comprar ETH". +8. Confirme a sua compra, e o ETH será comprado com sucesso. +9. Pode transferir o ETH da sua conta à sua carteira de cripto, como o [MetaMask](https://metamask.io/). + - Para transferir o ETH à sua carteira de cripto, clique no botão "Contas" no canto superior direito da página. + - Clique em "Enviar", próximo à conta de ETH. + - Insira a quantia de ETH que deseja enviar, e o endereço da carteira que a receberá. + - Clique em "Continuar" e confirme a sua transação. + +Saiba mais sobre como adquirir ETH na Coinbase [aqui](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +Este é um guia passo a passo sobre como comprar ETH na Binance. + +1. Crie uma conta na [Binance](https://www.binance.com/en). +2. Quando tiver criado uma conta, verifique a sua identidade através do processo KYC (sigla em inglês para "Conheça o Seu Cliente"). Este processo é comum em todas as corretoras de cripto, centralizadas ou custodiais. +3. Após verificar a sua identidade, poderá comprar ETH no botão "Comprar/Vender", no banner da página principal. +4. Selecione a moeda que deseja comprar — no caso, ETH. +5. Selecione o seu método de pagamento preferido. +6. Insira a quantia de ETH que deseja comprar. +7. Reveja a sua compra e clique em "Comprar ETH". +8. Confirme a sua compra, e o seu ETH aparecerá na sua Carteira Spot da Binance. +9. Pode transferir o ETH da sua conta à sua carteira de cripto, como o [MetaMask](https://metamask.io/). + - Para transferir o ETH à sua carteira de cripto, adicione o endereço da sua carteira à whitelist de saques. + - Clique no botão "wallet", clique em "sacar" (withdraw), e selecione ETH. + - Insira a quantia de ETH que deseja enviar, e o endereço da carteira na whitelist à qual quer enviar. + - Clique em "Continuar" e confirme a sua transação. + +Saiba mais sobre como adquirir ETH na Binance [aqui](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge O contrato inteligente só é projetado para bridgear GRT da mainnet Ethereum até a rede Arbitrum. Se quiser transferir o seu GRT do Arbitrum de volta à mainnet Ethereum, precisará usar a [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/pt/chain-integration-overview.mdx b/website/pages/pt/chain-integration-overview.mdx new file mode 100644 index 000000000000..f33e0f7d74c8 --- /dev/null +++ b/website/pages/pt/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Visão Geral do Processo de Integração de Chains +--- + +Um processo de integração transparente e baseado em governança foi desenhado por equipas de blockchain que procuram [a integração com o protocolo do Graph](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). 
É um processo de três fases, como resumido abaixo. + +## Fase 1. Integração Técnica + +- Equipas constroem uma integração com o Graph Node e com o Firehose para chains sem base em EVM. [Aqui está](/new-chain-integration/). +- Equipas iniciam o processo de integração de protocolo com a criação de um tópico de Fórum [aqui](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (Nova subcategoria de Fontes de Dados sob Governança e GIPs). O uso do modelo padrão do Fórum é obrigatório. + +## Fase 2. Validação de Integração + +- Equipas colaboram com o núcleo de programadores, com a Graph Foundation, e com operadores de interfaces gráficas e gateways de redes, como o [Subgraph Studio](https://thegraph.com/studio/), para garantir um processo de integração suave. Isto envolve a providência da infraestrutura de backend necessária, como o JSON RPC da chain a ser integrada ou os endpoints do Firehose. Equipas que querem evitar a autohospedagem de tal infraestrutura podem usar a comunidade de operadores de nodes do The Graph (Indexadores) para fazê-lo, com qual a Foundation pode oferecer ajuda. +- Indexadores do Graph testam a integração na testnet do The Graph. +- O núcleo de programadores e os Indexadores monitoram a estabilidade, a performance e o determinismo dos dados. + +## Fase 3. Integração da Mainnet + +- As equipas propõem integração à mainnet com o envio de uma Proposta de Melhoria do Graph (GIP) e o início de um pull request (PR) no [matrix de apoio de funções](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (mais detalhes no link). +- O Conselho do Graph revisa o pedido e aprova o apoio à mainnet, providenciando uma Fase 2 com êxito e aprovação da comunidade. + +--- + +Se o processo parece difícil, não se preocupe! A Graph Foundation está comprometida com o apoio de integradores com o incentivo à colaboração, ofertas de informações essenciais, e guiá-los através de várias fases, incluindo a navegação de processos de governança, como Propostas de Melhoria do Graph (GIPs) e pull requests. Caso tenha alguma pergunta, entre em contacto no [info@thegraph.foundation](mailto:info@thegraph.foundation) ou através do Discord (por ex., Pedro, membro da Graph Foundation, IndexerDAO, ou outros programadores do núcleo). + +Tens o que é necessário para moldar o futuro da Graph Network? [Comece a sua proposta](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) agora e faça parte da revolução web3! + +--- + +## Perguntas frequentes + +### 1. Que relação isto tem com a [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +Este processo é relacionado ao Serviço de Dados de Subgraph, no momento aplicável apenas para novas `Fontes de Dados` de Subgraph. + +### 2. O que acontece se o apoio ao Firehose e Substreams chegar após a rede ser apoiada na mainnet? + +Isto só impactaria o apoio do protocolo a recompensas de indexação em subgraphs movidos a Substreams. A nova implementação do Firehose precisaria de testes na testnet, seguindo a metodologia sublinhada na Fase 2 deste GIP. De maneira parecida, ao assumir que a implementação seja confiável e de bom desempenho, um PR no [Matrix de Apoio de Funções](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) seria requerido (A função de Subgraph `Substreams data sources`), assim como um novo GIP para apoio do protocolo a recompensas de indexação. 
Qualquer pessoa pode criar o PR e a GIP; a Foundation ajudaria com o apoio do Conselho. + +### 3. Quanto tempo este processo levará? + +Espera-se que leve várias semanas, com variação a depender do tempo da programação da integração, da necessidade de pesquisas adicionais, testes e bugfixes, e como sempre, o timing do processo de governança que exige deliberações da comunidade. + +O apoio do protocolo às recompensas de indexação depende da banda dos acionistas para seguir com os testes, recepção de feedback, e gerenciamento de contribuições ao código-base central, se aplicável. Isto é ligado directamente à maturidade da integração e o quão responsiva a equipa de integração é (que pode ou não ser a equipa por trás da implementação do RPC/Firehose). A Foundation está aqui para ajudar durante todo o processo. + +### 4. Como as prioridades serão administradas? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/pt/cookbook/arweave.mdx b/website/pages/pt/cookbook/arweave.mdx index 6f537eaae21e..9e412c51fcb0 100644 --- a/website/pages/pt/cookbook/arweave.mdx +++ b/website/pages/pt/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Construindo Subgraphs no Arweave --- -> O apoio ao Arweave no Graph Node e no Serviço Hospedado está em beta: por favor nos contacte no [Discord](https://discord.gg/graphprotocol) se tiver alguma pergunta sobre a construção de subgraphs no Arweave! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! Neste guia, você aprenderá como construir e lançar Subgraphs para indexar a blockchain Arweave. @@ -83,7 +83,7 @@ dataSources: ``` - Subgraphs no Arweave introduzem uma nova categoria de fonte de dados (`arweave`) -- A rede deve corresponder a uma rede no Graph Node que a hospeda. No Serviço Hospedado, a _mainnet_ do Arweave é `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Fontes de dados no Arweave introduzem um campo `source.owner` opcional, a chave pública de uma carteira no Arweave Fontes de dados no Arweave apoiam duas categorias de _handlers_: @@ -150,9 +150,9 @@ _Handlers_ de bloco recebem um `Block`, enquanto transações recebem uma `Trans Escrever os mapeamentos de um Subgraph no Arweave é muito similar à escrita dos mapeamentos de um Subgraph no Ethereum. Para mais informações, clique [aqui](/developing/creating-a-subgraph/#writing-mappings). -## Lançando um Subgraph Arweave no Serviço Hospedado +## Deploying an Arweave Subgraph on the hosted service -Após o seu subgraph ter sido criado no painel de controlo do Serviço Hospedado, é possível lançá-lo usando o código de linha de comando `graph deploy`. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --token-de-acesso diff --git a/website/pages/pt/cookbook/base-testnet.mdx b/website/pages/pt/cookbook/base-testnet.mdx index 4bb884673189..71853220ac24 100644 --- a/website/pages/pt/cookbook/base-testnet.mdx +++ b/website/pages/pt/cookbook/base-testnet.mdx @@ -27,7 +27,7 @@ yarn global add @graphprotocol/graph-cli Entre no [Subgraph Studio](https://thegraph.com/studio/) e conecte a sua carteira de criptomoedas. -Once connected, click "Create a Subgraph" and enter a name for your subgraph. +Após conectada, clique em "Create a Subgraph" (Criar um Subgraph) e insira um nome para o seu subgraph. Selecione "Base (testnet)" como a blockchain indexada e clique em Create Subgraph (Criar Subgraph). diff --git a/website/pages/pt/cookbook/grafting.mdx b/website/pages/pt/cookbook/grafting.mdx index d80ca5bdca73..bcc5a99e21f3 100644 --- a/website/pages/pt/cookbook/grafting.mdx +++ b/website/pages/pt/cookbook/grafting.mdx @@ -24,6 +24,22 @@ Para mais informações, confira: Neste tutorial, cobriremos um caso de uso básico. Substituiremos um contrato existente com um contrato idêntico (com um novo endereço, mas o mesmo código). Depois, enxertaremos o subgraph existente ao subgraph "base" que rastreará o novo contrato. +## Notas Importantes sobre Enxertos ao Migrar Para a Graph Network + +> **Aviso**: Se atualizar o seu subgraph do Subgraph Studio, ou do serviço hospedado, à rede descentralizada, evite ao máximo usar enxertos durante o processo de migração. + +### Qual a Importância Disto? + +O enxerto é uma ferramenta poderosa que lhe permite "enxertar" um subgraph em outro — a fim de, efetivamente, transferir dados históricos do subgraph existente a uma nova versão. Enquanto isto é uma forma eficaz de preservar dados e poupar tempo de indexação, enxertos podem causar complexidades e possíveis problemas ao migrar de um ambiente hospedado até a rede descentralizada. Não é possível enxertar um subgraph da Graph Network de volta ao serviço hospedado ou ao Subgraph Studio. + +### Boas práticas + +**Migração Inicial**: na primeira publicação do seu subgraph à rede descentralizada, faça-o sem enxertos. Garanta que o subgraph está estável e que ele funciona como esperado. + +**Atualizações Subsequentes**: quando o seu subgraph estiver ao vivo e estável na rede descentralizada, use o enxerto em versões futuras para suavizar a transição e preservar dados históricos. + +Ao aderir a estas diretrizes, dá para minimizar riscos e garantir um processo de migração mais suave. + ## Como Construir um Subgraph Existente Construir subgraphs é uma parte essencial do Graph; o processo é descrito em mais detalhes [aqui](http://localhost:3000/en/cookbook/quick-start/). Para poder lançar o subgraph existente usado neste tutorial, há o seguinte repo: diff --git a/website/pages/pt/cookbook/near.mdx b/website/pages/pt/cookbook/near.mdx index 2142b2c2c246..80327a26b57c 100644 --- a/website/pages/pt/cookbook/near.mdx +++ b/website/pages/pt/cookbook/near.mdx @@ -277,7 +277,7 @@ No momento, não há apoio à funcionalidade de pendências para subgraphs na NE ### A minha pergunta não foi respondida. Onde posso conseguir mais ajuda sobre construir subgraphs na NEAR? -Se esta for uma pergunta geral sobre desenvolvimento de subgraphs, há muito mais informações no resto da [documentação para programadores](/cookbook/quick-start). 
Caso contrário, entre no [Discord do Graph Protocol](https://discord.gg/graphprotocol) e pergunte no canal #near, ou mande sua pergunta para near@thegraph.com. +Se esta for uma pergunta geral sobre desenvolvimento de subgraphs, há mais informações no resto da [documentação para programadores](/quick-start). Caso contrário, entre no [Discord do Graph Protocol](https://discord.gg/graphprotocol) e pergunte no canal #near, ou mande a sua pergunta para near@thegraph.com. ## Referências diff --git a/website/pages/pt/cookbook/upgrading-a-subgraph.mdx b/website/pages/pt/cookbook/upgrading-a-subgraph.mdx index 70ea939e6f43..ecaf3ea9a005 100644 --- a/website/pages/pt/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/pt/cookbook/upgrading-a-subgraph.mdx @@ -1,23 +1,23 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: Como Atualizar um Subgraph Existente à Graph Network --- ## Introdução -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +Este guia ensina como atualizar o seu subgraph do serviço hospedado à rede descentralizada do The Graph. Mais de mil subgraphs foram atualizados à Graph Network com êxito, incluindo projetos como Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido e muito mais! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. +O processo é rápido, e os seus subgraphs só tem a ganhar com a confiabilidade e o desempenho da rede da The Graph Network. ### Pré-requisitos - Já lançaste um subgraph no serviço hospedado. -- O subgraph está a indexar uma chain disponível (ou disponível em beta) na The Graph Network. -- You have a wallet with ETH to publish your subgraph on-chain. -- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. +- The subgraph is indexing a chain available on The Graph Network. +- Tens uma carteira com ETH para publicar o seu subgraph on-chain. +- Tens cerca de 10 mil GRT para curar o seu subgraph para que Indexadores possam começar a indexá-lo. -## Upgrading an Existing Subgraph to The Graph Network +## Como Atualizar um Subgraph Existente à Graph Network -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> Encontre comandos específicos para o seu subgraph no [Subgraph Studio](https://thegraph.com/studio/). 1. Instale a versão mais recente do graph-cli: @@ -29,7 +29,7 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +Verifique se o seu `apiVersion` no subgraph.yaml está na versão 0.0.5 ou maior. 2. No repositório principal do subgraph, autentique o subgraph para lançar e construir no Studio: @@ -43,15 +43,15 @@ graph auth --studio graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +Se o seu subgraph tiver erros de build, refira ao [Guia de Migração em AssemblyScript](/release-notes/assemblyscript-migration-guide/). -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. 
You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. Entre no [Subgraph Studio](https://thegraph.com/studio/) com a sua carteira e lance o subgraph. O seu `` está no UI do Studio, baseado no nome do seu subgraph. ```sh graph deploy --studio ``` -5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): +5. Teste queries no playground do Studio. Aqui estão alguns exemplos para o [Sushi - Subraph de Trocas na Mainnet](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): ```sh { @@ -70,23 +70,23 @@ graph deploy --studio 6. Agora, seu subgraph já está lançado no Subgraph Studio, mas ainda não foi publicado na rede descentralizada. Agora é possível testar o subgraph para garantir que ele funciona como deve, com o uso do URL temporário de consulta como visto no topo da coluna acima. Como este nome já sugere, este é um URL temporário e não deve ser usado na produção. -- Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. -- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. +- Atualizar é apenas editar outra versão on-chain do seu subgraph existente. +- Como isto incorre um custo, recomendamos muito enviar e testar o seu subgraph no Subgraph Studio, com o uso da "URL de Query de Desenvolvimento" antes da edição. Veja um exemplo de transação [aqui](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Os preços giram em torno de 0.0425 ETH em 100 gwei. +- Sempre que precisar atualizar o seu subgraph, será cobrada uma taxa de atualização. Como isto incorre em um custo, é altamente recomendado lançar e testar seu subgraph no Goerli antes de lançar na mainnet. Isto pode, em alguns casos, exigir um pouco de GRT caso não haja sinal naquele subgraph. Se houver um sinal/curadoria naquela versão do subgraph (usando a automigração), as taxas serão divididas. 7. Publique o subgraph na rede descentralizada do The Graph com um clique no botão "Publish" (Publicar). -You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. +O seu subgraph deve ser curado com GRT para garantir que ele seja indexado por Indexadores. Para poupar custos em gas, dá para curar o seu subgraph na mesma transação em que o editou à rede. É recomendado curar o seu subgraph com pelo menos 10.000 GRT para mais qualidade de serviço. -And that's it! 
After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +Pronto! Após terminar de editar, seus subgraphs poderão ser visualizados ao vivo na rede descentralizada, através do [The Graph Explorer](https://thegraph.com/explorer). -Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. +Use o [canal #Curators](https://discord.gg/s5HfGMXmbW) no Discord à vontade para avisar aos Curadores que o seu subgraph está pronto para ser sinalizado. Também sugerimos compartilhar o seu volume esperado de consulta com eles. Assim, eles podem estimar quanto GRT devem sinalizar no seu subgraph. ### Como criar uma chave API -You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). +É possível gerar uma chave API no Subgraph Studio [aqui](https://thegraph.com/studio/apikeys/). -![API key creation page](/img/api-image.png) +![Página de criação de chave API](/img/api-image.png) Ao fim de toda semana, um recibo será gerado baseado nas taxas de query acumuladas durante este período. Este recibo será pago automaticamente com o GRT disponível em seu saldo. Seu saldo será atualizado após o saque do custo das suas taxas de query. As taxas de query são pagas em GRT através da rede Arbitrum. É necessário adicionar GRT ao contrato de cobrança do Arbitrum para ativar sua chave API através dos seguintes passos: @@ -94,14 +94,14 @@ Ao fim de toda semana, um recibo será gerado baseado nas taxas de query acumula - Envie o GRT à sua carteira. - Na página Billing (Cobrança) no Studio, clique em Add GRT (Adicionar GRT). -![Add GRT in billing](/img/Add-GRT-New-Page.png) +![Adição de GRT na cobrança](/img/Add-GRT-New-Page.png) - Siga os passos para adicionar o seu GRT ao saldo de cobrança. - Seu GRT será automaticamente ligado à rede Arbitrum e adicionado ao seu saldo de cobrança. -![Billing pane](/img/New-Billing-Pane.png) +![Painel de cobrança](/img/New-Billing-Pane.png) -> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. +> Nota: confira a [página oficial de cobrança](../billing.mdx) para instruções completas sobre como adicionar GRT ao seu saldo de cobrança. ### Como proteger a sua chave API @@ -110,27 +110,27 @@ Ao fim de toda semana, um recibo será gerado baseado nas taxas de query acumula 1. Subgraphs Autorizados 2. Domínio Autorizado -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +A sua chave API pode ser assegurada aqui: [here](https://thegraph.com/studio/apikeys/test/). -![Subgraph lockdown page](/img/subgraph-lockdown.png) +![Página de trancamento de subgraphs](/img/subgraph-lockdown.png) ### Como consultar o seu subgraph na rede descentralizada -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. 
+Agora é possível verificar o estado dos Indexers da rede no Graph Explorer (exemplo [aqui](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)).. A linha verde no topo indica que na hora da postagem, 8 Indexadores indexaram aquele subgraph com sucesso. Na aba Indexer, dá para ver quais Indexadores captaram seu subgraph. -![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) +![Subgraph do Rocket Pool](/img/rocket-pool-subgraph.png) Assim que o primeiro Indexer tiver indexado o seu subgraph por completo, pode começar a consultar o subgraph na rede descentralizada. O URL de consulta para o seu subgraph pode ser copiado e colado com um clique no símbolo próximo ao URL de consulta. Aparecerá algo assim: `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. +Importante: Substitua o `[api-key]` com uma chave API verídica, gerada na seção acima. Agora, pode usar aquele URL de Consulta no seu dapp para enviar os seus pedidos no GraphQL. Parabéns! Viraste um pioneiro da descentralização! -> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: +> Nota: Devido à natureza distribuída da rede, pode ser que Indexadores diferentes tenham indexado até blocos diferentes. Para receber apenas dados recentes, especifique o bloco mínimo que um indexador deve indexar para servir seu query com o argumento block: `{ number_gte: $minBlock }` como no exemplo abaixo: ```graphql { @@ -140,11 +140,11 @@ Parabéns! Viraste um pioneiro da descentralização! } ``` -More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). +Veja mais informações sobre a rede, e como lidar com reorganizações, no artigo da documentação [Sistemas Distribuídos](/querying/distributed-systems/). -## Updating a Subgraph on the Network +## Como Atualizar um Subgraph na Rede -If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. +Caso queira atualizar um subgraph já existente na rede, isto é possível ao lançar uma nova versão do seu subgraph ao Subgraph Studio, através do Graph CLI. 1. Faça alterações no seu subgraph atual. É bom testar pequenos consertos no Subgraph Studio com publicações no Goerli. 2. Lance o seguinte e especifique a nova versão no comando (por ex. v0.0.1, v0.0.2, etc.): @@ -156,58 +156,58 @@ graph deploy --studio 3. Teste a nova versão no Subgraph Studio com queries no playground 4. Publique a nova versão na rede do The Graph. Não esqueça que isto exige gas (como descrito acima). -### Owner Update Fee: Deep Dive +### Sobre as Taxas de Upgrade para o Dono -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> Nota: A curadoria no Arbitrum não usa bonding curves. Aprenda mais sobre o Arbitrum [aqui](/arbitrum/arbitrum-faq/). -An update requires GRT to be migrated from the old version of the subgraph to the new version. 
This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). +Upgrades exigem GRT para migrar da versão antiga do subgraph à versão nova. Portanto, a cada atualização, será criada uma bonding curve (curva de união; mais sobre bonding curves aqui: [here](/network/curating#bonding-curve-101)). -The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. +A nova bonding curve cobra a taxa de curação de 1% sobre todo GRT a ser migrado à nova versão. O titular deve pagar 50% disto, ou 1,25%. Os outros 1,25% são absorvidos por todos os curadores como um tributo. Este incentivo existe para que o dono de um subgraph não possa esvaziar os fundos dos curadores com chamadas recursivas de atualização. Se não houver atividade de curação, é necessário pagar no mínimo 100 GRT para sinalizar seu próprio subgraph. Vamos fazer um exemplo. Isto só acontece se o seu subgraph for curado ativamente: - São sinalizados 100.000 GRT com a função de migração automática na v1 de um subgraph -- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned -- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. +- O dono atualiza à v2. São migrados 100.000 GRT a uma nova bonding curve, sendo que 97,500 GRT entram na curva nova e 2.500 são queimados +- O dono então queima 1.250 GRT para pagar por metade da taxa. O dono deve ter isto na sua carteira antes da atualização; caso contrário, o upgrade falhará. Isto acontece na mesma transação do upgrade. -_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ +_Enquanto este mecanismo permanece ao vivo na rede, a comunidade atualmente discute maneiras de reduzir o custo de atualizações para programadores de subgraphs._ ### Como Conservar uma Versão Estável de Subgraph -If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. +Se for fazer muitas mudanças ao seu subgraph, não é bom atualizá-lo constantemente e afrontar os custos da atualização. É importante conservar uma versão estável e consistente do seu subgraph; não só pelo custo, mas também para que os Indexadores tenham confiança em seus tempos de sincronização. Os Indexadores devem ser avisados dos seus planos de atualização, para que os tempos de sincronização dos Indexadores não sejam afetados. 
Use à vontade o [canal dos #Indexers](https://discord.gg/JexvtHa7dq) no Discord para avisar aos Indexadores quando for mudar a versão dos seus subgraphs. -Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. +Subgraphs são APIs abertas usadas por programadores externos. As APIs abertas devem seguir padrões estritos para não quebrarem os aplicativos de programadores externos. Na The Graph Network (rede do The Graph), um programador de Subgraph deve considerar os Indexadores e o tempo que levam para sincronizar um novo subgraph, **assim como** outros desenvolvedores a usarem seus subgraphs. ### Como Atualizar os Metadados de um Subgraph Os metadados dos seus subgraphs podem ser atualizados sem precisar publicar uma versão nova. Os metadados incluem o nome do subgraph, a imagem, a descrição, o URL do site, o URL do código fonte, e categorias. Os programadores podem fazê-lo a atualizar os detalhes dos seus subgraphs no Subgraph Studio, onde todos os campos aplicáveis podem ser editados. -Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +Marque a opção **Update Subgraph Details in Explorer** (Atualizar Detalhes do Subgraph no Explorador) e clique em **Save** (Salvar). Se marcada, será gerada uma transação on-chain que atualiza detalhes do subgraph no Explorer, sem precisar publicar uma nova versão com um novo lançamento. ## As Melhores Práticas para Lançar um Subgraph à Graph Network 1. Use um nome ENS para Desenvolvimento de Subgraph: -Set up your ENS [here](https://app.ens.domains/) -Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). +- Prepare o seu ENS [aqui](https://app.ens.domains/) +- Adicione o seu nome ENS às suas configurações [aqui](https://thegraph.com/explorer/settings?view=display-name). 2. Quanto mais preenchidos os seus perfis, maiores serão as oportunidades de indexar e curar os seus subgraphs. ## Como Depreciar um Subgraph na The Graph Network -Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. +Siga os passos [aqui](/managing/deprecating-a-subgraph) para depreciar o seu subgraph e retirá-lo da The Graph Network. ## Queries em um Subgraph + Cobrança na The Graph Network -The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. +O Serviço Hospedado foi preparado para que os programadores lancem os seus subgraphs sem qualquer restrição. -In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). +Para que a The Graph Network seja descentralizada, é necessário pagar taxas de query como uma parte essencial dos incentivos do protocolo. Para saber mais sobre subscrições em APIs e pagamentos de taxas de query, confira a documentação de cobrança [aqui](/billing/).
### Estimação de Taxas de Query na Rede Atualmente isto não está disponível na interface do produto, mas seu orçamento máximo por query pode ser determinado ao dividir a quantidade que deseja pagar por mês pelo volume esperado de consultas. -While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. +Enquanto podes decidir o teu orçamento de consultas, não há garantia de que um Indexador estará disposto a servir queries a esse preço. Se um Gateway puder conectar-te com um Indexador disposto a servir uma query pelo preço que desejas pagar, ou menos, pagarás a diferença (delta) entre o teu orçamento **e** o preço dele. Portanto, um preço menor de query reduz o pool de Indexadores disponíveis para ti, o que pode afetar a qualidade do serviço que recebes. É bom ter taxas altas de query, pois isto pode atrair curação e Indexadores de renome ao teu subgraph. Lembre-se que este é um mercado dinâmico e emergente, mas como interages com ele só depende de ti. Não há especificações de preço máximo ou mínimo, no protocolo ou nos Gateways. Por exemplo, podes olhar o preço pago por alguns dos dapps na rede (numa base semanal) abaixo. Veja a última coluna, que mostra taxas de query em GRT. @@ -215,11 +215,11 @@ Lembre-se que este é um mercado dinâmico e emergente, mas como interages com e ## Outros Recursos -If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: +Se ainda tem dúvidas, não tem problema! Confira os seguintes recursos ou assista, abaixo, ao nosso guia em vídeo sobre como atualizar e migrar subgraphs à rede descentralizada: -- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) -- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around - - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [Subgraph Studio documentation](/deploying/subgraph-studio) +- [Contratos da Graph Network](https://github.com/graphprotocol/contracts) +- [Contrato de Curadoria](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - o contrato subjacente que o GNS envolve + - Endereço - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` +- [Documentação do Subgraph Studio](/deploying/subgraph-studio) diff --git a/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx index e2b658217785..531ab452adeb 100644 --- a/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/pt/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Como lançar um Subgraph no Subgraph Studio --- -> Garanta que a rede de onde subgraph está a indexar dados é [apoiada](/developing/supported-chains) na rede descentralizada.
+> Aprenda como editar subgraphs sem rate limit ao Subgraph Studio [aqui](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). Estes são os passos para lançar o seu subgraph no Subgraph Studio: diff --git a/website/pages/pt/deploying/hosted-service.mdx b/website/pages/pt/deploying/hosted-service.mdx index 2f23c050fb20..5d60dfbddf26 100644 --- a/website/pages/pt/deploying/hosted-service.mdx +++ b/website/pages/pt/deploying/hosted-service.mdx @@ -2,11 +2,11 @@ title: O que é o Serviço Hospedado? --- -> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> Favor lembrar que o Serviço Hospedado começou a ser desativado no primeiro trimestre de 2023, mas permanecerá disponível para redes não apoiadas na rede descentralizada. Recomendamos que os programadores [migrem seus subgraphs](/cookbook/upgrading-a-subgraph) à medida que mais redes recebem apoio. Cada rede gradualmente terá seus equivalentes ao serviço hospedado desativados, para garantir que os programadores tenham tempo para migrar subgraphs à rede descentralizada. Leia mais sobre a desativação do Serviço Hospedado [aqui](https://thegraph.com/blog/sunsetting-hosted-service). -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). +Esta seção guiará-lhe pelo processo de lançar um subgraph ao [Serviço Hospedado](https://thegraph.com/hosted-service/). -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +Se não tem uma conta no Serviço Hospedado, faça um cadastro com a sua conta do GitHub. Após se autenticar, pode começar a criar subgraphs através da UI e lançá-los do seu terminal. O Serviço Hospedado apoia uma boa quantidade de redes, como Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum e mais. Para uma lista compreensiva, veja [Redes Apoiadas](/developing/supported-networks/#hosted-service). @@ -16,7 +16,7 @@ Primeiro, siga as instruções [aqui](/developing/defining-a-subgraph) para inst ### De um Contrato Existente -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. +Se já tem um contrato inteligente lançado na sua rede de escolha, iniciar um novo subgraph a partir deste contrato já é um bom começo para usar o serviço hospedado. Podes usar este comando para criar um subgraph que indexa todos os eventos de um contrato existente. Isto tentará retirar o ABI do contrato do [Etherscan](https://etherscan.io/). 
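A forma geral desse comando costuma ser algo como o esboço abaixo (os espaços reservados em `<...>` e o diretório opcional são apenas ilustrativos):

```sh
# Inicia um subgraph a partir de um contrato existente; o ABI é obtido do Etherscan quando disponível
graph init \
  --product hosted-service \
  --from-contract <ENDEREÇO_DO_CONTRATO> \
  <USUÁRIO_GITHUB>/<NOME_DO_SUBGRAPH> [<DIRETÓRIO>]
```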
@@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / O subgraph de exemplo é baseado no contrato Gravity por Dani Grant, que gesta avatares de usuários e emite eventos `NewGravatar` ou `UpdateGravatar` sempre que são criados ou atualizados avatares. O subgraph lida com estes eventos ao escrever entidades `Gravatar` ao armazenamento do Graph Node e garantir que estes são atualizados de acordo com os eventos. Continue até o [manifest do subgraph](/developing/creating-a-subgraph#the-subgraph-manifest) para entender melhor ao que você deve prestar atenção, como eventos dos seus contratos inteligentes, mapeamentos, e mais. -## Supported Networks on the hosted service +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + +## Redes apoiadas no Serviço Hospedado Confira a lista de redes apoiadas [aqui](/developing/supported-networks). diff --git a/website/pages/pt/deploying/subgraph-studio.mdx b/website/pages/pt/deploying/subgraph-studio.mdx index 496d7bf979be..f4c4d1c699d4 100644 --- a/website/pages/pt/deploying/subgraph-studio.mdx +++ b/website/pages/pt/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Consultar subgraphs gera taxas de consulta, usadas para recompensar [Indexers](/ 1. Cadastre-se com a sua carteira do MetaMask ou WalletConnect 1. Quando entrar, verás a sua chave de lançamento (deploy key) única na página principal da sua conta. Isto permitirá-lhe editar os seus subgraphs ou gerir as suas chaves de API e cobranças. Terá uma chave de lançamento única que pode ser gerada novamente, caso suspeite que ela foi comprometida. -## Como Criar o Seu Subgraph no Subgraph Studio +## Como Criar um Subgraph no Subgraph Studio -A melhor parte! Ao começar a criar um subgraph, preencha: - -- O nome do seu subgraph -- Imagem -- Descrição -- Categorias (por ex., `DeFi`, `NFTs`, `Governança`) -- Website + ## Compatibilidade de Subgraph com a Graph Network diff --git a/website/pages/pt/developing/creating-a-subgraph.mdx b/website/pages/pt/developing/creating-a-subgraph.mdx index fca569cb29b7..87cac406608b 100644 --- a/website/pages/pt/developing/creating-a-subgraph.mdx +++ b/website/pages/pt/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: As entradas importantes para atualizar para o manifest são: -- `description`: uma descrição legível a humanos do que é o subgraph. Esta descrição é exibida pelo Graph Explorer quando o subgraph é lançado ao Serviço Hospedado. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: a URL do repositório onde está o manifest do subgraph. Isto também é exibido pelo Graph Explorer. 
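Como ilustração do campo `context` mostrado no manifest acima, segue um esboço hipotético de como esses valores poderiam ser lidos dentro de um mapeamento via `dataSource.context()` (o evento `NewGravatar`, o caminho de importação gerado e o nome do handler são apenas exemplos):

```typescript
import { dataSource, log } from '@graphprotocol/graph-ts'
// Tipo de evento gerado pelo `graph codegen` (caminho apenas ilustrativo)
import { NewGravatar } from '../generated/Gravity/Gravity'

export function handleNewGravatar(event: NewGravatar): void {
  // Lê os pares key-value declarados em `dataSources.context` no manifest
  let context = dataSource.context()
  let foo = context.getBoolean('foo') // true, conforme o exemplo acima
  let bar = context.getString('bar') // 'bar', conforme o exemplo acima

  if (foo) {
    log.info('Contexto da fonte de dados: bar = {}', [bar])
  }
}
```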
@@ -146,6 +154,10 @@ As entradas importantes para atualizar para o manifest são: - `dataSources.source.startBlock`: o número opcional do bloco de onde a fonte de dados começa a indexar. Em muitos casos, sugerimos usar o bloco em que o contrato foi criado. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: pares de key-value que podem ser usados dentro de mapeamentos de subgraph. Apoia vários tipos de dados como `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, e `BigInt`. Cada variável deve especificar o seu `type` e `data`. Estas variáveis de contexto são então acessíveis nos arquivos de mapeamento, a fim de oferecer opções mais configuráveis para o desenvolvimento de subgraphs. + - `dataSources.mapping.entities`: as entidades que a fonte de dados escreve ao armazenamento. O schema para cada entidade é definido no arquivo schema.graphql. - `dataSources.mapping.abis`: um ou mais arquivos ABI nomeados para o contrato-fonte, além de quaisquer outros contratos inteligentes com os quais interage de dentro dos mapeamentos. @@ -242,6 +254,7 @@ Nós apoiamos os seguintes escalares na nossa API do GraphQL: | `String` | Escalar para valores `string`. Caracteres nulos são removidos automaticamente. | | `Boolean` | Escalar para valores `boolean`. | | `Int` | A especificação do GraphQL define que o `Int` tem um tamanho de 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Números inteiros grandes. Usados para os tipos `uint32`, `int64`, `uint64`, ..., `uint256` do Ethereum. Nota: Tudo abaixo de `uint32`, como `int32`, `uint24` ou `int8` é representado como `i32`. | | `BigDecimal` | `BigDecimal` Decimais de alta precisão representados como um significando e um exponente. O alcance de exponentes é de -6143 até +6144. Arredondado para 34 dígitos significantes. | @@ -770,6 +783,8 @@ Além de se inscrever a eventos de contratos ou chamadas para funções, um subg ### Filtros Apoiados +#### Filtro Call + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Filtro Polling + +> **Requer `specVersion` >= 0.0.8** + +> **Nota:** Filtros de polling só estão disponíveis nas dataSources `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +O handler definido será chamado uma vez a cada `n` blocos, onde `n` é o valor providenciado no campo `every`. Esta configuração permite que o subgraph faça operações específicas em intervalos de blocos regulares. + +#### Filtro Once + +> **Requer `specVersion` >= 0.0.8** + +> **Nota:** Filtros de once só estão disponíveis nas dataSources `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +O handler definido com o filtro once só será chamado uma única vez antes da execução de todos os outros handlers (por isto, o nome "once" / "uma vez"). Esta configuração permite que o subgraph use o handler como um handler de inicialização, para realizar tarefas específicas no começo da indexação. 
+ +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Função de Mapeamento A função de mapeamento receberá um `ethereum.block` como o seu único argumento. Assim como funções de mapeamento para eventos, esta função pode acessar entidades existentes no armazenamento do subgraph, chamar contratos inteligentes e criar ou atualizar entidades. @@ -934,6 +988,8 @@ Caso o subgraph encontre um erro, esse query retornará tanto os dados quanto o ### Como Enxertar em Subgraphs Existentes +> **Nota:** não é recomendado usar enxertos na primeira atualização para a Graph Network. Saiba mais [aqui](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + Quando um subgraph é lançado pela primeira vez, ele começa a indexar eventos no bloco gênese da chain correspondente (ou no `startBlock` definido com cada fonte de dados). Às vezes, há vantagem em reutilizar os dados de um subgraph existente e começar a indexar em um bloco muito mais distante. Este modo de indexar é chamado de _Enxerto_. O enxerto, por exemplo, serve para passar rapidamente por erros simples nos mapeamentos durante a programação, ou consertar temporariamente um subgraph existente após ele ter falhado. Um subgraph é enxertado em um subgraph base quando um manifest de subgraph no `subgraph.yaml` contém um bloco `graft` no maior nível: @@ -963,7 +1019,7 @@ O subgraph enxertado pode usar um schema GraphQL que não é idêntico ao schema ## Fontes de Dados de Arquivos -Fontes de dados de arquivos são uma nova funcionalidade para acessar dados off-chain de uma maneira robusta e extensível durante a indexação, começando pelo IPFS. +Fontes de dados de arquivos são uma nova funcionalidade de subgraph para acessar dados off-chain de forma robusta e extensível. As fontes de dados de arquivos apoiam o retiro de arquivos do IPFS e do Arweave. > Isto também abre as portas para indexar dados off-chain de forma determinística, além de potencialmente introduzir dados arbitrários com fonte em HTTP. @@ -975,7 +1031,7 @@ Isto é parecido com os [modelos de fontes de dados existentes](https://thegraph > Isto substitui a API `ipfs.cat` existente -### Upgrade guide +### Guia de atualização #### Atualizar `graph-ts` e `graph-cli` @@ -1030,7 +1086,7 @@ Se o relacionamento for perfeitamente proporcional entre a entidade parente e a > Podes usar [filtros aninhados](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) para filtrar entidades parentes na base destas entidades aninhadas. -#### Adicionar uma nova fonte de dado modelada com `kind: file/ipfs` +#### Adicione um novo modelo de fonte de dados com `kind: file/ipfs` ou `kind: file/arweave` Esta é a fonte de dados que será gerada quando um arquivo de interesse for identificado. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { Agora pode criar fontes de dados de arquivos durante a execução de handlers baseados em chain: - Importe o modelo do `templates` autogerado -- chame o `TemplateName.create(cid: string)` de dentro de um mapeamento, onde o cid é um identificador de conteúdo IPFS válido +- chame o `TemplateName.create(cid: string)` de dentro de um mapeamento, onde o cid é um identificador de conteúdo válido para IPFS ou Arweave + +Para o IPFS, o Graph Node apoia [identificadores de conteúdo v0 e v1](https://docs.ipfs.tech/concepts/content-addressing/) e identificadores com diretórios (por ex. 
`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Atualmente, o Graph Node apoia [identificadores de conteúdo v0 e v1](https://docs.ipfs.tech/concepts/content-addressing/) e identificadores de conteúdo com diretórios (por ex. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +Para o Arweave, desde a versão 0.33.0, o Graph Node pode retirar arquivos baseado na sua [id de transação](https://docs.arweave.org/developers/server/http-api#transactions) de um gateway do Arweave ([arquivo de exemplo](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)) O Arweave apoia transações enviadas através do Bundlr, e o Graph Node também pode retirar arquivos baseados nos [manifests do Bundlr](https://docs.bundlr.network/learn/gateways#indexing). Exemplo: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -Isto criará uma fonte de dados de arquivos, que avaliará o ponto final IPFS configurado do Graph Node e tentará novamente caso não achá-lo. Quando o arquivo for localizado, o handler da fonte de dados de arquivos será executado. +Isto criará uma fonte de dados de arquivos, que avaliará o endpoint de IPFS ou Arweave configurado do Graph Node, e tentará novamente caso não achá-lo. Com o arquivo localizado, o handler da fonte de dados de arquivos será executado. Este exemplo usa a CID como a consulta entre a entidade parente `Token` e a entidade `TokenMetadata` resultante. diff --git a/website/pages/pt/developing/developer-faqs.mdx b/website/pages/pt/developing/developer-faqs.mdx index e0c3282000e3..4812d007f9ff 100644 --- a/website/pages/pt/developing/developer-faqs.mdx +++ b/website/pages/pt/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Atualmente, a abordagem recomendada para um dApp é adicionar a chave ao frontend e expô-la para utilizadores finais. Dito isto, pode limitar aquela chave a um hostname, como _seudapp.io_ e um subgraph. A gateway está atualmente a ser executada pelo Edge & Node. Parte da responsabilidade de uma gateway é monitorar comportamentos abusivos e bloquear tráfego de clientes maliciosos. -## 25. Onde posso achar meu subgraph atual no Serviço Hospedado? +## 25. Where do I go to find my current subgraph on the hosted service? -Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). +Vá para o Serviço Hospedado para achar subgraphs lançados por você ou outros ao Serviço Hospedado. Veja [aqui](https://thegraph.com/hosted-service). -## 26. O Serviço Hospedado começará a cobrar taxas de query? +## 26. Will the hosted service start charging query fees? -The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. +The Graph nunca cobrará pelo Serviço Hospedado. Este é um protocolo descentralizado, e cobrar por um serviço centralizado não condiz com os valores do Graph. O Serviço Hospedado sempre foi um degrau temporário para chegar à rede descentralizada; os programadores terão tempo suficiente para migrar à rede descentralizada quando estiverem preparados. 
-## 27. Quando o Serviço Hospedado será encerrado? +## 27. How do I update a subgraph on mainnet? -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? - -If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +Se for um programador de subgraph, podes atualizar para uma versão nova do seu subgraph no Studio com a CLI. Ela será privada por enquanto, mas se estiver contente, pode editá-la no Graph Explorer descentralizado. Isto criará uma nova versão do seu subgraph sobre a qual Curadores podem começar a sinalizar. diff --git a/website/pages/pt/developing/graph-ts/api.mdx b/website/pages/pt/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..afbbad0dc2cb --- /dev/null +++ b/website/pages/pt/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: API AssemblyScript +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +Esta página documenta quais APIs embutidas podem ser usadas ao escrever mapeamentos de subgraph. Há dois tipos de API disponíveis do início: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## Referência da API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Primitivos de baixo nível para traduzir entre sistemas de tipos diferentes, como Ethereum, JSON, GraphQL e AssemblyScript. + +### Versões + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Versão | Notas de atualização | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Tipos Embutidos + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Armazenamento da API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Como criar entidades + +Este é um padrão comum para a criação de entidades de eventos do Ethereum. + +```typescript +// Importar a classe de evento de transferência gerada da ABI ERC20 +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Importar o tipo de entidade de transferência gerado do schema do GraphQL +import { Transfer } from '../generated/schema' + +// Handler de evento de transferência +export function handleTransfer(event: TransferEvent): void { + // Criar uma entidade de Transferência, usando o hash da transação como a ID da entidade + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Determinar propriedades na entidade, usando os parâmetros do evento + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Salvar a entidade no armazenamento + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Cada entidade deve ter um ID única para evitar colisões com outras entidades. 
É bem comum que parâmetros de eventos incluam um identificador único a ser usado. Nota: usar o mesmo hash de transação como ID presume que nenhum outro evento na mesma transação criará entidades a usar este hash como o ID. + +#### Como carregar entidades a partir do armazenamento + +Se uma entidade já existe, ela pode ser carregada do armazenamento com os seguintes comandos: + +```typescript +let id = event.transaction.hash // ou como a ID for construída +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use a entidade Transfer como antes +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Como consultar entidades criadas dentro de um bloco + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +A API do armazenamento facilita o resgate de entidades que foram criadas ou atualizadas no bloco atual. Um caso comum: um handler cria uma Transação de algum evento on-chain, e um handler seguinte quer acessar esta transação caso ela exista. Se a transação não existe, o subgraph deve acessar o banco de dados para descobrir que a entidade não existe; se o autor do subgraph já souber que a entidade deve ter sido criada no mesmo bloco, o uso do loadInBlock evita esta volta pelo banco de dados. Para alguns subgraphs, estas consultas perdidas podem contribuir muito para o tempo de indexação. + +```typescript +let id = event.transaction.hash // ou como a ID for construída +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use a entidade Transfer como antes +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Como buscar entidades derivadas + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +Isto permite o carregamento de campos de entidade derivada a partir de um event handler. Por exemplo, considerando o schema a seguir: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Carrega as entidades de Token associadas com um titular dado +let tokens = holder.tokens.load() +``` + +#### Como atualizar entidades existentes + +Há duas maneiras de atualizar uma entidade existente: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Geralmente é simples mudar propriedades, graças aos setters de propriedade gerados: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +Também é possível cancelar propriedades com uma das seguintes instruções: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// Isto não funcionará +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// Isto funcionará +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Como remover entidades do armazenamento + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### API do Ethereum + +A API do Ethereum fornece acesso a contratos inteligentes, variáveis de estado público, funções de contrato, eventos, transações, blocos e a codificação/decodificação de dados no Ethereum. + +#### Apoio para Tipos no Ethereum + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +Veja um exemplo a seguir. Considerando um schema de subgraph como + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Eventos e Dados de Blocos/Transações + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Acesso ao Estado do Contrato Inteligente + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +É comum acessar o contrato de qual origina um evento. Isto é feito com o seguinte código: + +```typescript +// Importar a classe do contrato gerado e a classe do evento de transferência gerado +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Ligar o contrato ao endereço que emitiu o evento + let contract = ERC20Contract.bind(event.address) + + // Acessar variáveis e funções de estado fazendo chamadas + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Qualquer outro contrato que seja parte do subgraph pode ser importado do código gerado e ligado a um endereço válido. + +#### Como Lidar com Chamadas Revertidas + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note que um Graph Node conectado a um cliente Geth ou Infura pode não detectar todas as reversões; se depender disto, recomendamos usar um Graph Node conectado a um cliente Parity. 
+ +#### ABI de Codificação/Decodificação + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +Para mais informações: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### API de Logging + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Como logar um ou mais valores + +##### Como logar um único valor + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Mostra : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Como logar uma única entrada de um arranjo existente + +No exemplo abaixo, só é logado o primeiro valor do arranjo do argumento, apesar de haver três valores no arranjo. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Apesar de três valores serem passados ao `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Como logar várias entradas de um arranjo existente + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Mostra : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Como logar uma entrada específica de um arranjo existente + +Para mostrar um valor específico no arranjo, forneça o valor indexado. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Mostra : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Como logar informações de eventos + +O exemplo abaixo loga o número do bloco, hash do bloco e o hash da transação de um evento: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### API do IPFS + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Considerando um hash ou local IPFS, um arquivo do IPFS é lido da seguinte maneira: + +```typescript +// Coloque isto dentro de um handler de evento no mapeamento +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Locais como `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// que incluem arquivos em diretorias também são apoiados +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // Ver a documentação do JSONValue para detalhes sobre + // como lidar com valores JSON + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks também podem criar entidades + let newItem = new Item(id) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Coloque isto dentro de um handler de evento no mapeamento +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Como alternativa, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them.
The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### API de Criptografia + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### API JSON + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Referência de Conversões de Tipos + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() ou s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() ou s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| 
String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Metadados de Fontes de Dados + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entidade e DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext no Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +Este contexto, então, pode ser acessado nos seus arquivos de mapeamento de subgraph, o que resulta em subgraphs mais dinâmicos e configuráveis. diff --git a/website/pages/pt/developing/graph-ts/common-issues.mdx b/website/pages/pt/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..71b16d033656 --- /dev/null +++ b/website/pages/pt/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Problemas Comuns no AssemblyScript +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/pt/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/pt/developing/substreams-powered-subgraphs-faq.mdx index d812199defdf..c271abe4b6f8 100644 --- a/website/pages/pt/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/pt/developing/substreams-powered-subgraphs-faq.mdx @@ -4,9 +4,9 @@ title: Perguntas frequentes sobre subgraphs movidos por substreams ## O que são Substreams? -Desenvolvido pela [StreamingFast](https://www.streamingfast.io/), o Substreams é um motor de processamento de poder excepcional, capaz de consumir fluxos ricos de dados em blockchain. O Substreams lhe permite refinar e moldar dados em blockchain para serem digeridos rápida e suavemente por aplicativos de utilizador final. Mais especificamente, o Substreams é um motor paralelizado e agnóstico a blockchains, que põe transmissões em primeiro lugar e serve como uma camada de transformação de dados em blockchain. Movido pelo [Firehose](https://firehose.streamingfast.io/), ele permite que programadores escrevam módulos em Rust, construam sobre módulos da comunidade, provienciem indexação de altíssimo desempenho, e [mergulhem](https://substreams.streamingfast.io/developers-guide/sink-targets) os seus dados em qualquer lugar. +Desenvolvido pela [StreamingFast](https://www.streamingfast.io/), o Substreams é um motor de processamento de poder excepcional, capaz de consumir fluxos ricos de dados em blockchain. O Substreams lhe permite refinar e moldar dados em blockchain para serem digeridos rápida e suavemente por aplicativos de utilizador final. Mais especificamente, o Substreams é um motor paralelizado e agnóstico a blockchains, que põe transmissões em primeiro lugar e serve como uma camada de transformação de dados em blockchain. Movido pelo [Firehose](https://firehose.streamingfast.io/), ele permite que programadores escrevam módulos em Rust, construam sobre módulos da comunidade, providenciem indexações de altíssimo desempenho, e mandar seus dados para qualquer destino com [sink](https://substreams.streamingfast.io/developers-guide/sink-targets). -Vá à [Documentação de Substreams](/substreams) para aprender mais sobre Substreams. +Vá à [Documentação do Substreams](/substreams) para aprender mais sobre Substreams. ## O que são subgraphs movidos por substreams? @@ -22,7 +22,7 @@ Ao contrário, subgraphs movidos a substreams têm uma única fonte de dados que ## Quais os benefícios do uso de subgraphs movidos a Substreams? -Subgraphs movidos a Substreams combinam todos os benefícios do Substreams com o potencial de consulta de subgraphs. Eles também trazem mais composabilidade e indexação de alto desempenho ao The Graph. Eles também resultam em novos casos de uso de dados; por exemplo, após construir o seu Subgraph movido a Substreams, você pode reutilizar os seus [módulos de Substreams](https://substreams.streamingfast.io/developers-guide/modules) para usar [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) diferentes, como PostgreSQL, MongoDB e Kafka. +Subgraphs movidos a Substreams combinam todos os benefícios do Substreams com o potencial de query de subgraphs. Eles também trazem mais composabilidade e indexação de alto desempenho ao The Graph. 
Eles também resultam em novos casos de uso de dados; por exemplo, após construir o seu Subgraph movido a Substreams, você pode reutilizar os seus [módulos de Substreams](https://substreams.streamingfast.io/developers-guide/modules) para usar [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) diferentes, como PostgreSQL, MongoDB e Kafka. ## Quais os benefícios do Substreams? @@ -62,7 +62,7 @@ Há muitos benefícios do uso do Firehose, que incluem: ## Onde programadores podem acessar mais informações sobre Substreams e subgraphs movidos a Substreams? -A [documentação do Substreams](/substreams) te ensinará como construir módulos do Substreams. +A [documentação do Substreams](/substreams) lhe ensinará como construir módulos do Substreams. A [documentação de subgraphs movidos a Substreams](/cookbook/substreams-powered-subgraphs/) lhe ensinará como empacotá-los para a publicação no The Graph. diff --git a/website/pages/pt/developing/supported-networks.json b/website/pages/pt/developing/supported-networks.json index 5e12392b8c7d..cc6a172afb08 100644 --- a/website/pages/pt/developing/supported-networks.json +++ b/website/pages/pt/developing/supported-networks.json @@ -1,9 +1,9 @@ { - "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "network": "Rede", + "cliName": "Nome na CLI", + "chainId": "ID da Chain", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "Rede Descentralizada", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/pt/developing/supported-networks.mdx b/website/pages/pt/developing/supported-networks.mdx index 2121091ef401..d73a47daf437 100644 --- a/website/pages/pt/developing/supported-networks.mdx +++ b/website/pages/pt/developing/supported-networks.mdx @@ -7,11 +7,11 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' -O serviço hospedado depende da estabilidade e funcionamento das tecnologias subjacentes, principalmente os endpoints providenciados em JSON RPC. +The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan serão depreciados. Leia mais no [Blog da Ethereum Foundation](https://blog.ethereum.org/2022/06/21/testnet-deprecation). Desde 25 de fevereiro de 2023, Ropsten, Rinkeby e Kovan não são mais apoiados pelo Serviço Hospedado. O Goerli será mantido pelos programadores do cliente após o merge, e também é apoiado pelo Serviço Hospedado. Desenvolvedores que atualmente usam Ropsten, Rinkeby ou Kovan como o seu ambiente de encenação/testes são encorajados a migrarem ao Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. -Subgraphs que indexam a Gnosis Chain podem ser publicadas com a identidade de rede `gnosis`. O `xdai` ainda é apoiado para subgraphs existentes no serviço hospedado. +Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. 
`xdai` is still supported for existing hosted service subgraphs. Para uma lista completa de recursos apoiados na rede descentralizada, veja [esta página](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). @@ -19,6 +19,6 @@ Subgraphs movidas a Substreams que indexam a `mainnet` do Ethereum são apoiadas ## Graph Node -Se a sua rede preferida não for apoiada na rede descentralizada do The Graph, execute o seu próprio Graph Node para indexar qualquer rede compatível com EVM. Verifique se a [versão](https://github.com/graphprotocol/graph-node/releases) a ser usada apoia a rede, e que tens a configuração necessária. +Se a sua rede preferida não for apoiada na rede descentralizada do The Graph, execute o seu próprio [Graph Node](https://github.com/graphprotocol/graph-node) para indexar qualquer rede compatível com EVM. Verifique se a [versão](https://github.com/graphprotocol/graph-node/releases) em uso apoia a rede, e que tens a configuração necessária. O Graph Node também pode indexar outros protocolos, através de uma integração no Firehose. As integrações no Firehose foram criadas para redes baseadas em NEAR, Arweave e Cosmos. diff --git a/website/pages/pt/firehose.mdx b/website/pages/pt/firehose.mdx index 5e2b37ee4bb6..7d3d191b0fc2 100644 --- a/website/pages/pt/firehose.mdx +++ b/website/pages/pt/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Logo do Firehose](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +O Firehose é uma nova tecnologia desenvolvida pelo StreamingFast em colaboração com a Graph Foundation. O produto providencia **capacidades e velocidades inéditas para a indexação de dados em blockchain** com uma abordagem baseada em arquivos e focada no streaming em primeiro lugar. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +O Firehose extrai, transforma e salva dados de blockchain em uma estratégia baseada em arquivos e de alto desempenho. Os programadores de blockchain podem então acessar dados extraídos pelo Firehose através de streams de dados binários. A intenção do Firehose é substituir a camada original de extração de dados de blockchain do The Graph. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Documentação do Firehose + +A documentação do Firehose é atualmente mantida pela equipa do StreamingFast no [site oficial do StreamingFast](https://firehose.streamingfast.io/). + +### Como Começar + +- Leia esta [introdução ao Firehose](https://firehose.streamingfast.io/introduction/firehose-overview) para ter uma ideia de como ele é e saber por que ele foi construído. +- Aprenda sobre os [Pré-requisitos](https://firehose.streamingfast.io/introduction/prerequisites) para instalar e editar o Firehose. + +### Expanda o Seu Conhecimento + +- Aprenda sobre os [vários componentes](https://firehose.streamingfast.io/architecture/components) disponíveis do Firehose. 
diff --git a/website/pages/pt/glossary.mdx b/website/pages/pt/glossary.mdx index 47693b167d92..8930e0e88efc 100644 --- a/website/pages/pt/glossary.mdx +++ b/website/pages/pt/glossary.mdx @@ -12,7 +12,7 @@ title: Glossário - **Subgraph**: Uma API personalizada construída com dados de blockchain, que pode ser consultada usando o [GraphQL](https://graphql.org/). Os programadores podem construir, lançar e editar subgraphs na rede descentralizada do The Graph. Depois, os Indexadores podem começar a indexar subgraphs para abri-los a queries por consumidores de subgraphs. -- **Serviço Hospedado**: Um serviço de suporte temporário para construir e consultar subgraphs, enquanto a rede descentralizada do The Graph amadurece o seu custo e qualidade de serviço e experiência de programação. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexadores**: Participantes da rede que executam nodes de indexação para indexar dados de blockchains e servir consultas em GraphQL. @@ -24,6 +24,8 @@ title: Glossário - **Auto-Stake (Stake Próprio) do Indexador**: A quantia de GRT que os Indexadores usam para participar na rede descentralizada. A quantia mínima é 100.000 GRT, e não há limite máximo. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegantes**: Participantes na rede que são titulares de GRT, e delegam o seu GRT aos Indexadores. Isto permite aos Indexadores aumentar o seu stake nos subgraphs da rede. Em troca, os Delegantes recebem uma porção das Recompensas de Indexação que os Indexadores recebem por processar subgraphs. - **Taxa de Delegação**: Uma taxa de 0.5% paga pelos Delegantes ao delegar GRT aos Indexadores. O GRT usado para pagar a taxa é queimado. @@ -38,27 +40,21 @@ title: Glossário - **Manifest de Subgraph**: Um arquivo JSON que descreve o schema GraphQL, as fontes de dados, e outros metadados. [Veja um exemplo](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf). -- **Pool de Rebate**: Uma medida de segurança económica que guarda taxas de query pagas por consumidores de subgraphs, até que possam ser resgatas por Indexadores como rebates de taxas de query. - -- **Epoch**: Uma unidade de tempo na rede. Um epoch atualmente dura 6.646 blocos, ou cerca de um dia. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Alocação**: Um indexador pode alocar o seu stake total em GRT (incluindo o stake dos Delegantes) em subgraphs editados na rede descentralizada do The Graph. As alocações existem em uma de quatro fases. 1. **Ativa**: Uma alocação é considerada ativa quando é criada on-chain. Isto se chama abrir de uma alocação, e indica à rede que o Indexador está a indexar e servir consultas ativamente para um subgraph particular. Alocações ativas acumulam recompensas de indexação proporcionais ao sinal no subgraph, e à quantidade de GRT alocada. - 2. 
**Fechada**: Um Indexador pode resgatar as recompensas acumuladas em um certo subgraph ao enviar uma Prova de Indexação (POI) recente e válida. Isto se chama fechar uma alocação. Uma alocação deve ter ficado aberta por, no mínimo, um epoch antes que possa ser fechada. O período máximo de alocação é de 28 epochs; se um indexador deixar uma alocação aberta por mais que isso, ela se torna uma alocação obsoleta. Quando uma alocação está **Fechada**, um pescador ainda pode abrir uma disputa contra um Indexador por servir dados falsos. - - 3. **Finalizada**: O período de disputa está encerrado, e os rebates das taxas de query ainda estão abertos ao resgate por Indexadores. - - 4. **Resgatada**: A fase final de uma alocação; todas as recompensas válidas foram distribuídas, e os seus rebates de taxa de query foram reivindicados. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: um dApp poderoso para a construção, lançamento e edição de subgraphs. -- **Pescadores**: Participantes na rede que podem disputar as respostas de query e POIs de Indexadores. Uma disputa resolvida a favor do Pescador acaba em uma penalidade financeira para o Indexador e um prémio ao Pescador, a fim de incentivar a integridade da indexação e do trabalho com queries realizado pelos Indexadores na rede. A penalidade (slashing) atualmente é de 2.5% do auto-stake de um Indexador, com 50% do GRT cortado destinado ao Pescador, e os outros 50% queimados. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Árbitros**: Participantes da rede definidos por governança. O papel do Árbitro é decidir o resultado de disputas de indexação e consultas, e a sua meta é maximizar a utilidade e confiança da Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Corte**: Os Indexadores podem tomar cortes em seu GRT por fornecer uma prova de indexação (POI) incorreta ou servir dados imprecisos. A porcentagem de corte é um parâmetro do protocolo, atualmente configurado em 2.5% do auto-stake de um Indexador. 50% do GRT cortado vai ao Pescador que disputou os dados ou POI incorretos. Os outros 50% são queimados. 
+- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Recompensas de Indexação**: As recompensas que os Indexadores recebem por indexar subgraphs, distribuídas em GRT. @@ -66,7 +62,7 @@ title: Glossário - **GRT**: O token de utilidade do The Graph, que oferece incentivos económicos a participantes da rede por contribuir. -- **POI ou Prova de Indexação**: Quando um Indexador fecha a sua alocação e quer reivindicar as suas recompensas de indexador acumuladas em um certo subgraph, ele deve providenciar uma Prova de Indexação (POI) válida e recente. Os Pescadores podem disputar a POI providenciada por um Indexador; disputas resolvidas a favor do Pescador causam um corte para o Indexador. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: O componente que indexa subgraphs e disponibiliza os dados resultantes abertos a queries através de uma API GraphQL. Assim ele é essencial ao stack de indexadores, e operações corretas de um Graph Node são cruciais para executar um indexador com êxito. @@ -80,10 +76,10 @@ title: Glossário - **Período de Tempo de Recarga**: O tempo restante até que um Indexador que mudou os seus parâmetros de delegação possa fazê-lo novamente. -- **Ferramentas de Transferência para L2**: Contratos inteligentes e interfaces que permitem que os participantes na rede transfiram da mainnet da Ethereum ao Arbitrum One. Os participantes podem transferir GRT delegado, subgraphs, ações de curadoria e o autostake do Indexador. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. -- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. +- **_Atualização_ de um subgraph à Graph Network**: O processo de migrar um subgraph do serviço hospedado à Graph Network. -- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **_Atualização_ de um subgraph**: O processo de lançar uma nova versão de subgraph com atualizações ao manifest, schema e mapeamentos do subgraph. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). 
diff --git a/website/pages/pt/graphcast.mdx b/website/pages/pt/graphcast.mdx index 31d297a5a58f..01efd5d935e5 100644 --- a/website/pages/pt/graphcast.mdx +++ b/website/pages/pt/graphcast.mdx @@ -10,7 +10,7 @@ Atualmente, o custo de transmitir informações para outros participantes de red O SDK (Kit de Programação de Software) do Graphcast permite aos programadores construir Rádios, que são aplicativos movidos a mexericos, que os Indexers podem executar por um certo propósito. Nós também pretendemos criar alguns Rádios (ou oferecer apoio para outros programadores/outras equipas que desejam construir Rádios) para os seguintes casos de uso: -- Verificação em tempo real de integridade de dados de subgraph ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Verificação em tempo real de integridade dos dados de um subgraph ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Condução de leilões e coordenação para a sincronização de subgraphs, substreams e dados do Firehose de outros Indexers. - Autorrelatos em analíticas ativas de queries, inclusive volumes de pedidos de subgraphs, volumes de taxas, etc. - Autorrelatos em analíticas de indexação, como tempo de indexação de subgraphs, custos de gas de handlers, erros encontrados, etc. diff --git a/website/pages/pt/index.json b/website/pages/pt/index.json index 5ff7bc3ae928..23fd790fa2be 100644 --- a/website/pages/pt/index.json +++ b/website/pages/pt/index.json @@ -23,8 +23,8 @@ "description": "Use o Studio para criar subgraphs" }, "migrateFromHostedService": { - "title": "Migre do Serviço Hospedado", - "description": "Como migrar Subgraphs à Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Serviço Hospedado", - "description": "Crie e explore subgraphs no Serviço Hospedado" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Redes Apoiadas", - "description": "O The Graph apoia as seguintes redes na Graph Network e no Serviço Hospedado.", - "graphNetworkAndHostedService": "The Graph Network & Serviço Hospedado", - "hostedService": "Serviço Hospedado", - "betaWarning": "Em beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/pt/mips-faqs.mdx b/website/pages/pt/mips-faqs.mdx index 943e48f9bfa4..1408b61422ac 100644 --- a/website/pages/pt/mips-faqs.mdx +++ b/website/pages/pt/mips-faqs.mdx @@ -4,13 +4,15 @@ title: Perguntas frequentes sobre Provedores de Infraestrutura de Migração (MI ## Introdução -É uma grande época para participar do ecossistema do The Graph! Durante o [Graph Day 2022](https://thegraph.com/graph-day/2022/), Yaniv Tal anunciou a [aposentadoria do serviço hospedado](https://thegraph.com/blog/sunsetting-hosted-service/), um momento para o qual o ecossistema do The Graph trabalhou por muitos anos. +> Nota: O programa de MIPs fechou em maio de 2023. Agradecemos a todos os Indexadores que participaram! -Para apoiar a aposentadoria do serviço hospedado e a migração de toda a sua atividade à rede descentralizada, a Graph Foundation anunciou o [programa de Provedores de Infraestrutura de Migração (MIPs)](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
+É uma boa época para participar do ecossistema do The Graph! Durante o [Graph Day 2022](https://thegraph.com/graph-day/2022/), Yaniv Tal anunciou a [aposentadoria do serviço hospedado](https://thegraph.com/blog/sunsetting-hosted-service/), um momento para o qual o ecossistema do The Graph se preparou por muitos anos. -O programa de MIPs é um programa de incentivos para Indexadores, para apoiá-los com recursos para indexar chains além da mainnet Ethereum, e ajudar o protocolo The Graph a expandir a rede descentralizada numa camada de infraestrutura multi-chain. +Para apoiar o desligamento do serviço hospedado e a migração de toda a sua atividade à rede descentralizada, a Graph Foundation anunciou o [programa de Provedores de Infraestrutura de Migração (MIPs)](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. +O programa de MIPs é um programa de incentivos para Indexadores, para apoiá-los com recursos para indexar chains além da mainnet Ethereum e ajudar o protocolo The Graph a expandir a rede descentralizada numa camada de infraestrutura multi-chain. + +O programa de MIPs alocou 0,75% da reserva de GRT (75 milhões de GRT), com 0.5% reservados para recompensar Indexadores que contribuam à inicialização da rede e 0.25% alocados a bolsas de rede para programadores de subgraph a usar subgraphs multi-chain. ### Recursos Úteis @@ -24,33 +26,33 @@ The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to r Sim, é possível. -Para contexto, a carta de arbitragem — [aprenda mais sobre ela aqui](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) — especifica a metodologia para gerar uma POI para um subgraph falho. +Para mais contexto, a carta de arbitragem — [mais sobre ela aqui](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) — especifica a metodologia de gerar uma POI para um subgraph falho. Um membro da comunidade, [SunTzu](https://github.com/suntzu93), criou um script para automatizar este processo conforme a metodologia da carta de arbitragem. Confira o repo [aqui](https://github.com/suntzu93/get_valid_poi_subgraph). ### 2. Qual chain será incentivada primeiro pelo programa de MIPs? -A primeira chain a ser apoiada na rede descentralizada é a Gnosis Chain! Antigamente conhecida como xDAI, a Gnosis Chain é uma chain baseada em EVM. Esta foi selecionada como a primeira por sua facilidade em executar nodes, prontidão para Indexadores, alinhamento com o The Graph e adoção dentro da web3. +A primeira chain a ser apoiada na rede descentralizada é a Gnosis Chain! Antigamente conhecida como xDAI, a Gnosis Chain é baseada em EVM. Ela é a primeira por conta da sua facilidade em executar nodes; sua prontidão para Indexadores; alinhamento com o The Graph; e a sua adoção dentro da web3. -### 3. Como novas chains serão adicionadas ao programa de MIPs? +### 3. Como serão adicionadas novas chains ao programa de MIPs? -Novas chains serão anunciadas durante o programa de MIPs, com base na prontidão aos Indexadores, na demanda, e no sentimento da comunidade. Chains serão apoiadas primeiro na testnet, e depois, uma GIP será passada para apoiar aquela chain na mainnet. 
Indexadores que participam no programa de MIPs escolherão quais chains eles têm interesse em apoiar e ganharão recompensas por chain, além de ganhar taxas de consulta e recompensas de indexação na rede por servir subgraphs. Participantes no MIPs serão pontuados com base em seu desempenho, habilidade de servir às necessidades da rede, e apoio da comunidade. +Novas chains serão anunciadas durante o programa de MIPs — com base na prontidão aos Indexadores, na demanda, e no sentimento da comunidade. Chains serão apoiadas primeiro na testnet, e depois, uma GIP será passada para apoiar aquela chain na mainnet. Os Indexadores participantes escolherão quais chains eles têm interesse em apoiar e serão recompensados por chain; também ganharão taxas de query e recompensas de indexação na rede por servir subgraphs. Participantes no MIPs serão pontuados com base em seu desempenho, habilidade de servir às necessidades da rede, e apoio da comunidade. ### 4. Como saberemos quando a rede estará pronta para uma nova chain? -A Graph Foundation monitorizará as métricas de desempenho de qualidade de serviço, desempenho da rede e canais comunitários para melhor avaliar a prontidão. A prioridade é garantir que a rede atenda às necessidades da comunidade para que estes dApps multi-chain possam migrar os seus subgraphs. +A Graph Foundation monitorizará as métricas de desempenho de qualidade de serviço, o desempenho da rede, e os canais comunitários para melhor avaliar a prontidão. A prioridade é garantir que a rede atenda às necessidades da comunidade para que estes dApps multi-chain possam migrar os seus subgraphs. ### 5. Como as recompensas são divididas por chain? -Ao ver que chains variam em seus requisitos para a sincronização de nodes, e que diferem em volume de queries e adoção, as recompensas por chain serão decidades no fim do ciclo dessa chain para garantir o registo de todo o feedback e aprendizado. Porém, a toda hora, os Indexadores também poderão ganhar taxas de consulta e recompensas de indexação quando a chain for apoiada na rede. +Sabendo que chains variam em seus requisitos para a sincronização de nodes, e que diferem em volume de queries e adoção, as recompensas por chain serão decididas no fim do ciclo dessa chain para garantir o registo de todo o feedback e aprendizado. Porém, a toda hora, os Indexadores também poderão ganhar taxas de query e recompensas de indexação quando a chain for apoiada na rede. ### 6. Nós precisamos indexar todas as redes no programa de MIPs ou podemos só escolher uma chain e indexar esta? -Podes indexar quais chains quiser! A meta do programa de MIPs é equipar Indexadores com as ferramentas e conhecimento para indexar as chains que desejam e apoiar os ecossistemas web3 nos quais têm interesse. Porém, para cada chain, há fases da testnet à mainnet. Complete todas as fases para as chains que indexa. Aprenda mais sobre as fases na [página do Notion dos MIPs](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059). +Podes indexar quais chains quiser! A meta do programa de MIPs é equipar os Indexadores com as ferramentas e conhecimento para indexar as chains que desejam e apoiar os ecossistemas web3 nos quais têm interesse. Porém, para cada chain, há fases da testnet à mainnet. Complete todas as fases para as chains que indexa. Aprenda mais sobre as fases na [página do Notion dos MIPs](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059). ### 7. Quando as recompensas serão distribuídas? 
-As recompensas para MIPs serão distribuídas por chain quando as métricas de desempenho forem alcançadas os e subgraphs migrados forem apoiados por aqueles Indexadores. Preste atenção para ver informações sobre as recompensas totais por chain na metade do ciclo daquela chain. +As recompensas para MIPs serão distribuídas por chain quando as métricas de desempenho forem alcançadas e os subgraphs migrados forem apoiados por esses Indexadores. Fique atento a informações sobre as recompensas totais por chain na metade do ciclo daquela chain. ### 8. Como a pontuação funciona? @@ -66,7 +68,7 @@ Os indexadores competirão por recompensas baseadas na sua classificação de po - O Indexador serve uma boa Qualidade de Serviço à chain (latência, dados novos, uptime, etc.)? -- O Indexador que apoia programadores de dApp e é reativo às suas necessidades? +- O Indexador que apoia programadores de dApp é reativo às necessidades deles? O Indexador aloca com eficiência, para contribuir à saúde geral da rede? @@ -74,35 +76,35 @@ O Indexador aloca com eficiência, para contribuir à saúde geral da rede? - O Indexador colabora com os seus colegas para ajudá-los a se preparar para a multi-chain? -- O Indexador fornece parecer para programadores importantes no programa, ou compartilha informações com Indexadores no Fórum? +- O Indexador fornece opiniões para programadores importantes no programa, ou compartilha informações com Indexadores no Fórum? ### 9. Como o papel no Discord será atribuído? Os moderadores darão os papeis nos dias a seguir. -### 10. É bom começar o programa em uma testnet e depois trocar para a Mainnet? Poderão identificar o meu node e levá-lo em conta enquanto distribuem recompensas? +### 10. Vale começar o programa em uma testnet e depois trocar para a Mainnet? Poderão identificar o meu node e levá-lo em conta enquanto distribuem recompensas? -Sim, esperamos que o faça. Várias fases são no Görli, e uma é na mainnet. +Sim, isto é esperado. Várias fases são no Görli, e uma é na mainnet. -### 11. Em qual ponto esperais que os participantes adicionem um lançamento na mainnet? +### 11. Em qual ponto espera-se que os participantes adicionem um lançamento na mainnet? -Será obrigatório ter um indexador da mainnet durante a terceira fase. Mais informações sobre isto [nesta página do Notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +Será obrigatório ter um indexador da mainnet durante a terceira fase. Mais informações [nesta página do Notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) ### 12. As recompensas serão sujeitas ao vesting? -A porcentagem a ser distribuída no fim do programa será sujeita ao vesting. Compartilharemos mais sobre isto no Acordo do Indexador. +A porção a ser distribuída no fim do programa será sujeita ao vesting. Falaremos mais sobre isto no Acordo do Indexador. ### 13. Em equipas com mais de um membro, todos os membros da equipa receberão um papel de MIPs no Discord? -Sim. +Sim ### 14. Dá para usar os tokens trancados no programa de curadoria do The Graph para participar na testnet de MIPs? -Sim. +Sim ### 15. Durante o programa de MIPs, haverá um período para disputar POIs inválidas? -Isto ainda será decidido. Volte a esta página periodicamente para mais detalhes, ou se o seu pedido for urgente, mande uma mensagem para info@thegraph.foundation +Isto ainda será decidido. 
Fique atento a esta página para mais detalhes; se o seu pedido for urgente, mande uma mensagem para info@thegraph.foundation ### 17. Podemos combinar dois contratos de vesting? @@ -114,7 +116,7 @@ Mande uma mensagem para info@thegraph.foundation ### 19. Ainda não estou pronto para indexar a Gnosis Chain, posso começar a indexar de outra chain quando estiver pronto? -Sim. +Sim ### 20. Há regiões recomendadas para executar os servidores? @@ -122,4 +124,4 @@ Não damos recomendações sobre regiões. Ao escolher locais, pense sobre onde ### 21. O que é "custo de gas de handler"? -É a medida determinística do custo de executar um handler. Ao contrário do que o nome pode sugerir, não tem nada a ver com o custo de gas em blockchains. +É a medida determinística do custo de executar um handler. Ao contrário do que diz o nome, isto não tem a ver com o custo de gas em blockchains. diff --git a/website/pages/pt/network/benefits.mdx b/website/pages/pt/network/benefits.mdx index cf34574399c0..ce56b07f42a5 100644 --- a/website/pages/pt/network/benefits.mdx +++ b/website/pages/pt/network/benefits.mdx @@ -14,7 +14,7 @@ Aqui está uma análise: - Custo mensal 60-98% menor - Zero custos de preparação de infraestrutura - Mais tempo de disponibilidade -- Acesso a 438 Indexadores (e ainda a contar) +- Acesso a centenas de Indexadores independentes ao redor do mundo - Suporte técnico 24 horas pela comunidade global ## Os Benefícios @@ -79,9 +79,9 @@ Custos estimados apenas para subgraphs na Mainnet Ethereum—os custos aumentam Curar um sinal em um subgraph é um custo opcional, único, e zero-líquido (por ex., $1 mil em um subgraph pode ser curado em um subgraph, e depois retirado — com potencial para ganhar retornos no processo). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Talvez alguns utilizadores precisem atualizar a versão do seu subgraph. Devido aos custos de gas do Ethereum, uma atualização custa cerca de $50 dólares no momento desta escrita. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +Note que taxas de gas no [Arbitrum](/arbitrum/arbitrum-faq) são muito menores que as da mainnet do Ethereum. ## Zero Custos de Preparação & Eficiência Operacional Maior @@ -89,8 +89,8 @@ Zero taxas de setup. Comece imediatamente, sem custos de setup ou gerais. Nenhum ## Confiabilidade & Resiliência -A rede descentralizada do The Graph dá aos utilizadores acesso à redundância geográfica, que não existe ao auto-hospedar um `graph-node`. Os queries são servidos de maneira confiável graças ao uptime de mais de 99.9%, alcançado por 168 Indexadores (e ainda a contar) que protegem a rede globalmente. +A rede descentralizada do The Graph permite que os utilizadores acessem redundâncias geográficas que não existem ao auto-hospedar um `graph-node`. Os queries são servidos de maneira confiável graças ao uptime de mais de 99.9%, alcançado por centenas de Indexadores independentes que protegem a rede globalmente. Enfim: A Graph Network é mais barata e fácil de usar, e produz resultados superiores comparados à execução local de um `graph-node`. -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). +Comece a usar a Graph Network hoje, e aprenda como [atualizar o seu subgraph para a rede descentralizada do The Graph](/cookbook/upgrading-a-subgraph). 
diff --git a/website/pages/pt/network/curating.mdx b/website/pages/pt/network/curating.mdx index 8efa6fef9449..a9730f0ee6ab 100644 --- a/website/pages/pt/network/curating.mdx +++ b/website/pages/pt/network/curating.mdx @@ -4,7 +4,7 @@ title: Curadorias Os curadores são essenciais para a economia descentralizada do Graph. Eles usam o seu conhecimento do ecossistema web3 para avaliar e sinalizar nos subgraphs que devem ser indexados pela Graph Network. Através do Explorer, curadores podem ver dados da rede para deliberar sobre a sua sinalização. A Graph Network recompensa curadores que sinalizam em subgraphs de boa qualidade com uma porção das taxas de query geradas pelos subgraphs. Os curadores são incentivados economicamente a sinalizar cedo. Estes sinais dos curadores são importantes para Indexadores, que podem então processar ou indexar os dados destes subgraphs sinalizados. -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +Durante a sinalização, os curadores podem decidir sinalizar em uma versão específica do subgraph, ou através da auto-migração. Com a auto-migração, as ações de um Curador sempre serão migradas à versão mais recente publicada pelo programador. Caso decida sinalizar em uma versão específica, as ações sempre permanecerão nesta mesma versão. Lembre-se que curadoria é um ato de risco. Tenha cuidado, e garanta que curará subgraphs de confiança. Criar um subgraph é um processo livre de permissões, para que o povo possa criar subgraphs e chamá-los do nome que quiser. Para mais conselhos sobre riscos de curadoria, confira o [Guia de Curadoria da Graph Academy.](https://thegraph.academy/curators/) @@ -60,7 +60,7 @@ Os indexadores podem achar subgraphs para indexar com base em sinais de curadori ## Riscos 1. O mercado de consulta é jovem por natureza no The Graph, e há sempre o risco do seu rendimento anual ser menor que o esperado devido às dinâmicas nascentes do mercado. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. +2. Taxa de Curadoria — quando um Curador sinaliza GRT em um subgraph, ele incorre uma taxa de 1% de curadoria. Esta taxa é queimada, e o resto é depositado na reserva da bonding curve. 3. Quando os curadores queimam as suas ações para sacar GRT, o valor das ações restantes em GRT é diminuído. Saiba que, em alguns casos, os curadores podem decidir queimar todas as suas ações **de uma vez só**. Isto pode ser comum se um programador de dApp parar de versionar/melhorar e consultar seu subgraph, ou se um subgraph falhar. Portanto, os curadores restantes podem não poder sacar mais do que uma fração do seu GRT inicial. Para um papel de rede com um perfil de risco menor, veja [Delegados](/network/delegating). 4. Um subgraph pode falhar devido a um erro de código. Um subgraph falho não acumula taxas de consulta. Portanto, espere até o programador consertar o erro e lançar uma nova versão. - Caso se inscreva à versão mais recente de um subgraph, suas ações migrarão automaticamente a esta versão nova. Isto incorrerá uma taxa de curadoria de 0.5%. 
@@ -79,13 +79,13 @@ Achar subgraphs de alta qualidade é uma tarefa complexa, mas ela pode ser abord - Os curadores podem usar o seu conhecimento de uma rede para tentar prever como um subgraph individual pode gerar um volume maior ou menor de queries no futuro - Os curadores também devem entender as métricas disponíveis através do Graph Explorer. Métricas como volume passado de consultas e quem é o programador do subgraph podem ajudar a determinar se um subgraph vale ou não o sinal. -### 3. What’s the cost of updating a subgraph? +### 3. Qual o custo de atualizar um subgraph? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +A migração das suas ações de curadoria para uma nova versão de subgraph incorre uma taxa de curadoria de 1%. Os Curadores podem escolher se inscrever na versão mais nova de um subgraph. Quando ações de curadoria são auto-migradas a uma nova versão, os Curadores também pagarão metade da taxa de curadoria (por ex., 0.5%), porque a atualização de subgraphs é uma ação on-chain que custa gas. -### 4. How often can I update my subgraph? +### 4. Com que frequência posso atualizar o meu subgraph? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Não atualize os seus subgraphs com frequência excessiva. Veja a questão acima para mais detalhes. ### 5. Posso vender as minhas ações de curadoria? diff --git a/website/pages/pt/network/developing.mdx b/website/pages/pt/network/developing.mdx index a20e52ee205b..bdfcb6c801d5 100644 --- a/website/pages/pt/network/developing.mdx +++ b/website/pages/pt/network/developing.mdx @@ -34,11 +34,11 @@ Para poder fazer queries, os programadores devem gerar uma chave de API no Subgr Os programadores também podem expressar uma preferência de Indexador ao gateway, por exemplo, ao preferir Indexadores com resposta de query mais rápida, ou cujos dados são mais atualizados. Estes controlos são programados no Subgraph Studio. -### Updating Subgraphs +### Atualização de Subgraphs Após um tempo, um programador de subgraphs pode querer atualizar o seu subgraph, talvez para consertar um erro ou adicionar funcionalidades. O programador pode lançar novas versões do seu subgraph ao Subgraph Studio, para fins de programação e testes com rate limit. -Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +Quando o Programador de Subgraph estiver pronto, ele pode iniciar uma transação para apontar seu subgraph à nova versão. Atualizar o subgraph migra qualquer sinal à versão nova (presumindo que o utilizador que aplicou o sinal selecionou "migrar automaticamente"), o que também incorre uma taxa de migração. Este sinal de migração deve incentivar os Indexadores a começar a indexar a nova versão do subgraph, para que ele logo fique aberto a queries. 
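As an illustration of that release flow, here is a minimal, hypothetical sketch of pushing a new subgraph version to Subgraph Studio with `graph-cli`; the deploy key, the `my-subgraph` slug and the version label are placeholders rather than values taken from this repository.

```bash
# Hypothetical example: deploying a new subgraph version to Subgraph Studio.
# <DEPLOY_KEY>, my-subgraph and v0.0.2 are placeholders.
graph auth --studio <DEPLOY_KEY>

# Rebuild after editing the manifest, schema or mappings
graph codegen && graph build

# Push the new version to Studio for rate-limited testing; publishing it to the
# network (and migrating any curation signal) is a separate, on-chain step
graph deploy --studio my-subgraph --version-label v0.0.2
```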
### Depreciação de Subgraphs @@ -50,4 +50,4 @@ Alguns programadores engajarão com o ciclo de vida completo do subgraph na rede ### Programadores e Economia da Rede -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. +Programadores são agentes económicos importantes na rede, que trancam GRT para incentivar a indexação e fazem queries cruciais em subgraphs — a principal troca de valor da rede. Os programadores de subgraphs também queimam GRT quando um subgraph é atualizado. diff --git a/website/pages/pt/network/explorer.mdx b/website/pages/pt/network/explorer.mdx index 21438a0487e7..a360551b388c 100644 --- a/website/pages/pt/network/explorer.mdx +++ b/website/pages/pt/network/explorer.mdx @@ -74,7 +74,7 @@ Se quiser saber mais sobre o papel de Curador, pode visitar os seguintes atalhos Os Delegantes têm um papel importante em manter a segurança e descentralização da Graph Network. Eles participam na rede com a delegação (por ex., "staking") de tokens GRT a um ou vários indexadores. Sem Delegantes, os Indexadores têm menos chances de atrair recompensas e taxas significativas. Então, os Indexadores procuram atrair Delegantes ao oferecê-los uma porção das recompensas de indexação e das taxas de query que ganham. -Os Delegantes, por sua vez, selecionam Indexadores com base em um número de variáveis diferentes, como desempenho passado, recompensas de indexação, e porções de taxas de query. A reputação dentro da comunidade também pode importar! Vale conectar-se com os indexadores selecionados através do [Discord](https://discord.gg/graphprotocol) ou do [Fórum](https://forum.thegraph.com/) do The Graph! +Os Delegantes, por sua vez, selecionam Indexadores com base em um número de variáveis diferentes, como desempenho passado, recompensas de indexação, e porções de taxas de query. A reputação dentro da comunidade também importa! Vale a pena conectar-se com os indexadores selecionados através do [Discord](https://discord.gg/graphprotocol) ou do [Fórum](https://forum.thegraph.com/) do The Graph! ![Imagem do Explorer 7](/img/Delegation-Overview.png) diff --git a/website/pages/pt/network/indexing.mdx b/website/pages/pt/network/indexing.mdx index 8c9838c350d5..e22041a8bb18 100644 --- a/website/pages/pt/network/indexing.mdx +++ b/website/pages/pt/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexação --- -Indexadores são operadores de nodes na Graph Network que fazem staking em Graph Tokens (GRT) para prover serviços de indexação e processamento de consultas em query (queries). Os indexadores ganham taxas de consulta e recompensas de indexação pelos seus serviços. Eles também ganham de um Pool de Rebate que é compartilhado com todos os contribuidores à rede, proporcional ao seu trabalho, seguindo a Função de Rebate de Cobb-Douglas. +Indexadores são operadores de nodes na Graph Network que fazem staking em Graph Tokens (GRT) para prover serviços de indexação e processamento de queries (consultas). Os indexadores ganham taxas de consulta e recompensas de indexação pelos seus serviços. Eles também ganham taxas de query que são rebatidas de acordo com uma função de rebate exponencial. O GRT em staking no protocolo é sujeito a um período de degelo, e pode passar por slashing (recolhimento de fundos) se algum Indexador maliciosamente servir dados incorretos para aplicativos ou indexar incorretamente. 
Os Indexadores também recebem recompensas dos Delegantes por stake delegado, para contribuir à rede. @@ -26,7 +26,7 @@ O stake mínimo atual para um Indexador é de 100 mil GRT. As recompensas de indexação vêm da inflação do protocolo, que é configurada em 3% da emissão anual. Elas são distribuídas em subgraphs, com base na proporção de todos os sinais de curadoria em cada um, e depois distribuídos proporcionalmente a Indexadores baseado no stake que alocaram naquele subgraph. ** Para ser elegível a recompensas, uma alocação deve ser fechada com uma prova de indexação válida (POI) que atende aos padrões determinados pela carta de arbitragem.** -A comunidade criou várias ferramentas para calcular recompensas; há uma coleção delas organizada na [coleção de Guias da Comunidade](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Há também uma lista atualizada de ferramentas nos canais #Delegators e #Indexers no [servidor do Discord](https://discord.gg/graphprotocol). Aqui, linkamos um [otimizador de alocação recomendada](https://github.com/graphprotocol/AllocationOpt.jl) integrado com o stack de software de indexador. +A comunidade criou várias ferramentas para calcular recompensas; essas estão organizadas na [coleção de Guias da Comunidade](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Há também uma lista atualizada de ferramentas nos canais #Delegators e #Indexers no [servidor do Discord](https://discord.gg/graphprotocol). No próximo link, temos um [otimizador de alocações recomendadas](https://github.com/graphprotocol/AllocationOpt.jl) integrado com o stack de software de indexador. ### O que é uma prova de indexação (POI)? @@ -81,17 +81,17 @@ As disputas podem ser visualizadas na interface na página de perfil de um Index ### O que são rebates de taxas de consulta e quando eles são distribuídos? -As taxas de consulta são coletadas pelo gateway sempre que uma alocação é fechada e acumulada no pool de rebate de taxas de consulta em um subgraph. O objetivo do pool de rebate é encorajar Indexadores a alocar stakes, em proporção à quantidade de taxas de consulta que ganham para a rede. A porção de taxas de consulta no pool que estão alocadas a um Indexador particular é calculada utilizando a Função de Produção de Cobb-Douglas; a quantidade distribuída por Indexador é uma função das suas contribuições ao pool e a sua alocação de stake no subgraph. +As taxas de query são coletadas pelo gateway e distribuídas aos Indexadores de acordo com a função de rebate exponencial (veja o GIP [aqui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). A tal função é proposta como uma maneira de garantir que indexadores alcancem o melhor resultado ao servir queries fieis. Ela funciona com o incentivo de Indexadores para alocarem uma grande quantia de stake (que pode ser cortada por errar ao servir um query) relativa à quantidade de taxas de query que possam colecionar. -Quando uma alocação é fechada e o seu período de disputa passa, os rebates podem ser reivindicados pelo Indexador. Ao reivindicar, os rebates de taxa de consulta são distribuídos ao Indexador e os seus Delegantes com base na porção de taxas de consulta e nas proporções do pool de delegação. +Quando uma alocação é fechada, os rebates podem ser reivindicados pelo Indexador. Ao reivindicar, os rebates de taxa de consulta são distribuídos ao Indexador e os seus Delegantes com base na porção de taxas de consulta e na função de rebate exponencial. 
### O que são porção de taxa de consulta e porção de recompensa de indexação? Os valores `queryFeeCut` e `indexingRewardCut` são parâmetros de delegação que o Indexador pode configurar junto com o `cooldownBlocks` para controlar a distribuição de GRT entre o Indexador e os seus Delegantes. Veja os últimos passos no [Staking no Protocolo](/network/indexing#stake-in-the-protocol) para instruções sobre como configurar os parâmetros de delegação. -- **queryFeeCut** - a % de rebates de taxa de query acumulados em um subgraph que serão distribuídos ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% do pool de rebate de taxas de query quando uma alocação for reivindicada, com os outros 5% indo aos Delegantes. +- **queryFeeCut** - o % de rebates de taxas de query a ser distribuído ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% das taxas de query ganhas quando uma alocação for fechada, com os outros 5% destinados aos Delegantes. -- **indexingRewardCut** - o % de recompensas de indexação acumuladas em um subgraph que serão distribuídas ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% do pool de recompensas de indexação ao fechamento de uma alocação e os Delegantes dividirão os outros 5%. +- **indexingRewardCut** - o % de recompensas de indexação a ser distribuído ao Indexador. Se isto for configurado em 95%, o Indexador receberá 95% do pool de recompensas de indexação ao fechamento de uma alocação e os Delegantes dividirão os outros 5%. ### Como os Indexadores podem saber quais subgraphs indexar? @@ -375,7 +375,7 @@ Para participar com êxito na rede, são necessários monitorados e interações #### Começando -O agente indexador e o serviço indexador devem ser co-localizados com a sua infraestrutura de Graph Node. Há várias maneiras de preparar ambientes de execução virtual para os seus componentes de Indexador. Aqui, explicaremos como executá-los em um bare-metal (servidor dedicado) com pacotes NPM ou código fonte, ou através do Kubernetes e o docker no Engine Kubernetes do Google Cloud. Se estes exemplos de setup não combinam bem com a sua infraestrutura, provavelmente deverá consultar um guia de comunidade no nosso [Discord](https://discord.gg/graphprotocol). Lembre-se de [fazer staking no protocolo](/network/indexing#stake-in-the-protocol) antes de iniciar os seus componentes de Indexador! +O agente indexador e o serviço indexador devem ser co-localizados com a sua infraestrutura de Graph Node. Há várias maneiras de preparar ambientes de execução virtual para os seus componentes de Indexador. Aqui, veja como executá-los em um bare-metal (servidor dedicado) com pacotes NPM ou código fonte, ou através do Kubernetes e o docker no Engine Kubernetes do Google Cloud. Se estes exemplos de setup não batem bem com a sua infraestrutura, provavelmente deverá consultar um guia de comunidade no nosso [Discord](https://discord.gg/graphprotocol). Lembre-se de [fazer staking no protocolo](/network/indexing#stake-in-the-protocol) antes de iniciar os seus componentes de Indexador! 
#### De pacotes NPM @@ -662,21 +662,21 @@ ActionType { Exemplo de uso da fonte: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note que os tipos apoiados de ações para gestão de alocação têm requisitos diferentes de entrada: @@ -799,8 +799,4 @@ Após criada por um Indexador, uma alocação sadia passa por quatro estados. - **Fechada** - Um Indexador pode fechar uma alocação após a passagem de um epoch ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)), ou o seu agente indexador a fechará automaticamente após o **maxAllocationEpochs** (atualmente, 28 dias). Quando uma alocação é fechada com uma prova de indexação válida (POI), as suas recompensas de indexação são distribuídas ao Indexador e aos seus Delegantes (veja "como são distribuídas as recompensas?" abaixo para saber mais). -- **Finalizada** - Quando uma alocação é fechada, há um período de disputa, após o qual a alocação é considerada **finalizada** e seus rebates de taxa de query podem ser resgatados (claim()). O agente indexador monitora a rede para detectar alocações **finalizadas** e as resgata se estiverem acima de um limite configurável (e opcional), **—-allocation-claim-threshold**. - -- **Resgatada** — O estado final de uma alocação; ela já cumpriu as suas funções como alocação ativa, todas as recompensas elegíveis foram distribuídas, e os seus rebates de taxa de query foram resgatados. - É ideal que os Indexadores utilizem a funcionalidade de sincronização off-chain para sincronizar lançamentos de subgraph à chainhead antes de criar a alocação on-chain. Esta ferramenta é mais útil para subgraphs que demorem mais de 28 epochs para sincronizar, ou que têm chances de falhar não-deterministicamente. 
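To make the off-chain syncing recommendation above concrete, here is a minimal, hypothetical sketch using the Indexer CLI's indexing rules; the deployment hash is a placeholder, and it assumes the `offchain` decision basis supported by recent versions of `indexer-cli`.

```bash
# Hypothetical example: pre-sync a deployment off-chain, then allocate once synced.
# QmExampleDeploymentHash is a placeholder for a real deployment ID.

# Sync the deployment towards chainhead without opening an on-chain allocation
graph indexer rules set QmExampleDeploymentHash decisionBasis offchain

# Inspect the current rule for the deployment
graph indexer rules get QmExampleDeploymentHash

# Once the deployment has caught up, switch the rule so the agent opens an allocation
graph indexer rules set QmExampleDeploymentHash decisionBasis always
```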
diff --git a/website/pages/pt/new-chain-integration.mdx b/website/pages/pt/new-chain-integration.mdx index c5934efa6f87..16b0d35f387b 100644 --- a/website/pages/pt/new-chain-integration.mdx +++ b/website/pages/pt/new-chain-integration.mdx @@ -1,75 +1,75 @@ --- -title: Integrating New Networks +title: Integração de Novas Redes --- -Graph Node can currently index data from the following chain types: +Atualmente, Graph Nodes podem indexar dados dos seguintes tipos de chain: -- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) -- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) -- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) -- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) +- Ethereum, através de EVM JSON-RPC e [Firehose Ethereum](https://github.com/streamingfast/firehose-ethereum) +- NEAR, através de um [Firehose NEAR](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, através de um [Firehose Cosmos](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, através de um [Firehose Arweave](https://github.com/graphprotocol/firehose-arweave) -If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. +Se tiver interesse em qualquer destas chains, a integração será uma questão de configuração e testes do Graph Node. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +Caso tenha interesse num tipo diferente de chain, será necessário construir uma nova integração com o Graph Node. Recomendamos programar um novo Firehose para a chain em questão e então a integração daquele Firehose com o Graph Node. Mais informações abaixo. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). +Se a blockchain for equivalente à EVM, e o cliente/node expor a API padrão EVM JSON-RPC, o Graph Node deve ser capaz de indexar a nova chain. Para mais informações, confira [Como testar uma EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +Para chains que não são baseadas em EVM, o Graph Node deverá ingerir dados de blockchain através da gRPC e definições de tipos conhecidas. Isto pode ser feito através do [Firehose](firehose/), uma nova tecnologia desenvolvida pelo [StreamingFast](https://www.streamingfast.io/) que providencia uma solução de indexação de blockchain altamente escalável com o uso de uma abordagem baseada em arquivos e que prioriza a transmissão de dados. Contacte a [equipe do StreamingFast](mailto:integrations@streamingfast.io/) caso precise de ajuda com a programação do Firehose.
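Before wiring a new EVM chain into Graph Node, it can help to sanity-check that the RPC endpoint answers the JSON-RPC methods listed in the testing section below. A minimal probe sketch follows; the endpoint URL and the subset of methods checked here are assumptions for illustration, not an official tool.

```tsx
// Point this at the EVM JSON-RPC endpoint you intend to index from (placeholder URL).
const RPC_URL = 'http://localhost:8545'

async function rpc(method: string, params: unknown[] = []) {
  const res = await fetch(RPC_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ jsonrpc: '2.0', id: 1, method, params }),
  })
  return res.json()
}

async function main() {
  // net_version returns the network id as a string.
  console.log(await rpc('net_version'))
  // eth_getBlockByNumber('latest', false) returns the chain-head block header.
  console.log(await rpc('eth_getBlockByNumber', ['latest', false]))
  // eth_getLogs over a small block range returns an array (possibly empty).
  console.log(await rpc('eth_getLogs', [{ fromBlock: '0x1', toBlock: '0x1' }]))
}

main()
```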
-## Difference between EVM JSON-RPC & Firehose +## Diferenças entre EVM JSON-RPC e Firehose -While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. +Enquanto os dois são aptos para subgraphs, um Firehose é sempre exigido para programadores que querem construir com [Substreams](substreams/), como a construção de [subgraphs movidos a Substreams](cookbook/substreams-powered-subgraphs/). Além disso, o Firehose gera velocidades de indexação mais rápidas em comparação ao JSON-RPC. -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +Novos integradores de chain EVM também podem considerar a abordagem com base no Firehose, com consideração aos benefícios do substreams e as suas imensas capacidades paralelas de indexação. Apoiar ambos permite que programadores escolham entre a construção de substreams ou subgraphs para a nova chain. -> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **NOTA**: Uma integração baseada no Firehose para chains EVM ainda exigirá que Indexadores executem o node RPC de arquivo da chain para indexar subgraphs corretamente. Isto se deve à inabilidade do Firehose para providenciar estados de contratos inteligentes que são tipicamente acessíveis pelo método RPC `eth_call`. (Vale lembrar que eth_calls [não são uma boa prática para programadores](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) --- -## Testing an EVM JSON-RPC +## Como testar uma EVM JSON-RPC -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: +Para que o Graph Node possa ingerir dados de uma chain EVM, o node RPC deve expor os seguintes métodos em EVM JSON-RPC: - `eth_getLogs` -- `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): +- `eth_call` \_(para blocos históricos, com EIP-1898 - requer node de arquivo): - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, in a JSON-RPC batch request -- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ +- `eth_getTransactionReceipt`, em um pedido conjunto em JSON-RPC +- _`trace_filter`_ _(opcional, para que o Graph Node tenha apoio a call handlers)_ -### Graph Node Configuration +### Configuração do Graph Node -**Start by preparing your local environment** +**Primeiro, prepare o seu ambiente local** -1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. -3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ +1. [Clone o Graph Node](https://github.com/graphprotocol/graph-node) +2. Modifique [esta linha](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) para ela incluir o nome da nova rede e a URL do EVM JSON-RPC + > Não mude o nome do env var. Ele deve permanecer como `ethereum` mesmo se o nome da rede for diferente. +3. Execute um node IPFS ou use aquele usado pelo The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Teste a integração com o lançamento local de um subgraph** -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` +1. Instale o [graph-cli](https://github.com/graphprotocol/graph-cli) +2. Crie um subgraph de exemplo simples. Aqui estão algumas opções: + 1. O contrato inteligente e o subgraph [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) pré-inclusos são bons para começar + 2. Inicie um subgraph local de qualquer contrato inteligente existente ou de um ambiente de programação em solidity [com o uso do Hardhat com um plugin do Graph](https://github.com/graphprotocol/hardhat-graph) +3. Adapte o `subgraph.yaml` resultante com a mudança do [`dataSources.network`](http://dataSources.network) para o mesmo nome passado anteriormente ao Graph Node. +4. Crie o seu subgraph no Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publique o seu subgraph no Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. +O Graph Node deve então sincronizar o subgraph lançado caso não haja erros. Dê um tempo para que ele sincronize, e depois envie alguns queries em GraphQL ao endpoint da API produzido pelos logs. --- -## Integrating a new Firehose-enabled chain +## Integração de uma nova chain com o Firehose -Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: +Integrar uma nova chain também é possível com a abordagem do Firehose. Esta é, atualmente, a melhor opção para chains não-EVM, e necessária para o apoio do substreams. Há mais documentações sobre como o Firehose funciona, como adicionar apoio ao Firehose para uma nova chain, e como integrá-la com o Graph Node. 
Documentos recomendados para integradores: -1. [General docs on Firehose](firehose/) -2. [Adding Firehose support for a new chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) -3. [Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) +1. [Documentos gerais sobre o Firehose](firehose/) +2. [Como adicionar apoio do Firehose a uma nova chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) +3. [Integração do Graph Node com uma nova chain através do Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/pt/operating-graph-node.mdx b/website/pages/pt/operating-graph-node.mdx index 01ab22547050..507e1706e067 100644 --- a/website/pages/pt/operating-graph-node.mdx +++ b/website/pages/pt/operating-graph-node.mdx @@ -22,7 +22,7 @@ Para indexar uma rede, o Graph Node precisa de acesso a um cliente de rede atrav Enquanto alguns subgraphs exigem apenas um node completo, alguns podem ter recursos de indexing que exijam funcionalidades adicionais de RPC. Especificamente, subgraphs que usam o `eth_calls` como parte do indexing exigirão um node de arquivo que apoie o [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898); e subgraphs com `callHandlers`, ou `blockHandlers` com um filtro `call`, exigem apoio ao `trace_filter` ([veja a documentação sobre o trace module (módulo de rastreio) aqui](https://openethereum.github.io/JSONRPC-trace-module)). -**Em Breve: Firehoses de Rede** - um Firehose é um serviço gRPC que providencia uma transmissão ordenada, mas consciente de forks, de blocos, feito pelos programadores centrais do The Graph para melhorar o apoio a indexing eficiente em escala. Isto não é um requisito atual para indexadores, mas eles são encorajados a se familiarizarem com a tecnologia, à frente do apoio total à rede. Leia mais sobre o Firehose [aqui](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Nodes IPFS diff --git a/website/pages/pt/publishing/publishing-a-subgraph.mdx b/website/pages/pt/publishing/publishing-a-subgraph.mdx index f62214483602..b09693642e0e 100644 --- a/website/pages/pt/publishing/publishing-a-subgraph.mdx +++ b/website/pages/pt/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Quando o seu subgraph for [lançado ao Subgraph Studio](/deploying/deploying-a-s Editar um Subgraph na rede descentralizada o disponibiliza para a curadoria pelos [Curadores](/network/curating), e para indexação pelos [Indexadores](/network/indexing). -Para um guia sobre como editar um subgraph na rede descentralizada, veja [este vídeo](https://youtu.be/HfDgC2oNnwo?t=580). + A lista de redes apoiadas está [aqui](/developing/supported-networks). 
diff --git a/website/pages/pt/querying/querying-best-practices.mdx b/website/pages/pt/querying/querying-best-practices.mdx index 19bf844dd827..9bc85c9439d6 100644 --- a/website/pages/pt/querying/querying-best-practices.mdx +++ b/website/pages/pt/querying/querying-best-practices.mdx @@ -67,18 +67,18 @@ Para uma lista completa de regras com exemplos de código, veja o nosso guia de ### Como enviar um query a uma API GraphQL -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL é uma linguagem e conjunto de convenções que transportam através do HTTP. -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +Ou seja, dá para fazer um query numa API GraphQL com o `fetch` normal (nativamente ou via `@whatwg-node/fetch` ou `isomorphic-fetch`). -However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +Porém, como dito em ["Queries num Aplicativo"](/querying/querying-from-an-application), é melhor usar o nosso `graph-client`, que apoia funções como: - Gestão de Subgraph Cross-chain: Queries de múltiplos subgraphs numa única consulta - [Rastreamento Automático de Blocos](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [Paginação Automática](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - Resultado totalmente digitado -Here's how to query The Graph with `graph-client`: +Aqui está como fazer queries para o The Graph com o `graph-client`: ```tsx import { execute } from '../.graphclient' @@ -102,9 +102,9 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +Falamos sobre mais alternativas de cliente GraphQL em ["Queries num Aplicativo"](/querying/querying-from-an-application). -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +Agora que cobrimos as regras básicas da sintaxe de queries GraphQL, vamos agora ver como escrever bons queries no GraphQL. --- @@ -112,7 +112,7 @@ Now that we covered the basic rules of GraphQL queries syntax, let's now look at ### Sempre escreva consultas estáticas -A common (bad) practice is to dynamically build query strings as follows: +É (um erro) comum construir strings de consulta dinamicamente, como a seguir: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... ``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +Enquanto o trecho acima produz um query válido no GraphQL, **isto traz muitas desvantagens**: - ela **dificulta** a consulta na totalidade - os programadores são **responsáveis por higienizar com segurança a interpolação de string** - não mandar os valores das variáveis como parte dos parâmetros de pedido **impede um possível caching no lado do servidor** - ela **impede as ferramentas de analisar estaticamente a consulta** (por ex. 
Linter ou ferramentas de geração de tipo) -For this reason, it is recommended to always write queries as static strings: +Por isto, é recomendado sempre escrever queries como strings estáticas: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +Isto traz **muitas vantagens**: - Queries **fáceis de ler e manter** - O **servidor GraphQL cuida da higienização de variáveis** - **Variáveis podem ser cacheadas** no nível do servidor - **Queries podem ser analisados estaticamente por ferramentas** (mais sobre isto nas seções seguintes) -**Note: How to include fields conditionally in static queries** +**Nota: Como incluir campos condicionalmente em queries estáticos** -We might want to include the `owner` field only on a particular condition. +Talvez você queira incluir o campo `owner` com uma condição particular. -For this, we can leverage the `@include(if:...)` directive as follows: +Para isto, use a diretiva `@include(if:...)` a seguir: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,21 +191,21 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +Nota: a diretiva oposta é `@skip(if: ...)`. ### Dicas de desempenho -**"Ask for what you want"** +**"Pergunte pelo que queres"** -GraphQL became famous for its "Ask for what you want" tagline. +O GraphQL ficou famoso por sua frase de efeito "pergunte pelo que queres". -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +Por isto, no GraphQL, não há como obter todos os campos disponíveis sem ter que listá-los individualmente. -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +Ao consultar APIs GraphQL, sempre considere fazer query apenas dos campos que serão usados. -A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. +Over-fetching é normalmente causado por coleções de entidades. Por padrão, os queries retirarão 100 entidades em uma coleção, muito mais do que realmente será usado; por ex., para exibição ao usuário. Portanto, queries devem quase sempre definir `first` explicitamente, com a garantia de que só retirarão quantas entidades forem necessárias. Isto serve não só para coleções de alto nível em uma consulta, mas mais ainda para coleções aninhadas de entidades. -For example, in the following query: +Por exemplo, no query seguinte: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +A resposta pode conter 100 transações para cada um dos 100 tokens. -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +Se o aplicativo só precisa de 10 transações, o query deve configurar explicitamente `first: 10` no campo de transações.
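To make that concrete, here is one way the `listTokens` query above could be written so that both the top-level and the nested collection state their sizes explicitly. A sketch, with `'your-favorite-graphql-client'` used as the same placeholder as in the examples above:

```tsx
import { execute } from 'your-favorite-graphql-client'

// Both collection sizes are set explicitly, so nothing falls back to the
// default of 100 entities per collection.
const query = `
query listTokens {
  tokens(first: 100) {
    id
    transactions(first: 10) {
      id
    }
  }
}
`

const result = await execute(query)
```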
-**Combining multiple queries** +**Como combinar múltiplos queries** -Your application might require querying multiple types of data as follows: +O seu aplicativo pode exigir queries de múltiplos tipos de dados, como a seguir: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +Enquanto esta implementação é totalmente válida, ela exigirá duas rondas totais com a API GraphQL. -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +Felizmente, também vale enviar múltiplos queries no mesmo pedido à GraphQL, como a seguir: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +Este método **melhorará o desempenho geral** ao reduzir o tempo gasto na rede (pois, poupa-lhe de uma viagem ao redor da API) e fornecerá **implementações mais concisas**. ### Como Aproveitar Fragmentos GraphQL -A helpful feature to write GraphQL queries is GraphQL Fragment. +Uma boa ferramenta para escrever queries GraphQL é o GraphQL Fragment. -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +No seguinte query, perceba que alguns campos são repetidos em vários Selection-Sets (`{ ... }`): ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +Estes campos repetidos (`id`, `active`, `status`) trazem muitos problemas: - difíceis de ler para consultas mais extensivas - ao usar ferramentas que geram tipos TypeScript baseados em queries (_mais sobre isto na última seção_), `newDelegate` e `oldDelegate` retornarão duas interfaces distintas em linha. -A refactored version of the query would be the following: +Refatorado, o query ficaria assim: ```graphql query { @@ -334,15 +334,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. +Usar o `fragment` do GraphQL melhorará a legibilidade (especialmente em escala) e também melhorará a geração de tipos TypeScript. -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +Ao usar a ferramenta de geração de tipos, o query acima gerará um tipo `DelegateItemFragment` apropriado (_veja a última seção "Ferramentas"_). ### O que fazer e o que não fazer em Fragments GraphQL -**Fragment base must be a type** +**A base do fragment deve ser um tipo** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +Um Fragment não pode ser baseado num tipo não aplicável; ou seja, **um tipo sem campos**: ```graphql fragment MyFragment on BigInt { @@ -350,11 +350,11 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +O `BigInt` é um **escalar** (tipo "plano" nativo) que não pode ser usado como a base de um fragment. 
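To recap the contrast, here is a small sketch of an invalid fragment base next to a valid one, reusing the `Transcoder` fields (`id`, `active`, `status`) from the example above:

```tsx
// Invalid: `BigInt` is a scalar with no fields, so it cannot be a fragment base.
const invalidFragment = `
fragment MyFragment on BigInt {
  id
}
`

// Valid: `Transcoder` is an object type with fields, so it can be a fragment base
// (the same fields used in the DelegateItem example above).
const validFragment = `
fragment DelegateItem on Transcoder {
  id
  active
  status
}
`
```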
-**How to spread a Fragment** +**Como espalhar um Fragment** -Fragments are defined on specific types and should be used accordingly in queries. +Fragments são definidos em tipos específicos e devem ser usados de acordo nos queries. Exemplo: @@ -377,17 +377,17 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` e `oldDelegate` são do tipo `Transcoder`. -It is not possible to spread a fragment of type `Vote` here. +Não é possível espalhar um fragment do tipo `Vote` aqui. -**Define Fragment as an atomic business unit of data** +**Defina o Fragment como uma unidade de negócios atômica de dados** -GraphQL Fragment must be defined based on their usage. +O Fragment GraphQL deve ser definido baseado no seu uso. -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +Para a maioria dos casos de uso, definir um fragment por tipo (no caso do uso repetido de campos ou geração de tipos) já basta. -Here is a rule of thumb for using Fragment: +Aqui estão algumas regras básicas para o uso de Fragments: - quando campos do mesmo tipo repetem em um query, agrupe-os em um Fragment - quando campos parecidos (mas não idênticos) repetem, crie múltiplos fragmentos, por ex: @@ -417,31 +417,31 @@ fragment VoteWithPoll on Vote { ### Exploradores do GraphQL baseados em web -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +Iterar sobre queries ao executá-los no seu aplicativo pode ser muito cansativo. Por isto, use o [Graph Explorer](https://thegraph.com/explorer) à vontade para testar os seus queries antes de adicioná-los ao seu aplicativo. O Graph Explorer proverá um playground GraphQL pré-configurado para testar os seus queries. -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +Se procura uma maneira mais flexível de debugar/testar as suas consultas, há outras ferramentas baseadas em web, como [Altair](https://altair.sirmuel.design/) e [GraphiQL](https://graphiql-online.com/graphiql). ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +Para acompanhar as melhores práticas e regras sintáticas ditas acima, vale muito usar o workflow e as ferramentas IDE a seguir. **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +O [GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) ajudará-lhe a ficar no topo das melhores práticas no GraphQL sem nenhum esforço. -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +[Preparar a configuração "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) aplicará regras essenciais como: - `@graphql-eslint/fields-on-correct-type`: um campo está num tipo apropriado? - `@graphql-eslint/no-unused variables`: uma variável usada deve ficar sem uso? - e mais! 
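As a quick illustration of what the linter catches, here is a sketch of an operation that the unused-variables rule would flag (assuming the standard `@graphql-eslint/no-unused-variables` rule name), since `$unusedAddress` is declared but never used:

```tsx
// `$unusedAddress` appears in the operation signature but is never referenced
// in the selection set, so `@graphql-eslint/no-unused-variables` reports it.
const query = `
query GetToken($id: ID!, $unusedAddress: String) {
  token(id: $id) {
    id
    owner
  }
}
`
```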
-This will allow you to **catch errors without even testing queries** on the playground or running them in production! +Isto permitirá-lhe **detectar erros até mesmo sem testar queries** no playground ou executá-las na produção! ### Plugins IDE -**VSCode and GraphQL** +**VSCode e GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +A [extensão GraphQL VSCode](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) é ótima para o seu workflow de desenvolvimento. Com ela, você consegue: - destaque de sintaxe - sugestões de autocomplete @@ -449,15 +449,15 @@ The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemNa - snippets - definições para fragments e tipos de entrada -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +Se usa o `graphql-eslint`, a [extensão ESLint VSCode](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) é indispensável para visualizar corretamente os erros e avisos em inline no seu código. -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij e GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +O [plugin JS GraphQL](https://plugins.jetbrains.com/plugin/8097-graphql/) melhorará muito a sua experiência de trabalho com o GraphQL, pois proverá: - destaque de sintaxe - sugestões de autocomplete - validação contra schema - snippets -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +Mais informações [neste artigo da WebStorm](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/), que mostra todos os recursos principais do plugin. diff --git a/website/pages/pt/querying/querying-from-an-application.mdx b/website/pages/pt/querying/querying-from-an-application.mdx index 4a280b225e7d..de13777f23d5 100644 --- a/website/pages/pt/querying/querying-from-an-application.mdx +++ b/website/pages/pt/querying/querying-from-an-application.mdx @@ -33,11 +33,11 @@ O The Graph fornece um cliente GraphQL próprio, `graph-client`, que apoia recur - [Paginação Automática](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - Resultado totalmente digitado -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +Também é integrado com clientes GraphQL populares como Apollo e URQL, e compatível com todos os ambientes, (React, Angular, Node.js, React Native); usar o `graph-client` proverá a melhor experiência para interagir com o The Graph. -Let's look at how to fetch data from a subgraph with `graphql-client`. +Vamos ver como retirar dados de um subgraph com o `graphql-client`. 
-To get started, make sure to install The Graph Client CLI in your project: +Para começar, instale a Graph Client CLI no seu projeto: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +Defina o seu query num arquivo `.graphql` (ou alinhado no seu arquivo `.js` ou `.ts`): ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +Depois, crie um arquivo de configuração (chamado `.graphclientrc.yml`) e aponte aos seus endpoints GraphQL oferecidos pelo The Graph, por exemplo: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +Executar o seguinte comando na Graph Client CLI gerará códigos JavaScript digitados e prontos para uso: ```sh graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +Finalmente, atualize o seu arquivo `.ts` para usar os documentos digitados em GraphQL já gerados: ```tsx import React, { useEffect } from 'react' @@ -135,17 +135,17 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ Aviso importante** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +O `graph-client` é perfeitamente integrado com outros clientes GraphQL como Apollo, URQL, ou React Query; [veja exemplos no repositório oficial](https://github.com/graphprotocol/graph-client/tree/main/examples). -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +Porém, se escolher usar outro cliente, lembre que **não haverá como usar a Gestão de Subgraph Cross-Chain ou a Paginação Automática, estes recursos importantes para queries no The Graph**. ### Apollo -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +O [cliente Apollo](https://www.apollographql.com/docs/) é o cliente GraphQL mais ubíquo no ecossistema do front-end. -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +Disponível para React, Angular, Vue, Ember, iOS e Android, o cliente Apollo, apesar de ser o mais pesado, traz muitos recursos para construir interfaces de utilizador avançadas em cima do GraphQL: - tratamento de erros avançado - paginação @@ -153,9 +153,9 @@ Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, altho - UI otimista - gestão de estado local -Let's look at how to fetch data from a subgraph with Apollo client in a web project. +Vamos ver como retirar dados de um subgraph com o cliente Apollo num projeto de web. 
-First, install `@apollo/client` and `graphql`: +Primeiro, instale o `@apollo/client` e o `graphql`: ```sh npm install @apollo/client graphql @@ -194,7 +194,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +Para usar variáveis, dá para passar um argumento `variables` ao query: ```javascript const tokensQuery = ` @@ -227,16 +227,16 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +Há também o [URQL](https://formidable.com/open-source/urql/), que está disponível nos ambientes Node.js, React/Preact, Vue e Svelte, com ferramentas mais avançadas: - Sistema de cache flexível - Design extensível (mais fácil adicionar novas capacidades em cima dele) - Pacote leve (cerca de 5x mais leve que o Apollo) - Apoio para envios de arquivo e modo offline -Let's look at how to fetch data from a subgraph with URQL in a web project. +Vamos ver como retirar dados de um subgraph com o URQL num projeto de web. -First, install `urql` and `graphql`: +Primeiro, instale o `urql` e o `graphql`: ```sh npm install urql graphql diff --git a/website/pages/pt/querying/querying-the-hosted-service.mdx b/website/pages/pt/querying/querying-the-hosted-service.mdx index ba13bba09d56..5b7df3a729b7 100644 --- a/website/pages/pt/querying/querying-the-hosted-service.mdx +++ b/website/pages/pt/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Queries no Serviço Hospedado --- -Com o subgraph lançado, visite o [Serviço Hospedado](https://thegraph.com/hosted-service/) para abrir uma interface [GraphiQL](https://github.com/graphql/graphiql), onde pode explorar a API GraphQL lançada para o subgraph ao emitir queries e visualizar o schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. Há um exemplo abaixo, mas vale ver a [API de Queries](/querying/graphql-api) para uma referência completa em como fazer queries às entidades do subgraph. @@ -19,9 +19,9 @@ Esta consulta lista todos os counters criados pelo nosso mapeamento. Como só cr } ``` -## Como Usar o Serviço Hospedado +## Using the hosted service -O Graph Explorer e o seu playground GraphQL servem bem para exploração e queries de subgraphs lançados no Serviço Hospedado. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Alguns dos recursos principais são detalhados abaixo: diff --git a/website/pages/pt/querying/querying-with-python.mdx b/website/pages/pt/querying/querying-with-python.mdx new file mode 100644 index 000000000000..97eb3cbf3fcf --- /dev/null +++ b/website/pages/pt/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. 
+ +## Como Começar + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/pt/quick-start.mdx b/website/pages/pt/quick-start.mdx new file mode 100644 index 000000000000..eca9c7157d7e --- /dev/null +++ b/website/pages/pt/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Começo Rápido +--- + +Neste guia, aprenda como inicializar, criar e lançar o seu subgraph ao Subgraph Studio ou ao [serviço hospedado](#hosted-service). + +Garanta que o seu subgraph indexará dados de uma [rede apoiada](/developing/supported-networks). + +Este guia presume que tem: + +- Um endereço de contrato inteligente na rede da sua escolha +- GRT para a curadoria do seu subgraph +- Uma carteira de criptomoedas + +## 1. Como criar um subgraph no Subgraph Studio + +Entre no [Subgraph Studio](https://thegraph.com/studio/) e conecte a sua carteira de criptomoedas. + +Após conectar, pode começar com um clique em "create a subgraph" (criar um subgraph). Selecione a sua rede preferida e clique em Continue. + +## 2. Como instalar o Graph CLI + +O Graph CLI é escrito em JavaScript. Para usá-lo, instale o `npm` ou `yarn`. + +Execute um dos seguintes comandos na sua máquina local: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Como Inicializar o seu Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). 
+ + Ao inicializar o seu subgraph, a ferramenta CLI pedirá pelas seguintes informações: + +- Protocolo: Escolha o protocolo do qual o subgraph indexará dados +- Slug do subgraph: crie um nome para o seu subgraph. O slug serve como uma identidade para o seu subgraph. +- Diretório onde o subgraph será criado: escolha o seu diretório local +- Rede Ethereum (opcional): é possível que tenha de especificar a rede compatível com EVM de onde o seu subgraph indexará dados +- Endereço de contrato: Localize o endereço do contrato inteligente do qual quer consultar dados +- ABI: Se a ABI não estiver preenchida automaticamente, insira-a manualmente como um arquivo JSON +- Bloco Inicial: Insira o bloco inicial para poupar tempo enquanto o seu subgraph indexa dados da blockchain. O bloco inicial pode ser localizado ao encontrar o bloco onde o seu contrato foi lançado. +- Nome do Contrato: insira o nome do seu contrato +- Indexar eventos de contrato como entidades: é sugerido configurar isto como true, já que ele automaticamente adicionará mapeamentos ao seu subgraph para cada evento emitido +- Adicionar outro contrato (opcional): adicionar outro contrato + +Inicialize o seu subgraph a partir de um contrato existente ao executar o seguinte comando: + +```sh +graph init --studio  +``` + +Veja a imagem a seguir para um exemplo do que esperar quando for inicializar o seu subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Como escrever o seu Subgraph + +O comando anterior cria um subgraph básico (scaffold) que pode ser usado como ponto de partida para construir o seu subgraph. Enquanto faz mudanças ao subgraph, trabalhará principalmente com três arquivos: + +- Manifest (subgraph.yaml) - O manifest define quais fontes de dados serão indexadas pelos seus subgraphs. +- Schema (schema.graphql) - O schema GraphQL define quais dados deseja retirar do subgraph. +- Mapeamentos em AssemblyScript (mapping.ts) — Este é o código que traduz dados das suas fontes de dados às entidades definidas no schema. + +Para mais informações sobre como escrever o seu subgraph, veja [Criando um Subgraph](/developing/creating-a-subgraph). + +## 5. Como fazer um lançamento ao Subgraph Studio + +Quando o seu subgraph estiver escrito, execute os seguintes comandos: + +```sh +$ graph codegen +$ graph build +``` + +- Autentique e lance o seu subgraph. A chave de lançamento (deploy key) pode ser encontrada na página "Subgraph" do Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as: `v1`, `version1`, `asdf`. + +## 6. Como testar o seu subgraph + +Podes testar o seu subgraph ao fazer um exemplo de query na secção do playground. + +Os logs indicarão se há quaisquer erros com o seu subgraph. Os logs de um subgraph operacional parecerão com isto: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**).
The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Como Publicar um Subgraph na Rede Descentralizada do The Graph + +Quando o seu subgraph for lançado ao Subgraph Studio, estiver testado, e estiver pronto para produção, pode então ser editado na rede descentralizada. + +No Subgraph Studio, clique no seu subgraph. Na página do subgraph, clique no botão Publish (editar) no canto superior direito. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Antes de poder consultar o seu subgraph, os Indexadores precisam começar a servir queries nele. O processo fica mais simples se curares o seu próprio subgraph com GRT. + +Até o fechamento deste texto, é recomendado que cure o seu próprio subgraph com 10.000 GRT para garantir que ele seja indexado e aberto a queries o mais rápido possível. + +Para poupar custos de gas, cure o seu subgraph na mesma transação em que ele foi publicado, ao selecionar este botão quando publicar o seu subgraph na rede descentralizada do The Graph: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Como consultar o seu subgraph + +Agora, podes consultar o seu subgraph com o envio de queries GraphQL para o URL de Consulta do seu subgraph, que pode ser visto ao clicar no botão Query (consultas). + +Caso não tenha a sua chave API, pode fazer um query do seu dApp pelo URL de consulta temporária, grátis e com rate-limit, que pode ser usado para desenvolvimento e encenação. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/pt/substreams.mdx b/website/pages/pt/substreams.mdx index d0354f06bab1..48b2f7bc643e 100644 --- a/website/pages/pt/substreams.mdx +++ b/website/pages/pt/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Logo do Substreams](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +O Substreams é uma tecnologia poderosa de indexação de blockchains, desenvolvida pela Graph Network. O Substreams permite que programadores escrevam módulos em Rust para compor transmissões de dados ao lado da comunidade, e providencia indexações de altíssimo desempenho por virtude da paralelização, de uma forma que prioriza o streaming. - +Com o Substreams, podes extrair dados de blockchains diferentes (Ethereum, BNB, Solana...) muito rapidamente! Daí, podes enviar dados a vários locais (um banco de dados Postgres, um banco de dados Mongo, ou um Subgraph). + +## Como o Substreams funciona em 4 passos + +1. 
**Primeiro escreva um programa em Rust, que define as transformações que queres aplicar aos dados em blockchain.** Por exemplo, a seguinte função em Rust extrai informações relevantes de um bloco no Ethereum (número, hash, e hash parente). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **Embrulhe o seu programa Rust num módulo WASM com a execução de um único comando numa CLI.** + +3. **O container WASM é enviado a um endpoint do Substreams para execução.** O provedor do Substreams alimenta o container WASM com os dados da blockchain e as transformações são aplicadas. + +4. **Selecione um [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), um lugar onde queres enviar os dados transformados** (um banco de dados Postgres ou um Subgraph, por exemplo). + +## Documentação do Substreams + +A documentação oficial do Substreams é mantida atualmente pela equipa do StreamingFast no [site oficial do StreamingFast](https://substreams.streamingfast.io/). + +### Como Começar + +- Para poder programar e lançar um Substreams, [instale a CLI do Substreams](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Depois, execute o seu primeiro Substreams ao seguir o [Tutorial de Início Rápido](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expanda o Seu Conhecimento + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/pt/sunrise.mdx b/website/pages/pt/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/pt/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. 
All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? 
+ +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? 
+ +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/pt/tokenomics.mdx b/website/pages/pt/tokenomics.mdx index 7ad0cb14cf85..e01cfce19dd9 100644 --- a/website/pages/pt/tokenomics.mdx +++ b/website/pages/pt/tokenomics.mdx @@ -11,7 +11,7 @@ O The Graph é um protocolo descentralizado que permite o acesso fácil a dados É parecido com um modelo B2B2C, mas movido por uma rede descentralizada de participantes, que trabalham juntos para fornecer dados a utilizadores finais em troca de recompensas em GRT. O GRT é o token de utilidade que coordena provedores e consumidores de dados. O GRT serve como utilidade para a coordenação de provedores e consumidores de dados dentro da rede, e incentiva os participantes do protocolo a organizarem dados com eficácia. -Ao usar o The Graph, os utilizadores podem acessar dados da blockchain com facilidade e pagar apenas pelas informações de que precisam. O The Graph é usado por vários [aplicativos populares](https://thegraph.com/explorer) no ecossistema web3 hoje. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. O The Graph indexa dados de blockchain de forma parecida com a indexação da web pelo Google. De fato, pode já estar a usar o The Graph sem perceber. Se viu o frontend de um dApp que ganha os seus dados de um subgraph, já fez um query para dados de um subgraph! @@ -75,7 +75,7 @@ Os Indexadores são o núcleo do The Graph; operam o equipamento e o software in Os Indexadores podem ganhar recompensas em GRT de duas maneiras: -1. Taxas de query: GRT pago por programadores ou utilizadores por queries de dados em subgraphs. Taxas de query são depositadas em um pool de rebate e distribuídos a Indexadores. +1. Taxas de query: GRT pago, por programadores ou utilizadores, para queries de dados de subgraph. Taxas de query são distribuídas diretamente a Indexadores conforme a função de rebate exponencial (veja o GIP [aqui](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Recompensas de indexação: a emissão anual de 3% é distribuída aos Indexadores com base no número de subgraphs que indexam. Estas recompensas os incentivam a indexar subgraphs, às vezes antes das taxas de query começarem, de modo a acumular e enviar Provas de Indexação (POIs) que verificam que indexaram dados corretamente. 
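To make the query-fee mechanics referenced in the tokenomics page above concrete: a consumer pays query fees by sending GraphQL requests to a network gateway with an API key. The sketch below is illustrative only — the gateway URL shape, subgraph ID, API key, and queried fields are placeholders and assumptions, not values taken from this changeset.

```typescript
// Minimal sketch: querying a subgraph on The Graph Network through a gateway.
// The API key, subgraph ID, and queried fields below are hypothetical placeholders.
const API_KEY = '<api-key>'
const SUBGRAPH_ID = '<subgraph-id>'
const url = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`

const query = `{ tokens(first: 5) { id symbol } }`

async function main(): Promise<void> {
  const response = await fetch(url, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  const result = await response.json()
  if (result.errors) {
    throw new Error(JSON.stringify(result.errors))
  }
  console.log(result.data) // the consumer is billed per query via the API key
}

main().catch(console.error)
```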
diff --git a/website/pages/ro/about.mdx b/website/pages/ro/about.mdx index c1f7c886900f..67c438952abb 100644 --- a/website/pages/ro/about.mdx +++ b/website/pages/ro/about.mdx @@ -1,5 +1,5 @@ --- -title: About The Graph +title: Despre The Graph --- This page will explain what The Graph is and how you can get started. diff --git a/website/pages/ro/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ro/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..43f96152931a 100644 --- a/website/pages/ro/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ro/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. 
You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). + +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## Subgraph Transfer -## How do I transfer my subgraph? +### How do I transfer my subgraph? + + To transfer your subgraph, you will need to complete the following steps: @@ -30,55 +54,147 @@ To transfer your subgraph, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Where should I initiate my transfer from? You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. 
Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. -## How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -## Will my subgraph still be discoverable after I transfer it to L2? +### Will my subgraph still be discoverable after I transfer it to L2? Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. -## Does my subgraph need to be published to transfer it? +### Does my subgraph need to be published to transfer it? To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. -## After I transfer, do I also need to re-publish on Arbitrum? +### After I transfer, do I also need to re-publish on Arbitrum? After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. 
In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Will my subgraph's curation move with my subgraph? If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my subgraph back to Ethereum mainnet after I transfer? Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Why do I need bridged ETH to complete my transfer? Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? 
+ +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. 
+ +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curation Signal -## How do I transfer my curation? +### How do I transfer my curation? To transfer your curation, you will need to complete the following steps: @@ -90,25 +206,29 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -## How will I know if the subgraph I curated has moved to L2? +### How will I know if the subgraph I curated has moved to L2? When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. -## What if I do not wish to move my curation to L2? +### What if I do not wish to move my curation to L2? When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. -## How do I know my curation successfully transferred? +### How do I know my curation successfully transferred? 
Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -## Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one subgraph at a time? There is no bulk transfer option at this time. ## Indexer Stake -## How do I transfer my stake to Arbitrum? +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + To transfer your stake, you will need to complete the following steps: @@ -120,7 +240,7 @@ To transfer your stake, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Will all of my stake transfer? You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. @@ -128,91 +248,45 @@ If you plan on transferring parts of your stake over multiple transactions, you Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. -## How much time do I have to confirm my stake transfer to Arbitrum? +### How much time do I have to confirm my stake transfer to Arbitrum? \*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. -## What if I have open allocations? +### What if I have open allocations? If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. -## How long will it take to transfer my stake? +### How long will it take to transfer my stake? It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. -## Do I have to index on Arbitrum before I transfer my stake? +### Do I have to index on Arbitrum before I transfer my stake? You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. -## Can Delegators move their delegation before I move my indexing stake? 
+### Can Delegators move their delegation before I move my indexing stake? No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -## Delegation - -## How do I transfer my delegation? - -To transfer your delegation, you will need to complete the following steps: - -1. Initiate delegation transfer on Ethereum mainnet - -2. Wait 20 minutes for confirmation - -3. Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? - -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. - -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -## What if I can't find the Indexer I'm delegating to on L2? +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -The L2 transfer tool will automatically detect the Indexer you previously delegated to. - -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? - -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. 
- -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? - -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? - -It is anticipated that all network participation will move to Arbitrum One in the future. - -## How long does it take to complete the transfer of my delegation to L2? - -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? - -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -## Is there any delegation tax? - -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## Vesting Contract Transfer -## How do I transfer my vesting contract? +### How do I transfer my vesting contract? To transfer your vesting, you will need to complete the following steps: @@ -222,7 +296,9 @@ To transfer your vesting, you will need to complete the following steps: 3. Confirm vesting transfer on Arbitrum -## How do I transfer my vesting contract if I am only partially vested? +### How do I transfer my vesting contract if I am only partially vested? + + 1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) @@ -232,7 +308,9 @@ To transfer your vesting, you will need to complete the following steps: 4. Withdraw any remaining ETH from the transfer tool contract -## How do I transfer my vesting contract if I am fully vested? +### How do I transfer my vesting contract if I am fully vested? + + For those that are fully vested, the process is similar: @@ -244,7 +322,7 @@ For those that are fully vested, the process is similar: 4. Withdraw any remaining ETH from the transfer tool contract -## Can I transfer my vesting contract to Arbitrum? +### Can I transfer my vesting contract to Arbitrum? You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. 
The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). @@ -256,27 +334,27 @@ Please note that you will not be able to release/withdraw GRT from the L2 vestin If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. 
@@ -298,18 +376,36 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. 
Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/ro/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ro/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/ro/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ro/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/ro/billing.mdx b/website/pages/ro/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/ro/billing.mdx +++ b/website/pages/ro/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. 
+ +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/ro/chain-integration-overview.mdx b/website/pages/ro/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/ro/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). 
It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and, for non-EVM-based chains, a Firehose integration. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, the Graph Foundation, and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, provided there is a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro from The Graph Foundation, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take?
+ +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ro/cookbook/arweave.mdx b/website/pages/ro/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/ro/cookbook/arweave.mdx +++ b/website/pages/ro/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ro/cookbook/cosmos.mdx b/website/pages/ro/cookbook/cosmos.mdx index ef21e4bc0855..47a590f13b6b 100644 --- a/website/pages/ro/cookbook/cosmos.mdx +++ b/website/pages/ro/cookbook/cosmos.mdx @@ -198,7 +198,7 @@ $ graph build Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: -**Hosted Service** +**Serviciu găzduit** ```bash graph create account/subgraph-name --product hosted-service diff --git a/website/pages/ro/cookbook/grafting.mdx b/website/pages/ro/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/ro/cookbook/grafting.mdx +++ b/website/pages/ro/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/ro/cookbook/near.mdx b/website/pages/ro/cookbook/near.mdx index 879e8e5c15aa..1d3402482301 100644 --- a/website/pages/ro/cookbook/near.mdx +++ b/website/pages/ro/cookbook/near.mdx @@ -193,7 +193,7 @@ $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # u The node configuration will depend on where the subgraph is being deployed. -### Hosted Service +### Serviciu găzduit ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? 
-If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/ro/cookbook/upgrading-a-subgraph.mdx b/website/pages/ro/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/ro/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ro/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ro/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/ro/deploying/hosted-service.mdx b/website/pages/ro/deploying/hosted-service.mdx index 2e6093531110..9403f847d26c 100644 --- a/website/pages/ro/deploying/hosted-service.mdx +++ b/website/pages/ro/deploying/hosted-service.mdx @@ -10,7 +10,7 @@ If you don't have an account on the hosted service, you can sign up with your Gi For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). -## Create a Subgraph +## Creează un Subgraf First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. 
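To make the flow above concrete, here is a minimal, hedged sketch of what the `NewGravatar` handler in that example roughly looks like. It assumes the `Gravatar` entity and the generated `NewGravatar` event class from the example repository (with `id`, `owner`, `displayName` and `imageUrl` parameters); the field names in your own subgraph may differ.

```typescript
import { NewGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

// Create a Gravatar entity whenever the contract emits a NewGravatar event
export function handleNewGravatar(event: NewGravatar): void {
  let gravatar = new Gravatar(event.params.id.toHex())
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}
```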
+### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step is to update the contract address in the `subgraph.yaml` file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service \ + --from-contract <IMPLEMENTATION-ADDRESS> \ + <GITHUB_USER>/<SUBGRAPH_NAME> [<DIRECTORY>] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [here](/developing/supported-networks). diff --git a/website/pages/ro/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/ro/deploying/subgraph-studio.mdx +++ b/website/pages/ro/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/ro/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/ro/developing/creating-a-subgraph.mdx +++ b/website/pages/ro/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: the optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development.
+ - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. 
Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. 
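For the Arweave case described above, the same pattern applies with a transaction ID in place of the CID. The sketch below is illustrative only: it assumes a template named `TokenMetadata` declared with `kind: file/arweave` and a hypothetical event parameter `arweaveTxId` carrying the Arweave transaction ID of the metadata file.

```typescript
import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'
import { Transfer as TransferEvent } from '../generated/Token/Token'

export function handleTransfer(event: TransferEvent): void {
  // Hypothetical event parameter holding the Arweave transaction ID of the metadata file
  let arweaveTxId = event.params.arweaveTxId
  // Spawns a file data source declared with `kind: file/arweave`; Graph Node fetches
  // the file from an Arweave gateway by transaction ID, retrying until it is found,
  // and then runs the template's file handler
  TokenMetadataTemplate.create(arweaveTxId)
}
```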
diff --git a/website/pages/ro/developing/developer-faqs.mdx b/website/pages/ro/developing/developer-faqs.mdx index 0b925a79dce2..5921e8fb8cf5 100644 --- a/website/pages/ro/developing/developer-faqs.mdx +++ b/website/pages/ro/developing/developer-faqs.mdx @@ -1,5 +1,5 @@ --- -title: Developer FAQs +title: FAQs al Developerilor --- ## 1. What is a subgraph? @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/ro/developing/graph-ts/api.mdx b/website/pages/ro/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/ro/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). 
Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
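Putting contract binding and revert handling together, here is a hedged sketch that falls back to a placeholder value when the call reverts. It reuses the generated `ERC20Contract` binding and its read-only `symbol()` function from the examples above; adapt the names to your own generated code.

```typescript
import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
import { log } from '@graphprotocol/graph-ts'

export function handleTransfer(event: TransferEvent): void {
  // Bind the contract to the address that emitted the event
  let contract = ERC20Contract.bind(event.address)

  // try_symbol wraps the call result so a revert does not abort the handler
  let symbolResult = contract.try_symbol()
  let symbol = 'UNKNOWN'
  if (symbolResult.reverted) {
    log.info('symbol() reverted for {}', [event.address.toHexString()])
  } else {
    symbol = symbolResult.value
  }
  // ... use `symbol` when building entities
}
```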
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let dataFromPath = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also create entities + let newItem = new Item(id) + newItem.title = title.toString() + newItem.parent = userData.toString() // Set parent to "parentId" + newItem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ro/developing/graph-ts/common-issues.mdx b/website/pages/ro/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/ro/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/ro/developing/supported-networks.mdx b/website/pages/ro/developing/supported-networks.mdx index 58ce56345f7c..335f07c18968 100644 --- a/website/pages/ro/developing/supported-networks.mdx +++ b/website/pages/ro/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Rețele suportate --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. 
diff --git a/website/pages/ro/docsearch.json b/website/pages/ro/docsearch.json index 8cfff967936d..ff22e803a980 100644 --- a/website/pages/ro/docsearch.json +++ b/website/pages/ro/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Căutare", + "buttonAriaLabel": "Căutare" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", - "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "resetButtonTitle": "Șterge interogarea", + "resetButtonAriaLabel": "Șterge interogarea", + "cancelButtonText": "Anuleaza", + "cancelButtonAriaLabel": "Anuleaza" }, "startScreen": { - "recentSearchesTitle": "Recent", - "noRecentSearchesText": "No recent searches", - "saveRecentSearchButtonTitle": "Save this search", - "removeRecentSearchButtonTitle": "Remove this search from history", + "recentSearchesTitle": "Recente", + "noRecentSearchesText": "Nicio căutare recentă", + "saveRecentSearchButtonTitle": "Salvează această căutare", + "removeRecentSearchButtonTitle": "Șterge această căutare din istoric", "favoriteSearchesTitle": "Favorite", - "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + "removeFavoriteSearchButtonTitle": "Șterge această căutare din favorite" }, "errorScreen": { - "titleText": "Unable to fetch results", - "helpText": "You might want to check your network connection." + "titleText": "Nu se pot prelua rezultate", + "helpText": "Verificați conexiunea la internet." }, "footer": { - "selectText": "to select", - "selectKeyAriaLabel": "Enter key", - "navigateText": "to navigate", - "navigateUpKeyAriaLabel": "Arrow up", - "navigateDownKeyAriaLabel": "Arrow down", - "closeText": "to close", - "closeKeyAriaLabel": "Escape key", - "searchByText": "Search by" + "selectText": "a selecta", + "selectKeyAriaLabel": "Tasta Enter", + "navigateText": "a naviga", + "navigateUpKeyAriaLabel": "Săgeată în sus", + "navigateDownKeyAriaLabel": "Săgeată în jos", + "closeText": "a închide", + "closeKeyAriaLabel": "Tasta Escape", + "searchByText": "Caută după" }, "noResultsScreen": { - "noResultsText": "No results for", - "suggestedQueryText": "Try searching for", - "reportMissingResultsText": "Believe this query should return results?", - "reportMissingResultsLinkText": "Let us know." + "noResultsText": "Niciun rezultat pentru", + "suggestedQueryText": "Încearcă să cauți după", + "reportMissingResultsText": "Crezi că această interogare ar trebui să returneze rezultate?", + "reportMissingResultsLinkText": "Anunță-ne." } } } diff --git a/website/pages/ro/firehose.mdx b/website/pages/ro/firehose.mdx new file mode 100644 index 000000000000..02f0d63c72db --- /dev/null +++ b/website/pages/ro/firehose.mdx @@ -0,0 +1,22 @@ +--- +title: Firehose +--- + +![Firehose Logo](/img/firehose-logo.png) + +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. + +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. 
+ +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ro/global.json b/website/pages/ro/global.json index 6a3eb234bfce..250299eb3773 100644 --- a/website/pages/ro/global.json +++ b/website/pages/ro/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "collapse": "Minimizează", + "expand": "Expandează", + "previous": "Anterior", + "next": "Următorul", + "editPage": "Editează pagina", + "pageSections": "Secțiuni de pagină", + "linkToThisSection": "Link către această secțiune", + "technicalLevelRequired": "Nivel tehnic necesar", + "notFoundTitle": "Hopa! Această pagină a fost pierdută în spațiu...", + "notFoundSubtitle": "Verifică dacă utilizezi adresa corectă sau explorează site-ul nostru făcând clic pe linkul de mai jos.", + "goHome": "Mergi la pagina principală", "video": "Video" } diff --git a/website/pages/ro/glossary.mdx b/website/pages/ro/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/ro/glossary.mdx +++ b/website/pages/ro/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. 
+ - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. 
The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again.
-- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/ro/graphcast.mdx b/website/pages/ro/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/ro/graphcast.mdx +++ b/website/pages/ro/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. 
diff --git a/website/pages/ro/index.json b/website/pages/ro/index.json index 9e28e13d5001..b29126d73694 100644 --- a/website/pages/ro/index.json +++ b/website/pages/ro/index.json @@ -1,77 +1,76 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "title": "Începe", + "intro": "Află mai multe despre The Graph, un protocol descentralizat pentru indexarea și interogarea datelor din blockchains.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Despre The Graph", + "description": "Învață mai multe despre The Graph" }, "quickStart": { "title": "Quick Start", - "description": "Jump in and start with The Graph" + "description": "Alătură-te și implică-te cu The Graph" }, "developerFaqs": { - "title": "Developer FAQs", + "title": "FAQs al Developerilor", "description": "Frequently asked questions" }, "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "title": "Interoghează dintr-o Aplicație", + "description": "Învață sa interoghezi dintr-o aplicație" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Creează un Subgraf", + "description": "Folosește Studio pentru a crea subgrafuri" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", + "title": "Roluri de rețea", + "description": "Află mai multe despre rolurile din rețeaua The Graph.", "roles": { "developer": { "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "description": "Creați un subgraf sau utilizați subgrafuri existente într-o aplicație descentralizată" }, "indexer": { "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "description": "Operează un nod pentru a indexa date și a servi interogări" }, "curator": { "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "description": "Organizați datele prin semnalizarea subgrafurilor" }, "delegator": { "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "description": "Asigurați securitatea rețelei prin delegarea GRT către indexeri" } } }, - "readMore": "Read more", + "readMore": "Află mai multe", "products": { - "title": "Products", + "title": "Produse", "products": { "subgraphStudio": { "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" + "description": "Creează, administrează și publică subgrafuri și chei de API" }, "graphExplorer": { "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" + "description": "Explorează subgrafurile și interacționează cu protocolul" }, "hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "Serviciu găzduit", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - 
"graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Rețele suportate", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ro/mips-faqs.mdx b/website/pages/ro/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/ro/mips-faqs.mdx +++ b/website/pages/ro/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). diff --git a/website/pages/ro/network/benefits.mdx b/website/pages/ro/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/ro/network/benefits.mdx +++ b/website/pages/ro/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/ro/network/indexing.mdx b/website/pages/ro/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/ro/network/indexing.mdx +++ b/website/pages/ro/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. 
GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? 
@@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/ro/new-chain-integration.mdx b/website/pages/ro/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/ro/new-chain-integration.mdx +++ b/website/pages/ro/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built.
Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/ro/operating-graph-node.mdx b/website/pages/ro/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/ro/operating-graph-node.mdx +++ b/website/pages/ro/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). 
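For operators checking the RPC requirements described above, one illustrative way to probe an endpoint is to issue the relevant JSON-RPC calls directly: a node that supports EIP-1898 accepts a block object (for example `{"blockHash": ...}`) as the block parameter of `eth_call`, and `trace_filter` must be exposed for subgraphs with `callHandlers` or call-filtered `blockHandlers`. The endpoint URL and block hash below are placeholders, not values from these docs.

```bash
# Hypothetical endpoint — substitute your node's URL and a real block hash.
RPC=https://rpc.example.com

# EIP-1898: eth_call addressed by block hash instead of block number.
curl -s "$RPC" -X POST -H 'Content-Type: application/json' --data '{
  "jsonrpc": "2.0", "id": 1, "method": "eth_call",
  "params": [
    {"to": "0x0000000000000000000000000000000000000000", "data": "0x"},
    {"blockHash": "0xYOUR_BLOCK_HASH"}
  ]
}'

# trace_filter (OpenEthereum-style trace module), needed for call-based handlers.
curl -s "$RPC" -X POST -H 'Content-Type: application/json' --data '{
  "jsonrpc": "2.0", "id": 2, "method": "trace_filter",
  "params": [{"fromBlock": "0x1", "toBlock": "0x2"}]
}'
```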
### IPFS Nodes diff --git a/website/pages/ro/publishing/publishing-a-subgraph.mdx b/website/pages/ro/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/ro/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ro/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/ro/querying/querying-the-hosted-service.mdx b/website/pages/ro/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/ro/querying/querying-the-hosted-service.mdx +++ b/website/pages/ro/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/ro/querying/querying-with-python.mdx b/website/pages/ro/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/ro/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. 
The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seamlessly save your data as CSVs for further analysis. diff --git a/website/pages/ro/quick-start.mdx b/website/pages/ro/quick-start.mdx new file mode 100644 index 000000000000..54247bed1aad --- /dev/null +++ b/website/pages/ro/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Quick Start +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph.
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction that you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have an API key, you can query from your dapp via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ro/substreams.mdx b/website/pages/ro/substreams.mdx new file mode 100644 index 000000000000..2a06de8ac868 --- /dev/null +++ b/website/pages/ro/substreams.mdx @@ -0,0 +1,44 @@ +--- +title: Substreams +--- + +![Substreams Logo](/img/substreams-logo.png) + +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. + +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example).
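To make steps 2 and 3 above concrete, here is a sketch of the corresponding command-line workflow. The manifest name (`substreams.yaml`), the module name (`map_my_block`), the endpoint, and the block range are illustrative assumptions rather than values taken from this page; see the StreamingFast documentation linked below for the authoritative commands.

```bash
# Step 2 (sketch): compile the Rust module to WebAssembly.
cargo build --target wasm32-unknown-unknown --release

# Step 3 (sketch): stream blocks through the module via a Substreams provider endpoint.
substreams run substreams.yaml map_my_block \
  -e mainnet.eth.streamingfast.io:443 \
  --start-block 17000000 --stop-block +10
```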
+ +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ro/sunrise.mdx b/website/pages/ro/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ro/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. 
+ +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. 
+ +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. 
+ +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ro/tokenomics.mdx b/website/pages/ro/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/ro/tokenomics.mdx +++ b/website/pages/ro/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/ro/translations.ts b/website/pages/ro/translations.ts new file mode 100644 index 000000000000..340f7eeea339 --- /dev/null +++ b/website/pages/ro/translations.ts @@ -0,0 +1,13 @@ +import supportedNetworks from './developing/supported-networks.json' +import docsearch from './docsearch.json' +import global from './global.json' +import index from './index.json' + +const translations = { + global, + index, + docsearch, + supportedNetworks, +} + +export default translations diff --git a/website/pages/ru/about.mdx b/website/pages/ru/about.mdx index 231e466f2ac0..29081fbc64f0 100644 --- a/website/pages/ru/about.mdx +++ b/website/pages/ru/about.mdx @@ -2,46 +2,46 @@ title: О The Graph --- -This page will explain what The Graph is and how you can get started. +На этой странице Вы узнаете что такое протокол The Graph и как приступить к работе. -## What is The Graph? +## Что такое Graph? 
-The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. +The Graph - это децентрализованный протокол для индексации и запроса данных из блокчейна. The Graph позволяет запрашивать данные, которые сложно запросить напрямую. -Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. +Проекты со сложными смарт-контрактами, такие как [Uniswap](https://uniswap.org/), и NFT-проекты, такие как [Bored Ape Yacht Club](https://boredapeyachtclub.com/), хранят свои данные в блокчейне Ethereum в таком виде, что напрямую из блокчейна сложно прочитать что-либо, кроме самых базовых данных. -In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. +В случае с Bored Ape Yacht Club мы можем выполнить базовые операции чтения [смарт-контракта](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code): узнать владельца определенного Ape, получить URI контента Ape по его ID или запросить общее количество токенов, потому что эти операции чтения запрограммированы непосредственно в смарт-контракте. Но более сложные запросы и операции из реального мира — агрегация данных, поиск, связи между объектами и нетривиальные фильтры — попросту невозможны. Например, если мы захотим найти всех Ape, принадлежащих определенному адресу, и отфильтровать их по одной из характеристик, мы не сможем получить эту информацию, взаимодействуя напрямую со смарт-контрактом. -To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. +Чтобы получить искомую информацию, Вам придется обработать каждое когда-либо созданное событие [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746), извлечь метаданные из IPFS, используя Token ID и IPFS-хэш, и затем агрегировать полученные данные. Даже для таких сравнительно простых вопросов децентрализованному приложению (dapp), работающему в браузере, потребовались бы **часы или даже дни**, чтобы получить ответ. -You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. 
However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. +Вы также можете создать свой собственный сервер, обработать там транзакции, сохранить их в базе данных и создать API-конечную точку поверх всего этого, чтобы запрашивать данные. Однако этот вариант требует [больших ресурсов](/network/benefits/), нуждается в обслуживании, представляет собой единую точку отказа и нарушает важные свойства безопасности, необходимые для децентрализации. -**Indexing blockchain data is really, really hard.** +**Индексация данных - это очень трудоемкий процесс.** -Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. +Такие свойства объектов блокчейна как "finality", реорганизация цепочек, или "uncled" блоки усложняют обработку запросов на столько, что запрос не просто сложно выполнить, но и в принципе невозможно получить корректные данные. -The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). +Протокол The Graph создан для решения данной проблемы путем распределенной сети узлов индексации и обработки запросов. Данные могут быть запрошены используя API (индексируемые "subgraphs") и стандарта GraphQL API. На сегодня существует 2 сети, работающие параллельно: централизованная и распределенная с одинаковым функционалом. Обе сети основаны на открытом коде [Graph Node](https://github.com/graphprotocol/graph-node). -## How The Graph Works +## Как работает The Graph -The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. +The Graph протокол узнает что и как индексировать из блокчейна основываясь на инструкциях "subgraph", называемый "subgraph манифест". Subgraph описывает: смарт-контракт, данные которого будут индексироваться, события данного смарт-контракта и как данные будут связаны (структура таблиц базы данных) для сохранения данных в базе данных. -Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. +Как только Вы создаете `subgraph манифест`, Вы с помощью Graph CLI сохраняете манифест в сети IPFS и указываете индексатору начинать индексировать данные использую данный субграф. 
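To make the manifest-to-mapping step above concrete, below is a minimal mapping-handler sketch in the AssemblyScript (TypeScript-subset) style used with `@graphprotocol/graph-ts`. The `ExampleToken` data source, the `Transfer` event class, and the `TransferEntity` type are hypothetical names assumed to be generated by `graph codegen` from an example manifest and schema; they are illustrative only and not part of this PR's content.

```typescript
// Hypothetical mapping sketch. The generated imports below assume an example
// subgraph.yaml with an "ExampleToken" data source and a schema.graphql that
// defines a "TransferEntity" entity; all names are illustrative only.
import { Transfer } from "../generated/ExampleToken/ExampleToken"
import { TransferEntity } from "../generated/schema"

export function handleTransfer(event: Transfer): void {
  // Use transaction hash + log index as a unique entity ID.
  let id = event.transaction.hash.toHex() + "-" + event.logIndex.toString()
  let entity = new TransferEntity(id)

  // Copy the event parameters into the entity that Graph Node will store.
  entity.from = event.params.from
  entity.to = event.params.to
  entity.tokenId = event.params.tokenId

  // Persist the entity; it then becomes queryable over GraphQL.
  entity.save()
}
```

A handler like this is compiled to WASM and executed by Graph Node whenever a matching event appears in a newly scanned block.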
-This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: +Данная диаграмма более детально описывает порядок следования данных после того как subgraph манифест активирован на сети: -![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) +![График, объясняющий потребителям данных, как The Graph использует Graph Node для обслуживания запросов](/img/graph-dataflow.png) -The flow follows these steps: +Данные проходят следующий путь: -1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. -5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. +1. Dapp добавляет данные в Ethereum через транзакцию в смарт-контракте. +2. Смарт-контракт создает одно или несколько событий обрабатывая транзакции. +3. TheGraph нода постоянно сканирует блокчейна и обрабатывает каждый новый блок на предмет поиска данных описанных в Вашем субграфе. +4. The Graph нода затем разбирает события, относящиеся к Вашему субграфу, которые записаны в данном блоке и структурирует их согласно схеме данных описанной в subgraph используя модуль WASM. Затем данные сохраняются в таблицы базы данных Graph Node. +5. Dapp запрашивает у Graph Node данные, проиндексированные с блокчейна, используя [конечную точку GraphQL](https://graphql.org/learn/) ноды. В свою очередь, Graph Node переводит запросы GraphQL в запросы к его базовому хранилищу данных, чтобы получить эти данные, используя возможности индексации этого хранилища. Dapp отображает эти данные в насыщенном пользовательском интерфейсе для конечных пользователей, который они используют для создания новых транзакций в Ethereum. Цикл повторяется. -## Next Steps +## Что далее -In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. +В следующих главах мы более детально разберем принципы построение субграфов, как их запускать, и как создавать запросы для извлечения данных из базы данных Graph Node. -Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. +Перед тем как приступить к разработке своего собственного субграфа, мы рекомендуем изучить Graph Explorer на предмет субграфов, которые уже развернуты на сети. Страничка каждого субграфа содержит "тестовую среду" в которой Вы можете потренироваться в создании GraphQL запросов. 
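As a rough illustration of the querying step described in the flow above, the sketch below sends a GraphQL query to a subgraph endpoint from TypeScript. The endpoint URL and the `transfers` entity are placeholders chosen for this example, not a real subgraph from this PR.

```typescript
// Minimal sketch of querying a subgraph's GraphQL endpoint (Node 18+ / browser fetch).
// The URL and the `transfers` field are placeholders for illustration only.
const SUBGRAPH_URL = "https://example.com/subgraphs/name/example/example-subgraph"

async function fetchRecentTransfers(): Promise<void> {
  const query = `
    {
      transfers(first: 5) {
        id
        from
        to
        tokenId
      }
    }
  `

  const response = await fetch(SUBGRAPH_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  })

  const { data } = await response.json()
  console.log(data.transfers)
}

fetchRecentTransfers().catch(console.error)
```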
diff --git a/website/pages/ru/arbitrum/arbitrum-faq.mdx b/website/pages/ru/arbitrum/arbitrum-faq.mdx index 849d08c92b93..cff78bf53a1a 100644 --- a/website/pages/ru/arbitrum/arbitrum-faq.mdx +++ b/website/pages/ru/arbitrum/arbitrum-faq.mdx @@ -1,78 +1,78 @@ --- -title: Arbitrum FAQ +title: Часто задаваемые вопросы об Arbitrum --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Нажмите [here](#billing-on-arbitrum-faqs), если хотите перейти к часто задаваемым вопросам о выставлении счетов Arbitrum. -## Why is The Graph implementing an L2 Solution? +## Почему The Graph внедряет решение L2? -By scaling The Graph on L2, network participants can expect: +Масштабируя The Graph на L2, участники сети могут рассчитывать на: -- Upwards of 26x savings on gas fees +- Экономию более чем в 26 раз на оплате газа -- Faster transaction speed +- Более высокую скорость транзакций -- Security inherited from Ethereum +- Безопасность, унаследованную от Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Масштабирование смарт-контрактов протокола на L2 позволяет участникам сети взаимодействовать с сетью чаще при снижении затрат на оплату комиссии сети. Например, индексаторы могут открывать и закрывать выделения для индексации большего количества субграфов с большей частотой, разработчики могут с большей легкостью разворачивать и обновлять субграфы, делегаторы могут делегировать GRT с большей частотой, а кураторы могут добавлять или удалять сигнал для большего количества субграфов — действия, которые ранее считались слишком дорогостоящими для частого выполнения из-за высокой комиссии сети. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +Решение о продолжении сотрудничества с Arbitrum было принято в прошлом году по итогам обсуждения сообществом The Graph [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). -## What do I need to do to use The Graph on L2? +## Что мне нужно сделать, чтобы использовать The Graph на L2? -Users bridge their GRT and ETH  using one of the following methods: +Пользователи соединяют с помощью моста свои GRT и ETH  одним из следующих способов: - [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +Чтобы воспользоваться преимуществами использования The Graph на L2, используйте этот выпадающий список для переключения между сетями. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Выпадающий список для переключения на Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? 
+## Что мне нужно делать сейчас как разработчику субграфа, потребителю данных, индексатору, куратору или делегатору? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Немедленных действий не требуется, однако участникам сети рекомендуется начать переход на Arbitrum, чтобы воспользоваться преимуществами L2. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Разработчики ядра работают над созданием инструментов для переноса на L2, что значительно упростит передачу делегирования, курирования и субграфов на Arbitrum. Участники сети могут рассчитывать на то, что инструменты переноса на L2 будут доступны к лету 2023 года. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +По состоянию на 10 апреля 2023 года 5% всех вознаграждений за индексацию чеканятся на Arbitrum. По мере увеличения участия в сети и одобрения Советом вознаграждения за индексацию будут постепенно переходить с Ethereum на Arbitrum и, в конечном итоге, полностью перейдут на Arbitrum. -## If I would like to participate in the network on L2, what should I do? +## Если я хочу принять участие в работе сети на L2, что мне следует сделать? -Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Пожалуйста, помогите [протестировать сеть](https://testnet.thegraph.com/explorer) на L2 и оставьте отзыв о Вашем опыте в [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Существуют ли какие-либо риски, связанные с масштабированием сети до L2? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Все смарт-контракты прошли тщательный [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Все было тщательно протестировано, и разработан план действий на случай непредвиденных обстоятельств, чтобы обеспечить безопасный и непрерывный переход. Подробности можно найти [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Будут ли продолжать работать существующие субграфы в Ethereum? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Да, контракты в сети The Graph Network будут работать параллельно как на Ethereum, так и на Arbitrum, пока в последствии полностью не перейдут на Arbitrum. -## Will GRT have a new smart contract deployed on Arbitrum? 
+## Будет ли у GRT новый смарт-контракт, развернутый на Arbitrum? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Да, у GRT есть дополнительный смарт-контракт [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). Тем не менее, [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) в основной сети Ethereum останется в рабочем состоянии. -## Billing on Arbitrum FAQs +## Часто задаваемые вопросы о выставлении счетов на Arbitrum -## What do I need to do about the GRT in my billing balance? +## Что мне нужно сделать с GRT на моем платежном балансе? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Ничего! Ваш GRT был безопасно перенесен на Arbitrum и используется для оплаты запросов, пока Вы это читаете. -## How do I know my funds have migrated securely to Arbitrum? +## Как я узнаю, что мои средства безопасно перенесены на Arbitrum? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Все балансы GRT для оплаты были успешно перенесены на Arbitrum. Вы можете просмотреть контракт об оплате на Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure? +## Как я узнаю, что мост Arbitrum безопасен? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Мост был тщательно проверен [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) для обеспечения безопасности всех пользователей. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Что мне нужно сделать, если я добавляю новый GRT из своего кошелька в основной сети Ethereum? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Добавление GRT к Вашему балансу для оплаты на Arbitrum можно выполнить в один клик в [Subgraph Studio](https://thegraph.com/studio/). Вы сможете легко перенести свои GRT на Arbitrum и ввести свои API-ключи в одной транзакции. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Посетите страницу [Billing page](https://thegraph.com/docs/en/billing/) для более подробных инструкций по добавлению, выводу или приобретению GRT. diff --git a/website/pages/ru/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ru/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..14d4d9a90c12 100644 --- a/website/pages/ru/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ru/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: FAQ по инструментам переноса L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## Общая информация -## What are L2 Transfer Tools? +### Что такое инструменты переноса L2? 
-The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph удешевил участие контрибьюторов в сети в 26 раз, развернув протокол в Arbitrum One. Инструменты переноса L2 были созданы разработчиками ядра, чтобы упростить переход на L2. -## Can I use the same wallet I use on Ethereum mainnet? +Для каждого участника сети доступен набор инструментов L2 Transfer Tools, позволяющих сделать переход на L2 беспрепятственным, избежать периодов оттаивания или необходимости вручную выводить и переносить GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Эти инструменты потребуют от Вас выполнения определенного набора шагов в зависимости от Вашей роли в The Graph и того, что Вы переносите на L2. -## Subgraph Transfer +### Могу ли я использовать тот же кошелек, что и в основной сети Ethereum? -## How do I transfer my subgraph? +Если Вы используете кошелек [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account), Вы можете использовать тот же адрес. Если Ваш кошелек основной сети Ethereum является контрактным (например, кошелек с мультиподписью), то Вы должны указать адрес [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2), на который будет отправлен Ваш перевод. Пожалуйста, внимательно проверяйте адрес, так как перевод на неправильный адрес может привести к необратимой потере средств. Если Вы хотите использовать кошелек с мультиподписью на L2, убедитесь, что развернули multisig-контракт на Arbitrum One. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Что произойдет, если я не завершу перенос в течение 7 дней? -3. Confirm subgraph transfer on Arbitrum\* +Инструменты переноса L2 используют встроенный механизм Arbitrum для передачи сообщений с L1 на L2. 
Этот механизм называется "retryable ticket", или "повторный тикет", и используется всеми собственными токен-мостами, включая мост Arbitrum GRT. Подробнее о повторном тикете можно прочитать в [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +Когда Вы переносите свои активы (субграф, стейк, делегирование или курирование) на L2, через мост Arbitrum GRT отправляется сообщение, которое создает повторный тикет на L2. Инструмент переноса включает в транзакцию некоторую стоимость ETH, которая используется для 1) оплаты создания тикета и 2) оплаты стоимости газа для выполнения тикета на L2. Однако, поскольку стоимость газа может измениться за время, пока тикет будет готов к исполнению на L2, возможна ситуация, когда попытка автоматического исполнения не удастся. В этом случае мост Arbitrum сохранит повторный тикет в течение 7 дней, и любой желающий может повторить попытку "погасить" тикет (для этого необходимо иметь кошелек с некоторым количеством ETH, подключенный к мосту Arbitrum). -5. Update Query URL (recommended) +Это так называемый шаг "Подтверждение" во всех инструментах переноса - в большинстве случаев он выполняется автоматически, поскольку автоисполнение чаще всего бывает успешным, но важно, чтобы Вы проверили, прошел ли он. Если он не исполнился и в течение 7 дней не будет повторных успешных попыток, мост Arbitrum отменит тикет, и Ваши активы (субграф, стейк, делегирование или курирование) будут потеряны и не смогут быть восстановлены. У разработчиков ядра The Graph есть система мониторинга, позволяющая выявлять такие ситуации и пытаться погасить тикеты, пока не стало слишком поздно, но в конечном итоге ответственность за своевременное завершение переноса лежит на Вас. Если у Вас возникли проблемы с подтверждением переноса, пожалуйста, свяжитесь с нами через [эту форму] \(https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), и разработчики ядра помогут Вам. -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Я начал передачу делегирования/стейка/курирования и не уверен, что она дошла до уровня L2. Как я могу убедиться, что она была передана правильно? -## Where should I initiate my transfer from? +Если Вы не видите в своем профиле баннер с просьбой завершить передачу, то, скорее всего, транзакция благополучно достигла уровня L2 и никаких дополнительных действий не требуется. Если у Вас есть сомнения, Вы можете проверить, показывает ли Explorer Ваше делегирование, ставку или курирование на Arbitrum One. -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Если у Вас есть хеш транзакции L1 (который Вы можете найти, просмотрев недавние транзакции в своем кошельке), Вы также можете подтвердить, был ли погашен «повторный тикет», который перенес сообщение на L2, здесь: https://retryable-dashboard.arbitum.io/ — если автопогашение не удалось, Вы также можете подключить туда свой кошелек и погасить его. 
Будьте уверены, что разработчики ядра также отслеживают застрявшие сообщения и попытаются активировать их до истечения срока их действия. -## How long do I need to wait until my subgraph is transferred +## Перенос субграфа -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +### Как мне перенести свой субграф? -## Will my subgraph still be discoverable after I transfer it to L2? + -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Чтобы перенести Ваш субграф, необходимо выполнить следующие действия: -## Does my subgraph need to be published to transfer it? +1. Инициировать перенос в основной сети Ethereum -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +2. Подождать 20 минут для получения подтверждения -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +3. Подтвердить перенос субграфа в Arbitrum\* -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +4. Завершить публикацию субграфа в Arbitrum -## After I transfer, do I also need to re-publish on Arbitrum? +5. Обновить URL-адрес запроса (рекомендуется) -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +\* Обратите внимание, что Вы должны подтвердить перенос в течение 7 дней, иначе Ваш субграф может быть потерян. В большинстве случаев этот шаг выполнится автоматически, но в случае скачка стоимости комиссии сети в Arbitrum может потребоваться ручное подтверждение. Если в ходе этого процесса возникнут какие-либо проблемы, Вам помогут: обратитесь в службу поддержки по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). -## Will there be a down-time to my endpoint while re-publishing? +### С чего необходимо начать перенос? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. 
+Вы можете начать перенос со страницы [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) или любой другой страницы с информацией о субграфе. Для начала переноса нажмите кнопку "Перенести субграф" на странице сведений о субграфе. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Как долго мне необходимо ждать, пока мой субграф будет перенесен -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Время переноса занимает около 20 минут. Мост Arbitrum работает в фоновом режиме, чтобы автоматически завершить перенос через мост. В некоторых случаях стоимость комиссии сети может повыситься, и Вам потребуется повторно подтвердить транзакцию. -## Will my subgraph's curation move with my subgraph? +### Будет ли мой субграф по-прежнему доступен для поиска после того, как я перенесу его на L2? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Ваш субграф можно будет найти только в той сети, в которой он опубликован. Например, если Ваш субграф находится в сети Arbitrum One, то Вы сможете найти его в Explorer только в сети Arbitrum One, и не сможете найти в сети Ethereum. Обратите внимание, что в переключателе сетей в верхней части страницы выбран Arbitrum One, чтобы убедиться, что Вы находитесь в правильной сети. После переноса субграф L1 будет отображаться как устаревший. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### Должен ли мой субграф быть опубликован, чтобы его можно было перенести? -## Can I move my subgraph back to Ethereum mainnet after I transfer? +Чтобы воспользоваться инструментом переноса субграфа, Ваш субграф должен быть уже опубликован в основной сети Ethereum и иметь какой-либо сигнал курирования, принадлежащий кошельку, которому принадлежит субграф. Если Ваш субграф не опубликован, рекомендуется просто опубликовать его непосредственно на Arbitrum One - связанная с этим стоимость комиссии сети будет значительно ниже. Если Вы хотите перенести опубликованный субграф, но на счете владельца нет сигнала курирования, Вы можете подать сигнал на небольшую сумму (например, 1 GRT) с этого счета; при этом обязательно выберите сигнал "автомиграция". -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### Что произойдет с версией моего субграфа в основной сети Ethereum после его переноса на Arbitrum? -## Why do I need bridged ETH to complete my transfer? +После переноса Вашего субграфа на Arbitrum версия, находящаяся на основной сети Ethereum станет устаревшей. Мы рекомендуем Вам обновить URL-адрес запроса в течение 48 часов. Однако существует отсрочка, в течение которой Ваш URL-адрес на основной сети будет функционировать, чтобы можно было обновить стороннюю поддержку децентрализованных приложений. 
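For dapp developers, the query-URL update recommended above is typically a one-line configuration change. The sketch below is hypothetical: both endpoint URLs and the IDs in them are placeholders, since the real values come from the query URLs shown for your subgraph in Subgraph Studio or Explorer.

```typescript
// Hypothetical config switch after transferring a subgraph to L2.
// Both URLs and IDs are placeholders; use the query URLs displayed for your
// subgraph in Subgraph Studio / Explorer.
const L1_SUBGRAPH_URL = "https://example.com/api/<API_KEY>/subgraphs/id/<L1_SUBGRAPH_ID>"
const L2_SUBGRAPH_URL = "https://example.com/api/<API_KEY>/subgraphs/id/<L2_SUBGRAPH_ID>"

// Point the dapp at the L2 endpoint; the L1 endpoint keeps working only for a
// limited grace period after the transfer.
export const SUBGRAPH_URL: string = L2_SUBGRAPH_URL
```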
-Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### Нужно ли мне после переноса повторно опубликовываться на Arbitrum? -## Curation Signal +По истечении 20-минутного окна переноса необходимо подтвердить перенос транзакцией в пользовательском интерфейсе, чтобы завершить перенос, а инструмент переноса подскажет Вам, как это сделать. Ваша конечная точка L1 будет продолжать поддерживаться во время окна переноса и в течение периода отсрочки после него. Рекомендуется обновить свою конечную точку в удобное для Вас время. -## How do I transfer my curation? +### Будет ли моя конечная точка простаивать при повторной публикации? -To transfer your curation, you will need to complete the following steps: +Это маловероятно, но возможно возникновение кратковременного простоя в зависимости от того, какие индексаторы поддерживают субграф на уровне L1 и продолжают ли они индексировать его до тех пор, пока субграф не будет полностью поддерживаться на уровне L2. -1. Initiate signal transfer on Ethereum mainnet +### Публикация и версионность на L2 такие же, как и в основной сети Ethereum? -2. Specify an L2 Curator address\* +Да. При публикации в Subgraph Studio выберите Arbitrum One в качестве публикуемой сети. В Studio будет доступна последняя конечная точка, которая указывает на последнюю обновленную версию субграфа. -3. Wait 20 minutes for confirmation +### Будет ли курирование моего субграфа перемещено вместе с моим субграфом? -\*If necessary - i.e. you are using a contract address. +Если Вы выбрали автомиграцию сигнала, то 100% Вашего собственного кураторства переместится вместе с Вашим субграфом на Arbitrum One. Весь сигнал курирования субграфа будет преобразован в GRT в момент переноса, а GRT, соответствующий Вашему сигналу курирования, будет использован для обработки сигнала на субграфе L2. -## How will I know if the subgraph I curated has moved to L2? +Другие кураторы могут выбрать, снять ли им свою долю GRT, или также перевести ее в L2 для обработки сигнала на том же субграфе. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Могу ли я переместить свой субграф обратно в основную сеть Ethereum после переноса? -## What if I do not wish to move my curation to L2? +После переноса Ваша версия данного субграфа в основной сети Ethereum станет устаревшей. Если Вы захотите вернуться в основную сеть, Вам нужно будет переразвернуть и снова опубликовать субграф в основной сети. Однако перенос обратно в основную сеть Ethereum настоятельно не рекомендуется, так как вознаграждения за индексирование в конечном итоге будут полностью распределяться на Arbitrum One. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Зачем мне необходимо использовать мост ETH для завершения переноса? -## How do I know my curation successfully transferred? +Стоимость комиссии сети в Arbitrum One осуществляется при использовании моста для ETH (т.е. ETH, который был перемещен через мост в Arbitrum One). Однако по сравнению с основной сетью Ethereum стоимость комиссии сети Arbitrum One значительно ниже. 
-Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +## Делегирование -## Can I transfer my curation on more than one subgraph at a time? +### Как перенести мое делегирование? -There is no bulk transfer option at this time. + -## Indexer Stake +Для переноса делегирования необходимо выполнить следующие действия: -## How do I transfer my stake to Arbitrum? +1. Инициировать перенос делегирования в основной сети Ethereum +2. Подождать 20 минут для получения подтверждения +3. Подтвердить перенос делегирования на Arbitrum -To transfer your stake, you will need to complete the following steps: +\*\*\*\*Для завершения передачи делегирования на Arbitrum необходимо подтвердить транзакцию. Этот шаг должен быть выполнен в течение 7 дней, иначе делегирование может быть потеряно. В большинстве случаев этот этап выполняется автоматически, однако в случае скачка цен на комиссию сети в Arbitrum может потребоваться ручное подтверждение. Если в ходе этого процесса возникнут какие-либо проблемы, Вам будет оказана помощь: обращайтесь в службу поддержки по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). -1. Initiate stake transfer on Ethereum mainnet +### Что произойдет с моими вознаграждениями, если я инициирую перенос с открытым распределением в основной сети Ethereum? -2. Wait 20 minutes for confirmation +Если индексатор, которому Вы делегируете полномочия, все еще работает на L1, то при переносе на Arbitrum Вы потеряете все вознаграждения за делегирование от открытых распределений в основной сети Ethereum. Это означает, что Вы потеряете вознаграждение максимум за последний 28-дневный период. Если Вы планируете осуществить перенос сразу после того, как индексатор закроет распределение, Вы можете быть уверены, что это произойдет с минимальными затратами. Если у Вас есть канал связи с индексатором (индексаторами), обсудите с ними оптимальное время для переноса. -3. Confirm stake transfer on Arbitrum +### Что произойдет, если индексатора, которому я в данный момент делегирую, нет на Arbitrum One? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Инструмент переноса L2 будет запущен только в том случае, если индексатор, которому Вы делегируете, перенёс свой стейк на Arbitrum. -## Will all of my stake transfer? +### Есть ли у делегаторов возможность делегировать другому индексатору? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Если Вы хотите делегировать другому индексатору, Вы можете перевестись к тому же индексатору на Arbitrum, затем отменить делегирование, и дождаться разблокировки. После этого можно выбрать другого активного индексатора для делегирования. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Что делать, если я не могу найти индексатора, которому я делегирую, на L2? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). 
If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +Инструмент переноса L2 автоматически обнаружит индексатора, которому Вы ранее делегировали. -## How much time do I have to confirm my stake transfer to Arbitrum? +### Смогу ли я смешивать или "распределять" свое делегирование новому или нескольким индексаторам вместо предыдущего индексатора? -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +Инструмент переноса L2 всегда будет перемещать делегацию на тот же индексатор, на который она была делегирована ранее. После перемещения на L2 можно отменить делегирование, дождаться разблокировки и принять решение о разделении своей делегации. -## What if I have open allocations? +### Попадаю ли я в период ограничения или могу сразу же вывести средства после использования инструмента передачи делегирования L2? -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +Инструмент переноса позволяет немедленно перейти на L2. Если Вы хотите отменить перенос, Вам придется дождаться периода разблокировки. Однако если индексатор перевел весь свой стейк на L2, то можно сразу же вывести средства на основную сеть Ethereum. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Могут ли мои вознаграждения пострадать, если я не перенесу свою делегацию? -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +Предполагается, что в будущем все сетевые участники перейдут в Arbitrum One. -## How long will it take to transfer my stake? +### Сколько времени требуется для завершения переноса моей делегации на L2? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +Для переноса делегации требуется 20-минутное подтверждение. Обратите внимание, что по истечении 20-минутного периода Вы должны вернуться и завершить 3-й шаг процесса передачи в течение 7 дней. Если этого не сделать, то делегация может быть потеряна. Обратите внимание, что в большинстве случаев инструмент передачи выполнит этот шаг автоматически. В случае неудачной автопопытки Вам придется выполнить его вручную. Если в ходе этого процесса возникнут какие-либо проблемы, не волнуйтесь, мы всегда готовы помочь: обращайтесь к нам по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). -## Do I have to index on Arbitrum before I transfer my stake? +### Могу ли я перевести свою делегацию, если я использую вестинговый контракт GRT / кошелек с блокировкой токенов? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +Да! Процесс немного отличается, поскольку вестинговые контракты не могут пересылать ETH, необходимые для оплаты стоимости комиссии сети на L2, поэтому их необходимо пополнить заранее. 
Если Ваш вестинговый контракт не полностью вестирован, Вам также придется сначала инициализировать аналогичный вестинговый контракт на L2, и Вы сможете перевести стейк только на этот вестинговый контракт на L2. Пользовательский интерфейс Explorer может подсказать Вам этот процесс, если Вы подключились к Explorer с помощью вестингового закрытого кошелька. -## Can Delegators move their delegation before I move my indexing stake? +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Существует ли комиссия за делегацию? -## Delegation +Нет. Полученные токены на L2 делегируются указанному индексатору от имени указанного делегатора без взимания пошлины за делегирование. -## How do I transfer my delegation? +### Будут ли перенесены мои нереализованные реварды при передаче моей делегации? -To transfer your delegation, you will need to complete the following steps: +Да! Единственные вознаграждения (реварды), которые нельзя передать, — это те, которые предназначены для открытых аллокаций, поскольку они не существуют до тех пор, пока индексатор не закроет аллокации (обычно каждые 28 дней). Если Вы делегировали свои средства на протяжении некоторого времени, это, скорее всего, лишь небольшая часть вознаграждений. -1. Initiate delegation transfer on Ethereum mainnet +На уровне смарт-контракта нереализованные вознаграждения уже являются частью баланса Вашего делегирования, поэтому они будут перенесены, когда Вы перенесете делегирование на уровень L2. -2. Wait 20 minutes for confirmation +### Обязателен ли перенос делегирования на уровень L2? Есть ли крайний срок? -3. Confirm delegation transfer on Arbitrum +Перенос делегирования на уровень L2 не является обязательным, но вознаграждение за индексирование увеличивается на уровне L2 в соответствии с графиком, описанным в [GIP-0052] \(https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). В конечном итоге, если Совет продолжит одобрять увеличение, все вознаграждения будут распределены на уровне L2, и индексаторы и делегаторы не будут получать вознаграждения за индексацию на уровне L1. 
-\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Если я передам полномочия индексатору, который уже перевел ставку на уровень L2, перестану ли я получать вознаграждения на уровне L1? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Многие индексаторы переносят ставки постепенно, поэтому индексаторы на уровне L1 по-прежнему будут получать вознаграждения и комиссии на уровне L1, которые затем распределяются между делегаторами. Как только индексатор перенесет всю свою ставку, он прекратит работу на уровне L1, а делегаторы больше не будут получать вознаграждения, если не перейдут на уровень L2. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +В конечном итоге, если Совет продолжит утверждать увеличение вознаграждений за индексацию на уровне L2, все вознаграждения будут распределяться на уровне L2, а индексаторы и делегаторы на уровне L1 перестанут получать вознаграждения за индексацию. -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +### Я не вижу кнопки для передачи моей делегации. Почему? -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +Вероятно, Ваш индексатор еще не использовал инструменты передачи ставки на уровень L2. -## Do Delegators have the option to delegate to another Indexer? +Если Вы можете связаться с индексатором, Вы можете предложить ему использовать Transfer Tools для L2, чтобы делегаторы могли передавать делегирования на адрес своего индексатора на уровне L2. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### Мой индексатор есть на Arbitrum, но я не вижу в своем профиле кнопки передачи делегирования. Почему? -## What if I can't find the Indexer I'm delegating to on L2? +​Возможно, индексатор настроил операции на уровне L2, но не использовал инструменты передачи L2 для передачи ставки. Следовательно, смарт-контракты уровня L1 не будут знать об адресе индексатора на уровне L2. Если Вы можете связаться с индексатором, Вы можете предложить ему использовать инструмент передачи, чтобы делегаторы могли передавать делегирование на адрес своего индексатора на уровне L2. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Могу ли я передать свое делегирование на уровень L2, если я начал процесс отмены делегирования и еще не отозвал его? 
-## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Нет. Если Ваше делегирование находится в стадии разморозки, Вам придется подождать 28 дней и затем отозвать его. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +Токены, которые не делегируются, «заблокированы» и, следовательно, не могут быть переданы на уровень L2. -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +## Сигнал курирования -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Как мне перенести свое курирование? -## Can my rewards be negatively impacted if I do not transfer my delegation? +Для переноса своего курирования необходимо выполнить следующие действия: -It is anticipated that all network participation will move to Arbitrum One in the future. +1. Инициировать перенос сигнала в основной сети Ethereum -## How long does it take to complete the transfer of my delegation to L2? +2. Указать L2 адрес куратора\* -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +3. Подождать 20 минут для получения подтверждения -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +\*При необходимости - т.е. если Вы используете контрактный адрес. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Как я узнаю, что курируемый мною субграф перешел в L2? -## Is there any delegation tax? +При просмотре страницы сведений о субграфе появится баннер, уведомляющий о том, что данный субграф был перенесен. Вы можете следовать подсказке, чтобы перенести свое курирование. Эту информацию можно также найти на странице сведений о субграфе любого перемещенного субграфа. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Что делать, если я не хочу переносить свое курирование в L2? -## Vesting Contract Transfer +Когда субграф устаревает, у Вас есть возможность отозвать свой сигнал. Аналогично, если субграф переместился в L2, Вы можете выбрать, отозвать свой сигнал из основной сети Ethereum или отправить его в L2. -## How do I transfer my vesting contract? 
+### Как я узнаю, что мое курирование успешно перенесено? -To transfer your vesting, you will need to complete the following steps: +Информация о сигнале будет доступна через Explorer примерно через 20 минут после запуска инструмента переноса L2. -1. Initiate the vesting transfer on Ethereum mainnet +### Можно ли перенести курирование на несколько субграфов одновременно? -2. Wait 20 minutes for confirmation +В настоящее время опция массового переноса отсутствует. -3. Confirm vesting transfer on Arbitrum +## Стейк индексатора -## How do I transfer my vesting contract if I am only partially vested? +### Как мне перенести свой стейк в Arbitrum? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +> Отказ от ответственности: если Вы в настоящее время выводите из стейкинга какую-либо часть своего GRT на своем индексаторе, Вы не сможете использовать инструменты передачи L2. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Чтобы передать свою ставку, Вам необходимо выполнить следующие шаги: -4. Withdraw any remaining ETH from the transfer tool contract +1. Инициировать передачу ставок в сети Ethereum -## How do I transfer my vesting contract if I am fully vested? +2. Подождать 20 минут для получения подтверждения -For those that are fully vested, the process is similar: +3. Подтвердить передачу ставки на Arbitrum -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +\*Обратите внимание, что Вы должны подтвердить перевод в течение 7 дней, в противном случае Ваша ставка может быть потеряна. В большинстве случаев этот шаг выполняется автоматически, но может потребоваться подтверждение вручную, если на Arbitrum произойдет скачок цен стоимости комиссии сети. Если во время этого процесса возникнут какие-либо проблемы, Вам помогут ресурсы: обратитесь в службу поддержки по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). -2. Set your L2 address with a call to the transfer tool contract +### Вся ли моя ставка будет отправлена? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +Вы можете выбрать, какую часть своей ставки перевести. Если Вы решите перевести всю свою ставку сразу, вам необходимо сначала закрыть все открытые распределения. -4. Withdraw any remaining ETH from the transfer tool contract +Если Вы планируете передавать части своей доли в рамках нескольких транзакций, вы всегда должны указывать один и тот же адрес бенефициара. -## Can I transfer my vesting contract to Arbitrum? +Примечание. При первом использовании инструмента перевода Вы должны соответствовать требованиям к минимальной ставке на L2. Индексаторы должны отправить минимум 100 000 GRT (при первом вызове этой функции). Если вы оставляете часть ставки на L1, она также должна превышать минимум 100 000 GRT и быть достаточной (вместе с вашими делегированиями) для покрытия Ваших открытых распределений. -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). 
+### Сколько времени мне нужно, чтобы подтвердить перевод своей ставки в Arbitrum? -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +\*\*\* Вам необходимо подтвердить транзакцию, чтобы завершить перевод ставки на Arbitrum. Этот шаг необходимо выполнить в течение 7 дней, иначе ставка может быть потеряна. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +### Что делать, если у меня есть открытые распределения? -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Если Вы не отправляете всю свою ставку, инструмент передачи L2 проверит, что в основной сети Ethereum остается как минимум 100 000 GRT, а вашей оставшейся ставки и делегирования достаточно для покрытия любых открытых распределений. Возможно, Вам придется закрыть открытые распределения, если ваш баланс GRT не покрывает минимальные суммы + открытые распределения. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### При использовании инструментов переноса нужно ли ждать 28 дней для разблокировки в основной сети Ethereum перед переносом? -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +Нет, Вы можете перевести свой стейк на L2 сразу же, нет необходимости снимать стейк и ждать, прежде чем использовать инструмент переноса. 28-дневное ожидание действует только в том случае, если Вы хотите вывести средства обратно на свой кошелек, в основной сети Ethereum или на L2. -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Сколько времени займет перевод моего стейка? -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +Инструмент переноса L2 займет около 20 минут для завершения переноса Вашего стейка. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Должен ли я индексироваться на Arbitrum перед тем, как перенести стейк? -## Can I specify a different beneficiary for my vesting contract on L2? 
+Вы можете эффективно перенести свой стейк до начала настройки индексации, но Вы не сможете претендовать на вознаграждение на L2 до тех пор, пока не распределите субграфы на L2, не проиндексируете их, а также пока не представите POI. -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### Могут ли делегаторы перемещать свои делегации до того, как я перемещу свой индексируемый стейк? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +Нет, для того чтобы делегаторы могли перенести делегированные ими GRT в Arbitrum, индексатор, которому они делегируют, должен быть активен в L2. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### Могу ли я перенести свой стейк, если я использую вестинговый договор GRT / кошелек с блокировкой токенов? -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Да! Процесс немного отличается, поскольку вестинговые контракты не могут пересылать ETH, необходимые для оплаты стоимости комиссии сети на L2, поэтому их необходимо пополнить заранее. Если Ваш вестинговый контракт не полностью вестирован, Вам также придется сначала инициализировать аналогичный вестинговый контракт на L2, и Вы сможете перевести стейк только на этот вестинговый контракт на L2. Пользовательский интерфейс Explorer может подсказать Вам этот процесс, если Вы подключились к Explorer с помощью вестингового закрытого кошелька. -This allows you to transfer your stake or delegation to any L2 address. +### У меня уже есть ставка на L2. Должен ли я всё же отправлять 100 000 GRT, когда впервые использую инструменты перевода? -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +Да. Смарт-контракты уровня L1 не будут знать о Вашей ставке на уровне L2, поэтому они потребуют от Вас перевода не менее 100 000 GRT при первом переводе. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### Могу ли я перевести свою ставку на L2, если нахожусь в процессе анстейкинга GRT? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +Нет. Если какая-то часть Вашей ставки находится в стадии разморозки, Вам придется подождать 28 дней и вывести ее, прежде чем Вы сможете перевести ставку. Токены, которые отправляются в стейкинг, «заблокированы» и предотвращают любые переводы или стейкинг в L2. -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +## Перенос вестингового контракта -2. Revoke protocol access to the vesting contract (needed for the next step) +### Как я могу перенести свой вестинговый контракт? -3. 
Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Для переноса Вашего вестинга необходимо выполнить следующие действия: -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +1. Инициировать перенос вестинга в основной сети Ethereum -5. Wait 20 minutes for confirmation +2. Подождать 20 минут для получения подтверждения -6. Confirm the balance transfer on L2 +3. Подтвердить перенос вестинга на Arbitrum -\*If necessary - i.e. you are using a contract address. +### Как перенести вестинговый контракт, если я только частично вестирован? -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + -## Can I move my vesting contract back to L1? +1. Внесите некоторое количество ETH в контракт инструмента переноса (UI может помочь оценить разумную сумму) -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +2. Отправьте часть заблокированных GRT через контракт инструмента переноса на L2, чтобы инициализировать блокировку вестинга L2. При этом также будет установлен адрес бенефициара L2. -## Why do I need to move my vesting contract to begin with? +3. Отправьте свой стейк/делегацию в L2 через "заблокированные" функции инструмента переноса в стейкинговом контракте L1. -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +4. Выведите оставшиеся ETH из контракта инструмента переноса -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Как перенести вестинговый контракт, если я полностью вестирован? -This is not a possibility. You can move funds back to L1 and withdraw them there. + -## What if I don't want to move my vesting contract to L2? +Для тех, кто полностью вестирован, процесс аналогичен: -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +1. Внесите некоторое количество ETH в контракт инструмента переноса (UI может помочь оценить разумную сумму) + +2. Установите адрес L2, вызвав контракт инструмента переноса данных + +3. Отправьте свои ставку/делегацию в L2 через "заблокированные" функции инструмента переноса в стейкинговом контракте L1. + +4. Выведите оставшиеся ETH из контракта инструмента переноса + +### Могу ли я перенести свой вестинговый контракт на Arbitrum? + +Вы можете перенести баланс GRT своего вестингового контракта на вестинговый контракт в L2. 
Это является необходимым условием для переноса стейка или делегации с вестингового контракта на L2. Вестинговый контракт должен иметь ненулевую сумму GRT (при необходимости можно перевести на него небольшую сумму, например 1 GRT). + +При переносе GRT с вестингового контракта L1 на L2 Вы можете выбрать сумму перевода и делать это столько раз, сколько пожелаете. Вестинговый контракт L2 будет инициализирован при первом переводе GRT. + +Переносы осуществляются с помощью инструмента переноса Transfer Tool, который будет виден в профиле Explorer при подключении к счету вестингового контракта. + +Обратите внимание, что Вы не сможете высвободить/изъять GRT из вестингового контракта L2 до окончания срока вестинга, когда Ваш контракт будет полностью вестирован. Если Вам необходимо вывести GRT до этого момента, Вы можете перевести их обратно на вестинговый контракт L1, используя другой инструмент переноса, доступный для этой цели. + +Если Вы не перенесли на L2 баланс вестингового контракта, а Ваш вестинговый контракт полностью вестирован, то Вам не следует переводить свой вестинговый контракт на L2. Вместо этого Вы можете воспользоваться инструментами переноса, чтобы установить адрес кошелька L2 и напрямую перенести свой стейк или делегацию на этот обычный кошелек на L2. + +### Я использую свой вестинговый контракт для размещения стейка в основной сети. Могу ли я перенести свой стейк в Arbitrum? + +Да, но если Ваш контракт все еще вестинговый, Вы можете перевести стейк только таким образом, чтобы он принадлежал Вашему вестинговому контракту L2. Сначала необходимо инициализировать этот контракт L2, переведя часть баланса GRT с помощью инструмента переноса вестингового контракта в Explorer. Если Ваш контракт полностью вестирован, Вы можете перевести свой стейк на любой адрес в L2, но для этого необходимо предварительно установить его и внести некоторое количество ETH для инструмента переноса L2, чтобы оплатить комиссию сети в L2. + +### Я использую свой вестинговый контракт для делегирования в основной сети. Могу ли я перенести свои делегации в Arbitrum? + +Да, но если Ваш контракт все еще вестинговый, Вы можете перевести делегацию только таким образом, чтобы она принадлежала Вашему вестинговому контракту L2. Сначала необходимо инициализировать этот контракт в L2, переведя часть баланса GRT с помощью инструмента переноса вестингового контракта в Explorer. Если Ваш контракт полностью вестирован, Вы можете перевести свою делегацию на любой адрес в L2, но для этого необходимо предварительно установить его и внести некоторое количество ETH для инструмента переноса L2, чтобы оплатить стоимость газа в L2. + +### Могу ли я указать другого бенефициара в моем вестинговом контракте на L2? + +Да, при первом переводе баланса и настройке вестингового контракта L2 Вы можете указать бенефициара L2. Убедитесь, что этот бенефициар является кошельком, который может совершать операции на Arbitrum One, т.е. это должен быть EOA или кошелёк с мультиподписью, развернутый на Arbitrum One. + +Если Ваш контракт полностью вестирован, то Вам не нужно устанавливать вестинговый контракт на L2; вместо этого Вы установите адрес кошелька L2, который будет являться кошельком, принимающим Ваш стейк или делегацию в Arbitrum. + +### Мой контракт полностью вестирован. Могу ли я перевести свой стейк или делегацию на другой адрес, который не является вестинговым контрактом L2? + +Да. 
Если Вы не перевели на L2 баланс вестингового контракта, и Ваш вестинговый контракт полностью вестирован, то Вам не следует переводить свой вестинговый контракт на L2. Вместо этого Вы можете воспользоваться инструментами переноса, чтобы установить адрес кошелька L2 и напрямую перевести свой стейк или делегацию на этот обычный кошелек на L2. + +Это позволяет переносить свой стейк или делегацию на любой адрес L2. + +### Мой вестинговый контракт все еще находится в состоянии вестинга. Как мне перенести остаток по вестинговому контракту на L2? + +Эти шаги применимы только в том случае, если Ваш контракт все еще находится в состоянии вестинга, или если Вы уже использовали этот процесс, когда Ваш контракт находился в состоянии вестинга. + +Чтобы перенести свой вестинговый контракт в L2, необходимо отправить некоторое количество GRT на L2 с помощью инструментов переноса, что приведет к инициализации вестингового контракта в L2: + +1. Внести некоторое количество ETH на контракт инструмента переноса (эти средства будут использованы для оплаты комиссии сети L2) + +2. Отозвать доступ протокола к вестинговому контракту (необходимо для следующего шага) + +3. Предоставить протоколу доступ к вестинговому контракту (позволит Вашему контракту взаимодействовать с инструментом переноса) + +4. Указать адрес бенефициара L2\* и инициировать перенос баланса в основной сети Ethereum + +5. Подождать 20 минут для получения подтверждения + +6. Подтвердить перенос баланса на L2 + +\*При необходимости - т.е. если Вы используете контрактный адрес. + +\*\*\*\*Для завершения передачи баланса на Arbitrum необходимо подтвердить транзакцию. Этот шаг должен быть выполнен в течение 7 дней, иначе баланс может быть потерян. В большинстве случаев этот этап выполняется автоматически, однако в случае резкого скачка стоимости комиссии сети в Arbitrum может потребоваться ручное подтверждение. Если в ходе этого процесса возникнут какие-либо проблемы, Вам будет оказана помощь: обращайтесь в службу поддержки по адресу support@thegraph.com или в [Discord](https://discord.gg/graphprotocol). + +### В моем вестинговом контракте указано 0 GRT, поэтому я не могу его перенести. Почему это происходит и как это исправить? + +Чтобы инициализировать Ваш вестинговый контракт L2, Вам необходимо перевести ненулевую сумму GRT в L2. Это требуется для моста Arbitrum GRT, который используется инструментами переноса L2. GRT должны поступать из баланса вестингового контракта, поэтому в него не входят GRT, находящиеся в стейкинге или делегированные. + +Если Вы отправили в стейкинг или делегировали все свои GRT из вестингового контракта, Вы можете вручную отправить небольшую сумму, например 1 GRT, на адрес вестингового контракта из любого другого места, например из другого кошелька или с биржи (см. пример ниже). + +### Я использую вестинговый контракт для стейкинга или делегирования, но не вижу кнопки для переноса своего стейка или делегирования на L2. Что мне делать? + +Если вестинг по Вашему контракту еще не завершен, Вам необходимо сначала создать вестинговый контракт L2, который получит Ваш стейк или делегирование на L2. Этот вестинговый контракт не позволит переводить токены на L2 до окончания срока вестинга, но позволит перевести GRT обратно на вестинговый контракт L1, чтобы высвободить их там. + +При подключении к вестинговому контракту в Explorer Вы увидите кнопку для инициализации Вашего вестингового контракта L2. Сначала выполните этот процесс, а затем в своем профиле Вы увидите кнопки для переноса Вашего стейка или делегирования.
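The FAQ entry above about a vesting contract showing 0 GRT suggests manually sending a small amount (for example 1 GRT) to the contract when its entire balance is staked or delegated. As an illustration only, here is a minimal TypeScript sketch of that top-up using ethers.js v6. The RPC URL, private key, and vesting contract address are placeholders, and the GRT token address is assumed to be the Ethereum mainnet token; verify it against an official source before sending anything.

```typescript
import { ethers } from "ethers";

// Placeholders — replace with your own values. Never commit a real private key.
const MAINNET_RPC_URL = "<your-ethereum-mainnet-rpc-url>";
const PRIVATE_KEY = "<private-key-of-the-wallet-holding-spare-GRT>";
const VESTING_CONTRACT = "<your-vesting-contract-address>";
// Assumed GRT token address on Ethereum mainnet — double-check before use.
const GRT_TOKEN = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7";

// Minimal ERC-20 ABI fragment: a plain token transfer is all that is needed here.
const erc20Abi = ["function transfer(address to, uint256 amount) returns (bool)"];

async function topUpVestingContract(): Promise<void> {
  const provider = new ethers.JsonRpcProvider(MAINNET_RPC_URL);
  const wallet = new ethers.Wallet(PRIVATE_KEY, provider);
  const grt = new ethers.Contract(GRT_TOKEN, erc20Abi, wallet);

  // Send 1 GRT (18 decimals) so the vesting contract holds a nonzero balance.
  const tx = await grt.transfer(VESTING_CONTRACT, ethers.parseUnits("1", 18));
  await tx.wait();
  console.log("Sent 1 GRT to the vesting contract:", tx.hash);
}

topUpVestingContract().catch(console.error);
```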
+ +### Если я инициализирую свой контракт о передаче прав уровня L2, переведёт ли это также автоматически моё делегирование на уровень L2? + +Нет, инициализация Вашего контракта о передаче прав L2 является обязательным условием для передачи ставки или делегирования из контракта о передаче прав, но Вам все равно придется передавать их отдельно. + +После инициализации контракта о передаче прав L2 в своем профиле Вы увидите баннер с предложением передать свою долю или делегирование. + +### Могу ли я перевести свой вестинговый контракт обратно в L1? + +Этого делать не нужно, поскольку Ваш вестинговый контракт по-прежнему находится в L1. Когда Вы используете инструменты переноса, Вы просто создаете новый контракт в L2, связанный с Вашим вестинговым контрактом в L1, и можете отправлять GRT туда и обратно между ними. + +### Почему в принципе необходимо переносить вестинговый контракт? + +Вам необходимо установить вестинговый контракт в L2, чтобы этот счет мог владеть Вашим стейком или делегированием на L2. В противном случае у Вас не будет возможности перевести стейк/делегирование на L2, не "выходя" из вестингового контракта. + +### Что произойдет, если я попытаюсь обналичить деньги по своему контракту, который только частично вестирован? Возможно ли это? + +Такой возможности нет. Вы можете перевести средства обратно на L1 и вывести их оттуда. + +### Что делать, если я не хочу переносить свой вестинговый контракт в L2? + +Вы можете продолжать стейкать/делегировать в L1. Со временем Вы можете рассмотреть возможность перехода на L2, чтобы получать там вознаграждения по мере масштабирования протокола на Arbitrum. Обратите внимание, что эти инструменты переноса предназначены для вестинговых контрактов, которые разрешают стейкать и делегировать в протоколе. Если Ваш контракт не позволяет стейкать или делегировать, или является отзывным, то инструмент переноса недоступен. Вы по-прежнему сможете вывести свои GRT из L1, когда они будут доступны. diff --git a/website/pages/ru/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ru/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..f77c7f686ac9 100644 --- a/website/pages/ru/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ru/arbitrum/l2-transfer-tools-guide.mdx @@ -1,165 +1,165 @@ --- -title: L2 Transfer Tools Guide +title: Руководство по инструментам переноса L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +The Graph упростил переход на L2 в Arbitrum One. Для каждого участника протокола существует набор инструментов переноса L2, чтобы сделать переход на L2 бесшовным для всех участников сети. Эти инструменты потребуют от Вас выполнения определенного набора шагов в зависимости от того, что Вы передаете. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Ответы на некоторые частые вопросы об этих инструментах можно найти в [Часто задаваемые вопросы по инструментам переноса L2](/arbitrum/l2-transfer-tools-faq). Часто задаваемые вопросы содержат подробные объяснения того, как использовать инструменты, как они работают и что следует учитывать при их использовании. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). 
The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Как перенести свой субграф в Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Преимущества переноса Ваших субграфов -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +Сообщество и разработчики ядра The Graph [готовились](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) к переходу на Arbitrum в течение прошлого года. Arbitrum, блокчейн уровня 2 или «L2», наследует безопасность от Ethereum, но обеспечивает значительно более низкую комиссию сети. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Когда Вы публикуете или обновляете свой субграф до The Graph Network, Вы взаимодействуете со смарт-контрактами по протоколу, и для этого требуется проплачивать комиссию сети с помощью ETH. После перемещения Ваших субграфов в Arbitrum, любые будущие обновления Вашего субграфа потребуют гораздо более низких сборов за комиссию сети. Более низкие сборы и тот факт, что кривые связи курирования на L2 ровные, также облегчают другим кураторам курирование Вашего субграфа, увеличивая вознаграждение для индексаторов в Вашем субграфе. Эта менее затратная среда также упрощает индексацию и обслуживание Вашего субграфа. В ближайшие месяцы вознаграждения за индексацию в Arbitrum будут увеличиваться, а в основной сети Ethereum уменьшаться, поэтому все больше и больше индексаторов будут переводить свои стейки и настраивать операции на L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Для передачи субграфа в Arbitrum используется мост Arbitrum GRT, который, в свою очередь, использует собственный мост Arbitrum для отправки субграфа на L2. «Перенос» отменяет поддержку субграфа в основной сети и отправляет информацию для повторного создания субграфа на L2 с использованием моста. Он также будет включать сигнал GRT владельца субграфа, который должен быть больше нуля, чтобы мост смог принять передачу. 
-When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Когда Вы решите передать субграф, весь сигнал курирования подграфа будет преобразован в GRT. Это эквивалентно «прекращению поддержки» субграфа в основной сети. GRT, соответствующие Вашему кураторству, будут отправлен на L2 вместе с субграфом, где они будут использоваться для производства сигнала от Вашего имени. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Другие Кураторы могут выбрать, вывести ли свою долю GRT или также перевести ее в L2 для производства сигнала на том же субграфе. Если владелец субграфа не перенесет свой субграф в L2 и вручную аннулирует его с помощью вызова контракта, то Кураторы будут уведомлены и смогут отозвать свое курирование. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Индексаторы больше не будут получать вознаграждение за индексирование субграфа, как только субграф будет перенесён, так как всё курирование конвертируется в GRT. Однако будут индексаторы, которые 1) продолжат обслуживать переданные субграфы в течение 24 часов и 2) немедленно начнут индексировать субграф на L2. Поскольку эти индексаторы уже проиндексировали субграф, не нужно будет ждать синхронизации субграфа, и можно будет запросить субграф L2 практически сразу. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Запросы к субграфу L2 необходимо будет выполнять по другому URL-адресу (на `arbitrum-gateway.thegraph.com`), но URL-адрес L1 будет продолжать работать в течение как минимум 48 часов. После этого шлюз L1 будет перенаправлять запросы на шлюз L2 (на некоторое время), но это увеличит задержку, поэтому рекомендуется как можно скорее переключить все Ваши запросы на новый URL-адрес. -## Choosing your L2 wallet +## Выбор Вашего кошелька L2 -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +Когда Вы опубликовали свой субграф в основной сети, Вы использовали подключенный кошелек для его создания, и этот кошелек обладает NFT, который представляет этот субграф и позволяет Вам публиковать обновления. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. 
+При переносе субграфа в Arbitrum Вы можете выбрать другой кошелек, которому будет принадлежать этот NFT субграфа на L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Если Вы используете «обычный» кошелек, такой как MetaMask (Externally Owned Account или EOA, то есть кошелек, который не является смарт-контрактом), тогда это необязательно, и рекомендуется сохранить тот же адрес владельца, что и в L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +Если Вы используете смарт-контрактный кошелек, такой как кошелёк с мультиподписью (например, Safe), то выбор другого адреса кошелька L2 является обязательным, так как, скорее всего, эта учетная запись существует только в основной сети, и Вы не сможете совершать транзакции в сети Arbitrum с помощью этого кошелька. Если Вы хотите продолжать использовать кошелек смарт-контрактов или мультиподпись, создайте новый кошелек на Arbitrum и используйте его адрес в качестве владельца L2 Вашего субграфа. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Очень важно использовать адрес кошелька, которым Вы управляете и с которого можно совершать транзакции в Arbitrum. В противном случае субграф будет потерян и его невозможно будет восстановить.** -## Preparing for the transfer: bridging some ETH +## Подготовка к переносу: использование моста с некоторым количеством ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Передача субграфа включает в себя отправку транзакции через мост, а затем выполнение другой транзакции в Arbitrum. Первая транзакция использует ETH в основной сети и включает некоторое количество ETH для оплаты комиссии сети при получении сообщения на уровне L2. Однако, если этого количества будет недостаточно, Вам придется повторить транзакцию и оплатить комиссию сети непосредственно на L2 (это «Шаг 3: Подтверждение перевода» ниже). Этот шаг **должен быть выполнен в течение 7 дней после начала переноса**. Более того, вторая транзакция («Шаг 4: Завершение перевода на L2») будет выполнена непосредственно на Arbitrum. В связи с этим Вам понадобится некоторое количество ETH на кошельке Arbitrum. 
Если Вы используете учетную запись с мультиподписью или смарт-контрактом, ETH должен находиться в обычном (EOA) кошельке, который Вы используете для выполнения транзакций, а не в самом кошельке с мультиподписью. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. +Вы можете приобрести ETH на некоторых биржах и вывести его напрямую на Arbitrum, или Вы можете использовать мост Arbitrum для отправки ETH из кошелька основной сети на L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Поскольку плата за комиссию сети в Arbitrum ниже, Вам понадобится лишь небольшая сумма. Рекомендуется начинать с низкого порога (например, 0,01 ETH), чтобы Ваша транзакция была одобрена. -## Finding the subgraph Transfer Tool +## Поиск инструмента переноса субграфа -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Вы можете найти инструмент переноса L2, когда просматриваете страницу своего субграфа в Subgraph Studio: -![transfer tool](/img/L2-transfer-tool1.png) +![инструмент переноса](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +Он также доступен в Explorer, если Вы подключены к кошельку, которому принадлежит субграф, и на странице этого субграфа в Explorer: -![Transferring to L2](/img/transferToL2.png) +![Перенос на L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +При нажатии на кнопку «Перенести на L2» откроется инструмент переноса, в котором Вы сможете начать этот процесс. -## Step 1: Starting the transfer +## Шаг 1: Запуск перевода -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Прежде чем начать перенос, Вы должны решить, какому адресу будет принадлежать субграф на L2 (см. «Выбор кошелька L2» выше), также настоятельно рекомендуется иметь некоторое количество ETH для оплаты комиссии сети за соединение мостом с Arbitrum (см. «Подготовка к переносу: использование моста с некоторым количеством ETH" выше). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Также обратите внимание, что для передачи субграфа требуется наличие ненулевого количества сигнала в субграфе с той же учетной записью, которая владеет субграфом; если Вы не просигнализировали на субграфе, Вам придется добавить немного монет для курирования (достаточно добавить небольшую сумму, например 1 GRT). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. 
Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +После открытия инструмента переноса Вы сможете ввести адрес кошелька L2 в поле «Адрес получающего кошелька» — **убедитесь, что Вы ввели здесь правильный адрес**. После нажатия на «Перевод субграфа», Вам будет предложено выполнить транзакцию в Вашем кошельке (обратите внимание, что некоторое количество ETH включено для оплаты газа L2); это инициирует передачу и отменит Ваш субграф на L1 (см. «Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса» выше для получения более подробной информации о том, что происходит за кулисами). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Если Вы выполните этот шаг, ** убедитесь в том, что Вы завершили шаг 3 менее чем за 7 дней, иначе субграф и Ваш сигнал GRT будут утеряны.** Это связано с тем, как в Arbitrum работает обмен сообщениями L1-L2: сообщения, которые отправляются через мост, представляют собой «билеты с возможностью повторной попытки», которые должны быть выполнены в течение 7 дней, и для первоначального исполнения может потребоваться повторная попытка, если в Arbitrum будут скачки цен комиссии сети. -![Start the trnasfer to L2](/img/startTransferL2.png) +![Запустите перенос на L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Шаг 2: Ожидание перехода субграфа в L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +После того, как Вы начнете передачу, сообщение, которое отправляет Ваш субграф с L1 в L2, должно пройти через мост Arbitrum. Это занимает примерно 20 минут (мост ожидает, пока блок основной сети, содержащий транзакцию, будет «защищен» от потенциальных реорганизаций чейна). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +По истечении этого времени ожидания Arbitrum попытается автоматически выполнить перевод по контрактам L2. -![Wait screen](/img/screenshotOfWaitScreenL2.png) +![Экран ожидания](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## Шаг 3: Подтверждение переноса -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +В большинстве случаев этот шаг будет выполняться автоматически, поскольку комиссии сети L2, включенной в шаг 1, должно быть достаточно для выполнения транзакции, которая получает субграф в контрактах Arbitrum. 
Однако в некоторых случаях возможно, что скачок цен комиссии сети на Arbitrum приведёт к сбою этого автоматического выполнения. В этом случае «тикет», который отправляет ваш субграф на L2, будет находиться в ожидании и потребует повторной попытки в течение 7 дней. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +В этом случае Вам нужно будет подключиться с помощью кошелька L2, в котором есть некоторое количество ETH в сети Arbitrum, переключить сеть Вашего кошелька на Arbitrum и нажать «Подтвердить перевод», чтобы повторить транзакцию. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![Подтвердите перенос на L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## Шаг 4: Завершение переноса в L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +На данный момент Ваш субграф и GRT получены в Arbitrum, но субграф еще не опубликован. Вам нужно будет подключиться с помощью кошелька L2, который Вы выбрали в качестве принимающего кошелька, переключить сеть Вашего кошелька на Arbitrum и нажать «Опубликовать субграф». -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Опубликуйте субграф](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Дождитесь публикации субграфа](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Субграф будет опубликован, и индексаторы, работающие на Arbitrum, смогут начать его обслуживание. Он также будет создавать сигнал курирования, используя GRT, переданные из L1. -## Step 5: Updating the query URL +## Шаг 5. Обновление URL-адреса запроса -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Ваш субграф успешно перенесен в Arbitrum! Для запроса субграфа новый URL будет следующим: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Обратите внимание, что идентификатор субграфа в Arbitrum будет отличаться от того, который был у Вас в основной сети, но Вы всегда можете найти его в Explorer или Studio. Как упоминалось выше (см. «Понимание того, что происходит с сигналом, Вашим субграфом L1 и URL-адресами запроса»), старый URL-адрес L1 будет поддерживаться в течение некоторого времени, но Вы должны переключить свои запросы на новый адрес, как только субграф будет синхронизирован в L2. 
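To make the switch to the new query URL concrete, below is a minimal sketch of a request against the Arbitrum gateway endpoint shown above. It assumes an environment with a global `fetch` (for example Node 18+); the API key and L2 subgraph ID are placeholders, and the query uses the standard `_meta` field that subgraphs expose to report the latest indexed block.

```typescript
// Placeholders — substitute your own API key and the subgraph's new L2 ID.
const API_KEY = "<api-key>";
const L2_SUBGRAPH_ID = "<l2-subgraph-id>";
const GATEWAY_URL = `https://arbitrum-gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${L2_SUBGRAPH_ID}`;

async function queryL2Subgraph(): Promise<void> {
  const response = await fetch(GATEWAY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `_meta` reports indexing status; any GraphQL query for your schema is sent the same way.
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  const { data, errors } = await response.json();
  if (errors) throw new Error(JSON.stringify(errors));
  console.log("Latest block indexed on L2:", data._meta.block.number);
}

queryL2Subgraph().catch(console.error);
```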
-## How to transfer your curation to Arbitrum (L2) +## Как перенести свое курирование в Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Понимание того, что происходит с курированием при переносе субграфов на L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +Когда владелец субграфа передает субграф в Arbitrum, весь сигнал субграфа одновременно конвертируется в GRT. Это относится и к «автоматически мигрировавшему» сигналу, т.е. сигналу, который не относится к конкретной версии или развертыванию субграфа, но который следует за последней версией субграфа. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +Это преобразование сигнала в GRT аналогично тому, что произошло бы, если бы владелец субграфа объявил его устаревшим на L1. Когда субграф устаревает или переносится, в то же время «сжигается» весь сигнал курирования (с использованием кривой связывания курирования), а полученный GRT сохраняется в смарт-контракте GNS (то есть контракте, который обрабатывает обновления субграфа и сигнал автоматической миграции). Таким образом, каждый куратор этого субграфа имеет право на эти GRT пропорционально количеству долей, которыми он владел в этом субграфе. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +Часть этих GRT, принадлежащая владельцу субграфа, отправляется на L2 вместе с субграфом. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +На этом этапе курируемый GRT больше не будет начислять комиссии за запросы, поэтому кураторы могут выбрать: вывести свой GRT или перевести его на тот же субграф на L2, где его можно использовать для создания нового сигнала курирования. Спешить с этим не стоит, так как GRT может храниться неограниченное время, и каждый получит сумму пропорционально своим долям, независимо от того, когда это будет сделано. -## Choosing your L2 wallet +## Выбор Вашего кошелька L2 -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +Если Вы решите перевести курируемый GRT на L2, Вы можете выбрать другой кошелек, который будет владеть сигналом курирования на L2. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1.
+Если Вы используете «обычный» кошелек, такой как Metamask (Externally Owned Account или EOA, то есть кошелек, который не является смарт-контрактом), тогда это необязательно, и рекомендуется сохранить тот же адрес куратора, что и в L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +Если Вы используете смарт-контрактный кошелек, такой как кошелёк с мультиподписью (например, Safe), то выбор другого адреса кошелька L2 является обязательным, так как, скорее всего, эта учетная запись существует только в основной сети, и Вы не сможете совершать транзакции в сети Arbitrum с помощью этого кошелька. Если Вы хотите продолжать использовать кошелек смарт-контрактов или мультиподпись, создайте новый кошелек на Arbitrum и используйте его адрес в качестве адреса кошелька-получателя на L2. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**Очень важно использовать адрес кошелька, которым Вы управляете и с которого можно совершать транзакции в Arbitrum, в противном случае курирование будет потеряно и его невозможно будет восстановить.** -## Sending curation to L2: Step 1 +## Отправка курирования на L2: шаг 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +Прежде чем начать перенос, Вы должны решить, какой адрес будет владеть курированием на L2 (см. "Выбор кошелька L2" выше), также рекомендуется уже иметь на Arbitrum некоторое количество ETH для газа на случай, если Вам потребуется повторно выполнить отправку сообщения на L2. Вы можете купить ETH на любых биржах и вывести его напрямую на Arbitrum, или использовать мост Arbitrum для отправки ETH из кошелька основной сети на L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) — поскольку комиссии за газ на Arbitrum очень низкие, Вам понадобится небольшая сумма, например, 0.01 ETH, этого, вероятно, будет более чем достаточно. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +Если субграф, который Вы курируете, был перенесен на L2, Вы увидите сообщение в Explorer о том, что Вы курируете перенесённый субграф. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +При просмотре страницы субграфа Вы можете выбрать вывод или перенос курирования. Нажатие на кнопку "Перенести сигнал в Arbitrum", откроет инструмент переноса. 
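Before starting the transfer it can help to confirm that the receiving wallet already holds a little ETH on Arbitrum One. The sketch below is one way to do that with ethers.js v6; the public RPC endpoint and the 0.01 ETH figure mirror the guidance above, and the wallet address is a placeholder.

```typescript
import { ethers } from "ethers";

// Arbitrum One public RPC endpoint; a placeholder address for the receiving L2 wallet.
const ARBITRUM_RPC = "https://arb1.arbitrum.io/rpc";
const RECEIVING_WALLET = "<your-L2-wallet-address>";

async function checkL2GasBalance(): Promise<void> {
  const provider = new ethers.JsonRpcProvider(ARBITRUM_RPC);
  const balance = await provider.getBalance(RECEIVING_WALLET);
  const eth = Number(ethers.formatEther(balance));

  console.log(`Balance on Arbitrum One: ${eth} ETH`);
  // ~0.01 ETH is the ballpark suggested above for retrying the L2 message if gas spikes.
  if (eth < 0.01) {
    console.warn("Consider bridging a small amount of ETH via bridge.arbitrum.io before starting.");
  }
}

checkL2GasBalance().catch(console.error);
```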
-![Transfer signal](/img/transferSignalL2TransferTools.png) +![Перенос сигнала](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. +После открытия инструмента переноса Вам может быть предложено добавить немного ETH в Ваш кошелек, если у Вас его нет. Затем Вы сможете ввести адрес кошелька L2 в поле "Адрес кошелька получения" - **убедитесь, что ввели здесь правильный адрес.** При нажатии на Сигнал перевода Вам будет предложено выполнить транзакцию в Вашем кошельке (обратите внимание, что для оплаты газа L2 включена некоторая сумма ETH); это инициирует процесс переноса. -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Если Вы выполните этот шаг, ** убедитесь в том, что Вы завершили шаг 3 менее чем за 7 дней, иначе Ваш сигнал GRT будет утерян.** Это связано с тем, как в Arbitrum работает обмен сообщениями L1-L2: сообщения, отправленные через мост, представляют собой «билеты с возможностью повторной попытки», которые должны быть выполнены в течение 7 дней, и для первоначального исполнения может потребоваться повторная попытка, если в Arbitrum будут скачки цен комиссии сети. -## Sending curation to L2: step 2 +## Отправка курирования на L2: шаг 2 -Starting the transfer: +Запуск переноса: -![Send signal to L2](/img/sendingCurationToL2Step2First.png) +![Отправить сигнал на L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +После того, как Вы начнете перенос, сообщение, которое отправляет Ваше курирование с L1 в L2, должно пройти через мост Arbitrum. Это занимает примерно 20 минут (мост ожидает, пока блок основной сети, содержащий транзакцию, будет «защищен» от потенциальных реорганизаций чейна). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +По истечении этого времени ожидания Arbitrum попытается автоматически выполнить перевод по контрактам L2. -![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) +![Отправка сигнала курирования на L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## Отправка курирования на L2: шаг 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. 
+В большинстве случаев этот шаг выполняется автоматически, поскольку газа L2, включенного в шаг 1, должно быть достаточно для выполнения транзакции, которая получает курирование в контрактах Arbitrum. Однако в некоторых случаях возможно, что скачок цен на газ на Arbitrum приведет к сбою автоматического выполнения. В этом случае «тикет», который отправляет Ваше курирование на L2, будет находиться в состоянии ожидания и потребует повторной попытки в течение 7 дней. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +В этом случае Вам нужно будет подключиться с помощью кошелька L2, в котором есть некоторое количество ETH в сети Arbitrum, переключить сеть Вашего кошелька на Arbitrum и нажать «Подтвердить перевод», чтобы повторить транзакцию. -![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) +![Отправить сигнал на L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## Снятие Вашего курирования на L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +Если Вы предпочитаете не отправлять свой GRT на L2 или хотите передать GRT вручную, Вы можете вывести свой курируемый GRT на L1. На баннере на странице субграфа выберите "Вывести сигнал" и подтвердите транзакцию; GRT будет отправлен на Ваш адрес Куратора. diff --git a/website/pages/ru/billing.mdx b/website/pages/ru/billing.mdx index c3448c134a7e..a69a01687d48 100644 --- a/website/pages/ru/billing.mdx +++ b/website/pages/ru/billing.mdx @@ -37,8 +37,12 @@ Banxa позволяет Вам избежать необходимости об ### Добавление токенов GRT с помощью криптокошелька + + > Этот раздел предполагает, что в Вашем криптокошельке уже есть GRT и Вы находитесь в сети Ethereum. Если же у Вас нет GRT, Вы можете узнать, как их получить [здесь](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Перейдите на [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Нажмите на кнопку «Connect Wallet» в правом верхнем углу страницы. Вы будете перенаправлены на страницу выбора кошелька. Выберите свой кошелек и нажмите «Connect». @@ -71,6 +75,8 @@ Banxa позволяет Вам избежать необходимости об ### Добавление токенов GRT с помощью кошелька с мультиподписью + + 1. Перейдите на [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Кликните «Connect Wallet» в правом верхнем углу страницы. Выберите свой кошелек и нажмите «Connect». Если Вы используете [Gnosis-Safe](https://gnosis-safe.io/), Вы сможете подключить как стандартный кошелёк, так и кошелёк с мультиподписью. Затем подпишите соответствующую транзакцию. За это Вам не придётся платить комиссию. @@ -97,11 +103,11 @@ Banxa позволяет Вам избежать необходимости об ## Получение токенов GRT -В этом разделе показано, как оплачивать комиссии за запросы с помощью токенов GRT. +This section will show you how to get GRT to pay for query fees. ### Coinbase -Далее будет представлено пошаговое руководство по приобретению токена GRT на Coinbase. +This will be a step by step guide for purchasing GRT on Coinbase. 1. Перейдите на [Coinbase](https://www.coinbase.com/) и создайте учетную запись. 2. 
После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. @@ -117,11 +123,11 @@ Banxa позволяет Вам избежать необходимости об - Введите сумму GRT, которую хотите отправить, и адрес кошелька, на который хотите её отправить. - Нажмите «Continue» и подтвердите транзакцию. -Обратите внимание, что при больших суммах покупки Coinbase может потребовать от Вас подождать 7-10 дней, прежде чем переводить полную сумму на криптокошелек. -Вы можете узнать больше о получении GRT на Coinbase [здесь](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -Далее будет представлено пошаговое руководство по приобретению токена GRT на Binance. +This will be a step by step guide for purchasing GRT on Binance. 1. Перейдите на [Binance](https://www.binance.com/en) и создайте учетную запись. 2. После того как Вы создали учетную запись, Вам нужно будет подтвердить свою личность с помощью процесса, известного как KYC (или Know Your Customer). Это стандартная процедура для всех централизованных или кастодиальных криптобирж. @@ -137,11 +143,11 @@ Banxa позволяет Вам избежать необходимости об - Введите сумму GRT, которую хотите отправить, и адрес кошелька из белого списка, на который Вы хотите её отправить. - Нажмите «Continue» и подтвердите транзакцию. -Вы можете узнать больше о получении GRT на Binance [здесь](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -Так вы можете приобрести GRT на Uniswap. +This is how you can purchase GRT on Uniswap. 1. Перейдите на страницу [Uniswap](https://app.uniswap.org/#/swap) и подключите свой кошелек. 2. Выберите токен, который хотите обменять. Выберите ETH. @@ -151,8 +157,52 @@ Banxa позволяет Вам избежать необходимости об 5. Нажмите «Swap». 6. Подтвердите транзакцию в своем кошельке и дождитесь ее обработки. -Вы можете узнать больше о получении GRT на Uniswap [здесь](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Перейдите на [Coinbase](https://www.coinbase.com/) и создайте учетную запись. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Выберите валюту, которую хотите купить. Выберите токен ETH. +5. 
Выберите предпочитаемый способ оплаты. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Нажмите «Continue» и подтвердите транзакцию. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Перейдите на [Binance](https://www.binance.com/en) и создайте учетную запись. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Выберите валюту, которую хотите купить. Выберите токен ETH. +5. Выберите предпочитаемый способ оплаты. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Нажмите «Continue» и подтвердите транзакцию. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Arbitrum Bridge -Контракт на выставление счетов предназначен только для моста GRT из основной сети Ethereum в сеть Arbitrum. Если Вы хотите перенести свой GRT из Arbitrum обратно в сеть Ethereum, Вам необходимо использовать [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/ru/chain-integration-overview.mdx b/website/pages/ru/chain-integration-overview.mdx new file mode 100644 index 000000000000..bb89299d40e9 --- /dev/null +++ b/website/pages/ru/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +Прозрачный и основанный на управлении процесс интеграции был разработан для команд блокчейнов, стремящихся к [интеграции с протоколом The Graph] \(https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). Это трехэтапный процесс, как описано ниже. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). 
+- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. 
+ +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ru/cookbook/arweave.mdx b/website/pages/ru/cookbook/arweave.mdx index 6678d0b81218..190e4b304b00 100644 --- a/website/pages/ru/cookbook/arweave.mdx +++ b/website/pages/ru/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Создание Подграфов на Arweave --- -> Airweave поддерживается в Graph Node и на размещенном сервисе в режиме в бета-версии: пожалуйста, свяжитесь с нами в [Discord](https://discord.gg/graphprotocol) по любым вопросам о создании подграфов на Arweave! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! В этом руководстве вы узнаете, как создавать и развертывать подграфы для индексации блокчейна Arweave. @@ -83,7 +83,7 @@ dataSources: ``` - Подграфы Arweave представляют новый вид источника данных (`arweave`) -- Сеть должна соответствовать сети на хостинге Graph Node. В размещенном сервисе основной сетью Arweave является `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Источники данных Arweave вводят необязательное поле source.owner, которое является открытым ключом кошелька Arweave Источники данных Arweave поддерживают два типа обработчиков: @@ -150,9 +150,9 @@ class Transaction { Написание отображений подграфа Arweave очень похоже на написание отображений подграфа Ethereum. Для получения дополнительной информации нажмите [здесь](/developing/creating-a-subgraph/#writing-mappings). -## Развертывание подграфа Arweave на размещенном сервисе +## Deploying an Arweave Subgraph on the hosted service -Как только ваш сабграф будет создан на панели управления размещенной службы, вы сможете выполнить развертывание с помощью команды CLI `graph deploy`. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ru/cookbook/grafting.mdx b/website/pages/ru/cookbook/grafting.mdx index b2737b7bbd49..f4bb33a9cdd3 100644 --- a/website/pages/ru/cookbook/grafting.mdx +++ b/website/pages/ru/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: Замените контракт и сохраните его истор В этом руководстве мы рассмотрим базовый вариант использования. Мы заменим существующий договор на идентичный договор (с новым адресом, но тем же кодом). Затем привяжем существующий подграф к «базовому» подграфу, который отслеживает новый контракт. 
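For orientation, grafting is declared in the new subgraph's `subgraph.yaml`. A minimal sketch, assuming the standard `graft` manifest section (the deployment ID and block number below are placeholders, not values from this guide):

```yaml
# Sketch only: enable the grafting feature and point at the base subgraph.
features:
  - grafting
graft:
  base: Qm... # deployment ID of the existing ("base") subgraph (placeholder)
  block: 7345624 # block number at which to graft onto the base subgraph (placeholder)
```

The new subgraph reuses the base subgraph's data up to that block and indexes normally from there onward.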
+## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Создание существующего подграфа Создание подграфов — важная часть The Graph, более подробно описанная [здесь](http://localhost:3000/en/cookbook/quick-start/). Чтобы иметь возможность создавать и развертывать существующий подграф, используемый в этом руководстве, предоставляется следующий репозиторий: diff --git a/website/pages/ru/cookbook/near.mdx b/website/pages/ru/cookbook/near.mdx index e59a169cdf14..8f64775949f2 100644 --- a/website/pages/ru/cookbook/near.mdx +++ b/website/pages/ru/cookbook/near.mdx @@ -277,7 +277,7 @@ accounts: ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -Если это общий вопрос о разработке субграфа, то гораздо больше информации содержится в остальной части [документации разработчика](/cookbook/quick-start). В противном случае пожалуйста, присоединяйтесь [The Graph Protocol Discord](https://discord.gg/graphprotocol) и задайте вопрос в канале #near или по электронной почте near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## Ссылки diff --git a/website/pages/ru/cookbook/substreams-powered-subgraphs.mdx b/website/pages/ru/cookbook/substreams-powered-subgraphs.mdx index 6b84c84358c8..b86021c64b79 100644 --- a/website/pages/ru/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/ru/cookbook/substreams-powered-subgraphs.mdx @@ -1,30 +1,30 @@ --- -title: Substreams-powered subgraphs +title: Субграфы, работающие на основе субпотоков (Substreams) --- [Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. 
-## Requirements +## Требования -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): +Для использования этого справочного материала требуются [yarn]\(https://yarnpkg.com /), [зависимости, необходимые для разработки локальных субпотоков](https://substreams.streamingfast.io/developers-guide/installation-requirements) и последняя версия Graph CLI (>=0.52.0): ``` npm install -g @graphprotocol/graph-cli ``` -## Get the cookbook +## Используйте справочный материал -> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). +> В качестве справочного материала здесь используется этот [субграф на основе субпотоков в качестве ссылки](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). ``` graph init --from-example substreams-powered-subgraph ``` -## Defining a Substreams package +## Определение пакета субпотоков A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. -The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): +Рассматриваемый пакет Substreams обнаруживает развертывания контрактов в основной сети Ethereum, отслеживая блок создания и временную метку для всех вновь развернутых контрактов. Для этого в `/proto/example.proto` есть специальный тип `Contract` ([узнайте больше об определении Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: +Основной логикой пакета Substreams является модуль `map_contract` в `lib.rs `, который обрабатывает каждый блок, фильтруя вызовы Create, которые не были отменены, возвращая `Contracts`: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. +> В крейте `substreams_entity_change` также есть специальная функция `Tables` для простой генерации изменений объектов ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). 
Сгенерированные изменения объектов должны быть совместимы с объектами `schema.graphql`, определенными в `subgraph.graphql` соответствующего субграфа. ``` #[substreams::handlers::map] @@ -88,18 +88,18 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: +Чтобы подготовить этот пакет субпотоков к использованию субграфом, необходимо выполнить следующие команды: ```bash -yarn substreams:protogen # generates types in /src/pb -yarn substreams:build # builds the substreams -yarn substreams:package # packages the substreams in a .spkg file +yarn substreams:protogen # генерирует типы в /src/pb +yarn substreams:build # создает субпотоки +yarn substreams:package # упаковывает субпотоки в файл .spkg -# alternatively, yarn substreams:prepare calls all of the above commands +# в качестве альтернативы, yarn substreams:prepare вызывают все вышеперечисленные команды ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> Эти скрипты определены в файле `package.json`, если вы хотите понять базовые команды субпотоков -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +При этом создается файл `spkg` на основе имени и версии пакета из `substreams.yaml`. Файл `spkg` содержит всю информацию, которая необходима Graph Node для приема этого пакета субпотоков. -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. +> Если Вы обновите пакет субпотоков, в зависимости от внесенных Вами изменений, Вам может потребоваться выполнить некоторые или все вышеперечисленные команды, чтобы `spkg` был обновлен. -## Defining a Substreams-powered subgraph +## Определение субграфа, работающего на основе субпотоков -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. +Субграфы, работающие на основе на субпотоков, вводят в употребление новый "вид" источника данных - "субпотоки". Такие субграфы могут иметь только один источник данных. -This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. +В этом источнике данных должна быть указана индексированная сеть, пакет Substreams (`spkg`) в качестве относительного местоположения файла и модуль в этом пакете Substreams, который производит совместимые с субграфами изменения объектов (в данном случае `map_entity_changes` из пакета Substreams, указанного выше). Мэппинг указан, но просто идентифицирует вид мэппинга ("substreams/graph-entities") и apiVersion. -> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). +> В настоящее время Subgraph Studio и The Graph Network поддерживают субграфы, работающие на основе субпотоков, которые индексируют "mainnet" (Mainnet Ethereum). 
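To make the shape of such a data source concrete before the manifest excerpt below, here is a rough sketch, assuming the `source.package` layout used by Substreams-powered subgraphs; the data source name and the `.spkg` file name are illustrative placeholders, and the module name follows the description above:

```yaml
# Sketch only: a Substreams-powered data source in subgraph.yaml.
dataSources:
  - kind: substreams
    name: Contracts # illustrative name
    network: mainnet
    source:
      package:
        moduleName: map_entity_changes # module emitting entity changes, per the description above
        file: substreams-example-v1.0.0.spkg # illustrative package file name
    mapping:
      kind: substreams/graph-entities
      apiVersion: 0.0.5
```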
```yaml specVersion: 0.0.4 @@ -180,7 +180,7 @@ dataSources: apiVersion: 0.0.5 ``` -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. +`subgraph.yaml` также ссылается на файл схемы. Требования к этому файлу не изменились, но указанные объекты должны быть совместимы с изменениями объектов, произведенными модулем Substreams, на который ссылается `subgraph.yaml`. ```graphql type Contract @entity { @@ -194,9 +194,9 @@ type Contract @entity { } ``` -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. +Учитывая вышеизложенное, разработчики субграфов могут использовать Graph CLI для развертывания этого субграфа, работающего на основе субпотоков. -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). +> Субграфы, работающие на основе субпотоков, индексирующие основную сеть Ethereum, могут быть развернуты в [Subgraph Studio](https://thegraph.com/studio/). ```bash yarn install # install graph-cli @@ -204,11 +204,11 @@ yarn subgraph:build # build the subgraph yarn subgraph:deploy # deploy the subgraph ``` -That's it! You have built and deployed a Substreams-powered subgraph. +Вот и все! Вы создали и развернули субграф, работающий на основе субпотоков. -## Serving Substreams-powered subgraphs +## Обслуживание субграфов, работающих на основе субпотоков -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. These providers can be configured via a `config.toml` file: +Для обслуживания субграфов, работающих на основе субпотоков, Graph Node должен быть сконфигурирован с соответствующей сетью провайдера субпотоков, а также с Firehose или RPC для отслеживания головного блока чейна. Эти провайдеры могут быть настроены с помощью файла `config.toml`: ```toml [chains.mainnet] diff --git a/website/pages/ru/cookbook/upgrading-a-subgraph.mdx b/website/pages/ru/cookbook/upgrading-a-subgraph.mdx index 77db634998e8..71d71a83b15c 100644 --- a/website/pages/ru/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ru/cookbook/upgrading-a-subgraph.mdx @@ -8,10 +8,10 @@ This is a guide on how to upgrade your subgraph from the hosted service to The G The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. -### Prerequisites +### Предварительные требования - Вы уже разместили подграф на базе hosted service. -- Подграф индексирует сеть, которая доступна (или находится в бета-версии) в сети The Graph. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. 
diff --git a/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx index 132affca4fb1..3f28de521fb1 100644 --- a/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ru/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Развертывание подграфа в Subgraph Studio --- -> Убедитесь, что сеть, из которой ваш подграф индексирует данные, [поддерживается](/developing/supported-chains) в децентрализованной сети. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). Вот шаги для развертывания вашего подграфа в Subgraph Studio: diff --git a/website/pages/ru/deploying/hosted-service.mdx b/website/pages/ru/deploying/hosted-service.mdx index 823291146647..c9a8397ade4a 100644 --- a/website/pages/ru/deploying/hosted-service.mdx +++ b/website/pages/ru/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / Пример подграфа основан на контракте Gravity от Dani Grant, который управляет пользовательскими аватарами и выдает события `NewGravatar` или `UpdateGravatar` всякий раз, когда создаются или обновляются аватары. Подграф обрабатывает эти события, записывая объекты `Gravatar` в хранилище нод Graph и обеспечивая их обновление в соответствии с событиями. Перейдите к [манифесту подграфа](/developing/creating-a-subgraph#the-subgraph-manifest), чтобы лучше понять, на какие события из ваших смарт-контрактов следует обратить внимание, маппинг и многое другое. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service Вы можете найти список поддерживаемых сетей [здесь](/developing/supported-networks). diff --git a/website/pages/ru/deploying/subgraph-studio-faqs.mdx b/website/pages/ru/deploying/subgraph-studio-faqs.mdx index 2952174443e1..a325dbd428ee 100644 --- a/website/pages/ru/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/ru/deploying/subgraph-studio-faqs.mdx @@ -20,7 +20,7 @@ title: Часто задаваемые вопросы о Subgraph Studio ## 5. Могу ли я передать свой субграф другому владельцу? -Да, субграфы, которые были опубликованы в основной сети, могут быть перенесены в новый кошелек или мультиподпись. Вы можете сделать это, щелкнув три точки рядом с кнопкой «Опубликовать» ('Publish') на странице сведений о субграфе и выбрав «Передать право собственности» ('Transfer ownership'). +Да, субграфы, которые были опубликованы в основной сети, могут быть перенесены в новый кошелек или на кошелек с мультиподписью. Вы можете сделать это, щелкнув три точки рядом с кнопкой «Опубликовать» ('Publish') на странице сведений о субграфе и выбрав «Передать право собственности» ('Transfer ownership'). Обратите внимание, что Вы больше не сможете просматривать или редактировать субграф в Studio после его переноса. 
diff --git a/website/pages/ru/deploying/subgraph-studio.mdx b/website/pages/ru/deploying/subgraph-studio.mdx index 000cb604180e..92ba439dd2e4 100644 --- a/website/pages/ru/deploying/subgraph-studio.mdx +++ b/website/pages/ru/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Subgraph Studio - это ваше место для создания подгр 1. Войдите в систему с помощью своего кошелька - вы можете сделать это через MetaMask или Wallet Connect 1. Как только вы войдете в систему, вы увидите свой уникальный ключ на домашней странице вашей учетной записи. Это позволит вам либо публиковать свои подграфы, либо управлять ключами API + биллингом. У вас будет уникальный ключ, который можно восстановить, если вы считаете, что он был скомпрометирован. -## Как создать свой подграф в Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -Самая лучшая часть! Когда вы впервые создадите подграф, вам будет предложено заполнить: - -- Имя вашего подграфа -- Изображение профиля -- Описание -- Категории (т.е. `DeFi`, `NFTs`, `Governance`) -- Веб-сайт + ## Совместимость подграфов с сетью Graph diff --git a/website/pages/ru/developing/creating-a-subgraph.mdx b/website/pages/ru/developing/creating-a-subgraph.mdx index e985ac4e83ff..cd93eaa17e95 100644 --- a/website/pages/ru/developing/creating-a-subgraph.mdx +++ b/website/pages/ru/developing/creating-a-subgraph.mdx @@ -1,28 +1,28 @@ --- -title: Creating a Subgraph +title: Создание субграфа --- -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. +Субграф извлекает данные из блокчейна, обрабатывает их и сохраняет таким образом, чтобы их можно было легко запросить с помощью GraphQL. -![Defining a Subgraph](/img/defining-a-subgraph.png) +![Определение субграфа](/img/defining-a-subgraph.png) -The subgraph definition consists of a few files: +Определение субграфа состоит из нескольких файлов: -- `subgraph.yaml`: a YAML file containing the subgraph manifest +- `subgraph.yaml`: файл YAML, содержащий манифест субграфа -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL +- `schema.graphql`: схема GraphQL, которая определяет, какие данные хранятся для Вашего субграфа и как их запрашивать через GraphQL -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) +- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript), который преобразует данные события в объекты, определенные в Вашей схеме (например, `mapping.ts` в этом руководстве) -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). +> Чтобы использовать свой субграф в децентрализованной сети The Graph, Вам необходимо [создать API-ключ](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). Рекомендуется [добавить сигнал](/network/curating/#how-to-signal) в свой субграф как минимум с [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). 
-Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. +Прежде чем Вы перейдете к подробному описанию содержимого файла манифеста, Вам необходимо установить[Graph CLI](https://github.com/graphprotocol/graph-cli), который понадобится для создания и развертывания субграфа. -## Install the Graph CLI +## Установка Graph CLI -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. +The Graph CLI написан на JavaScript, и для его использования необходимо установить либо `yarn`, либо `npm`; в дальнейшем предполагается, что у Вас есть yarn. -Once you have `yarn`, install the Graph CLI by running +Получив `yarn`, установите Graph CLI, запустив следующие команды **Установка с помощью yarn:** @@ -36,11 +36,11 @@ yarn global add @graphprotocol/graph-cli npm install -g @graphprotocol/graph-cli ``` -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. +После установки команду `graph init` можно использовать для настройки нового проекта субграфа либо из существующего контракта, либо из примера субграфа. Эту команду можно использовать для создания субграфа в Subgraph Studio, передав в `graph init --product subgraph-studio`. Если у Вас уже есть смарт-контракт, развернутый в выбранной Вами сети, загрузка нового субграфа из этого контракта может быть хорошим способом начать работу. -## From An Existing Contract +## Из существующего контракта -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. +Следующая команда создает субграф, который индексирует все события существующего контракта. Он пытается получить ABI контракта из Etherscan и возвращается к запросу пути к локальному файлу. Если какой-либо из необязательных аргументов отсутствует, он проведет Вас через интерактивную форму. ```sh graph init \ @@ -51,49 +51,49 @@ graph init \ [] ``` -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. +`` - это идентификатор Вашего субграфа в Subgraph Studio, его можно найти на странице сведений о субграфе. -## From An Example Subgraph +## Из примера субграфа -Второй режим, который поддерживает `graph init`, - это создание нового проекта из примера подграфа. Следующая команда делает это: +Второй режим, который поддерживает `graph init`, - это создание нового проекта из примера субграфа. Это делает следующая команда: ```sh graph init --studio ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
The following sections will go over the files that make up the subgraph manifest for this example. +Пример субграфа основан на контракте Gravity Дэни Гранта, который управляет пользовательскими аватарами и генерирует события `NewGravatar` или `UpdateGravatar` при создании или обновлении аватаров. Субграф обрабатывает эти события, записывая объекты `Gravatar` в хранилище Graph Node и обеспечивая их обновление в соответствии с событиями. В следующих разделах будут рассмотрены файлы, составляющие манифест субграфа для этого примера. -## Add New dataSources To An Existing Subgraph +## Добавление новых источников данных к существующему субграфу -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. +Начиная с `v0.31.0` `graph-cli` поддерживает добавление новых источников данных к существующему субграфу с помощью команды `graph add`. ```sh graph add
    [] -Options: +Опции: - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") + --abi Путь к контракту ABI (default: download from Etherscan) + --contract-name Имя контракта (default: Contract) + --merge-entities Следует ли объединять объекты с одинаковым именем (default: false) + --network-file Путь к файлу конфигурации сети (default: "./networks.json") ``` -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. +Команда `add` извлечёт ABI из Etherscan (если путь к ABI не указан с помощью опции `--abi`) и создаст новый `dataSource` таким же образом, как `graph init` создает `dataSource` `--from-contract`, соответствующим образом обновляя схему и мэппинги. -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: +Параметр `--merge-entities` определяет, как разработчик хотел бы обрабатывать конфликты имен `entity` и `event`: -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. +- Если `true`: новый `dataSource` должен использовать существующие ` eventHandlers` & `entities`. +- Если `false`: следует создать новую сущность и обработчик событий с помощью `${dataSourceName}{EventName}`. -The contract `address` will be written to the `networks.json` for the relevant network. +Контракт `address` будет записан в `networks.json` для соответствующей сети. -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. +> **Примечание:** При использовании интерактивного интерфейса командной строки после успешного запуска `graph init` Вам будет предложено добавить новый `dataSource`. -## The Subgraph Manifest +## Манифест субграфа -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +Манифест субграфа `subgraph.yaml` определяет смарт-контракты, которые индексирует Ваш субграф, на какие события из этих контрактов следует обращать внимание и как сопоставлять данные событий с объектами, которые хранит и позволяет запрашивать Graph Node. Полную спецификацию манифестов субграфов можно найти [здесь](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
-For the example subgraph, `subgraph.yaml` is: +Для примера субграфа `subgraph.yaml`: ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -134,59 +142,63 @@ dataSources: file: ./src/mapping.ts ``` -The important entries to update for the manifest are: +Важными элементами манифеста, которые необходимо обновить, являются: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: понятное описание того, что представляет собой субграф. Это описание отображается в Graph Explorer при развертывании субграфа в хостинговом сервисе. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. +- `repository`: URL-адрес репозитория, в котором можно найти манифест субграфа. Это также отображается в The Graph Explorer. -- `features`: a list of all used [feature](#experimental-features) names. +- `features`: список всех используемых имен [функций](#experimental-features). -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: адрес смарт-контракта, источники субграфа и ABI смарт-контракта для использования. Адрес необязателен; отсутствие этого параметра позволяет индексировать совпадающие события из всех контрактов. -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.startBlock`: необязательный номер блока, с которого источник данных начинает индексацию. В большинстве случаев мы предлагаем использовать блок, в котором был создан контракт. -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.context`: пары «ключ-значение», которые можно использовать внутри мэппингов субграфов. Поддерживает различные типы данных, такие как `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` и `BigInt`. Для каждой переменной нужно указать ее `type` и `data`. Эти контекстные переменные затем становятся доступными в файлах мэппинга, предлагая больше настраиваемых параметров для разработки субграфов. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.entities`: объекты, которые источник данных записывает в хранилище. Схема для каждого объекта определена в файле schema.graphql. 
-- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.abis`: один или несколько именованных файлов ABI для исходного контракта, а также любых других смарт-контрактов, с которыми Вы взаимодействуете из мэппингов. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `DataSources.mapping.EventHandlers`: перечисляет события смарт—контракта, на которые реагирует этот субграф, и обработчики в мэппинге —./src/mapping.ts в примере - которые преобразуют эти события в объекты в хранилище. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +- `DataSources.mapping.callHandlers`: перечисляет функции смарт-контракта, на которые реагирует этот субграф, и обработчики в мэппинге, которые преобразуют входные и выходные данные для вызовов функций в объекты в хранилище. -The triggers for a data source within a block are ordered using the following process: +- `dataSources.mapping.blockHandlers`: перечисляет блоки, на которые реагирует этот субграф, и обработчики в мэппинг, которые запускаются при добавлении блока в чейн. Без фильтра обработчик блока будет запускаться для каждого блока. Дополнительный фильтр вызовов может быть предоставлен путем добавления в обработчик поля `filter` с `kind: call `. Обработчик будет запущен только в том случае, если блок содержит хотя бы один вызов контракта источника данных. -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. +Один субграф может индексировать данные из нескольких смарт-контрактов. Добавьте в массив `dataSources` запись для каждого контракта, данные которого нужно проиндексировать. -These ordering rules are subject to change. +Триггеры для источника данных внутри блока упорядочиваются с помощью следующего процесса: -### Getting The ABIs +1. Триггеры событий и вызовов сначала упорядочиваются по индексу транзакции внутри блока. +2. Триггеры событий и вызовов в рамках одной транзакции упорядочиваются по следующему принципу: сначала триггеры событий, затем триггеры вызовов, причем для каждого типа соблюдается тот порядок, в котором они определены в манифесте. +3. Триггеры блоков запускаются после триггеров событий и вызовов в том порядке, в котором они определены в манифесте. -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: +Эти правила оформления заказа могут быть изменены. -- If you are building your own project, you will likely have access to your most current ABIs. 
-- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. +### Получение ABIs -## The GraphQL Schema +Файл(ы) ABI должен(ы) соответствовать Вашему контракту (контрактам). Существует несколько способов получения файлов ABI: -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. +- Если Вы создаете свой собственный проект, у Вас, скорее всего, будет доступ к наиболее актуальным ABIS. +- Если Вы создаете субграф для публичного проекта, Вы можете загрузить этот проект на свой компьютер и получить ABI, используя [`truffle compile`](https://truffleframework.com/docs/truffle/overview) или используя solc для компиляции. +- Вы также можете найти ABI на [Etherscan](https://etherscan.io/), но это не всегда надежно, так как загруженный туда ABI может быть устаревшим. Убедитесь, что у Вас есть нужный ABI, в противном случае запуск Вашего субграфа будет неудачным. -## Defining Entities +## Схема GraphQL -Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. +Схема для Вашего субграфа находится в файле `schema.graphql`. Схемы GraphQL определяются с использованием языка определения интерфейса GraphQL. Если Вы никогда ранее не писали схему GraphQL, рекомендуем ознакомиться с этим руководством по системе типов GraphQL. Справочную документацию по схемам GraphQL можно найти в разделе [GraphQL API](/querying/graphql-api). -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. +## Определение Объектов -### Good Example +Прежде чем определять объекты, важно сделать шаг назад и подумать о том, как структурированы и связаны Ваши данные. 
Все запросы будут выполняться к модели данных, определенной в схеме субграфа, и объектам, проиндексированным этим субграфом. Для этого рекомендуется определить схему субграфа таким образом, чтобы она соответствовала потребностям Вашего децентрализованного приложения. Может быть полезно представить объекты как "объекты, содержащие данные", а не как события или функции. -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. +С помощью The Graph Вы просто определяете типы объектов в `schema.graphql`, и узел The Graph будет генерировать поля верхнего уровня для запроса отдельных экземпляров и коллекций этого типа объектов. Каждый тип, который должен быть объектом, должен быть аннотирован директивой `@entity`. По умолчанию объекты изменяемы, что означает, что мэппинги могут загружать существующие объекты, изменять их и сохранять их новую версию. Измененяемость имеет свою цену, и для типов объектов, для которых известно, что они никогда не будут изменены, например, потому что они просто содержат данные, идентично извлеченные из чейна, рекомендуется помечать их как неизменяемые с помощью `@entity(immutable: true)`. Мэппинги могут вносить изменения в неизменяемые объекты до тех пор, пока эти изменения происходят в том же блоке, в котором был создан объект. Неизменяемые объекты гораздо быстрее записываются и запрашиваются, и поэтому их следует использовать каждый раз, когда это возможно. + +### Удачный пример + +Приведенный ниже объект `Gravatar` структурирован вокруг объекта Gravatar и является хорошим примером того, как объект может быть определен. ```graphql type Gravatar @entity(immutable: true) { @@ -198,9 +210,9 @@ type Gravatar @entity(immutable: true) { } ``` -### Bad Example +### Неудачный пример -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. +Приведенные ниже примеры объектов `GravatarAccepted` и `GravatarDeclined` основаны на событиях. Не рекомендуется сопоставлять события или вызовы функций с объектами 1:1. ```graphql type GravatarAccepted @entity { @@ -218,36 +230,37 @@ type GravatarDeclined @entity { } ``` -### Optional and Required Fields +### Дополнительные и обязательные поля -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: +Поля объекта могут быть определены как обязательные или необязательные. Обязательные поля обозначены символом `!` в схеме. Если в мэппинге не задано обязательное поле, то при запросе к нему будет выдана эта ошибка: ``` Null value resolved for non-null field 'name' ``` -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. +У каждого объекта должно быть поле `id`, у которого должен быть тип `Bytes!` или `String!`. Обычно рекомендуется использовать `Bytes!`, если только `id` не содержит удобочитаемый текст, поскольку объекты с `!` будут записываться и запрашиваться быстрее, чем объекты с `String!``id`. 
Поле `id` служит первичным ключом и должно быть уникальным среди всех объектов одного типа. В силу исторических причин тип `ID!` также принимается и является синонимом `String!`. -For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. +Для некоторых типов объектов `id` создается из идентификаторов двух других объектов; этому способствует `concat`, например, для формирования id `let id = left.id.concat(right.id)` из идентификаторов `left` и `right`. Аналогично этому, чтобы создать идентификатор из идентификатора существующего объекта и счетчика `count`, можно использовать `let id = left.id.concatI32(count)`. Объединение гарантированно приведёт к созданию уникальных идентификаторов, если длина `left` одинакова для всех таких объектов, например, потому что `left.id` является `Address`. -### Built-In Scalar Types +### Встроенные скалярные типы -#### GraphQL Supported Scalars +#### Поддерживаемые GraphQL скаляры -We support the following scalars in our GraphQL API: +Мы поддерживаем следующие скаляры в нашем GraphQL API: -| Type | Описание | +| Тип | Описание | | --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Bytes` | Массив байтов, представленный в виде шестнадцатеричной строки. Обычно используется для хэшей и адресов Ethereum. | +| `String` | Скаляр для значений `string`. Нулевые символы не поддерживаются и автоматически удаляются. | +| `Boolean` | Скаляр для значений `boolean`. | +| `Int` | Спецификация GraphQL определяет `Int` как имеющий размер 32 байта. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Большие целые числа. Используется для типов Ethereum `uint32`, `int64`, `uint64`, ..., `uint256`. Примечание: Все, что указано ниже `uint32`, например `int32`, `uint24` или `int8`, представлено как `i32`. | +| `BigDecimal` | `BigDecimal` Десятичные дроби высокой точности, представленные в виде значащего числа и экспоненты. Диапазон значений экспонент находится в диапазоне от -6143 до +6144. Округление до 34 значащих цифр. | -#### Enums +#### Перечисления -You can also create enums within a schema. Enums have the following syntax: +Вы также можете создавать перечисления внутри схемы. 
Перечисления имеют следующий синтаксис: ```graphql enum TokenStatus { @@ -257,19 +270,19 @@ enum TokenStatus { } ``` -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: +Как только перечисление определено в схеме, вы можете использовать строковое представление значения перечисления, чтобы задать поле перечисления для сущности. Например, вы можете установить для `tokenStatus` значение `SecondOwner`, сначала определив свою сущность, а затем установив в поле `entity.tokenStatus = "SecondOwner"`. Приведенный ниже пример демонстрирует, как будет выглядеть сущность токена с полем enum: -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). +Более подробную информацию о написании перечислений можно найти в [Документации по GraphQL](https://graphql.org/learn/schema/). -#### Entity Relationships +#### Связи сущностей -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. +Сущность может иметь связь с одной или несколькими другими сущностям в вашей схеме. Эти связи могут быть использованы в ваших запросах. Связи в The Graph являются однонаправленными. Можно смоделировать двунаправленные связи, определив однонаправленную связь на любом "конце" связи. -Relationships are defined on entities just like any other field except that the type specified is that of another entity. +Связи определяются для сущностей точно так же, как и для любого другого поля, за исключением того, что в качестве типа указывается тип другой сущности. -#### One-To-One Relationships +#### Связи "Один к одному" -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: +Определите тип сущности `Transaction` с необязательной взаимосвязью "один к одному" с типом сущности `transactionReceipt`: ```graphql type Transaction @entity(immutable: true) { @@ -283,9 +296,9 @@ type TransactionReceipt @entity(immutable: true) { } ``` -#### One-To-Many Relationships +#### Связи "Один ко многим" -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: +Определите тип сущности `TokenBalance` с обязательной взаимосвязью "один ко многим" с типом сущности токена: ```graphql type Token @entity(immutable: true) { @@ -299,15 +312,15 @@ type TokenBalance @entity { } ``` -#### Reverse Lookups +#### Обратный поиск -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. +Обратный поиск может быть определен для сущности с помощью поля `@derivedFrom`. 
Это создает виртуальное поле в сущности, которое может быть запрошено, но не может быть задано вручную через API сопоставлений. Скорее, оно является производным от связи, определенной для другой сущности. Для таких связей редко имеет смысл сохранять обе стороны связи, и и производительность индексирования и запросов будет выше, когда сохраняется только одна сторона, а другая является производной. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +Для связей "один ко многим" связь всегда должна храниться на стороне "один", а сторона "много" всегда должна быть производной. Такое хранение связи, вместо хранения массива сущностей на стороне "многие", приведет к значительно более высокой производительности как при индексации, так и при запросах к субграфам. В общем, следует избегать хранения массивов сущностей настолько, насколько это практически возможно. -#### Example +#### Пример -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: +Мы можем сделать балансы для токена доступными из самого токена, создав поле `tokenBalances`: ```graphql type Token @entity(immutable: true) { @@ -322,13 +335,13 @@ type TokenBalance @entity { } ``` -#### Many-To-Many Relationships +#### Связи "Многие ко многим" -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. +Для связей "многие ко многим", таких как пользователи, каждый из которых может принадлежать к любому числу организаций, наиболее простым, но, как правило, не самым производительным способом моделирования связей является создание массива в каждой из двух задействованных сущностей. Если связь симметрична, то необходимо сохранить только одну сторону связи, а другая сторона может быть получена. -#### Example +#### Пример -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. +Определите обратный поиск от типа сущности `User` к типу сущности `Organization`. В приведенном ниже примере это достигается путем поиска атрибута `members` внутри сущности `Organization`. В запросах поле `organizations` в `User` будет разрешено путем поиска всех объектов `Organization`, которые включают идентификатор пользователя. 
```graphql type Organization @entity { @@ -344,7 +357,7 @@ type User @entity { } ``` -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like +Более эффективный способ сохранить эту взаимосвязь - с помощью таблицы мэппинга, которая содержит по одной записи для каждой пары `User` / `Organization` со схемой, подобной ```graphql type Organization @entity { @@ -366,7 +379,7 @@ type UserOrganization @entity { } ``` -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: +Этот подход требует, чтобы запросы опускались на один дополнительный уровень для получения, например, сведений об организациях для пользователей: ```graphql query usersWithOrganizations { @@ -381,11 +394,11 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +Такой более сложный способ хранения связей "многие ко многим" приведет к уменьшению объема хранимых данных для субграфа и, следовательно, к тому, что субграф зачастую значительно быстрее индексируется и запрашивается. -#### Adding comments to the schema +#### Добавление комментариев к схеме -As per GraphQL spec, comments can be added above schema entity attributes using double quotations `""`. This is illustrated in the example below: +Согласно спецификации GraphQL, комментарии могут быть добавлены над атрибутами сущностей схемы с использованием двойных кавычек `""`. Это проиллюстрировано в примере ниже: ```graphql type MyFirstEntity @entity { @@ -395,13 +408,13 @@ type MyFirstEntity @entity { } ``` -## Defining Fulltext Search Fields +## Определение полей полнотекстового поиска -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. +Полнотекстовые поисковые запросы фильтруют и ранжируют объекты на основе введенных данных текстового запроса. Полнотекстовые запросы способны возвращать совпадения по схожим словам путем обработки текста запроса в виде строк перед сравнением с индексированными текстовыми данными. -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. +Определение полнотекстового запроса включает в себя название запроса, словарь языка, используемый для обработки текстовых полей, алгоритм ранжирования, используемый для упорядочивания результатов, и поля, включенные в поиск. Каждый полнотекстовый запрос может охватывать несколько полей, но все включенные поля должны относиться к одному типу сущности. -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. +Чтобы добавить полнотекстовый запрос, включите тип `_Schema_` с полнотекстовой директивой в схему GraphQL. ```graphql type _Schema_ @@ -424,7 +437,7 @@ type Band @entity { } ``` -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. 
Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage.
+Пример поля `bandSearch` можно использовать в запросах для фильтрации сущностей `Band` на основе текстовых документов в полях `name`, `description` и `bio`. Перейдите к [GraphQL API - запросы](/querying/graphql-api#queries) для описания API полнотекстового поиска и дополнительных примеров использования.

```graphql
query {
@@ -437,49 +450,49 @@ query {
}
```

-> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest.
+> **[Управление функциями](#experimental-features):** Начиная с `specVersion` `0.0.4` и далее, `fullTextSearch` должно быть объявлено в разделе `features` в манифесте субграфа.

-### Languages supported
+### Поддерживаемые языки

-Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token".
+Выбор другого языка окажет определенное, хотя порой и малозаметное, влияние на API полнотекстового поиска. Поля, охватываемые полем полнотекстового запроса, рассматриваются в контексте выбранного языка, поэтому лексемы, получаемые при анализе и в поисковых запросах, варьируются от языка к языку. Например: при использовании поддерживаемого турецкого словаря слово "token" усекается до основы "toke", тогда как английский словарь, разумеется, оставит основу "token".

-Supported language dictionaries:
+Поддерживаемые языковые словари:

-| Code | Dictionary |
-| ------ | ---------- |
-| simple | General |
-| da | Danish |
-| nl | Dutch |
-| en | English |
-| fi | Finnish |
-| fr | French |
-| de | German |
-| hu | Hungarian |
-| it | Italian |
-| no | Norwegian |
-| pt | Portuguese |
-| ro | Romanian |
-| ru | Russian |
-| es | Spanish |
-| sv | Swedish |
-| tr | Turkish |
+| Код | Словарь |
+| ------- | ------------- |
+| simple | Общий |
+| da | Датский |
+| nl | Голландский |
+| en | Английский |
+| fi | Финский |
+| fr | Французский |
+| de | Немецкий |
+| hu | Венгерский |
+| it | Итальянский |
+| no | Норвежский |
+| pt | Португальский |
+| ro | Румынский |
+| ru | Русский |
+| es | Испанский |
+| sv | Шведский |
+| tr | Турецкий |

-### Ranking Algorithms
+### Алгоритмы ранжирования

-Supported algorithms for ordering results:
+Поддерживаемые алгоритмы для упорядочивания результатов:

-| Algorithm | Описание |
-| ------------- | ----------------------------------------------------------------------- |
-| rank | Use the match quality (0-1) of the fulltext query to order the results. |
-| proximityRank | Similar to rank but also includes the proximity of the matches. |
+| Алгоритм | Описание |
+| ------------- | ---------------------------------------------------------------------------------------------- |
+| rank | Использует качество соответствия (0-1) полнотекстового запроса для упорядочивания результатов. |
+| proximityRank | Аналогично `rank`, но также учитывает близость совпадений. |

-## Writing Mappings
+## Написание мэппингов

-The mappings take data from a particular source and transform it into entities that are defined within your schema.
Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. +Мэппинги берут данные из определенного источника и преобразуют их в сущности, которые определены в вашей схеме. Мэппинги записываются в подмножестве [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html), который называется [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki), и который может быть скомпилирован в WASM ([ WebAssembly ](https://webassembly.org/)). AssemblyScript более строг, чем обычный TypeScript, но при этом предоставляет знакомый синтаксис. -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. +Для каждого обработчика событий, определенного в `subgraph.yaml` в разделе `mapping.EventHandlers`, создайте экспортируемую функцию с тем же именем. Каждый обработчик должен принимать один параметр с именем `event` с типом, соответствующим имени обрабатываемого события. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +В примере субграф `src/mapping.ts` содержит обработчики для событий `NewGravatar` и `UpdatedGravatar`: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -506,31 +519,31 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. +Первый обработчик принимает событие `NewGravatar` и создает новую сущность `Gravatar` с помощью `new Gravatar(event.params.id.toHex())`, заполняя поля сущности, используя соответствующие параметры события. Этот экземпляр сущности представлен переменной `gravatar` со значением идентификатора `event.params.id.toHex()`. -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. +Второй обработчик пытается загрузить существующий `Gravatar` из хранилища узлов The Graph. Если он еще не существует, он создается по требованию. Затем сущность обновляется в соответствии с новыми параметрами события, прежде чем он будет сохранен обратно в хранилище с помощью `gravatar.save()`. -### Recommended IDs for Creating New Entities +### Рекомендуемые идентификаторы для создания новых сущностей -Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. +Каждая сущность должна иметь `id`, который является уникальным среди всех сущностей одного типа. Значение сущности `id` устанавливается при создании этой сущности. 
Ниже приведены некоторые рекомендуемые значения `id`, которые следует учитывать при создании новых сущностей. ПРИМЕЧАНИЕ: Значение `id` должно быть `string`. - `event.params.id.toHex()` - `event.transaction.from.toHex()` - `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` -We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilies for interacting with the Graph Node store and conveniences for handling smart contract data and entities. You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. +Мы предоставляем [Библиотеку Graph Typescript](https://github.com/graphprotocol/graph-ts), которая содержит утилиты для взаимодействия с хранилищем узлов The Graph и удобства для обработки данных смарт-контрактов и сущностей. Вы можете использовать эту библиотеку в своих мэппингах, импортировав `@graphprotocol/graph-ts` в `mapping.ts`. -## Code Generation +## Генерация кода -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +Для упрощения и обеспечения безопасности типов при работе со смарт-контрактами, событиями и сущностями Graph CLI может генерировать типы AssemblyScript на основе схемы GraphQL субграфа и ABI контрактов, включенных в источники данных. -This is done with +Это делается с помощью ```sh graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +но в большинстве случаев субграфы уже предварительно сконфигурированы с помощью `package.json`, что позволяет вам просто запустить одно из следующих действий для достижения того же результата: ```sh # Yarn @@ -540,7 +553,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +Это сгенерирует класс AssemblyScript для каждого смарт-контракта в файлах ABI, упомянутых в `subgraph.yaml`, позволяя вам привязывать эти контракты к определенным адресам в мэппигах и вызывать контрактные методы, доступные только для чтения, для обрабатываемого блока. Кроме того, для каждого события контракта генерируется класс, обеспечивающий удобный доступ к параметрам события, а также к блоку и транзакции, от которых произошло событие. Все эти типы записываются в `//.ts`. В примере субграфа это будет ` generated/Gravity/Gravity.ts`, позволяющий импортировать эти типы с помощью мэппинга. ```javascript import { @@ -552,25 +565,25 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. 
All entity classes are written to `/schema.ts`, allowing mappings to import them with
+В дополнение к этому, для каждого типа сущности в GraphQL-схеме субграфа генерируется по одному классу. Эти классы обеспечивают безопасную для типов загрузку сущностей, доступ к полям сущностей на чтение и запись, а также метод `save()` для записи сущностей в хранилище. Все классы сущностей записываются в `/schema.ts`, что позволяет мэппингам импортировать их следующим образом

```javascript
import { Gravatar } from '../generated/schema'
```

-> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph.
+> **Примечание:** Генерация кода должна выполняться повторно после каждого изменения схемы GraphQL или ABI, включенных в манифест. Она также должна быть выполнена по крайней мере один раз перед сборкой или развертыванием субграфа.

-Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find.
+Генерация кода не проверяет ваш код мэппинга в `src/mapping.ts`. Если вы хотите проверить его, прежде чем пытаться развернуть свой субграф в Graph Explorer, вы можете запустить `yarn build` и исправить любые синтаксические ошибки, которые может обнаружить компилятор TypeScript.

-## Data Source Templates
+## Шаблоны источников данных

-A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events.
+Распространенным шаблоном в смарт-контрактах, совместимых с EVM, является использование контрактов-реестров или контрактов-фабрик, когда один контракт создает, управляет или ссылается на произвольное количество других контрактов, каждый из которых имеет свое собственное состояние и события.

-The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_.
+Адреса этих субконтрактов могут быть известны или не известны заранее, и многие из этих контрактов могут быть созданы и/или добавлены с течением времени. Поэтому в таких случаях определение одного источника данных или фиксированного количества источников данных невозможно и необходим более динамичный подход: _data source templates_.

-### Data Source for the Main Contract
+### Источник данных для основного контракта

-First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract.
+Сначала вы определяете обычный источник данных для основного контракта. Во фрагменте ниже показан упрощенный пример источника данных для контракта фабрики обмена [Uniswap](https://uniswap.org). Обратите внимание на обработчик события `NewExchange(address,address)`. Это событие генерируется, когда контракт-фабрика создает в сети новый контракт обмена.
```yaml
dataSources:
@@ -595,9 +608,9 @@ dataSources:
handler: handleNewExchange
```

-### Data Source Templates for Dynamically Created Contracts
+### Шаблоны источников данных для динамически создаваемых контрактов

-Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract.
+Затем вы добавляете _data source templates_ в манифест. Они идентичны обычным источникам данных, за исключением того, что в них отсутствует предопределенный адрес контракта в `source`. Как правило, вы определяете один шаблон для каждого типа субконтракта, управляемого родительским контрактом, или на который ссылается родительский контракт.

```yaml
dataSources:
@@ -631,9 +644,9 @@ templates:
handler: handleRemoveLiquidity
```

-### Instantiating a Data Source Template
+### Создание экземпляра шаблона источника данных

-In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract.
+На заключительном шаге вы обновляете мэппинг основного контракта, чтобы создать экземпляр динамического источника данных из одного из шаблонов. В данном примере вы изменяете мэппинг основного контракта так, чтобы импортировать шаблон `Exchange` и вызвать для него метод `Exchange.create(address)`, чтобы начать индексирование нового контракта обмена.

```typescript
import { Exchange } from '../generated/templates'
@@ -645,13 +658,13 @@ export function handleNewExchange(event: NewExchange): void {
}
```

-> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks.
+> **Примечание:** Новый источник данных будет обрабатывать только вызовы и события для блока, в котором он был создан, и всех последующих блоков, но не будет обрабатывать исторические данные, т.е. данные, которые содержатся в предыдущих блоках.
>
-> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created.
+> Если предыдущие блоки содержат данные, относящиеся к новому источнику данных, лучше всего проиндексировать эти данные, считывая текущее состояние контракта и создавая сущности, представляющие это состояние на момент создания нового источника данных.

-### Data Source Context
+### Контекст источника данных

-Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so:
+Контексты источника данных позволяют передавать дополнительную конфигурацию при создании экземпляра шаблона. В нашем примере предположим, что биржи связаны с определенной торговой парой, которая включена в событие `NewExchange`.
Эта информация может быть передана в созданный экземпляр источника данных, например, следующим образом: ```typescript import { Exchange } from '../generated/templates' @@ -663,7 +676,7 @@ export function handleNewExchange(event: NewExchange): void { } ``` -Inside a mapping of the `Exchange` template, the context can then be accessed: +Внутри мэппинга шаблона `Exchange` затем можно получить доступ к контексту: ```typescript import { dataSource } from '@graphprotocol/graph-ts' @@ -672,11 +685,11 @@ let context = dataSource.context() let tradingPair = context.getString('tradingPair') ``` -There are setters and getters like `setString` and `getString` for all value types. +Существуют установщики и получатели, такие как `setString` и `getString` для всех типов значений. -## Start Blocks +## Стартовые блоки -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +`startBlock` - это необязательный параметр, который позволяет вам определить, с какого блока в цепочке источник данных начнет индексацию. Установка начального блока позволяет источнику данных пропускать потенциально миллионы блоков, которые не имеют отношения к делу. Как правило, разработчик субграфа устанавливает `startBlock` в блок, в котором был создан смарт-контракт источника данных. ```yaml dataSources: @@ -702,23 +715,23 @@ dataSources: handler: handleNewEvent ``` -> **Note:** The contract creation block can be quickly looked up on Etherscan: +> **Примечание:** Блок создания контракта можно быстро найти в Etherscan: > -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. Load the transaction details page where you'll find the start block for that contract. +> 1. Найдите контракт, введя его адрес в строке поиска. +> 2. Нажмите на хэш транзакции создания в разделе `Contract Creator`. +> 3. Загрузите страницу сведений о транзакции, где вы найдете начальный блок для этого контракта. -## Call Handlers +## Обработчики вызовов -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +В то время как события обеспечивают эффективный способ сбора соответствующих изменений в состоянии контракта, многие контракты избегают создания журналов для оптимизации затрат на газ. В этих случаях субграф может подписываться на обращения к контракту источника данных. Это достигается путем определения обработчиков вызовов, ссылающихся на сигнатуру функции, и обработчика мэппинга, который будет обрабатывать вызовы этой функции. 
Чтобы обработать эти вызовы, обработчик мэппинга получит `ethereum.Call` в качестве аргумента, содержащего типизированные входы и выходы вызова. Вызовы, выполненные на любой глубине цепочки вызовов транзакции, запускают мэппинг, позволяя фиксировать действия с контрактом источника данных через прокси-контракты. -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. +Обработчики вызовов срабатывают только в одном из двух случаев: когда указанная функция вызывается учетной записью, отличной от самого контракта, или когда она помечена как внешняя в Solidity и вызывается как часть другой функции в том же контракте. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Примечание:** Обработчики вызовов в настоящее время зависят от Parity tracing API. Некоторые сети, такие как BNB chain и Arbitrium, не поддерживают этот API. Если субграф, индексирующий одну из этих сетей, содержит один или несколько обработчиков вызовов, синхронизация не начнется. Разработчикам субграфов следует вместо этого использовать обработчики событий. Они гораздо более производительны, чем обработчики вызовов, и поддерживаются в каждой сети evm. -### Defining a Call Handler +### Определение обработчика вызова -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. +Чтобы определить обработчик вызовов в вашем манифесте, просто добавьте массив `callHandlers` под источником данных, на который вы хотели бы подписаться. ```yaml dataSources: @@ -743,11 +756,11 @@ dataSources: handler: handleCreateGravatar ``` -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. +`function` - это нормализованная сигнатура функции, по которой можно фильтровать вызовы. Свойство `handler` - это имя функции в вашем мэппинге, которую вы хотели бы выполнить при вызове целевой функции в контракте источника данных. -### Mapping Function +### Функция мэппинга -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Каждый обработчик вызова принимает один параметр, тип которого соответствует имени вызываемой функции. 
В приведенном выше примере субграфа мэппинг содержит обработчик, который срабатывает при вызове функции `createGravatar` и получает параметр `CreateGravatarCall` в качестве аргумента:

```typescript
import { CreateGravatarCall } from '../generated/Gravity/Gravity'
@@ -762,24 +775,26 @@ export function handleCreateGravatar(call: CreateGravatarCall): void {
}
```

-The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`.
+Функция `handleCreateGravatar` принимает новый `CreateGravatarCall`, который является подклассом `ethereum.Call`, предоставляемым `@graphprotocol/graph-ts`, и включает типизированные входы и выходы вызова. Тип `CreateGravatarCall` генерируется для вас при запуске `graph codegen`.
+
+## Обработчики блоков
-## Block Handlers
+В дополнение к подписке на события контракта или вызовы функций, субграф может захотеть обновлять свои данные по мере добавления новых блоков в цепочку. Чтобы достичь этого, субграф может запускать функцию после каждого блока или после блоков, которые соответствуют предопределенному фильтру.
-In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter.
+### Поддерживаемые фильтры
-### Supported Filters
+#### Фильтр вызовов

```yaml
filter:
  kind: call
```

-_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._
+_Определенный обработчик будет вызван один раз для каждого блока, содержащего обращение к контракту (источнику данных), для которого определен обработчик._

-> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing.
+> **Примечание:** Фильтр `call` в настоящее время зависит от Parity tracing API. Некоторые сети, такие как BNB chain и Arbitrum, не поддерживают этот API. Если субграф, индексирующий одну из этих сетей, содержит один или несколько обработчиков блоков с фильтром `call`, синхронизация не начнется.

-The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type.
+Отсутствие фильтра для обработчика блоков гарантирует, что обработчик вызывается для каждого блока. Источник данных может содержать только один обработчик блоков для каждого типа фильтра.

```yaml
dataSources:
@@ -806,9 +821,48 @@ dataSources:
kind: call
```

-### Mapping Function
+#### Фильтр опроса
+
+> **Requires `specVersion` >= 0.0.8**
+
+> **Примечание.** Фильтры опроса доступны только для источников данных `kind: ethereum`.
+
+```yaml
+blockHandlers:
+  - handler: handleBlock
+    filter:
+      kind: polling
+      every: 10
+```
+
+Определенный обработчик будет вызываться один раз на каждые `n` блоков, где `n` — это значение, указанное в поле `every`. Эта конфигурация позволяет субграфу выполнять определенные операции через регулярные интервалы блоков.
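+Ниже приведен минимальный набросок обработчика для такого фильтра опроса. Сущность `BlockSnapshot` здесь гипотетическая и используется только для иллюстрации: предполагается, что она определена в вашей схеме с полями `blockNumber` и `timestamp`.
+
+```typescript
+import { ethereum } from '@graphprotocol/graph-ts'
+import { BlockSnapshot } from '../generated/schema'
+
+// Вызывается один раз на каждые `every` блоков (в конфигурации выше это каждые 10 блоков)
+export function handleBlock(block: ethereum.Block): void {
+  // BlockSnapshot задана в схеме только для примера: фиксируем номер и время блока
+  let snapshot = new BlockSnapshot(block.hash.toHex())
+  snapshot.blockNumber = block.number
+  snapshot.timestamp = block.timestamp
+  snapshot.save()
+}
+```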
+ +#### Однократный фильтр + +> **Requires `specVersion` >= 0.0.8** + +> **Примечание.** Однократные фильтры доступны только для источников данных `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +Определенный обработчик с однократным фильтром будет вызываться только один раз перед запуском всех остальных обработчиков. Эта конфигурация позволяет субграфу использовать обработчик в качестве обработчика инициализации, выполняя определенные задачи в начале индексирования. -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Функция мэппинга + +Функция мэппинга получит `ethereum.Block` в качестве своего единственного аргумента. Подобно функциям мэппинга событий, эта функция может получать доступ к существующим в хранилище сущностям субграфа, вызывать смарт-контракты и создавать или обновлять сущности. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -820,9 +874,9 @@ export function handleBlock(block: ethereum.Block): void { } ``` -## Anonymous Events +## Анонимные события -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: +Если вам нужно обрабатывать анонимные события в Solidity, это можно сделать, указав тему события 0, как показано в примере: ```yaml eventHandlers: @@ -831,13 +885,13 @@ eventHandlers: handler: handleGive ``` -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. +Событие будет запущено только в том случае, если подпись и тема 0 совпадают. По умолчанию `topic0` равен хэшу сигнатуры события. -## Transaction Receipts in Event Handlers +## Квитанции о транзакциях в обработчиках событий -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. +Начиная с `specVersion` `0.0.5` и `apiVersion` `0.0.7` обработчики событий могут иметь доступ к квитанции для транзакции, которая их отправила. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +Для этого обработчики событий должны быть объявлены в манифесте субграфа с новым ключом ` receipt: true`, который является необязательным и по умолчанию имеет значение false. ```yaml eventHandlers: @@ -846,20 +900,20 @@ eventHandlers: receipt: true ``` -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. +Внутри функции обработчика доступ к квитанции можно получить в поле `Event.receipt`. Если для ключа `receipt` установлено значение`false` или оно опущено в манифесте, вместо него будет возвращено значение `null`. 
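+В качестве иллюстрации ниже приведен минимальный набросок обработчика, который проверяет наличие квитанции. Предполагается, что для события `NewGravatar` из примера выше в манифесте указано `receipt: true`.
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+import { NewGravatar } from '../generated/Gravity/Gravity'
+
+export function handleNewGravatar(event: NewGravatar): void {
+  // event.receipt равен null, если `receipt: true` не объявлен в манифесте
+  let receipt = event.receipt
+  if (receipt != null) {
+    // Например, можно прочитать фактически израсходованный транзакцией газ
+    log.info('Gas used: {}', [receipt.gasUsed.toString()])
+  }
+}
+```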
-## Experimental features
+## Экспериментальные возможности

-Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below:
+Начиная с `specVersion` `0.0.4`, функции субграфа должны быть явно объявлены в разделе `features` на верхнем уровне файла манифеста, используя их имена в формате `camelCase`, как указано в таблице ниже:

-| Feature | Name |
-| --------------------------------------------------------- | --------------------------------------------------- |
-| [Неисправимые ошибки](#non-fatal-errors) | `nonFatalErrors` |
-| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` |
-| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` |
-| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` |
+| Возможность | Имя |
+| ---------------------------------------------------------- | ---------------------------------------------------- |
+| [Нефатальные ошибки](#non-fatal-errors) | `nonFatalErrors` |
+| [Полнотекстовый поиск](#defining-fulltext-search-fields) | `fullTextSearch` |
+| [Графтинг](#grafting-onto-existing-subgraphs) | `grafting` |
+| [IPFS на контрактах Ethereum](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` или `nonDeterministicIpfs` |

-For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be:
+Например, если в субграфе используются функции **Full-Text Search** и **Non-fatal Errors**, поле `features` в манифесте должно быть:

```yaml
specVersion: 0.0.4
@@ -870,27 +924,27 @@ features:
dataSources: ...
```

-Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used.
+Обратите внимание, что использование функции без ее объявления приведет к **validation error** во время развертывания субграфа, но никаких ошибок не возникнет, если функция объявлена, но не используется.

-### IPFS on Ethereum Contracts
+### IPFS на контрактах Ethereum

-A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts.
+Распространенным вариантом сочетания IPFS с Ethereum является хранение данных в IPFS, которые было бы слишком дорого поддерживать в цепочке, и ссылка на хэш IPFS в контрактах Ethereum.

-Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing.
+Учитывая такие хэши IPFS, субграфы могут считывать соответствующие файлы из IPFS, используя `ipfs.cat` и `ipfs.map`. Для надежного выполнения этой задачи необходимо, чтобы эти файлы были привязаны к узлу IPFS с высокой доступностью, чтобы узел IPFS [размещенного сервиса](https://thegraph.com/hosted-service) мог найти их при индексировании.

-> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio.
+> **Примечание:** Сеть The Graph пока не поддерживает `ipfs.cat` и `ipfs.map`, и разработчикам не следует разворачивать субграфы, использующие эту функциональность, в сети через Studio.

-> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose.
+> **[Управление функционалом](#experimental-features):** `ipfsOnEthereumContracts` должен быть объявлен в разделе `features` в манифесте субграфа. Для цепочек, отличных от EVM, псевдоним `nonDeterministicIpfs` также может использоваться для той же цели.

-When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality.
+При запуске локального узла The Graph необходимо установить переменную окружения `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS`, чтобы индексировать субграфы, используя эту экспериментальную функциональность.

-### Неисправимые ошибки
+### Нефатальные ошибки

-Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic.
+Ошибки индексирования в уже синхронизированных субграфах по умолчанию приведут к сбою субграфа и прекращению синхронизации. В качестве альтернативы субграфы можно настроить на продолжение синхронизации при наличии ошибок, игнорируя изменения, внесенные обработчиком, который спровоцировал ошибку. Это дает авторам субграфов время на исправление своих субграфов, в то время как запросы продолжают выполняться к последнему блоку, хотя результаты могут быть несогласованными из-за бага, вызвавшего ошибку. Обратите внимание, что некоторые ошибки по-прежнему всегда являются фатальными. Чтобы быть нефатальной, ошибка должна быть заведомо детерминированной.

-> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio.
+> **Примечание:** Сеть The Graph пока не поддерживает нефатальные ошибки, и разработчикам не следует развертывать субграфы, использующие эту функциональность, в сети через Studio.

-Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest:
+Для включения нефатальных ошибок необходимо установить в манифесте субграфа следующий флаг функции:

```yaml
specVersion: 0.0.4
@@ -900,7 +954,7 @@ features:
...
```

-The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example:
+Запрос также должен разрешить запрос данных с потенциальными несоответствиями с помощью аргумента `subgraphError`.
Также рекомендуется запросить `_meta`, чтобы проверить, не пропустил ли субграф ошибки, как в примере:

```graphql
foos(first: 100, subgraphError: allow) {
@@ -912,7 +966,7 @@ _meta {
}
```

-If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response:
+Если субграф обнаруживает ошибку, этот запрос вернет как данные, так и ошибку GraphQL с сообщением `"indexing_error"`, как в данном примере ответа:

```graphql
"data": {
@@ -932,11 +986,13 @@ If the subgraph encounters an error, that query will return both the data and a
]
```

-### Grafting onto Existing Subgraphs
+### Графтинг на существующие субграфы

-When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed.
+> **Примечание:** не рекомендуется использовать графтинг при первоначальном переходе на сеть The Graph. Подробнее [здесь](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network).

-A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level:
+Когда субграф развертывается впервые, он начинает индексировать события в генезис-блоке соответствующей цепочки (или в `startBlock`, определенном для каждого источника данных). В некоторых обстоятельствах полезно повторно использовать данные из существующего субграфа и начинать индексацию с гораздо более позднего блока. Этот режим индексации называется _Grafting_. Графтинг, например, полезен во время разработки, чтобы быстро устранить простые ошибки в мэппингах или временно возобновить работу существующего субграфа после его сбоя.
+
+Субграф графтится к базовому субграфу, когда манифест субграфа в `subgraph.yaml` содержит блок `graft` на верхнем уровне:

```yaml
description: ...
@@ -945,49 +1001,49 @@ graft:
block: 7345624 # Block number
```

-When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph.
+Когда развертывается субграф, манифест которого содержит блок `graft`, узел The Graph скопирует данные субграфа `base` вплоть до указанного `block` включительно, а затем продолжит индексирование нового субграфа начиная с этого блока. Базовый субграф должен существовать на целевом экземпляре узла The Graph и должен быть проиндексирован по крайней мере до заданного блока. Из-за этого ограничения графтинг следует использовать только в процессе разработки или в экстренных случаях, чтобы ускорить создание эквивалентного субграфа без графтинга.
-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Поскольку графтинг копирует, а не индексирует базовые данные, гораздо быстрее перенести субграф в нужный блок, чем индексировать с нуля, хотя для очень больших субграфов копирование исходных данных может занять несколько часов. Пока графтовый субграф инициализируется, узел The Graph будет регистрировать информацию о типах сущностей, которые уже были скопированы. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Графтовый субграф может использовать схему GraphQL, которая не идентична схеме базового субграфа, а просто совместима с ней. Она сама по себе должна быть допустимой схемой субграфа, но может отличаться от схемы базового субграфа следующими способами: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Она добавляет или удаляет типы сущностей +- Она удаляет атрибуты из типов сущностей +- Она добавляет в типы сущностей атрибуты с возможностью обнуления +- Она превращает ненулевые атрибуты в нулевые +- Она добавляет значения в перечисления +- Она добавляет или удаляет интерфейсы +- Она изменяется в зависимости от того, для каких типов сущностей реализован тот или иной интерфейс -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Управление функционалом](#experimental-features):** `grafting` должен быть объявлен в разделе `features` в манифесте субграфа. -## File Data Sources +## Источники файловых данных -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +Источники файловых данных — это новая функциональность субграфа для надежного и расширяемого доступа к данным вне цепочки во время индексации. Источники данных файлов поддерживают получение файлов из IPFS и Arweave. -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. +> Это также закладывает основу для детерминированного индексирования данных вне сети, а также потенциального введения произвольных данных из HTTP-источников. ### Обзор -Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. +Вместо извлечения файлов «в очереди» во время выполнения обработчика вводятся шаблоны, которые могут создаваться как новые источники данных для заданного идентификатора файла. Эти новые источники данных извлекают файлы, повторяя попытки, если они не увенчались успехом, запуская специальный обработчик, когда файл найден. 
-This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. +Это похоже на [существующие шаблоны источников данных](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), которые используются для динамического создания новых источников данных на чейн-основе. -> This replaces the existing `ipfs.cat` API +> Это заменяет существующий API `ipfs.cat` -### Upgrade guide +### Руководство по обновлению -#### Update `graph-ts` and `graph-cli` +#### Обновите `graph-ts` и `graph-cli` -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 +Для файловых источников данных требуется graph-ts >=0.29.0 и graph-cli >=0.33.1 -#### Add a new entity type which will be updated when files are found +#### Добавить новый тип сущности, который будет обновляться при обнаружении файлов -File data sources cannot access or update chain-based entities, but must update file specific entities. +Источники файловых данных не могут получать доступ к сущностям на чейн-основе или обновлять их, но должны обновлять сущности, специфичные для файлов. -This may mean splitting out fields from existing entities into separate entities, linked together. +Это может означать разделение полей существующих сущностей на отдельные сущности, связанные между собой. -Original combined entity: +Первоначальная объединенная сущность: ```graphql type Token @entity { @@ -1005,7 +1061,7 @@ type Token @entity { } ``` -New, split entity: +Новая разделенная сущность: ```graphql type Token @entity { @@ -1026,13 +1082,13 @@ type TokenMetadata @entity { } ``` -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! +Если между родительской сущностью и результирующей сущностью-источником данных существует связь1:1, то наиболее простым вариантом будет связать родительскую сущность с результирующей файловой сущностью, используя в качестве поиска IPFS CID. Свяжитесь с нами в Discord, если у вас возникли трудности с моделированием новых сущностей на основе файлов! -> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. +> Вы можете использовать [вложенные фильтры](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) для фильтрации родительских объектов на основе этих вложенных сущностей. -#### Add a new templated data source with `kind: file/ipfs` +#### Добавьте новый шаблонный источник данных с помощью `kind: file/ipfs` или `kind: file/arweave`. -This is the data source which will be spawned when a file of interest is identified. +Это источник данных, который будет создан при обнаружении интересующего файла. ```yaml templates: @@ -1050,21 +1106,21 @@ templates: file: ./abis/Token.json ``` -> Currently `abis` are required, though it is not possible to call contracts from within file data sources +> В настоящее время требуется `abis` хотя невозможно вызывать контракты из файловых источников данных -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details. 
+В файле-источнике данных должны быть конкретно указаны все типы сущностей, с которыми он будет взаимодействовать, в разделе `entities`. Дополнительные сведения см. в разделе [ограничения](#Limitations).

-#### Create a new handler to process files
+#### Создание нового обработчика для обработки файлов

-This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)).
+Этот обработчик должен принимать один параметр `Bytes` — содержимое найденного файла, которое затем можно обработать. Часто это файл JSON, который можно обработать с помощью помощников `graph-ts` ([документация](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)).

-The CID of the file as a readable string can be accessed via the `dataSource` as follows:
+Доступ к CID файла в виде читаемой строки можно получить через `dataSource` следующим образом:

```typescript
const cid = dataSource.stringParam()
```

-Example handler:
+Пример обработчика:

```typescript
import { json, Bytes, dataSource } from '@graphprotocol/graph-ts'
@@ -1091,22 +1147,24 @@ export function handleMetadata(content: Bytes): void {
}
```

-#### Spawn file data sources when required
+#### Создание файловых источников данных при необходимости
+
+Теперь вы можете создавать файловые источники данных во время выполнения обработчиков на чейн-основе:
-You can now create file data sources during execution of chain-based handlers:
+- Импортируйте шаблон из автоматически созданных `templates`
+- вызовите `TemplateName.create(cid: string)` из мэппинга, где cid является действительным идентификатором контента для IPFS или Arweave
-- Import the template from the auto-generated `templates`
-- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier
+Для IPFS Graph Node поддерживает [идентификаторы контента v0 и v1](https://docs.ipfs.tech/concepts/content-addressing/), а также идентификаторы контента с каталогами (например, `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`).
-> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`)
+For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing).

-Example:
+Пример:

```typescript
import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'

const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm'
-//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs.
+//Этот пример кода предназначен для субграфа Crypto Coven. Приведенный выше хеш ipfs представляет собой каталог с метаданными токенов для всех NFT Crypto Coven.
export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -1116,7 +1174,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + //Это создает путь к метаданным для одного сборщика NFT Crypto. Он объединяет каталог с "/" + filename + ".json" token.ipfsURI = tokenIpfsHash @@ -1129,50 +1187,50 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +Это создаст новый источник данных файла, который будет опрашивать настроенную конечную точку IPFS или Arweave Graph Node, повторяя попытку, если она не найдена. Когда файл будет найден, будет выполнен обработчик источника данных файла. -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. +В этом примере CID используется для поиска между родительской сущностью `Token` и результирующей сущностью `TokenMetadata`. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Раньше это была точка, в которой разработчик субграфа вызывал `ipfs.cat(CID)` для извлечения файла -Congratulations, you are using file data sources! +Поздравляем, вы используете файловые источники данных! -#### Deploying your subgraphs +#### Развертывание субграфов -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +Теперь вы можете `build` (построить) и ` deploy` (развернуть) свой субграф на любом узле The Graph >=v0.30.0-rc.0. -#### Limitations +#### Ограничения -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +Обработчики и сущности файловых источников данных изолированы от других сущностей субграфа, что гарантирует их детерминированность при выполнении и исключает загрязнение источников данных на чейн-основе. В частности: -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers +- Сущности, созданные с помощью файловых источников данных, неизменяемы и не могут быть обновлены +- Обработчики файловых источников данных не могут получить доступ к сущностям из других файловых источников данных +- Объекты, связанные с источниками данных файлов, не могут быть доступны обработчикам на чейн-основе -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> Хотя это ограничение не должно вызывать проблем в большинстве случаев, для некоторых оно может вызвать сложности. Если у вас возникли проблемы с моделированием ваших файловых данных в субграфе, свяжитесь с нами через Discord! -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. 
This restriction may be lifted in the future. +Кроме того, невозможно создать источники данных из файлового источника данных, будь то источник данных onchain или другой файловый источник данных. Это ограничение может быть снято в будущем. -#### Best practices +#### Лучшие практики -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. +Если вы связываете метаданные NFT с соответствующими токенами, используйте хэш IPFS метаданных для ссылки на сущность Metadata из сущности Token. Сохраните сущность Metadata, используя хэш IPFS в качестве идентификатора. -You can use [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. +Вы можете использовать [Контекст источника данных](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) при создании файловых источников данных для передачи дополнительной информации, которая будет доступна обработчику файлового источника данных. -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. +Если у вас есть сущности, которые обновляются несколько раз, создайте уникальные сущности на основе файлов, используя хэш IPFS и идентификатор сущности, и ссылайтесь на них, используя производное поле в сущности на чейн-основе. -> We are working to improve the above recommendation, so queries only return the "most recent" version +> Мы работаем над улучшением приведенной выше рекомендации, поэтому запросы возвращают только "самую последнюю" версию -#### Known issues +#### Известные проблемы -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. +Файловые источники данных в настоящее время требуют ABI, даже если ABI не используются ([проблема](https://github.com/graphprotocol/graph-cli/issues/961)). Обходным решением является добавление любого ABI. -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. +Обработчики для файловых источников данных не могут находиться в файлах, которые импортируют привязки контракта `eth_call`, с ошибкой "unknown import: `ethereum::ethereum.call` has not been defined" ([проблема](https://github.com/graphprotocol/graph-cli/issues/4309)). Обходным решением является создание обработчиков файловых источников данных в специальном файле. 
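+
+A hedged sketch of the `DataSource context` recommendation from the best practices above: the `TokenMetadata` template name and the `parentId` key are illustrative assumptions, and this assumes the generated file template exposes `createWithContext` alongside `create`, as generated chain-based templates do.
+
+```typescript
+import { Bytes, DataSourceContext, dataSource } from '@graphprotocol/graph-ts'
+import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates'
+
+// In a chain-based handler: spawn the file data source and pass extra context along
+export function spawnMetadata(cid: string, parentId: string): void {
+  let context = new DataSourceContext()
+  context.setString('parentId', parentId)
+  TokenMetadataTemplate.createWithContext(cid, context)
+}
+
+// In the file data source handler: read the context back
+export function handleMetadata(content: Bytes): void {
+  let parentId = dataSource.context().getString('parentId')
+  // ...parse `content` and save a file-based entity keyed by the CID or parentId
+}
+```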
#### Примеры -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) +[Миграция субграфа Crypto Coven](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) #### Ссылки -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) +[Источники данных GIP-файла](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/ru/developing/developer-faqs.mdx b/website/pages/ru/developing/developer-faqs.mdx index 7767f45f0eb6..c266c4383068 100644 --- a/website/pages/ru/developing/developer-faqs.mdx +++ b/website/pages/ru/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } В настоящее время рекомендуемым подходом для децентрализованного приложения является добавление ключа во внешний интерфейс и предоставление его конечным пользователям. Тем не менее, Вы можете ограничить этот ключ именем хоста, например _yourdapp.io_ и субграфом. Шлюз в настоящее время управляется Edge & Node. Частью ответственности шлюза является отслеживание неправомерного поведения и блокировка трафика от вредоносных клиентов. -## 25. Где я могу найти свой текущий субграф в Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Перейдите в Hosted Service, чтобы найти субграфы, которые Вы или другие развернули в нём. Вы можете найти его [здесь](https://thegraph.com/hosted-service). -## 26. Будет ли Hosted Service взимать плату за запросы? +## 26. Will the hosted service start charging query fees? The Graph никогда не будет взимать плату за Hosted Service. The Graph — это децентрализованный протокол, и взимание платы за централизованную услугу не соответствует ценностям The Graph. Hosted Service всегда был временным шагом, помогающим получить доступ к децентрализованной сети. У разработчиков будет достаточно времени, чтобы перейти на децентрализованную сеть, когда им будет удобно. -## 27. Когда Hosted Service будет отключен? - -Hosted Service будет закрыт в 2023 году. Прочитайте объявление в блоге [здесь](https://thegraph.com/blog/sunsetting-hosted-service). Всем децентрализованным приложениям, использующим Hosted Service, рекомендуется перейти на децентрализованную сеть. Разработчикам доступны сетевые гранты, чтобы помочь им обновить субграф до The Graph Network. Если Ваше децентрализованное приложение обновляет субграф, Вы можете подать заявку [здесь](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. Как обновить субграф в основной сети? +## 27. How do I update a subgraph on mainnet? Если Вы являетесь разработчиком субграфа, Вы можете развернуть новую версию своего субграфа в Subgraph Studio с помощью интерфейса командной строки (CLI). На этом этапе он будет приватным, но, если Вы захотите, Вы сможете опубликовать его в децентрализованном Graph Explorer. Это создаст новую версию Вашего субграфа, на которую кураторы смогут начать подавать сигналы. 
diff --git a/website/pages/ru/developing/graph-ts/api.mdx b/website/pages/ru/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..6669e951f9fa --- /dev/null +++ b/website/pages/ru/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +На этой странице описаны встроенные API, которые можно использовать при написании мэппингов субграфов. По умолчанию доступны два вида API: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## Референс API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Низкоуровневые примитивы для перевода между системами различных типов, таких как Ethereum, JSON, GraphQL и AssemblyScript. + +### Версии + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Версия | Примечания к релизу | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Встроенные типы + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+
+The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods:
+
+_Construction_
+
+- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x`
+- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes
+
+_Type conversions_
+
+- `b.toHex()` – returns a hexadecimal string representing the bytes in the array
+- `b.toString()` – converts the bytes in the array to a string of unicode characters
+- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes)
+
+_Operators_
+
+- `b.concat(other: Bytes) : Bytes` - return a new `Bytes` consisting of `this` directly followed by `other`
+- `b.concatI32(other: i32) : ByteArray` - return a new `Bytes` consisting of `this` directly followed by the byte representation of `other`
+
+#### Address
+
+```typescript
+import { Address } from '@graphprotocol/graph-ts'
+```
+
+`Address` extends `Bytes` to represent Ethereum `address` values.
+
+It adds the following methods on top of the `Bytes` API:
+
+- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string
+- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error
+
+### Store API
+
+```typescript
+import { store } from '@graphprotocol/graph-ts'
+```
+
+The `store` API allows entities to be loaded from, saved to, and removed from the Graph Node store.
+
+Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities.
+
+#### Создание объектов
+
+Ниже приведен общий шаблон для создания объектов из событий Ethereum.
+
+```typescript
+// Импорт класса событий Transfer, сгенерированного из ERC20 ABI
+import { Transfer as TransferEvent } from '../generated/ERC20/ERC20'
+
+// Импорт типа объекта Transfer, сгенерированного из схемы GraphQL
+import { Transfer } from '../generated/schema'
+
+// Обработчик события передачи
+export function handleTransfer(event: TransferEvent): void {
+  // Создание объекта Transfer, с использованием хеша транзакции в качестве идентификатора объекта
+  let id = event.transaction.hash
+  let transfer = new Transfer(id)
+
+  // Установка свойств объекта, с использованием параметров события
+  transfer.from = event.params.from
+  transfer.to = event.params.to
+  transfer.amount = event.params.amount
+
+  // Сохранение объекта в хранилище
+  transfer.save()
+}
+```
+
+When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters.
+
+Каждый объект должен иметь уникальный идентификатор, чтобы избежать коллизий с другими объектами.
Довольно часто параметры события включают уникальный идентификатор, который может быть использован. Примечание: Использование хэша транзакции в качестве идентификатора предполагает, что никакие другие события в той же транзакции не создают объекты с этим хэшем в качестве идентификатора. + +#### Загрузка объектов из хранилища + +Если объект уже существует, его можно загрузить из хранилища следующим образом: + +```typescript +let id = event.transaction.hash // или некоторым образом создается идентификатор +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Используйте объект Transfer, как и раньше +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Поиск объектов, созданных внутри блока + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +Хранилище API облегчает поиск объектов, которые были созданы или обновлены в текущем блоке. Типичная ситуация для этого заключается в том, что один обработчик создает транзакцию из некоторого события в чейне, а более поздний обработчик хочет получить доступ к этой транзакции, если она существует. В случае, когда транзакция не была осуществлена, субграфу придется обратиться к базе данных только для того, чтобы узнать, что объект не существует; если же автор субграфа знает, что объект, должен был быть создан в том же блоке, использование loadInBlock позволит избежать этого обхода базы данных. Для некоторых субграфов эти пропущенные запросы могут значительно увеличить время индексации. + +```typescript +let id = event.transaction.hash // или некоторым образом создается идентификатор +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Используйте объект Transfer, как и раньше +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Поиск производных объектов + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +Это позволяет загружать поля производных объектов из обработчика событий. Например, учитывая следующую схему: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Загрузите объекты токена, связанные с данным держателем +let tokens = holder.tokens.load() +``` + +#### Обновление существующих объектов + +Существует два способа обновить существующий объект: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. 
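+
+For illustration, a minimal sketch of the two update approaches just listed, reusing the `Transfer` entity and `event` from the earlier examples; whether a prior `load` is needed depends on whether the new values build on the old ones:
+
+```typescript
+let id = event.transaction.hash
+
+// Approach 1: load, modify, save (use this when the update depends on existing values)
+let existing = Transfer.load(id)
+if (existing != null) {
+  existing.amount = existing.amount.plus(event.params.amount)
+  existing.save()
+}
+
+// Approach 2: construct and save (fields not set here are merged from the stored entity)
+let updated = new Transfer(id)
+updated.to = event.params.to
+updated.save()
+```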
+ +Изменение свойств в большинстве случаев не вызывает затруднений благодаря сгенерированным установщикам свойств: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... +``` + +Также можно сбросить свойства с помощью одной из следующих двух инструкций: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// Это не сработает +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// Это сработает +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Удаление объектов из хранилища + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +Ethereum API предоставляет доступ к смарт-контрактам, общедоступным переменным состояния, функциям контрактов, событиям, транзакциям, блокам и кодированию/декодированию данных Ethereum. + +#### Поддержка типов Ethereum + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +Следующий пример иллюстрирует это. С учётом схемы субграфа, такой как + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### События и данные о блоках/транзакциях + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`):
+
+```typescript
+class Event {
+  address: Address
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string | null
+  block: Block
+  transaction: Transaction
+  parameters: Array
+  receipt: TransactionReceipt | null
+}
+
+class Block {
+  hash: Bytes
+  parentHash: Bytes
+  unclesHash: Bytes
+  author: Address
+  stateRoot: Bytes
+  transactionsRoot: Bytes
+  receiptsRoot: Bytes
+  number: BigInt
+  gasUsed: BigInt
+  gasLimit: BigInt
+  timestamp: BigInt
+  difficulty: BigInt
+  totalDifficulty: BigInt
+  size: BigInt | null
+  baseFeePerGas: BigInt | null
+}
+
+class Transaction {
+  hash: Bytes
+  index: BigInt
+  from: Address
+  to: Address | null
+  value: BigInt
+  gasLimit: BigInt
+  gasPrice: BigInt
+  input: Bytes
+  nonce: BigInt
+}
+
+class TransactionReceipt {
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  blockHash: Bytes
+  blockNumber: BigInt
+  cumulativeGasUsed: BigInt
+  gasUsed: BigInt
+  contractAddress: Address
+  logs: Array
+  status: BigInt
+  root: Bytes
+  logsBloom: Bytes
+}
+
+class Log {
+  address: Address
+  topics: Array
+  data: Bytes
+  blockHash: Bytes
+  blockNumber: Bytes
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string
+  removed: bool | null
+}
+```
+
+#### Доступ к состоянию смарт-контракта
+
+The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block.
+
+Распространенным шаблоном является доступ к контракту, из которого исходит событие. Это достигается с помощью следующего кода:
+
+```typescript
+// Импорт сгенерированного класса контракта и сгенерированного класса события Transfer
+import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
+// Импорт созданного класса объекта
+import { Transfer } from '../generated/schema'
+
+export function handleTransfer(event: TransferEvent) {
+  // Привязка контракта к адресу, сгенерировавшему событие
+  let contract = ERC20Contract.bind(event.address)
+
+  // Доступ к переменным состояния и функциям путем их вызова
+  let erc20Symbol = contract.symbol()
+}
+```
+
+`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type
+
+As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically.
+
+Любой другой контракт, который является частью субграфа, может быть импортирован из сгенерированного кода и привязан к действительному адресу.
+
+#### Обработка отмененных вызовов
+
+If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method:
+
+```typescript
+let gravity = Gravity.bind(event.address)
+let callResult = gravity.try_gravatarToOwner(gravatar)
+if (callResult.reverted) {
+  log.info('getGravatar reverted', [])
+} else {
+  let owner = callResult.value
+}
+```
+
+Обратите внимание, что Graph Node, подключенная к клиенту Geth или Infura, может обнаруживать не все откаты.
Если Вы полагаетесь на это, мы рекомендуем использовать Graph Node, подключенную к клиенту Parity. + +#### Кодирование/декодирование ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +Для получения дополнительной информации: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Регистрация API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Регистрация одного или нескольких значений + +##### Регистрация одного значения + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Отображает: "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Регистрация одной записи из существующего массива + +В приведенном ниже примере регистрируется только первое значение массива аргументов, несмотря на то, что массив содержит три значения. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Отображает : "My value is: A" (Несмотря на то, что в `log.info` передаются три значения) + log.info('My value is: {}', myArray) +} +``` + +#### Регистрация нескольких записей из существующего массива + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Отображает: "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Регистрация конкретной записи из существующего массива
+
+Чтобы отобразить определенное значение в массиве, необходимо указать индексированное значение.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Отображает: "My third value is C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Регистрация информации о событии
+
+В приведенном ниже примере регистрируется номер блока, хэш блока и хэш транзакции из события:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+При наличии хеша или пути IPFS чтение файла из IPFS выполняется следующим образом:
+
+```typescript
+// Поместите это в обработчик события в мэппинге
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Пути, подобные `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`,
+// которые включают файлы в директориях, также поддерживаются
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { ipfs, JSONValue, Value } from '@graphprotocol/graph-ts'
+import { Item } from '../generated/schema'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // Смотрите документацию по JsonValue для получения подробной информации о работе
+  // со значениями JSON
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Обратные вызовы также могут создавать объекты
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Установите для родителя значение "parentId"
+  newItem.save()
+}
+
+// Поместите это внутри обработчика событий в мэппинге
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// В качестве альтернативы, используйте `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line.
The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... 
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Справка по преобразованию типов + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ----------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() или s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() или s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Метаданные источника данных + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Объект и DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
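+
+Such manifest-defined values can be read back inside a mapping through `dataSource.context()` and the typed getters listed above; a minimal sketch, assuming the illustrative `bool_example` and `string_example` keys from the YAML example that follows:
+
+```typescript
+import { dataSource } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  let context = dataSource.context()
+  let isEnabled = context.getBoolean('bool_example')
+  let greeting = context.getString('string_example')
+
+  if (isEnabled) {
+    // use `greeting` (and the other context values) when building entities
+  }
+}
+```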
+ +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ru/developing/graph-ts/common-issues.mdx b/website/pages/ru/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..127d5d70548f --- /dev/null +++ b/website/pages/ru/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Распространенные проблемы с AssemblyScript +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/ru/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/ru/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..6dd521a3cfb6 100644 --- a/website/pages/ru/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/ru/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: Часто задаваемые вопросы о субграфах, работающих на основе субпотоков (Substreams) --- -## What are Substreams? +## Что такое субпотоки? Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. 
Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. Go to the [Substreams Documentation](/substreams) to learn more about Substreams. -## What are Substreams-powered subgraphs? +## Что такое субграфы, работающие на основе Субпотоков? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +[Субграфы, работающие на основе Субпотоков](/cookbook/substreams-powered-subgraphs/) сочетают в себе мощь Субпотоков с возможностью запроса субграфов. При публикации Субграфа, работающего на основе Субпотоков, данные, полученные в результате преобразований Субпотоков, могут [выводить изменения объекта](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), совместимые с объектами субграфа. -If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +Если Вы уже знакомы с разработкой субграфов, обратите внимание на то, что к тому же можно запрашивать субграфы, работающие на основе Субпотоков, точно так же, как если бы они были созданы на уровне преобразования AssemblyScript, со всеми преимуществами Субграфа, такими как предоставление динамического и гибкого GraphQL API. -## How are Substreams-powered subgraphs different from subgraphs? +## Чем субграфы, работающие на основе Субпотоков, отличаются от субграфов? -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. +Субграфы состоят из источников данных, которые определяют события в цепочке и то, как эти события должны быть преобразованы с помощью обработчиков, написанных на Assemblyscript. Эти события обрабатываются последовательно в зависимости от того, в каком порядке события происходят в цепочке. -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. +Напротив, субграфы, работающие на основе субпотоков, имеют один источник данных, который ссылается на пакет субпотоков, обрабатываемый the Graph Node. Субпотоки имеют доступ к дополнительным детализированным данным в цепочке по сравнению с обычными субграфами, а также могут извлекать выгоду из массового распараллеливания обработки, что может подразумевать гораздо более быстрое время обработки. -## What are the benefits of using Substreams-powered subgraphs? +## Каковы преимущества использования субграфов, работающих на основе Субпотоков? 
Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. -## What are the benefits of Substreams? +## В чем преимущества Субпотоков? -There are many benefits to using Substreams, including: +Использование Субпотоков имеет много преимуществ, в том числе: -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. +- Компонуемость: Вы можете объединять модули Субпотоков, как блоки LEGO, и опираться на модули сообщества, дополнительно уточняя общедоступные данные. -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- Высокопроизводительное индексирование: индексирование на порядки быстрее благодаря крупномасштабным кластерам параллельных операций (как пример, BigQuery). -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- Возможность загружать куда угодно: Загружайте Ваши данные в любое удобное для Вас место: PostgreSQL, MongoDB, Kafka, субграфы, плоские файлы, Google Sheets. -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- Программируемость: Используйте код для настройки извлечения, выполнения агрегирования во время преобразования и моделирования выходных данных для нескольких приемников. -- Access to additional data which is not available as part of the JSON RPC +- Доступ к дополнительным данным, недоступным в составе JSON RPC -- All the benefits of the Firehose. +- Все преимущества Firehose. -## What is the Firehose? +## Что такое Firehose? -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +Firehose, разработанный [StreamingFast](https://www.streamingfast.io/), представляет собой уровень извлечения данных блокчейна, разработанный с нуля для обработки полной истории блокчейнов на ранее невиданных скоростях. Обеспечивая подход, основанный на файлах и потоковой передаче, он является основным компонентом пакета технологий Streamingfast с открытым исходным кодом и основой для Субпотоков. -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Перейдите к [documentation](https://firehose.streamingfast.io/), чтобы узнать больше о Firehose. -## What are the benefits of the Firehose? +## В чем преимущества Firehose? -There are many benefits to using Firehose, including: +Использование Firehose имеет много преимуществ, в том числе: -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. 
+- Наименьшая задержка и отсутствие опроса: В режиме потоковой передачи узлы Firehose спроектированы таким образом, чтобы первыми передавать данные блока. -- Prevents downtimes: Designed from the ground up for High Availability. +- Предотвращает простои: Разработан с нуля для обеспечения высокой доступности. -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. +- Никогда не пропустите ни одного момента: Курсор потока Firehose предназначен для обработки форков и продолжения работы с того места, где Вы остановились, в любых условиях. -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- Богатейшая модель данных:  Лучшая модель данных, которая включает изменения баланса, полное дерево вызовов, внутренние транзакции, логи, изменения в хранилище, затраты на газ и многое другое. -- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- Использует плоские файлы: Данные блокчейна извлекаются в плоские файлы — самый дешевый и наиболее оптимизированный доступный вычислительный ресурс. -## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## Где разработчики могут получить доступ к дополнительной информации о субграфах работающих на основе Субпотоков и о Субпотоках? The [Substreams documentation](/substreams) will teach you how to build Substreams modules. -The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +В документации [Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) показано, как упаковать их для развертывания в The Graph. -## What is the role of Rust modules in Substreams? +## Какова роль модулей Rust в Субпотоках? -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +Модули Rust - это эквивалент мапперов AssemblyScript в субграфах. Они компилируются в WASM аналогичным образом, но модель программирования допускает параллельное выполнение. Они определяют, какие преобразования и агрегации необходимо применить к необработанным данным блокчейна. See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. -## What makes Substreams composable? +## Что делает Субпотоки компонуемыми? -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +При использовании Субпотоков компоновка происходит на уровне преобразования, что позволяет повторно использовать кэшированные модули. -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. 
+Например, Алиса может создать ценовой модуль DEX, Боб может использовать его для создания агрегатора объемов для некоторых интересующих его токенов, а Лиза может объединить четыре отдельных ценовых модуля DEX, чтобы создать ценовой оракул. Один запрос Субпотоков упакует все эти отдельные модули, свяжет их вместе, чтобы предложить гораздо более уточненный поток данных. Затем этот поток может быть использован для заполнения субграфа и запрашиваться потребителями. -## How can you build and deploy a Substreams-powered Subgraph? +## Как Вы можете создать и развернуть субграф, работающий на основе Субпотоков? -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). +После [определения](/cookbook/substreams-powered-subgraphs/) субграфа, работающего на основе Субпотоков, Вы можете использовать Graph CLI для его развертывания в [Subgraph Studio](https://thegraph.com/studio/). -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## Где я могу найти примеры Субпотоков и субграфов, работающих на основе Субпотоков? -You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. +Вы можете посетить [этот репозиторий на Github](https://github.com/pinax-network/awesome-substreams), чтобы найти примеры Субпотоков и субграфов, работающих на основе Субпотоков. -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? +## Что означают Субпотоки и субграфы, работающие на основе Субпотоков, для сети The Graph? -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +Интеграция обещает множество преимуществ, включая чрезвычайно высокопроизводительную индексацию и большую компонуемость за счет использования модулей сообщества и развития на их основе. diff --git a/website/pages/ru/developing/supported-networks.json b/website/pages/ru/developing/supported-networks.json index 5e12392b8c7d..bfbadf75f718 100644 --- a/website/pages/ru/developing/supported-networks.json +++ b/website/pages/ru/developing/supported-networks.json @@ -1,9 +1,9 @@ { "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "cliName": "Название CLI", + "chainId": "ID чейна", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "Децентрализованная сеть", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/ru/developing/supported-networks.mdx b/website/pages/ru/developing/supported-networks.mdx index c10bfff67847..daba34911b4b 100644 --- a/website/pages/ru/developing/supported-networks.mdx +++ b/website/pages/ru/developing/supported-networks.mdx @@ -9,16 +9,16 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service.
Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +Полный список функций, поддерживаемых в децентрализованной сети, можно найти на [этой странице](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). -Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. +Субграфы, работающие на основе субпотоков, индексирующие `mainnet` Ethereum, поддерживаются в Subgraph Studio и децентрализованной сети. ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +Если предпочитаемая Вами сеть не поддерживается в децентрализованной сети The Graph, Вы можете запустить свой собственный [Graph Node](https://github.com/graphprotocol/graph-node) для индексирования любой сети, совместимой с EVM. Убедитесь, что используемая Вами [версия](https://github.com/graphprotocol/graph-node/releases) поддерживает сеть, и у Вас есть необходимая конфигурация. -Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. +Graph Node также может индексировать другие протоколы посредством интеграции с Firehose. Интеграция Firehose была создана для сетей на базе NEAR, Arweave и Cosmos. diff --git a/website/pages/ru/developing/unit-testing-framework.mdx b/website/pages/ru/developing/unit-testing-framework.mdx index 032919ee6554..c1b2101a874b 100644 --- a/website/pages/ru/developing/unit-testing-framework.mdx +++ b/website/pages/ru/developing/unit-testing-framework.mdx @@ -1,30 +1,30 @@ --- -title: Unit Testing Framework +title: Фреймворк модульного тестирования --- -Matchstick is a unit testing framework, developed by [LimeChain](https://limechain.tech/), that enables subgraph developers to test their mapping logic in a sandboxed environment and deploy their subgraphs with confidence! +Matchstick - это фреймворк модульного тестирования, разработанный компанией [LimeChain](https://limechain.tech/), который позволяет разработчикам субграфов тестировать логику мэппинга в изолированной среде и уверенно развертывать свои субграфы! 
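Для наглядности ниже приведен минимальный набросок такого теста (предполагается демонстрационная схема с объектом `Gravatar`, как в примерах далее в этом руководстве; названия и значения здесь условные):

```typescript
import { assert, clearStore, test } from 'matchstick-as/assembly/index'
import { Gravatar } from '../../generated/schema'

test('Gravatar сохраняется в хранилище', () => {
  // Создаем тестовый объект и сохраняем его в хранилище
  let gravatar = new Gravatar('gravatarId0')
  gravatar.displayName = 'First Gravatar'
  gravatar.save()

  // Проверяем, что поле объекта записано так, как мы ожидаем
  assert.fieldEquals('Gravatar', 'gravatarId0', 'displayName', 'First Gravatar')

  // Очищаем хранилище, чтобы следующий тест начинался с чистого состояния
  clearStore()
})
```

Подробный разбор похожего теста приведен ниже в разделе «Напишите юнит-тест».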
-## Getting Started +## Начало работы -### Install dependencies +### Установка зависимостей -In order to use the test helper methods and run the tests, you will need to install the following dependencies: +Чтобы использовать вспомогательные методы тестирования и запускать тесты, Вам необходимо будет установить следующие зависимости: ```sh yarn add --dev matchstick-as ``` -❗ `graph-node` depends on PostgreSQL, so if you don't already have it, you will need to install it. We highly advise using the commands below as adding it in any other way may cause unexpected errors! +❗ `graph-node` зависит от PostgreSQL, поэтому, если у Вас его еще нет, Вам нужно будет его установить. Мы настоятельно рекомендуем использовать приведенные ниже команды, так как добавление его любым другим способом может привести к непредвиденным ошибкам! #### MacOS -Postgres installation command: +Команда установки Postgres: ```sh brew install postgresql ``` -Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` +Создайте символическую ссылку на последнюю версию libpq.5.lib _Возможно, сначала Вам потребуется создать этот каталог_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -32,7 +32,7 @@ ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/o #### Linux -Postgres installation command (depends on your distro): +Команда установки Postgres (зависит от Вашего дистрибутива): ```sh sudo apt install postgresql @@ -40,25 +40,25 @@ sudo apt install postgresql ### WSL (Windows Subsystem for Linux) -You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like +Вы можете использовать Matchstick в WSL как с помощью подхода Docker, так и с помощью бинарного подхода. Поскольку работа с WSL может оказаться немного сложной, вот несколько советов на случай, если Вы столкнетесь с такими проблемами, как ``` static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = ``` -or +или ``` /node_modules/gluegun/build/index.js:13 throw up; ``` -Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running +Пожалуйста, убедитесь, что используете более новую версию Node.js: graph-cli больше не поддерживает **v10.19.0**, которая по-прежнему является версией по умолчанию для новых образов Ubuntu на WSL. Например, подтверждено, что Matchstick работает в WSL с **v18.1.0**. Вы можете переключиться на неё либо через **nvm**, либо обновив свой глобальный Node.js. Не забудьте удалить `node_modules` и повторно запустить `npm install` после обновления nodejs! Затем убедитесь, что у Вас установлена **libpq**.
Это можно сделать, запустив ``` sudo apt-get install libpq-dev ``` -And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as +И, наконец, не применяйте `graph test` (который использует Вашу глобальную установку graph-cli и по какой-то причине в настоящее время выглядит так, как будто он не работает в WSL). Вместо этого примените `yarn test` или `npm run test` (который будет использовать локальный экземпляр graph-cli на уровне проекта, который работает отлично). Для этого Вам, конечно, понадобится скрипт `"test"` в файле `package.json`, который может быть довольно простым, например ```json { @@ -76,57 +76,57 @@ And finally, do not use `graph test` (which uses your global installation of gra } ``` -### Usage +### Применение -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +Чтобы использовать **Matchstick** в своём проекте subgraph, просто откройте терминал, перейдите в корневую папку своего проекта и запустите `graph test [options] ` - он загрузит последний двоичный файл **Matchstick** и запустит указанный тест или все тесты в тестовой папке (или все существующие тесты, если флаг источника данных не указан). -### CLI options +### Параметры CLI -This will run all tests in the test folder: +Это запустит все тесты в тестовой папке: ```sh graph test ``` -This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: +Это запустит тест с именем gravity.test.ts и/или все тесты внутри папки с именем gravity: ```sh graph test gravity ``` -This will run only that specific test file: +Это запустит только конкретный тестовый файл: ```sh graph test path/to/file.test.ts ``` -**Options:** +**Параметры:** ```sh --c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. --h, --help Show usage information --l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) --r, --recompile Forces tests to be recompiled --v, --version Choose the version of the rust binary that you want to be downloaded/used +-c, --coverage Запускает тесты в режиме покрытия +-d, --docker Запускает тесты в docker-контейнере (Примечание: пожалуйста, выполняйте из корневой папки субграфа) +-f, --force Binary: повторно загружает двоичный файл. 
Docker: повторно загружает Dockerfile и перестраивает образ docker +-h, --help Показывает информацию об использовании +-l, --logs Выводит на консоль информацию об операционной системе, модели процессора и URL-адресе загрузки (в целях отладки) +-r, --recompile Принудительно перекомпилирует тесты +-v, --version Выберите версию бинарного файла rust, которую хотите загрузить/использовать ``` ### Docker -From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. +Начиная с `graph-cli 0.25.2`, команда `graph test` поддерживает запуск `matchstick` в контейнере docker с флагом `-d`. Реализация docker использует [bind mount](https://docs.docker.com/storage/bind-mounts/), чтобы не приходилось перестраивать образ docker каждый раз, когда выполняется команда `graph test -d`. В качестве альтернативы Вы можете следовать инструкциям из репозитория [matchstick](https://github.com/LimeChain/matchstick#docker-) для запуска docker вручную. -❗ If you have previously ran `graph test` you may encounter the following error during docker build: +❗ Если Вы ранее запускали `graph test`, Вы можете столкнуться со следующей ошибкой во время сборки docker: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` +В этом случае создайте в корневой папке файл `.dockerignore` и добавьте в него `node_modules/binary-install-raw/bin` -### Configuration +### Конфигурация -Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: +Matchstick можно настроить на использование пользовательских тестов, библиотек и пути к манифесту через файл конфигурации `matchstick.yaml`: ```yaml testsFolder: path/to/tests @@ -134,27 +134,27 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Демонстрационный субграф -You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) +Вы можете попробовать и поиграть с примерами из этого руководства, клонировав [репозиторий демонстрационного субграфа](https://github.com/LimeChain/demo-subgraph) -### Video tutorials +### Видеоуроки -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Также Вы можете посмотреть серию видеороликов ["Как использовать Matchstick для написания модульных тестов для Ваших субграфов"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) -## Tests structure (>=0.5.0) +## Структура тестов (>=0.5.0) -_**IMPORTANT: Requires matchstick-as >=0.5.0**_ +_**ВАЖНО: Требуется matchstick-as >=0.5.0**_ ### describe() -`describe(name: String , () => {})` - Defines a test group. +`describe(name: String , () => {})` - Определяет тестовую группу. -**_Notes:_** +**_Примечания:_** -- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ +- _Описания не являются обязательными.
Вы по-прежнему можете использовать test() как и раньше, вне блоков describe()_ -Example: +Пример: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -167,7 +167,7 @@ describe("handleNewGravatar()", () => { }) ``` -Nested `describe()` example: +Пример вложенной функции `describe()`: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -192,9 +192,9 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. +`test(name: String, () =>, should_fail: bool)` - Определяет тестовый пример. Вы можете использовать test() внутри блоков describe() или независимо друг от друга. -Example: +Пример: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -207,7 +207,7 @@ describe("handleNewGravatar()", () => { }) ``` -or +или ```typescript test("handleNewGravatar() should create a new entity", () => { @@ -221,11 +221,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. +Запускает блок кода перед любым из тестов в файле. Если `beforeAll` объявлен внутри блока `describe`, он запускается в начале этого блока `describe`. -Examples: +Примеры: -Code inside `beforeAll` will execute once before _all_ tests in the file. +Код внутри `beforeAll` будет выполнен один раз перед _всеми_ тестами в файле. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -252,7 +252,7 @@ describe("When entity already exists", () => { }) ``` -Code inside `beforeAll` will execute once before all tests in the first describe block +Код внутри `beforeAll` будет выполняться один раз перед всеми тестами в первом блоке описания ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -281,11 +281,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. +Запускает блок кода после выполнения всех тестов в файле. Если `afterAll` объявлен внутри блока `describe`, он запускается в конце этого блока `describe`. -Example: +Пример: -Code inside `afterAll` will execute once after _all_ tests in the file. +Код внутри `afterAll` будет выполнен один раз после _всех_ тестов в файле. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -310,7 +310,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -Code inside `afterAll` will execute once after all tests in the first describe block +Код внутри `afterAll` будет выполнен один раз после всех тестов в первом блоке описания ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" @@ -342,9 +342,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. +Запускает блок кода перед каждым тестом. Если `beforeEach` объявлен внутри блока `describe`, он запускается перед каждым тестом в этом блоке `describe`. -Examples: Code inside `beforeEach` will execute before each tests. +Примеры: Код внутри `beforeEach` будет выполняться перед каждым тестированием. 
```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -367,7 +367,7 @@ describe("handleNewGravatars, () => { ... ``` -Code inside `beforeEach` will execute only before each test in the that describe +Код внутри `beforeEach` будет выполняться только перед каждым тестом в описании ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -384,7 +384,7 @@ describe('handleUpdatedGravatars', () => { test('Upates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // код, который должен обновить отображаемое имя до "1st Gravatar" + // код, который должен обновить displayName до 1-го Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -393,7 +393,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // код, который должен изменить URL-адрес изображения на https://www.gravatar.com/avatar/0x0 + // код, который должен изменить imageUrl на https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -405,11 +405,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. +Запускает блок кода после каждого теста. Если `afterEach` объявлен внутри блока `describe`, он запускается после каждого теста в этом блоке `describe`. -Examples: +Примеры: -Code inside `afterEach` will execute after every test. +Код внутри `afterEach` будет выполняться после каждого теста. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -433,7 +433,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // код, который должен обновить отображаемое имя до "1st Gravatar" + // код, который должен обновить displayName до 1-го Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -441,18 +441,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // код, который должен изменить URL-адрес изображения на https://www.gravatar.com/avatar/0x0 + // код, который должен изменить imageUrl на https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) - -Text -XPath: /pre[24]/code -File: unit-testing-framework.mdx ``` -Code inside `afterEach` will execute after each test in that describe +Код внутри `afterEach` будет выполняться после каждого теста в этом описании ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -477,7 +473,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // код, который должен обновить отображаемое имя до "1st Gravatar" + // код, который должен обновить displayName до 1-го Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -485,14 +481,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // код, который должен изменить URL-адрес изображения на https://www.gravatar.com/avatar/0x0 + // код, который должен изменить imageUrl на https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -## Asserts +## Утверждения ```typescript fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) @@ -526,11 +522,11 @@ assertNotNull(value: T) entityCount(entityType: string, expectedCount: i32) ``` -## Write a Unit Test +## Напишите юнит-тест -Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Давайте посмотрим, как будет выглядеть простой юнит-тест, используя примеры Gravatar в [Демонстрационном субграфе](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). -Assuming we have the following handler function (along with two helper functions to make our life easier): +Предположим, у нас есть следующая функция-обработчик (наряду с двумя вспомогательными функциями, облегчающими нашу жизнь): ```typescript export function handleNewGravatar(event: NewGravatar): void { @@ -581,7 +577,7 @@ export function createNewGravatarEvent( } ``` -We first have to create a test file in our project. This is an example of how that might look like: +Сначала мы должны создать тестовый файл в нашем проекте. 
Вот пример того, как это могло бы выглядеть: ```typescript import { clearStore, test, assert } from 'matchstick-as/assembly/index' @@ -590,23 +586,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Create a test entity and save it in the store as initial state (optional) + // Создайте тестовый объект и сохраните его в хранилище как исходное состояние (необязательно) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Create mock events + // Создайте фиктивные события let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Call mapping functions passing the events we just created + // Вызовите функции мэппинга, передающие события, которые мы только что создали handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Assert the state of the store + // Подтвердите состояние хранилища assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Clear the store in order to start the next test off on a clean slate + // Очистите хранилище, чтобы начать следующий тест с чистого листа clearStore() }) @@ -615,38 +611,38 @@ test('Next test', () => { }) ``` -That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: +Предстоит очень многое распаковать! Прежде всего, важно отметить, что мы импортируем данные из `matchstick-as`, нашей вспомогательной библиотеки AssemblyScript (распространяемой как модуль npm). Репозиторий Вы можете найти [здесь](https://github.com/LimeChain/matchstick-as). `matchstick-as` предоставляет нам полезные методы тестирования, а также определяет функцию `test()`, которую мы будем использовать для построения наших тестовых блоков. В остальном все довольно просто - вот что происходит: -- We're setting up our initial state and adding one custom Gravatar entity; -- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; -- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; -- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; -- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. 
+- Мы настраиваем наше исходное состояние и добавляем один пользовательский объект Gravatar; +- Мы определяем два объекта события `NewGravatar` вместе с их данными, используя функцию `createNewGravatarEvent()`; +- Мы вызываем методы-обработчики этих событий - `handleNewGravatars()` - и передаем список наших пользовательских событий; +- Мы утверждаем состояние хранилища. Как это происходит? - Мы передаем уникальную комбинацию типа объекта и идентификатора. Затем мы проверяем конкретное поле в этом объекте и утверждаем, что оно имеет то значение, которое мы ожидаем от него получить. Мы делаем это как для исходного объекта Gravatar, который мы добавили в хранилище, так и для двух объектов Gravatar, которые добавляются при вызове функции-обработчика; +- И, наконец, мы очищаем хранилище с помощью `clearStore()`, чтобы наш следующий тест можно было начать с нового и пустого объекта хранилища. Мы можем определить столько тестовых блоков, сколько захотим. -There we go - we've created our first test! 👏 +Вот и все - мы создали наш первый тест! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Теперь, чтобы запустить наши тесты, Вам просто нужно запустить в корневой папке своего субграфа следующее: `graph test Gravity` -And if all goes well you should be greeted with the following: +И если все пойдет хорошо, Вы увидите следующее приветствие: -![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) +![Matchstick с надписью “Все тесты пройдены!”](/img/matchstick-tests-passed.png) -## Common test scenarios +## Распространенные сценарии тестирования -### Hydrating the store with a certain state +### Наполнение хранилища до определенного состояния -Users are able to hydrate the store with a known set of entities. Here's an example to initialise the store with a Gravatar entity: +Пользователи могут наполнять хранилище известным набором объектов. Вот пример инициализации хранилища с помощью объекта Gravatar: ```typescript let gravatar = new Gravatar('entryId') gravatar.save() ``` -### Calling a mapping function with an event +### Вызов функции мэппинга с помощью события -A user can create a custom event and pass it to a mapping function that is bound to the store: +Пользователь может создать пользовательское событие и передать его функции мэппинга, привязанной к хранилищу: ```typescript import { store } from 'matchstick-as/assembly/store' @@ -658,9 +654,9 @@ let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01 handleNewGravatar(newGravatarEvent) ``` -### Calling all of the mappings with event fixtures +### Вызов всех мэппингов с фиксированными событиями -Users can call the mappings with test fixtures. +Пользователи могут вызывать мэппинги с помощью тестовых наборов данных.
```typescript import { NewGravatar } from '../../generated/Gravity/Gravity' @@ -682,9 +678,9 @@ export function handleNewGravatars(events: NewGravatar[]): void { } ``` -### Mocking contract calls +### Имитация вызовов контракта -Users can mock contract calls: +Пользователи могут имитировать вызовы контракта: ```typescript import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' @@ -704,9 +700,9 @@ let result = gravity.gravatarToOwner(bigIntParam) assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) ``` -As demonstrated, in order to mock a contract call and hardcore a return value, the user must provide a contract address, function name, function signature, an array of arguments, and of course - the return value. +Как было продемонстрировано, для того, чтобы имитировать вызов контракта и жестко задать возвращаемое значение, пользователь должен предоставить адрес контракта, имя функции, сигнатуру функции, массив аргументов и, конечно же, возвращаемое значение. -Users can also mock function reverts: +Пользователи также могут имитировать реверты (откаты) функций: ```typescript let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') @@ -715,20 +711,20 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri .reverts() ``` -### Mocking IPFS files (from matchstick 0.4.1) +### Имитация файлов IPFS (начиная с matchstick 0.4.1) -Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. +Пользователи могут имитировать файлы IPFS с помощью функции `mockIpfsFile(hash, filePath)`. Функция принимает два аргумента, первый из которых - хэш/путь к файлу IPFS, а второй - путь к локальному файлу. -NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: +ПРИМЕЧАНИЕ: При тестировании `ipfs.map/ipfs.mapJSON` функция обратного вызова должна быть экспортирована из тестового файла, чтобы matchstick мог ее обнаружить, подобно функции `processGravatar()` в приведенном ниже примере теста: -`.test.ts` file: +Файл `.test.ts`: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Export ipfs.map() callback in order for matchstck to detect it +// Экспортируйте обратный вызов ipfs.map(), чтобы matchstick мог его обнаружить export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -758,7 +754,7 @@ test('ipfs.map', () => { }) ``` -`utils.ts` file: +Файл `utils.ts`: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -766,10 +762,10 @@ import { Gravatar } from "../../generated/schema" ...
-// ipfs.map callback +// обратный вызов ipfs.map export function processGravatar(value: JSONValue, userData: Value): void { - // See the JSONValue documentation for details on dealing - // with JSON values + // Смотрите документацию по JSONValue для получения подробной информации о работе + // со значениями JSON let obj = value.toObject() let id = obj.get('id') @@ -777,13 +773,13 @@ export function processGravatar(value: JSONValue, userData: Value): void { return } - // Callbacks can also created entities + // Обратные вызовы также могут создавать объекты let gravatar = new Gravatar(id.toString()) gravatar.displayName = userData.toString() + id.toString() gravatar.save() } -// function that calls ipfs.cat +// функция, которая вызывает ipfs.cat export function gravatarFromIpfs(): void { let rawData = ipfs.cat("ipfsCatfileHash") @@ -806,9 +802,9 @@ export function gravatarFromIpfs(): void { } ``` -### Asserting the state of the store +### Подтверждение состояния хранилища -Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, a name of a field on that Entity, and the expected value of the field. Here's a quick example: +Пользователи могут утверждать конечное (или промежуточное) состояние хранилища с помощью утверждений об объектах. Для этого пользователь должен указать тип объекта, конкретный идентификатор объекта, имя поля этого объекта и ожидаемое значение поля. Вот небольшой пример: ```typescript import { assert } from 'matchstick-as/assembly/index' @@ -820,38 +816,38 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. +Запуск функции assert.fieldEquals() проверит соответствие данного поля заданному ожидаемому значению. Тест завершится неудачей, и будет выведено сообщение об ошибке, если значения **НЕ** равны. В противном случае тест пройдет успешно. -### Interacting with Event metadata +### Взаимодействие с метаданными событий -Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: +Пользователи могут использовать метаданные транзакции по умолчанию, которые могут быть возвращены в виде ethereum.Event с помощью функции `newMockEvent()`. В следующем примере показано, как можно считывать/записывать данные в эти поля объекта Event: ```typescript -// Read +// Чтение let logType = newGravatarEvent.logType -// Write +// Запись let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) ``` -### Asserting variable equality +### Утверждение равенства переменных ```typescript assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Asserting that an Entity is **not** in the store +### Утверждение о том, что объект **отсутствует** в хранилище -Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message.
Here's a quick example of how to use this functionality: +Пользователи могут утверждать, что объект отсутствует в хранилище. Функция принимает тип объекта и идентификатор. Если объект действительно находится в хранилище, тест завершится неудачей с соответствующим сообщением об ошибке. Вот краткий пример использования этой функции: ```typescript assert.notInStore('Gravatar', '23') ``` -### Printing the whole store (for debug purposes) +### Распечатка всего хранилища (в целях отладки) -You can print the whole store to the console using this helper function: +С помощью этой вспомогательной функции можно вывести всё хранилище на консоль: ```typescript import { logStore } from 'matchstick-as/assembly/store' @@ -859,9 +855,9 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -### Expected failure +### Ожидаемый сбой -Users can have expected test failures, using the shouldFail flag on the test() functions: +Пользователи могут ожидать сбоев тестирования, используя флаг shouldFail в функциях test(): ```typescript test( @@ -873,11 +869,11 @@ test( ) ``` -If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash. +Если тест помечен как shouldFail = true, но НЕ завершается неудачей, это отобразится как ошибка в логах, и тестовый блок завершится неудачей. Также, если он помечен как shouldFail = false (состояние по умолчанию), произойдет сбой тестового исполнителя. -### Logging +### Логирование (ведение журналов) -Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: +Наличие пользовательских логов в модульных тестах - это точно то же самое, что логирование в мэппингах. Разница заключается в том, что объект лога необходимо импортировать из matchstick-as, а не из graph-ts. Вот простой пример со всеми некритическими типами логов: ```typescript import { test } from "matchstick-as/assembly/index"; @@ -900,7 +896,7 @@ test("Warning", () => { }); ``` -Users can also simulate a critical failure, like so: +Пользователи также могут имитировать критический сбой, например, так: ```typescript test('Blow everything up', () => { @@ -908,11 +904,11 @@ test('Blow everything up', () => { }) ``` -Logging critical errors will stop the execution of the tests and blow everything up. After all - we want to make sure you're code doesn't have critical logs in deployment, and you should notice right away if that were to happen. +Логирование критических ошибок остановит выполнение тестов и все испортит. В конце концов, мы хотим быть уверены, что Ваш код не содержит критических логов при развертывании, и Вы сразу заметите, если это произойдет. -### Testing derived fields +### Тестирование производных полей -Testing derived fields is a feature which (as the example below shows) allows the user to set a field in a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. Important thing to note is that the first entity needs to be reloaded as the automatic update happens in the store in rust of which the AS code is agnostic. 
+Тестирование производных полей - это функция, которая (как показано в примере ниже) позволяет пользователю задать поле в определенном объекте и автоматически обновить другой объект, если он извлекает одно из своих полей из первого объекта. Важно отметить, что первый объект необходимо перезагрузить, поскольку автоматическое обновление происходит в хранилище, реализованном на rust, о чем код AS ничего не знает. ```typescript test('Derived fields example test', () => { @@ -935,13 +931,13 @@ test('Derived fields example test', () => { }) ``` -### Testing dynamic data sources +### Тестирование динамических источников данных -Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). +Тестирование динамических источников данных может быть выполнено путем имитации возвращаемого значения функций `context()`, `address()` и `network()` пространства имен dataSource. В настоящее время эти функции возвращают следующее: `context()` - возвращает пустой объект (DataSourceContext), `address()` - возвращает `0x0000000000000000000000000000000000000000`, `network()` - возвращает `mainnet`. Функции `create(...)` и `createWithContext(...)` имитированы так, что они не выполняют никаких действий, поэтому их вообще не нужно вызывать в тестах. Изменения возвращаемых значений могут быть выполнены с помощью функций пространства имен `dataSourceMock` в `matchstick-as` (версия 0.3.0+). -Example below: +Пример ниже: -First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): +Сначала у нас есть следующий обработчик событий (который был намеренно перепрофилирован для демонстрации имитации источника данных): ```typescript export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { @@ -957,7 +953,7 @@ export function handleApproveTokenDestinations(event: ApproveTokenDestinations): } ``` -And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: +Затем у нас есть тест, использующий один из методов в пространстве имён dataSourceMock для установки нового возвращаемого значения для всех функций dataSource: ```typescript import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' @@ -990,41 +986,41 @@ test('Data source simple mocking example', () => { }) ``` -Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. +Обратите внимание, что функция dataSourceMock.resetValues() вызывается в конце. Это происходит потому, что значения запоминаются при их изменении, и их необходимо сбросить, если Вы хотите вернуться к значениям по умолчанию. -## Test Coverage +## Тестовое покрытие -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests.
+Используя **Matchstick**, разработчики субграфов могут запустить скрипт, который вычислит тестовое покрытие написанных модульных тестов. -The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +Инструмент тестового покрытия берет скомпилированные тестовые двоичные файлы `wasm` и преобразует их в файлы `wat`, которые затем можно легко проверить, были ли вызваны обработчики, определенные в `subgraph.yaml`. Поскольку покрытие кода (и тестирование в целом) в AssemblyScript и WebAssembly находится на очень ранних стадиях, **Matchstick** не может проверить покрытие ветвей. Вместо этого мы полагаемся на утверждение, что если был вызван данный обработчик, то событие/функция для него были должным образом имитированы. -### Prerequisites +### Предварительные требования -To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: +Чтобы запустить функцию тестового покрытия, представленную в **Matchstick**, необходимо заранее подготовить несколько вещей: -#### Export your handlers +#### Экспортируйте свои обработчики -In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: +Для того чтобы **Matchstick** мог проверить, какие обработчики запущены, эти обработчики необходимо экспортировать из **тестового файла**. Так, например, в файле gravity.test.ts импортируется следующий обработчик: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: +Чтобы эта функция была видимой (чтобы она была включена в файл `wat` **под именем**), нам нужно также экспортировать ее, например, так: ```typescript export { handleNewGravatar } ``` -### Usage +### Применение -Once that's all set up, to run the test coverage tool, simply run: +После того как всё это будет настроено, чтобы запустить инструмент тестового покрытия, просто запустите: ```sh graph test -- -c ``` -You could also add a custom `coverage` command to your `package.json` file, like so: +Вы также можете добавить пользовательскую команду `coverage` в свой файл `package.json`, например, так: ```typescript "scripts": { @@ -1033,7 +1029,7 @@ You could also add a custom `coverage` command to your `package.json` file, like }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +При этом запустится инструмент покрытия, и в терминале Вы должны увидеть что-то вроде этого: ```sh $ graph test -c @@ -1072,17 +1068,17 @@ Test coverage: 0.0% (0/6 handlers). Global test coverage: 22.2% (2/9 handlers). ``` -### Test run time duration in the log output +### Продолжительность выполнения теста в выходных данных лога -The log output includes the test run duration. Here's an example: +Выходные данные лога включают в себя продолжительность тестового запуска. 
Вот пример: `[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` -## Common compiler errors +## Типичные ошибки компилятора -> Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined +> Критично: Не удалось создать WasmInstance из допустимого модуля с контекстом: неизвестный импорт: wasi_snapshot_preview1::fd_write не определен -This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/assemblyscript-api/#logging-api) +Это означает, что Вы использовали в своем коде `console.log`, который не поддерживается AssemblyScript. Пожалуйста, рассмотрите возможность использования [API логирования](/developing/assemblyscript-api/#logging-api) > ERROR TS2554: Expected ? arguments, but got ?. > @@ -1096,8 +1092,8 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. +Несовпадение в аргументах вызвано несоответствием в `graph-ts` и `matchstick-as`. Лучший способ устранить проблемы, подобные этой, - обновить всё до последней выпущенной версии. -## Feedback +## Обратная связь -If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. +Если у Вас есть какие-либо вопросы, отзывы, пожелания по функциям или Вы просто хотите связаться с нами, лучшим местом будет Graph Discord, где у нас есть выделенный канал для Matchstick под названием 🔥| unit-testing. diff --git a/website/pages/ru/firehose.mdx b/website/pages/ru/firehose.mdx index 75e4dd19ba8d..cba336e05bc9 100644 --- a/website/pages/ru/firehose.mdx +++ b/website/pages/ru/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose обеспечивает файловый и потоковый подход к обработке данных блокчейна. +![Firehose Logo](/img/firehose-logo.png) -Интеграция Firehose была создана для Ethereum (и многих EVM чейнов), NEAR, Solana, Cosmos и Arweave, а также других, находящихся в разработке. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Интеграция Graph Node была создана для нескольких чейнов, поэтому субграфы могут передавать данные из Firehose для повышения производительности и масштабируемости индексации. Firehose также поддерживает [субпотоки](/substreams), новую технологию преобразования, созданную разработчиками ядра The Graph. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Изучите [документацию Firehose](https://firehose.streamingfast.io/), чтобы узнать больше. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). 
+ +### Начало работы + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ru/glossary.mdx b/website/pages/ru/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/ru/glossary.mdx +++ b/website/pages/ru/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. 
This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. 
-- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). 
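The slashing and self-stake figures quoted in the glossary entries above reduce to simple arithmetic. A minimal, purely illustrative TypeScript sketch follows; the constants mirror the figures quoted above, but the helper itself is an assumption for this example and is not part of any Graph tooling:

```ts
// Illustrative only: the percentages mirror the glossary entries above;
// this helper is not part of any official Graph library.
const SLASH_PERCENT = 0.025;         // slashing is 2.5% of an Indexer's self stake
const FISHERMAN_REWARD_SHARE = 0.5;  // 50% of slashed GRT goes to the Fisherman, the rest is burned

function slashingBreakdown(selfStakeGRT: number) {
  const slashed = selfStakeGRT * SLASH_PERCENT;
  return {
    slashed,
    toFisherman: slashed * FISHERMAN_REWARD_SHARE,
    burned: slashed * (1 - FISHERMAN_REWARD_SHARE),
  };
}

// With the 100,000 GRT minimum self stake: 2,500 GRT slashed,
// 1,250 GRT to the Fisherman, 1,250 GRT burned.
console.log(slashingBreakdown(100_000));
```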
diff --git a/website/pages/ru/graphcast.mdx b/website/pages/ru/graphcast.mdx index 471723478ec2..dfa4f9b0c57a 100644 --- a/website/pages/ru/graphcast.mdx +++ b/website/pages/ru/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/ru/index.json b/website/pages/ru/index.json index fb5fffe50133..7c3bfae69569 100644 --- a/website/pages/ru/index.json +++ b/website/pages/ru/index.json @@ -11,20 +11,20 @@ "description": "Присоединяйтесь и начните работу с The Graph" }, "developerFaqs": { - "title": "Часто задаваемы вопросы для разработчиков", - "description": "Часто задаваемы вопросы" + "title": "Часто задаваемые вопросы для разработчиков", + "description": "Часто задаваемые вопросы" }, "queryFromAnApplication": { "title": "Запрос из приложения", - "description": "Узнайте как отправлять запросы из приложения" + "description": "Узнайте, как отправлять запросы из приложения" }, "createASubgraph": { "title": "Создайте субграф", "description": "Используйте Studio для создания субграфов" }, "migrateFromHostedService": { - "title": "Перенос из хостингового сервиса", - "description": "Перенос субграфов в сеть The Graph" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Создание и исследование подграфов в Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Поддерживаемые сети", - "description": "The Graph поддерживает следующие блокчейны в распределенной сети The Graph и в централизованном решении Hosted Service.", - "graphNetworkAndHostedService": "Распределенная сеть The Graph и централизованная Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "В бета-версии." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ru/managing/deprecating-a-subgraph.mdx b/website/pages/ru/managing/deprecating-a-subgraph.mdx index e6adfccad368..96c9062d8be3 100644 --- a/website/pages/ru/managing/deprecating-a-subgraph.mdx +++ b/website/pages/ru/managing/deprecating-a-subgraph.mdx @@ -1,18 +1,18 @@ --- -title: Deprecating a Subgraph +title: Отключение сабграфа --- -So you'd like to deprecate your subgraph on The Graph Explorer. You've come to the right place! 
Follow the steps below: +Итак, Вы хотите отключить свой субграф в Graph Explorer. Вы находитесь в нужном разделе! Выполните следующие шаги: -1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. +1. Перейдите по ссылке на адрес контракта [здесь](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) +2. Вызовите функцию `deprecateSubgraph`, используя ваш `SubgraphID` в качестве аргумента. +3. Вуаля! Ваш субграф более не будет отображаться при поиске в Graph Explorer. -Please note the following: +Пожалуйста, учтите следующее: -- The `deprecateSubgraph` function should be called by the owner's wallet. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph will be able to withdraw their signal at an average share price. -- Deprecated subgraphs will be indicated with an error message. +- Функция `deprecateSubgraph` должна быть вызвана из кошелька владельца. +- Кураторы больше не смогут подавать сигнал на субграф. +- Кураторы, которые уже подали сигнал на субграф, смогут отозвать свой сигнал по средней стоимости доли. +- Отключенный субграф будет помечен сообщением об ошибке. -If you interacted with the deprecated subgraph, you'll be able to find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. +Если Вы взаимодействовали с отключенным субграфом, Вы сможете найти его в профиле пользователя в подразделах "Subgraphs", "Indexing" или "Curating" соответственно. diff --git a/website/pages/ru/managing/transferring-subgraph-ownership.mdx index 1ca1c621a9c9..880a144c111d 100644 --- a/website/pages/ru/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/ru/managing/transferring-subgraph-ownership.mdx @@ -1,39 +1,39 @@ --- -title: Transferring Subgraph Ownership +title: Передача права на владение субграфом --- -The Graph supports the transfer of the ownership of a subgraph. +The Graph поддерживает передачу права на владение субграфом. -When you deploy a subgraph to mainnet, an NFT will be minted to the address that deployed the subgraph. The NFT is based on a standard ERC721, so it can be easily transferred to different accounts. +Когда Вы развёртываете субграф в основной сети, NFT будет отчеканен на адрес, с которого был развёрнут субграф. NFT основан на стандарте ERC721, поэтому его можно легко передавать на другие аккаунты. -Whoever owns the NFT controls the subgraph. If the owner decides to sell the NFT, or transfer it, they will no longer be able to make edits or updates to that subgraph on the network. +Тот, кто владеет NFT, контролирует субграф. Если владелец решит продать NFT или передать его, он уже не сможет вносить изменения или обновления в этот субграф в сети. -In addition to adding more flexibility to the development lifecycle, this functionality makes certain use cases more convenient, such as moving your control to a multisig or a community member creating it on behalf of a DAO. +В дополнение к тому, что эта функциональность увеличивает гибкость жизненного цикла разработки, она также делает определенные сценарии использования более удобными, такие как передача контроля к мультисигу или создание его от имени члена сообщества для DAO. 
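Because the subgraph is represented by a standard ERC721 NFT, it can in principle also be transferred programmatically, in addition to the Subgraph Studio and marketplace flows described below. A hedged sketch follows, assuming ethers v6; the contract address, RPC endpoint, and token ID are placeholders for illustration, not real values:

```ts
import { ethers } from "ethers";

// Placeholder values for illustration only; substitute the real subgraph NFT
// contract address, an RPC endpoint, and your subgraph's token ID.
const SUBGRAPH_NFT_ADDRESS = "0x0000000000000000000000000000000000000000";
const SUBGRAPH_TOKEN_ID = 1n;

// Any standard ERC721 contract exposes transferFrom(from, to, tokenId).
const erc721Abi = ["function transferFrom(address from, address to, uint256 tokenId)"];

async function transferSubgraphNft(newOwner: string): Promise<void> {
  const provider = new ethers.JsonRpcProvider("https://rpc.example.org"); // assumed endpoint
  const owner = new ethers.Wallet(process.env.PRIVATE_KEY!, provider);    // current NFT owner
  const nft = new ethers.Contract(SUBGRAPH_NFT_ADDRESS, erc721Abi, owner);

  const tx = await nft.transferFrom(owner.address, newOwner, SUBGRAPH_TOKEN_ID);
  await tx.wait(); // once mined, control of the subgraph moves to newOwner
}
```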
-## Viewing your subgraph as an NFT +## Просмотр субграфа как NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like OpenSea: +Чтобы просмотреть свой субграф как NFT, Вы можете посетить маркетплейс NFT, например, OpenSea: ``` https://opensea.io/your-wallet-address ``` -Or a wallet explorer like **Rainbow.me**: +Или воспользоваться обозревателем кошелька, например, **Rainbow.me**: ``` https://rainbow.me/your-wallet-addres ``` -## Transferring ownership of a subgraph +## Передача права на владение субграфом -To transfer ownership of a subgraph, you can use the UI built into Subgraph Studio: +Чтобы передать право собственности на субграф, Вы можете использовать UI (пользовательский интерфейс), встроенный в Subgraph Studio: -![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) +![Передача права на владение субграфом](/img/subgraph-ownership-transfer-1.png) -And then choose the address that you would like to transfer the subgraph to: +А затем выберите адрес, на который хотели бы передать субграф: -![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) +![Передача права на владение субграфом](/img/subgraph-ownership-transfer-2.png) -You can also use the built-in UI of NFT marketplaces like OpenSea: +Вы также можете использовать встроенный UI (пользовательский интерфейс) маркетплейсов NFT, таких, как OpenSea: -![Subgraph Ownership Trasfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) +![Передача права на владение субграфом с маркетплейса NFT](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/ru/mips-faqs.mdx b/website/pages/ru/mips-faqs.mdx index 84995ef4642a..84b01a93830a 100644 --- a/website/pages/ru/mips-faqs.mdx +++ b/website/pages/ru/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Введение +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). @@ -94,11 +96,11 @@ The percentage to be distributed at the end of the program will be subject to ve ### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -Yes +Да -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? +### 14. Можно ли использовать заблокированные токены из программы куратора the graph для участия в тестовой сети MIP? -Yes +Да ### 15. During the MIPs program, will there be a period to dispute invalid POI? @@ -114,7 +116,7 @@ Please email info@thegraph.foundation ### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -Yes +Да ### 20. Are there recommended regions to run the servers? 
diff --git a/website/pages/ru/network/benefits.mdx index 839a0a7b9cf7..05dfe6b13c35 100644 --- a/website/pages/ru/network/benefits.mdx +++ b/website/pages/ru/network/benefits.mdx @@ -1,96 +1,97 @@ --- -title: The Graph Network vs. Self Hosting +title: Сеть The Graph по сравнению с Self Hosting socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +Децентрализованная сеть Graph была спроектирована и усовершенствована для создания надежной системы индексации и запросов — и с каждым днем она становится лучше благодаря тысячам участников по всему миру. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +Преимущества этого децентрализованного протокола не могут быть воспроизведены путем локального запуска `graph-node`. Сеть The Graph более надежна, эффективна и менее затратна. -Here is an analysis: +Вот анализ: -## Why You Should Use The Graph Network +## Почему Вы должны использовать сеть The Graph -- 60-98% lower monthly cost -- $0 infrastructure setup costs -- Superior uptime -- Access to 438 Indexers (and counting) -- 24/7 technical support by global community +- Ежемесячные расходы снижаются на 60-98% +- Затраты на установку инфраструктуры в размере $0 +- Превосходное время безотказной работы +- Access to hundreds of independent Indexers around the world +- 24/7 техническая поддержка со стороны глобального сообщества -## The Benefits Explained +## Преимущества -### Lower & more Flexible Cost Structure +### Более низкая и гибкая структура затрат -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. +Никаких контрактов. Никаких ежемесячных платежей. Платите только за те запросы, которые вы используете, — средняя стоимость каждого запроса составляет 0,0002 доллара. Запросы оцениваются в долларах США и оплачиваются в GRT. -Query costs may vary; the quoted cost is the average at time of publication (December 2022). +Стоимость запроса может варьироваться; указанная стоимость является средней на момент публикации (декабрь 2022 года). 
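The per-query prices in the comparison tables that follow can be sanity-checked with one line of arithmetic (monthly query cost = number of queries multiplied by cost per query). The snippet below is only an illustrative restatement of the figures shown in those tables:

```ts
// Monthly query cost = number of queries * cost per query (figures from the tables below).
const monthlyQueryCost = (queries: number, costPerQuery: number) => queries * costPerQuery;

monthlyQueryCost(30_000, 0.0005);      // ~$15 per month (low volume)
monthlyQueryCost(3_000_000, 0.00025);  // $750 per month (medium volume)
monthlyQueryCost(30_000_000, 0.00015); // $4,500 per month (high volume)
```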
-## Low Volume User (less than 30,000 queries per month) +## Пользователь с небольшим количеством запросов (менее 30 000 запросов в месяц) -| Cost Comparison | Self Hosted | Graph Network | +| Сравнение затрат | Самостоятельный хостинг | Сеть Graph | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | ~$15 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | -| Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | ~$15 | - -## Medium Volume User (3,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $0+ | ~$15 в месяц | +| Время разработки | $400 в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | 30,000 (автоматическое масштабирование) | +| Стоимость одного запроса | $0 | $0.0005 | +| Инфраструктура | Централизованная | Децентрализованная | +| Географическая избыточность | $750+ за каждую дополнительную ноду | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $750+ | ~$15 | + +## Пользователь со средним количеством запросов (более 3,000,000 запросов в месяц) + +| Сравнение затрат | Самостоятельный хостинг | Сеть Graph | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $750 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 3,000,000+ | -| Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $750 | - -## High Volume User (30,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Ежемесячная стоимость сервера\* | $350 в месяц | $0 | +| Стоимость запроса | $500 в месяц | $750 в месяц | +| Время разработки | $800 в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | 3,000,000+ | +| Стоимость одного запроса | $0 | $0.00025 | +| Инфраструктура | Централизованная | Децентрализованная | +| Инженерные расходы | $200 в час | Включено | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $1,650+ | $750 | + +## Пользователь с высоким количеством запросов (более 30,000,000 запросов в месяц) + +| Сравнение затрат | Самостоятельный хостинг | Сеть Graph | | :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $4,500 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000,000+ | -| Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | 
-| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $4,500 | +| Ежемесячная стоимость сервера\* | $1100 в месяц за ноду | $0 | +| Стоимость запроса | $4000 | $4,500 в месяц | +| Необходимое количество нод | 10 | Не подходит | +| Время разработки | $6,000 или больше в месяц | Никто, встроен в сеть с глобально распределенными индексаторами | +| Запросы в месяц | Ограничен возможностями инфраструктуры | 30,000,000+ | +| Стоимость одного запроса | $0 | $0.00015 | +| Инфраструктура | Централизованная | Децентрализованная | +| Географическая избыточность | общие затраты на каждую дополнительную ноду составляют $1,200 | Включено | +| Время безотказной работы | Варьируется | 99.9%+ | +| Общие ежемесячные расходы | $11,000+ | $4,500 | -\*including costs for backup: $50-$100 per month +\* включая расходы на резервное копирование: $50-$100 в месяц -Engineering time based on $200 per hour assumption +Время разработки основано на предположении о $200 в час -using the max query budget function in the budget billing tab, while maintaining high quality of service +используя функцию максимального бюджета запроса на вкладке бюджетное выставление счетов, сохраняя при этом +высокое качество обслуживания -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. +Предполагаемые затраты указаны только для субграфов в основной сети Ethereum — затраты еще выше при самостоятельном размещении `graph-node` в других сетях. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Курирование сигнала на субграфе - это необязательная единовременная стоимость, равная нулю (например, сигнал стоимостью 1 тыс. долларов может быть курирован на субграфе, а затем отозван - с возможностью получения прибыли в процессе). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Некоторым пользователям может потребоваться обновить свой субграф до новой версии. Комиссия сети Ethereum при обновлении составляет около 50 долларов на момент написания данного материала. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +Обратите внимание, что комиссия сети в [Arbitrum](/arbitrum/arbitrum-faq) значительно ниже, чем в основной сети Ethereum. -## No Setup Costs & Greater Operational Efficiency +## Отсутствие затрат на настройку & повышение операционной эффективности -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. +Нулевая плата за установку. Приступайте к работе немедленно, без каких-либо затрат на настройку или накладные расходы. Никаких требований к оборудованию. Отсутствие перебоев в работе из-за централизованной инфраструктуры и больше времени для концентрации на вашем основном продукте. Нет необходимости в резервных серверах, устранении неполадок или дорогостоящих инженерных ресурсах. -## Reliability & Resiliency +## Надежность & отказоустойчивость -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. 
Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. -Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. +Итог: Сеть Graph дешевле, проще в использовании и дает превосходные результаты по сравнению с запуском `graph-node` локально. -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). +Начните использовать The Graph Network сегодня и узнайте, как [обновить свой субграф до децентрализованной сети The Graph](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/ru/network/curating.mdx b/website/pages/ru/network/curating.mdx index 797d9b9dd896..e81902254800 100644 --- a/website/pages/ru/network/curating.mdx +++ b/website/pages/ru/network/curating.mdx @@ -1,96 +1,96 @@ --- -title: Curating +title: Кураторство --- -Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. +Кураторы - это очень важная роль в распределенной экономики Graph протокола. Кураторы используют свой опыт и экспертизу в web3 экосистеме для оценки и сигнала на субграфы, которые стоят того что бы их индексировали Индексаторы. Используя Explorer, Кураторы имеет возможность просмотреть данные сети, что бы принять решение стоит ли сигналить на субграф. Graph протокол предусматривает награды для Кураторов, как часть от суммы оплаты за запросы пользователей к данному субграфу. Модель наград реализована таким образом, что бы стимулировать Кураторов сигналить как можно раньше. Индексаторы используют сигнал от Кураторов для принятия решения об индексации данного субграфа. -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +При сигнализировании, кураторы могут решить подать сигнал о конкретной версии субграфа или сигнализировать с помощью автоматической миграции. При использовании автоматической миграции, предоставляемые Кураторами данные всегда будут перенесены на последнюю версию, опубликованную разработчиком. Если вместо этого Вы решите сигнализировать об определенной версии, общие ресурсы всегда останутся на этой конкретной версии. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. 
For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Помните, что кураторство носит рискованный характер. Пожалуйста, проявите должную осмотрительность, курируя подграфы, которым вы доверяете. Создание подграфа не требует разрешения, поэтому люди могут создавать подграфы и давать им любые названия. Для дополнительных рекомендаций по минимизации рисков, ознакомьтесь с [Руководством по кураторству от Академии Graph.](https://thegraph.academy/curators/) ## Bonding Curve 101 -First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. +Сначала мы сделаем шаг назад. Каждый субграф имеет кривую связи, на которой производится начисление долей курирования, когда пользователь добавляет сигнал **в** кривую. Каждая кривая связи в подграфе уникальна. Кривые связи построены таким образом, что цена выпуска доли на подграфе линейно возрастает с увеличением количества выпущенных единиц. ![Price per shares](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +В результате цена растет линейно, а это означает, что со временем покупка акции станет дороже. Вот пример того, что мы имеем в виду, см. кривую связи ниже: -![Bonding curve](/img/bonding-curve.png) +![Кривая связи](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Предположим, у нас есть два куратора, которые минтят доли для субграфа: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. -- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. +- Куратор "А" первым сигнализирует о подграфе. Добавив 120 000 GRT на кривую, он может создать 2000 долей. +- Сигнал куратора "Б" появляется на подграфе через некоторое время. Чтобы получить такое же количество, как у куратора "А", ему придется добавить 360 000 GRT на кривую. +- Поскольку оба куратора имеют половину от общего числа, они получат равное количество наград. +- Если любой из кураторов теперь сожжет свои 2000 долей, он получит 360 000 GRT. +- Оставшийся куратор теперь будет получать всё вознаграждение для этого подграфа. Если он сожжет свои для вывода GRT, он получит 120 000 GRT. +- **TLDR:** Оценка GRT кураторских позиций определяется кривой связи и может быть волатильной. 
Есть вероятность понести большие потери. Если вы подадите сигнал раньше, это означает, что вы вложите меньше GRT за каждую отдельную долю. В свою очередь это означает, что вы получаете больше кураторских роялти на GRT, чем более поздние кураторы для того же субграфа. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +В общем, кривая связывания — это математическая кривая, которая определяет взаимосвязь между предложением токенов и ценой актива. В конкретном случае курирования подграфов, **цена каждой доли подграфа увеличивается с каждым вложенным токеном** и **цена каждой доли уменьшается с каждым проданным токеном.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +В случае с The Graph используется разработанная компанией [Bancor формула кривой связи](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA). -## How to Signal +## Как подавать Сигнал -Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) +Теперь, когда мы рассмотрели основные принципы работы кривой связи, перейдем к подаче сигнала на субграф. На вкладке Curator (Куратор) в Graph Explorer кураторы смогут подавать и не подавать сигналы на определенные субграфы, основываясь на статистике сети. Для получения пошагового обзора того, как это сделать в Explorer, [ перейдите по ссылке.](/network/explorer) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Куратор может выбрать конкретную версию подграфа для сигнализации, или же он может выбрать автоматическую миграцию своего сигнала на самую новую рабочую сборку этого подграфа. Оба варианта являются допустимыми стратегиями и имеют свои плюсы и минусы. -Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Сигнализация на конкретной версии особенно полезна, когда один подграф используется несколькими dApps. Одному dApp может потребоваться регулярно обновлять подграф новыми функциями. Другой dApp может предпочесть использовать старую, хорошо протестированную версию подграфа. При первоначальной курации взимается стандартная комиссия в размере 1%. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. 
Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +Автоматическая миграция вашего сигнала на самую новую рабочую сборку может быть ценной, чтобы гарантировать непрерывное начисление комиссий за запросы. Каждый раз, когда вы осуществляете курирование, взимается комиссия в размере 1%. Вы также заплатите комиссию в размере 0,5% при каждой миграции. Разработчикам подграфов не рекомендуется часто публиковать новые версии - они должны заплатить комиссию на курирование в размере 0,5% на все автоматически мигрированные доли курации. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Примечание**: Первый адрес, сигнализирующий о конкретном подграфе, считается первым куратором и будет выполнять гораздо более ресурсоёмкую работу по сравнению с последующими кураторами. Это связано с тем, что первый куратор инициализирует токены долей курации, инициализирует кривую связывания, а также переводит токены в прокси Graph. -## What does Signaling mean for The Graph Network? +## Что означает сигнализация для сети Graph? -For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +Чтобы конечным пользователям было возможно осуществлять запросы к подграфу, подграф должен быть проиндексирован. Индексация — это процесс, в ходе которого файлы, данные и метаданные просматриваются, каталогизируются и затем индексируются для ускорения поиска результатов. Чтобы данные подграфа были доступны для поиска, они должны быть организованы. -And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. +Таким образом, если бы индексаторам пришлось угадывать, какие подграфы им следует индексировать, вероятность того, что они получат высокую плату за запросы, была бы низкой, поскольку у них не было бы возможности проверить, какие подграфы имеют хорошее качество. Введите курирование. -Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. 
See the diagram below! +Кураторы делают сеть The Graph эффективной, и сигнализация — это процесс, который кураторы используют, чтобы дать знать индексаторам, что подграф подходит для индексации, причем GRT добавляется на кривую связывания для подграфа. Индексаторы могут полагаться на сигнал от куратора, потому что при сигнализации кураторы доли курации для подграфа, что дает им право на часть будущих комиссий за запросы, которые генерирует подграф. Сигнал куратора представлен как токены ERC20, называемые долями курирования Graph (GCS). Кураторы, которые хотят зарабатывать больше на комиссиях за запросы, должны направлять свои GRT на подграфы, которые, по их мнению, будут генерировать сильный поток комиссий в сеть. Кураторы не могут быть наказаны за плохое поведение, но существует штраф на депозит для кураторов, чтобы не стимулировать плохое принятие решений, которое может навредить целостности сети. Кураторы также получают меньше комиссий за запросы, если они выбирают курировать низкокачественный подграф, так как будет меньше запросов для обработки или меньше индексаторов для обработки этих запросов. Смотрите диаграмму ниже! -![Signaling diagram](/img/curator-signaling.png) +![Сигнальная диаграмма](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +Индексаторы могут находить подграфы для индексации на основе сигналов курирования, которые они видят в Graph Explorer (скриншот ниже). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Explorer по подграфам](/img/explorer-subgraphs.png) -## Risks +## Риски -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. -3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. Рынок запросов в The Graph по своей сути молод, и существует риск того, что ваш %APY может оказаться ниже, чем вы ожидаете, из-за зарождающейся динамики рынка. +2. Комиссия за кураторство — когда куратор сигнализирует GRT на субграфе, он облагается налогом на кураторство в размере 1%. Эта оплата сжигается, а остальная часть депонируется в резервный запас кривой облигаций. +3. 
Когда кураторы сжигают свои доли для вывода GRT, оценка GRT оставшихся долей будет уменьшена. Имейте в виду, что в некоторых случаях кураторы могут решить сжечь свои доли **все сразу**. Такая ситуация может стать общей, если разработчик dApp перестает обновлять/улучшать и запрашивать свой подграф или если подграф терпит неудачу. В результате оставшиеся кураторы могут иметь возможность вывести только часть своего первоначального GRT. Для роли в сети с более низким риском, смотрите [Делегаторы](/network/delegating). +4. Подграф может выйти из строя из-за ошибки. За неудавшийся подграф не начисляется плата за запрос. В результате вам придется ждать, пока разработчик исправит ошибку и выложит новую версию. + - Если вы подписаны на новейшую версию подграфа, ваши общие ресурсы автоматически перейдут на эту новую версию. При этом будет взиматься кураторская комиссия в размере 0,5%. + - Если вы указали конкретную версию подграфа и она не удалась, вам придется вручную сжечь свои курационные доли. Обратите внимание, что вы можете получить больше или меньше GRT, чем вы изначально вложили в кривую курирования, что представляет собой риск, связанный с работой куратора. Затем вы можете подать сигнал на новую версию подграфа, при этом понеся комиссию на курирование в размере 1%. -## Curation FAQs +## Часто задаваемые вопросы по кураторству -### 1. What % of query fees do Curators earn? +### 1. Какой % от оплаты за запрос получают кураторы? -By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Подавая сигнал о подграфе, вы получаете долю от всех комиссий за запросы, которые генерирует этот подграф. 10 % от всех сборов за запросы переходят кураторам пропорционально их доле курирования. Эти 10% подлежат управлению. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Как определить, какие подграфы являются высококачественными, чтобы подавать на них сигналы? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Поиск высококачественных подграфов — это сложная задача, но её можно решить множеством различных способов. Как куратор, вы хотите искать надежные подграфы, которые генерируют объем запросов. Надежный подграф может быть ценным, если он полный, точный и удовлетворяет потребности dApp в данных. Плохо спроектированный подграф может потребовать пересмотра или повторной публикации и также может потерпеть неудачу. Критически важно для кураторов изучать архитектуру или код подграфа, чтобы оценить его ценность. В результате: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. 
+- Кураторы могут использовать свое понимание сети, чтобы попытаться предсказать, как отдельный подграф может генерировать больший или меньший объем запросов в будущем +- Кураторы также должны понимать метрики, доступные через Graph Explorer. Такие показатели, как объем прошлых запросов и кто является разработчиком подграфа, могут помочь определить, стоит ли сообщать о подграфе. -### 3. What’s the cost of updating a subgraph? +### 3. Какова стоимость обновления подграфа? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +При переносе Ваших кураторских данных в новую версию подграфа взимается комиссия на курирование в размере 1%. Кураторы могут подписаться на новейшую версию подграфа. Когда кураторские данные автоматически переносятся на новую версию, Кураторы также будут платить половину комиссии на курирование, т. е. 0,5%, потому что обновление подграфов — это действие в сети, которое сопровождается оплатой комиссии сети. -### 4. How often can I update my subgraph? +### 4. Как часто я могу обновлять свой подграф? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Рекомендуется не обновлять свои подграфы слишком часто. См. выше для более подробной информации. -### 5. Can I sell my curation shares? +### 5. Могу ли я продать свои кураторские доли? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. +Доли курации не могут быть "куплены" или "проданы", как другие токены ERC20, с которыми вы, возможно, знакомы. Их можно только чеканить (создавать) или сжигать (уничтожать) в соответствии с кривой связывания для конкретного подграфа. Количество GRT, необходимое для создания нового сигнала, и количество GRT, которое вы получите, когда сожжете свой существующий сигнал, определяются этой кривой связывания. Как куратор, вам нужно знать, что когда вы сжигаете свои доли курации, чтобы вывести GRT, у вас может оказаться больше или меньше GRT, чем вы изначально вложили. -Still confused? Check out our Curation video guide below: +Вы все еще в замешательстве? Ознакомьтесь с нашим видеоруководством по кураторству: diff --git a/website/pages/ru/network/delegating.mdx b/website/pages/ru/network/delegating.mdx index 4a6d6e00b73e..5d2bdf1326a5 100644 --- a/website/pages/ru/network/delegating.mdx +++ b/website/pages/ru/network/delegating.mdx @@ -1,98 +1,98 @@ --- -title: Delegating +title: Делегирование --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +Делегаторы - это участники сети которые делегируют (т.е. "стейкают") GRT одному или нескольким Индексаторам. Делегаторы вносят свой вклад в устойчивость сети без надобности запускать Graph Node самим. 
-By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +Делегируя токены Индексатору, Делегаторы получают доход как часть от вознаграждений за индексацию и за обработку запросов. Количество запросов которое Индексатор сможет обработать зависит от собственного стейка Индексатора (и делегированных токенов) и цены которую Индексатор назначает за обработку запроса. Таким образом, чем больше общий стейк индексатора, тем больше запросов он сможет обработать. -## Delegator Guide +## Инструкция Делегатора -This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: +Данный материал объяснит как стать успешным Делегатором в сети The Graph. Делегаторы делят между собой вознаграждение, которые зарабатывает Индексатор, пропорционально размеру делегации. Делегатор должен тщательно выбирать Индексатора, которому будет делегировать исходя из нескольких факторов. Просим отметить что данная инструкция не будет включать в себя такие шаги как настройка Метамаска, ибо это широко доступно в Интернете. В этой инструкции будет 3 основных секции: -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator -- A video guide showing the steps to delegate in the Graph Network UI +- Риски связанные с делегацией в сети The Graph +- Как расчитать примерный доход +- Видео ролик показывающий как делегировать через веб интерфейс The Graph сети -## Delegation Risks +## Риски делегации -Listed below are the main risks of being a Delegator in the protocol. +Ниже указаны основные риски Делегатора. -### The delegation tax +### Комиссия на делегацию -Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. +Делегаторы не могут быть наказаны за некорректное поведение, но они уплачивают комиссию на делегацию, который должен стимулировать обдуманный выбор Индексатора для делегации. -It is important to understand that every time you delegate, you will be charged 0.5%. This means if you are delegating 1000 GRT, you will automatically burn 5 GRT. +Необходимо помнить, что каждый раз когда Вы делегируете, Вы должны уплатить 0.5%. Это означает, что если Вы делегируете 1000 GRT, автоматически сгорит 5 GRT от Вашей делегации. -This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation. +Это означает, что Делегаторы должны учитывать эту комиссию при расчетах прибыли. На пример, Делегатор должен подсчитать сколько дней займет вернуть эти 0.5% при делегации. -### The delegation unbonding period +### Период unbonding (разблокировки) делегирования -Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. 
This means they cannot transfer their tokens, or earn any rewards for 28 days. +Когда Делегатор хочет отозвать делегирование, его токены подлежат 28-дневному периоду разблокировки. Это означает, что он не может перевести свои токены или получать какие-либо вознаграждения в течение 28 дней. -One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT. +Еще одним аспектом, который стоит учитывать, является разумный выбор Индексатора. Если вы выберете ненадежного или некомпетентного Индексатора, вам, возможно, захочется отозвать делегирование, что в свою очередь означает потерю множества возможностей для получения вознаграждений. Это может быть столь плохо, как и сжигание токенов GRT.
    - ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Delegation unbonding](/img/Delegation-Unbonding.png) _Обратите внимание на комиссию в размере 0,5% в + пользовательском интерфейсе Delegation, а также на 28-дневный период разблокировки._
    -### Choosing a trustworthy Indexer with a fair reward payout for Delegators +### Выбор надежного Индексатора со справедливым распределением вознаграждений для Делегаторов -This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. +Это важная часть для понимания. Сначала давайте обсудим три очень важных значения — параметры делегирования. -Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards. +Срез вознаграждения за индексацию - это часть вознаграждений, которую Индексатор оставит для себя. Это значит, что если он установлен на 100%, то вы, как Делегатор, не получите вознаграждений за индексацию. Если в пользовательском интерфейсе указано 80%, это означает, что в качестве Делегатора вы получите 20%. Важное примечание - в начале работы сети вознаграждения за индексацию будут составлять большую часть всех вознаграждений.
    - ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *Топ-индексатор отдает делегаторам 90% вознаграждений. Средний + отдает - 20%. Нижний индексатор дает ~83%.*
    -- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Срез комиссии за запросы - это работает точно так же, как и срез вознаграждения за индексацию. Однако это конкретно касается доходов от сборов за запросы, которые собирает Индексатор. Стоит отметить, что на начальном этапе работы сети доходы от комиссий за запросы будут очень малы по сравнению с вознаграждением за индексацию. Рекомендуется следить за сетью, чтобы определить, когда комиссии за запросы в сети начнут иметь большее значение. -As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. +Как вы можете заметить, выбор правильного Индексатора требует серьезного подхода. По этой причине мы настоятельно рекомендуем изучить Discord The Graph, чтобы определить, какие Индексаторы имеют лучшую социальную и техническую репутацию для стабильного вознаграждения Делегаторов. Многие Индексаторы очень активны в Discord и с удовольствием ответят на ваши вопросы. Многие из них уже месяцами занимаются индексацией в тестовой сети и делают все возможное, чтобы помочь Делегаторам получать хорошую доходность, так как это способствует здоровью и успеху всей сети. -### Calculating Delegators expected return +### Расчет ожидаемой доходности делегатов -A Delegator has to consider a lot of factors when determining the return. These include: +Делегатор должен учитывать множество факторов при определении дохода. К ним относятся: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- Технически подкованный Делегатор также может оценить способность Индексатора использовать доступные ему делегированные токены. Если Индексатор не распределяет все доступные токены, он не зарабатывает максимальную прибыль, которую мог бы получить для себя или своих Делегаторов. +- В настоящее время в сети Индексатор может выбрать закрытие распределения и собрать вознаграждения в любое время между 1 и 28 днями. Таким образом, возможно, что у Индексатора есть много еще не собранных вознаграждений, и, следовательно, их общая сумма вознаграждений невелика. Этот аспект следует учитывать на ранних этапах. 
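Pulling these factors together, a rough estimate could look like the sketch below. It combines the reward cut, the pro-rata pool share, and the delegation ratio of 16 discussed in this guide, along with the 0.5% delegation tax; the function, its inputs, and the sample numbers are assumptions made for illustration, not protocol code:

```ts
// Rough, illustrative estimate only; not protocol code.
const DELEGATION_RATIO = 16;   // productive delegation is capped at 16x the Indexer's self stake
const DELEGATION_TAX = 0.005;  // 0.5% of every delegation is burned up front

function estimateDelegatorReward(
  rewardsToPoolGRT: number,  // indexing rewards earned by the Indexer over some period
  rewardCut: number,         // share the Indexer keeps, e.g. 0.8 means Delegators receive 20%
  myDelegationGRT: number,
  totalDelegationGRT: number,
): number {
  const myEffectiveDelegation = myDelegationGRT * (1 - DELEGATION_TAX);
  const delegatorsShare = rewardsToPoolGRT * (1 - rewardCut);
  // Each Delegator is paid pro rata to their share of the delegation pool.
  return delegatorsShare * (myEffectiveDelegation / totalDelegationGRT);
}

// The same 10,000 GRT can earn more with an Indexer keeping 80% over a small pool
// than with one keeping only 10% over a very large pool:
console.log(estimateDelegatorReward(1_000, 0.8, 10_000, 100_000));     // ~19.9 GRT
console.log(estimateDelegatorReward(1_000, 0.1, 10_000, 10_000_000));  // ~0.9 GRT

// Capacity check with the 16x ratio (see the delegation capacity section below):
// 1,000,000 GRT self stake gives a 16,000,000 GRT capacity, so 100,000,000 GRT
// of delegation leaves 84,000,000 GRT diluting everyone's rewards.
const capacity = 1_000_000 * DELEGATION_RATIO;
console.log(capacity, 100_000_000 - capacity);
```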
-### Considering the query fee cut and indexing fee cut +### Учёт среза комиссии за запросы и срез вознаграждения за индексацию -As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: +Как описано в предыдущих разделах, следует выбирать Индексатора, который является прозрачным и честным в установлении своего Среза комиссии за запросы и Среза вознаграждения за индексацию. Делегатору также стоит обратить внимание на время охлаждения параметров, чтобы понять, какой временной запас у него есть. После этого достаточно просто рассчитать количество вознаграждений, которое получают Делегаторы. Формула следующая: ![Delegation Image 3](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### Осмотр пула делегирования индексатора -Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: +Еще одним аспектом, который Делегатор должен учитывать, является доля в общем пуле делегирования, которая принадлежит ему. Все вознаграждения от делегирования распределяются равномерно, с простым перебалансированием пула, определяемым количеством токенов, которое Делегатор внес в пул. Это дает Делегатору определенную долю в пуле: ![Share formula](/img/Share-Forumla.png) -Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. +Используя эту формулу, мы можем увидеть, что на самом деле возможно такое, что Индексатор, предлагающий Делегаторам всего 20%, на самом деле может предоставить Делегаторам даже лучшее вознаграждение, чем Индексатор, предоставляющий 90% Делегаторам. -A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. +Таким образом, Делегатор может подсчитать, чтобы определить, что Индексатор, предлагающий 20% Делегаторам, предлагает более высокую прибыль. -### Considering the delegation capacity +### Учёт возможности делегирования -Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. +Еще одним фактором, который следует учитывать, является вместимость делегирования. В настоящее время соотношение делегирования установлено на уровне 16. Это означает, что если у Индексатора есть 1,000,000 застейканых GRT, его вместимость делегирования составляет 16,000,000 GRT делегированных токенов, которые он может использовать в протоколе. Любые делегированные токены сверх этого объема будут разбавлять вознаграждения всех Делегаторов. -Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. 
And all the Delegators, and the Indexer, are earning way less rewards than they could be. +Представьте, что у Индексатора 100,000,000 GRT делегировано к нему, а его вместимость составляет всего 16,000,000 GRT. Это эффективно означает, что 84,000,000 GRT токенов не используются для заработка токенов. И все Делегаторы, а также сам Индексатор, зарабатывают гораздо меньше вознаграждений, чем могли бы. -Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. +Поэтому Делегатор всегда должен учитывать возможности делегирования индексатора и учитывать их при принятии решений. -## Delegator FAQs and Bugs +## Частые вопросы и баги -### MetaMask "Pending Transaction" Bug +### Ошибка Метамаска "Pending Transaction" -**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. What should I do?** +**Когда я пытаюсь делегировать мою транзакцию в MetaMask, она отображается как "Pending" или "Queued" дольше, чем ожидалось. Что мне делать?** -At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transactions attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browsesr (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users that have encountered this issue and have reported successful transactions after restarting their browser and attempting to delegate. +Временами попытки делегировать индексаторам через MetaMask могут завершиться неудачей и приводить к продолжительным периодам "Pending" или "Queued" при попытках транзакции. Например, пользователь может попытаться сделать делегирование с недостаточной комиссией за газ по отношению к текущим ценам, что приведет к отображению попытки транзакции как "Pending" в кошельке MetaMask на протяжении 15+ минут. Когда это происходит, последующие транзакции могут быть предприняты пользователем, но они не будут обработаны до момента завершения первоначальной транзакции, поскольку транзакции для одного адреса должны обрабатываться в порядке очереди. В таких случаях эти транзакции можно отменить в MetaMask, но попытки транзакций будут накапливать комиссии за газ без каких-либо гарантий, что последующие попытки будут успешными. Более простым решением этой проблемы является перезапуск браузера (например, используя "abort:restart" в адресной строке), что отменит все предыдущие попытки без вычета газа из кошелька. Несколько пользователей, столкнувшихся с этой проблемой, сообщили об успешных транзакциях после перезапуска своего браузера и попытки делегирования. -## Video guide for the network UI +## Видео руководство по сетевому интерфейсу -This guide provides a full review of this document, and how to consider everything in this document while interacting with the UI. 
+В этом руководстве представлен полный обзор этого документа и показано, как учитывать все, что содержится в этом документе, при взаимодействии с пользовательским интерфейсом. diff --git a/website/pages/ru/network/indexing.mdx b/website/pages/ru/network/indexing.mdx index 53839bddb1e2..ab88480b0ba3 100644 --- a/website/pages/ru/network/indexing.mdx +++ b/website/pages/ru/network/indexing.mdx @@ -2,7 +2,7 @@ title: Индексирование --- -Индексаторы — это операторы нод в сети The Graph, которые стейкают токены Graph (GRT) для предоставления услуг индексирования и обработки запросов. Индексаторы получают оплату за запросы и вознаграждение за свои услуги индексирования. Они также зарабатывают из Rebate Pool, который распределяется между всеми участниками сети пропорционально их работе в соответствии с Rebate функцией от Cobb-Douglas. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. Токены GRT, которые застейканы в протоколе, подлежат периоду "оттаивания" и могут быть срезаны, если индексаторы являются вредоносными и передают неверные данные приложениям или если они некорректно осуществляют индексирование. Индексаторы также получают вознаграждение за делегированный стейк от делегаторов, внося свой вклад в работу сети. @@ -26,7 +26,7 @@ title: Индексирование Вознаграждения за индексацию поступают от инфляции протокола, которая установлена на 3% в год. Оно распределяется между подграфами в зависимости от соотношения всех сигналов на каждом из них, а затем пропорционально распределяется между индексаторами в зависимости от их выделенного стейка на этом подграфе. **Чтобы получить право на вознаграждение, распределение должно быть закрыто достоверным доказательством индексации (POI), соответствующим стандартам, установленным arbitration charter.** -Сообщество создало множество инструментов для расчета вознаграждения; Вы найдете их в коллекции [Руководства сообщества](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Вы также можете найти актуальный список инструментов на каналах #Delegators и #Indexers на [сервере Discord](https://discord.gg/graphprotocol). А здесь ссылка на [рекомендуемый оптимизатор распределения](https://github.com/graphprotocol/AllocationOpt.jl), интегрированный со стеком программного обеспечения индексатора. +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. ### Что такое подтверждение индексации (proof of indexing - POI)? @@ -81,17 +81,17 @@ query indexerAllocations { ### Что такое query fee rebates и когда они распределяются? -Плата за запросы собирается межсетевым шлюзом каждый раз, когда распределение закрывается, и накапливается в пуле возврата платы за запросы подграфа. 
Rebate pool предназначен для поощрения индексаторов к тому, чтобы они выделяли долю величины стейка примерно пропорционально количеству сборов за запросы, которые они получают для сети. Часть платы за запросы в пуле, которая распределяется между конкретными индексаторами, рассчитывается с помощью производственной функции Cobb-Douglas; распределяемая сумма на каждого индексатора является функцией их вкладов в пул и распределения доли на подграфе. +Плата за запрос взимается шлюзом и распределяется между индексаторами в соответствии с экспоненциальной функцией скидки (см. GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). Экспоненциальная функция скидки предлагается как способ гарантии, что индексаторы достигают наилучшего результата за счет добросовестного обслуживания запросов. Это работает, стимулируя индексаторов выделять большую сумму ставки (которая может быть уменьшена за ошибку при обслуживании запроса) относительно суммы комиссии за запрос, которую они могут собрать. -После закрытия распределения и по истечении периода спора возмещение может быть заклеймлено индексатором. После клейма, возмещение комиссии за запрос распределяется между индексатором и его делегатами на основе query fee cut и пула делегирования. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### Что такое query fee cut и indexing reward cut? Значения `queryFeeCut` и `indexingRewardCut` — это параметры делегирования, которые индексатор может установить вместе с cooldownBlocks для управления распределением GRT между индексатором и его делегаторами. См. последние шаги в разделе [Стейкинг в протоколе](/network/indexing#stake-in-the-protocol), чтобы получить инструкции по настройке параметров делегирования. -- **queryFeeCut** – процентная доля комиссионных сборов за запросы, накопленная для подграфа, которая будет распределена индексатору. Если установлено значение 95 %, индексатор получит 95 % query fee rebate pool, когда будет заявлено распределение, а остальные 5 % перейдут делегаторам. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** — процентная доля вознаграждений за индексацию, накопленных в подграфе, которые будут распределены индексатору. Если для этого параметра установлено значение 95%, индексатор получит 95% пула вознаграждений за индексацию, когда распределение будет закрыто, а делегаторы разделят остальные 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### Как индексаторы узнают, какие подграфы индексировать? @@ -375,7 +375,7 @@ docker-compose up #### Начало работы -Indexer agent и Indexer service должны быть расположены вместе с инфраструктурой вашей Graph Node. Существует множество способов настроить виртуальные среды выполнения для компонентов индексатора; здесь мы объясним, как запускать их на «голом железе» с помощью пакетов NPM или исходного кода, или через kubernetes и докер на Google Cloud Kubernetes Engine. 
Если эти примеры настройки не подходят для вашей инфраструктуры, вы найдете помощь в сообществе в [Discord](https://discord.gg/graphprotocol). Не забудьте [произвести стейкинг в протоколе](/network/indexing#stake-in-the-protocol) перед запуском компонентов индексатора! +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! #### Из пакетов NPM @@ -662,21 +662,21 @@ ActionType { Пример использования из исходного кода: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Обратите внимание, что поддерживаемые типы действий для управления распределением имеют разные входные требования: @@ -798,8 +798,4 @@ setDelegationParameters(950000, 600000, 500) - **Closed**. Индексатор может закрыть аллокацию по при истечении 1 эпохи ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) или Indexer agent автоматически закроет его по истечении эпохи **maxAllocationEpochs** (в настоящее время 28 дней). Когда оно закрыто с действительным доказательством индексации (POI), вознаграждения за индексирование распределяются между индексатором и его делегаторами (ниже см. подробнее «Как распределяются вознаграждения?»). -- **Finalized**. После закрытия наступает период спора, после которого аллокация считается **finalized** и становится возможным запрашивать query fee rebates (claim()). Indexer agent отслеживает сеть, чтобы обнаружить **finalized** аллокации и утверждает их, если они превышают настраиваемый (и необязательный) порог, **—-allocation-claim-threshold**. - -- **Claimed** — окончательное состояние аллокации, когда срок действия истек, все подходящие вознаграждения распределены, а комиссия за запрос возвращена. - Индексаторам рекомендуется использовать функцию синхронизации вне сети для синхронизации развертывания подграфов в chainhead перед созданием аллокации в сети. 
Эта функция особенно полезна для подграфов, синхронизация которых может занять более 28 эпох или имеющих некоторую вероятность неопределенного сбоя. diff --git a/website/pages/ru/new-chain-integration.mdx b/website/pages/ru/new-chain-integration.mdx index c5934efa6f87..d6bd088665be 100644 --- a/website/pages/ru/new-chain-integration.mdx +++ b/website/pages/ru/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then integrating that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Протестируйте интеграцию, локально развернув субграф** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/ru/operating-graph-node.mdx b/website/pages/ru/operating-graph-node.mdx index 4fdde021178d..dd51cc588968 100644 --- a/website/pages/ru/operating-graph-node.mdx +++ b/website/pages/ru/operating-graph-node.mdx @@ -22,7 +22,7 @@ Graph Node (и весь стек индексаторов) можно запус В то время как для некоторых субграфов может потребоваться полная нода, другие могут иметь функции индексации, для которых требуются дополнительные функции RPC. В частности, для субграфов, которые выполняют `eth_calls` как часть индексации, потребуется нода архива, поддерживающая [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), а субграфы с `callHandlers` или `blockHandlers` с фильтром `call` требуют поддержки `trace_filter` ([см.
документацию по модулю трассировки здесь](https://openethereum.github.io/JSONRPC-trace-module)). -**Предстоящий запуск Network Firehoses**. Firehose — это служба gRPC, предоставляющая упорядоченный поток блоков с поддержкой разветвления. Она разработана разработчиками ядра The Graph для лучшей поддержки крупномасштабного высокопроизводительного индексирования. В настоящее время это не является обязательным требованием для индексаторов, но индексаторам рекомендуется ознакомиться с технологией до начала полной поддержки сети. Подробнее о Firehose можно узнать [здесь](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### Ноды IPFS diff --git a/website/pages/ru/publishing/publishing-a-subgraph.mdx b/website/pages/ru/publishing/publishing-a-subgraph.mdx index ea803fac0d99..596bf3ff5d23 100644 --- a/website/pages/ru/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ru/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ title: Публикация подграфа в децентрализованн Публикация Подграфа в децентрализованной сети делает его доступным для [Кураторов](/network/curating), чтобы начать его курирование, и для [Индексаторов](/network/indexing), чтобы начать его индексирование. -О том, как опубликовать подграф в децентрализованной сети, смотрите в [этом видео](https://youtu.be/HfDgC2oNnwo?t=580). + Вы можете найти список поддерживаемых сетей [тут](/developing/supported-networks). diff --git a/website/pages/ru/querying/graphql-api.mdx b/website/pages/ru/querying/graphql-api.mdx index 82ad9e950a08..2637a70c52e8 100644 --- a/website/pages/ru/querying/graphql-api.mdx +++ b/website/pages/ru/querying/graphql-api.mdx @@ -51,7 +51,7 @@ title: API GraphQL #### Пример сортировки вложенных объектов -Начиная с Ноды Graph, объекты [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) можно сортировать на основе вложенных сущностей. +Начиная с Graph Node, объекты [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) можно сортировать на основе вложенных содержаний. В следующем примере мы сортируем токены по имени их владельца: @@ -66,7 +66,7 @@ title: API GraphQL } ``` -> В настоящее время Вы можете осуществлять сортировку по одноуровневым типам `String` или `ID` в полях `@entity` и `@derivedFrom`. К сожалению, [сортировка по интерфейсам на одноуровневых структурах](https://github.com/graphprotocol/graph-node/pull/4058), сортировка по полям, которые являются массивами и вложенными объектами, еще не поддерживается. +> В настоящее время Вы можете осуществлять сортировку по одно уровневым типам `String` или `ID` в полях `@entity` и `@derivedFrom`. К сожалению, [сортировка по интерфейсам на одно уровневых структурах](https://github.com/graphprotocol/graph-node/pull/4058), сортировка по полям, которые являются массивами и вложенными объектами, еще не поддерживается. ### Пагинация @@ -278,7 +278,7 @@ _change_block(number_gte: Int) Вы можете запрашивать состояние своих объектов не только для последнего блока, который используется по умолчанию, но и для произвольного блока в прошлом. 
Блок, в котором должен выполняться запрос, можно указать либо по номеру блока, либо по его хэшу, включив аргумент `block` в поля верхнего уровня запросов. -Результат такого запроса не изменится со временем, т.е. запрос к конкретному прошлому блоку будет возвращать один и тот же результат каждый раз при его выполнении, за исключением того, когда Вы запрашиваете блок, очень близкий к началу цепочки, результат в этом случае может измениться, если этот блок окажется не в основной цепочке и цепочка будет реорганизована. После того, как блок можно будет считать окончательным, результат запроса не изменится. +Результат такого запроса не изменится со временем, т.е. запрос к конкретному прошлому блоку будет возвращать один и тот же результат каждый раз при его выполнении, за исключением того, когда Вы запрашиваете блок, очень близкий к началу цепочки, результат в этом случае может измениться, если этот блок окажется не в основной сети и сеть будет реорганизована. После того как блок можно будет считать окончательным, результат запроса не изменится. Обратите внимание, что текущая реализация имеет определенные ограничения, которые могут нарушить эти гарантии. На данный момент не всегда можно определить, что заданный хэш блока вообще не находится в основной цепочке или что на результат запроса по хэшу блока для блока, который нельзя считать окончательным, может повлиять реорганизация блока, которая происходит одновременно с запросом. Это не влияет на результаты запросов по хэшу блока, когда известно, что блок в конечном итоге находится в основной цепочке. Здесь подробно описана [эта проблема](https://github.com/graphprotocol/graph-node/issues/1405). @@ -316,7 +316,7 @@ _change_block(number_gte: Int) ### Полнотекстовые поисковые запросы -Поля запроса полнотекстового поиска предоставляют API-интерфейс содержательного текстового поиска, который можно добавить в схему субграфа и настроить. См. [Определение полей полнотекстового поиска](/developing/creating-a-subgraph#defining-fulltext-search-fields), чтобы добавить полнотекстовый поиск в свой субграф. +Поля запроса полнотекстового поиска предоставляют API-интерфейс содержательного текстового поиска, который можно добавить в схему субграфа и настроить. См. [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields), чтобы добавить полнотекстовый поиск в свой субграф. Запросы полнотекстового поиска имеют одно обязательное поле, `text`, для предоставления поисковых запросов. В этом поле поиска `text` можно использовать несколько специальных операторов полнотекстового поиска. @@ -372,7 +372,7 @@ _change_block(number_gte: Int) ### Валидация -Graph Node реализует [на основе спецификаций](https://spec.graphql.org/October2021/#sec-Validation) проверку запросов GraphQL, которые получает с помощью [graphql-tools-rs](https:// github.com/dotansimha/graphql-tools-rs#validation-rules), основанного на [референсная реализация graphql-js](https://github.com/graphql/graphql-js /tree/main/src/validation). Запросы, которые не соответствуют правилу проверки, вызывают стандартную ошибку. Подробнее см. в [спецификации GraphQL](https://spec.graphql.org/October2021/#sec-Validation). +Graph Node реализует [на основе спецификаций](https://spec.graphql.org/October2021/#sec-Validation) проверку запросов GraphQL, которые получает с помощью [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), основанного на [референтная реализация graphql-js](https://github.com/graphql/graphql-js/tree/main/src/validation). 
Запросы, которые не соответствуют правилу проверки, вызывают стандартную ошибку. Подробнее см. в [спецификации GraphQL](https://spec.graphql.org/October2021/#sec-Validation). ## Схема diff --git a/website/pages/ru/querying/managing-api-keys.mdx b/website/pages/ru/querying/managing-api-keys.mdx index ee7c274bca10..7861cc0fc39b 100644 --- a/website/pages/ru/querying/managing-api-keys.mdx +++ b/website/pages/ru/querying/managing-api-keys.mdx @@ -1,26 +1,26 @@ --- -title: Managing your API keys +title: Управление вашими ключами API --- -Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. +Независимо от того, являетесь ли вы разработчиком dapp или разработчиком субграфов, вам нужно будет управлять своими API-ключами. Это важно для того, чтобы вы могли запрашивать субграфы, поскольку ключи API гарантируют, что соединения между службами приложений действительны и авторизованы. Это включает в себя аутентификацию конечного пользователя и устройства с помощью приложения. -The Studio will list out existing API keys, which will give you the ability to manage or delete them. +Studio выведет список существующих API-ключей, что даст вам возможность управлять ими или удалять их. -1. The **Overview** section will allow you to: - - Edit your key name - - Regenerate API keys - - View the current usage of the API key with stats: - - Number of queries - - Amount of GRT spent -2. Under **Security**, you’ll be able to opt into security settings depending on the level of control you’d like to have over your API keys. In this section, you can: - - View and manage the domain names authorized to use your API key - - Assign subgraphs that can be queried with your API key -3. Under **Indexer Preference**, you’ll be able to set different preferences for Indexers who are indexing subgraphs that your API key is used for. You can assign up to 5 points for each of these: - - **Fastest Speed**: Time between the query and the response from an indexer. If you mark this as important we will optimize for fast indexers. - - **Lowest Price**: The amount paid per query. If you mark this as important we will optimize for the less expensive indexers. - - **Data Freshness**: How recent the latest block an indexer has processed for the subgraph you are querying. If you mark this as important we will optimize to find the indexers with the freshest data. - - **Economic Security**: The amount of GRT an indexer can lose if they respond incorrectly to your query. If you mark this as important we will optimize for indexers with a large stake. -4. Under **Budget**, you’ll be able to update the maximum price per query. Note that we have a dynamic setting for that that's based on a volume discounting algorithm. **We strongly recommend using the default settings unless you are experiencing a specific problem.** Otherwise, you can update it under "Set a custom maximum budget". On this page you can also view different KPIs (in GRT and USD): - - Average cost per query - - Failed queries over max price - - Most expensive query +1. Раздел ** Overview ** позволит вам: + - Отредактируйте свое ключевое имя + - Регенерировать ключи API + - Просмотр текущего использования ключа API со статистикой: + - Количество запросов + - Количество потраченных GRT +2. 
В разделе ** Security ** вы сможете выбрать настройки безопасности в зависимости от уровня контроля, который вы хотели бы иметь над своими ключами API. В этом разделе вы можете: + - Просматривайте доменные имена, авторизованные для использования вашего API-ключа, и управляйте ими + - Назначьте субграфы, которые могут быть запрошены с помощью вашего API-ключа +3. В разделе **Indexer Preference** вы сможете установить различные настройки для индексаторов, которые индексируют субграфы, для которых используется ваш ключ API. Вы можете присвоить до 5 баллов за каждый из них: + - **Fastest Speed**: время между запросом и ответом от индексатора. Если вы отметите это как важное, мы проведем оптимизацию для быстрых индексаторов. + - **Lowest Price**: Сумма, уплаченная за запрос. Если вы отметите это как важное, мы проведем оптимизацию для менее дорогих индексаторов. + - **Data Freshness**: Насколько свеж последний блок, обработанный индексатором для запрашиваемого вами субграфа. Если вы отметите это как важное, мы проведем оптимизацию, чтобы найти индексаторов с самыми свежими данными. + - **Economic Security**: количество GRT, которое может потерять индексатор, если он неправильно ответит на ваш запрос. Если вы отметите это как важное, мы проведем оптимизацию для индексаторов с большой долей участия. +4. В разделе ** Budget ** вы сможете обновить максимальную цену за запрос. Обратите внимание, что для этого у нас есть динамическая настройка, основанная на алгоритме дисконтирования объема. **Мы настоятельно рекомендуем использовать настройки по умолчанию, если только вы не столкнулись с конкретной проблемой.** В противном случае вы можете обновить их в разделе "Set a custom maximum budget". На этой странице вы также можете просмотреть различные ключевые показатели эффективности (в GRT и USD): + - Средняя стоимость одного запроса + - Неудачные запросы по максимальной цене + - Самый дорогой запрос diff --git a/website/pages/ru/querying/querying-best-practices.mdx b/website/pages/ru/querying/querying-best-practices.mdx index 98c0ffb72c61..ce1bbcead58d 100644 --- a/website/pages/ru/querying/querying-best-practices.mdx +++ b/website/pages/ru/querying/querying-best-practices.mdx @@ -356,7 +356,7 @@ fragment MyFragment on BigInt { Fragments are defined on specific types and should be used accordingly in queries. -Example: +Пример: ```graphql query { diff --git a/website/pages/ru/querying/querying-the-graph.mdx b/website/pages/ru/querying/querying-the-graph.mdx index af9dcaaf2477..39bfd2f0a3bd 100644 --- a/website/pages/ru/querying/querying-the-graph.mdx +++ b/website/pages/ru/querying/querying-the-graph.mdx @@ -6,7 +6,7 @@ With the subgraph deployed, visit the [Graph Explorer](https://thegraph.com/expl An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. -## Example +## Пример This query lists all the counters our mapping has created. 
Since we only create one, the result will only contain our one `default-counter`: diff --git a/website/pages/ru/querying/querying-the-hosted-service.mdx b/website/pages/ru/querying/querying-the-hosted-service.mdx index 14777da41247..d28355b14cd0 100644 --- a/website/pages/ru/querying/querying-the-hosted-service.mdx +++ b/website/pages/ru/querying/querying-the-hosted-service.mdx @@ -2,11 +2,11 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. -## Example +## Пример This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/ru/querying/querying-with-python.mdx b/website/pages/ru/querying/querying-with-python.mdx new file mode 100644 index 000000000000..59852d651525 --- /dev/null +++ b/website/pages/ru/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Начало работы + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/ru/quick-start.mdx b/website/pages/ru/quick-start.mdx new file mode 100644 index 000000000000..b7ac25cf9f89 --- /dev/null +++ b/website/pages/ru/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Быстрый старт +--- + +Из этого руководства Вы быстро узнаете, как инициализировать, создать и развернуть свой субграф в Subgraph Studio или [хостинговом сервисе](#hosted-service). + +Убедитесь, что ваш субграф будет индексировать данные из [поддерживаемой сети](/developing/supported-networks). + +Это руководство написано, исходя из того, что у Вас есть: + +- Адрес смарт-контракта в выбранной Вами сети +- GRT для курирования вашего субграфа +- Криптовалютный кошелек + +## 1. Создание субграфа в Subgraph Studio + +Перейдите в [Subgraph Studio](https://thegraph.com/studio/) и подключите свой кошелек. + +После подключения Вы можете начать с нажатия кнопки «Создать субграф». Выберите нужную сеть и нажмите «Продолжить». + +## 2. Установка Graph CLI + +Графический интерфейс Graph CLI написан на JavaScript, и для его использования Вам потребуется установить либо `npm`, либо `yarn`. + +Выполните одну из следующих команд на своём локальном компьютере: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Инициализация вашего Подграфа + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +При инициализации субграфа инструмент CLI запросит у Вас следующую информацию: + +- Протокол: выберите протокол, по которому Ваш субграф будет индексировать данные +- Слаг субграфа: создайте имя для своего субграфа. Ваш слаг субграфа — это идентификатор для Вашего субграфа. 
+- Директория для создания субграфа: выберите локальную директорию +- Сеть Ethereum (необязательно): Вам может потребоваться указать, из какой сети, совместимой с EVM, ваш субграф будет индексировать данные +- Адрес контракта: найдите адрес смарт-контракта, с которого хотите запросить данные +- ABI: если ABI не заполняется автоматически, Вам нужно будет ввести его вручную как файл JSON +- Стартовый блок: рекомендуется ввести стартовый блок, чтобы сэкономить время, пока Ваш субграф индексирует данные блокчейна. Вы можете найти стартовый блок, найдя блок, в котором был развернут Ваш контракт. +- Имя контракта: введите имя Вашего контракта +- Индексировать события контракта как объекты: рекомендуется установить для этого параметра значение true, поскольку он автоматически добавит сопоставления в Ваш субграф для каждого запускаемого события +- Добавить еще один контракт (опционально): Вы можете добавить еще один контракт + +Инициализируйте свой субграф из существующего контракта, выполнив следующую команду: + +```sh +graph init --studio +``` + +На следующем скриншоте показан пример того, чего следует ожидать при инициализации субграфа: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Создание Вашего субграфа + +Предыдущая команда создала "скелет" субграфа, который Вы можете использовать в качестве отправной точки для построения своего субграфа. При внесении изменений в субграф Вы будете в основном работать с тремя файлами: + +- Манифест (subgraph.yaml) - Манифест определяет, какие источники данных будут индексироваться Вашими субграфами. +- Схема (schema.graphql) - Схема GraphQL определяет, какие данные Вы хотите извлечь из субграфа. +- AssemblyScript Mappings (mapping.ts) - это код, который преобразует данные из ваших источников данных в объекты, определенные в схеме. + +Для получения дополнительной информации о том, как написать свой подграф, см. [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Развертывание в Subgraph Studio + +После того как Ваш субграф будет написан, выполните следующие команды: + +```sh +$ graph codegen +$ graph build +``` + +- Аутентифицируйте и разверните свой субграф. Ключ развертывания можно найти на странице Subgraph в Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Тестирование Вашего субграфа + +Вы можете протестировать свой субграф, осуществив пример запроса в разделе игровой площадки. + +Логи сообщат вам, есть ли какие-либо ошибки с вашим субграфом. Логи рабочего субграфа будут выглядеть следующим образом: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). 
The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## Публикация субграфа в децентрализованной сети The Graph + +После того как Ваш субграф был развернут в Subgraph Studio, Вы его протестировали и готовы запустить в производство, Вы можете опубликовать его в децентрализованной сети. + +В Subgraph Studio кликните по своему субграфу. На странице субграфа Вы сможете нажать кнопку публикации в правом верхнем углу. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Прежде чем Вы сможете запросить свой субграф, индексаторы должны начать обслуживать запросы к нему. Чтобы упростить этот процесс, Вы можете создать свой собственный субграф, используя GRT. + +На момент написания этой статьи рекомендуется создать собственный субграф с 10 000 GRT, чтобы обеспечить его индексацию и доступность для запросов как можно скорее. + +Чтобы сэкономить на расходах за газ, Вы можете курировать свой субграф в той же транзакции, в которой Вы его опубликовали, нажав эту кнопку при публикации своего субграфа в децентрализованной сети The Graph: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Запрос Вашего Субграфа + +Теперь Вы можете запросить свой субграф, отправив запросы GraphQL на URL-адрес запроса Вашего субграфа, который можно найти, нажав кнопку запроса. + +Вы можете сделать запрос из своего децентрализованного приложения, если у Вас нет ключа API, с помощью бесплатного временного URL-адреса запроса с ограниченной скоростью, который можно использовать для разработки и подготовки. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ru/release-notes/assemblyscript-migration-guide.mdx b/website/pages/ru/release-notes/assemblyscript-migration-guide.mdx index 2ba6cc28e1e1..9bd43965043a 100644 --- a/website/pages/ru/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/ru/release-notes/assemblyscript-migration-guide.mdx @@ -1,5 +1,5 @@ --- -title: AssemblyScript Migration Guide +title: Руководство по миграции AssemblyScript --- До сих пор для субграфов использовалась одна из [первых версий AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Наконец, мы добавили поддержку [последней доступной версии](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 
🎉 @@ -14,21 +14,21 @@ title: AssemblyScript Migration Guide ### Новый функционал -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- Теперь `TypedArray` можно создавать, используя `ArrayBuffer`6 с помощью [нового статического метода `wrap`](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- Новые функции стандартной библиотеки: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` и `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Добавлена поддержка x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Добавлен `StaticArray`, более эффективный вариант массива ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Добавлен `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Реализован аргумент `radix` для `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Добавлена поддержка разделителей в литералах с плавающей точкой ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Добавлена поддержка функций первого класса ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Добавление встроенных модулей: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Внедрение `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Добавлена поддержка литеральных строк шаблона ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Добавление `encodeURI(Component)` и `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Добавление `toString`, `toDateString` и `toTimeString` к `Date` ([v0.18.29](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Добавление `toUTCString` для `Date` ([v0.18.30](https://github.com/ AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Добавление встроенного типа `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) ### Оптимизации @@ -187,7 +187,7 @@ let bytes = new Bytes(2) Есть два сценария, в которых Вы можете захотеть выполнить преобразование, но использовать `as`/`var` **небезопасно**: - Понижение уровня наследования классов (superclass → subclass) -- Между двумя типами, имеющими общий суперкласс +- Между двумя типами, имеющими общий супер класс ```typescript // понижение уровня наследования классов @@ -306,7 +306,7 @@ let somethingOrElse: string = data ? data : 'else' // компилируется ### Перегрузка оператора при доступе к свойствам -Если Вы попытаетесь суммировать (например) тип, допускающий значение NULL (из доступа к свойству), с типом, не допускающим значение NULL, компилятор AssemblyScript вместо того, чтобы выдать предупреждение об ошибке компиляции, предупреждающую о том, что одно из значений допускает значение NULL, просто компилируется молча, давая возможность сломать код во время выполнения. +Если Вы попытаетесь суммировать (например) тип, допускающий значение Null (из доступа к свойству), с типом, не допускающим значение Null, компилятор AssemblyScript вместо того, чтобы выдать предупреждение об ошибке компиляции, предупреждающую, что одно из значений допускает значение Null, просто компилируется молча, давая возможность сломать код во время выполнения. 
```typescript class BigInt extends Uint8Array { @@ -465,7 +465,7 @@ arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -Для того, чтобы фактически начать, Вы должны либо инициализировать `Array` нулевым размером, следующим образом: +Для того чтобы фактически начать, Вы должны либо инициализировать `Array` нулевым размером, следующим образом: ```typescript let arr = new Array(0) // [] @@ -517,7 +517,7 @@ type MyEntity @entity { - `Map#set` и `Set#add` согласованы со спецификацией, произведён возврат к `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Массивы больше не наследуются от ArrayBufferView, а являются самостоятельными ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Классы, инициализируемые из объектных литералов, больше не могут дефинировать конструктор ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Классы, инициализируемые из объектных литералов, больше не могут определять конструктор ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) - Результатом бинарной операции `**` теперь является целое число с общим знаменателем, если оба операнда являются целыми числами. Раньше результатом было число с плавающей запятой, как при вызове `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) - Приведение `NaN` к `false` при преобразовании в `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) - При сдвиге небольшого целочисленного значения типа `i8`/`u8` или `i16`/`u16`, на результат влияют только соответственно 3 или 4 младших разряда значения RHS, аналогично тому, как при сдвиге `i32.shl` на результат влияют только 5 младших разрядов значения RHS. Пример: `someI8 << 8` ранее выдавал значение `0`, но теперь выдает значение `someI8` благодаря маскировке RHS как `8 & 7 = 0` (3 бита) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) diff --git a/website/pages/ru/release-notes/graphql-validations-migration-guide.mdx b/website/pages/ru/release-notes/graphql-validations-migration-guide.mdx index 9a20adff94e4..1b815a5aaf06 100644 --- a/website/pages/ru/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/ru/release-notes/graphql-validations-migration-guide.mdx @@ -8,13 +8,13 @@ title: Руководство по переходу на валидацию Grap Поддержка валидации GraphQL является основой для будущих новых функций и производительности в масштабе The Graph Network. -Это также обеспечит детерминизм ответов на запросы, что является ключевым требованием в сети Graph. +Это также обеспечит детерминизм ответов на запросы, что является ключевым требованием в сети The Graph. **Включение валидации GraphQL нарушит работу некоторых существующих запросов**, отправленных в API The Graph. Чтобы выполнить эти валидации, следуйте руководству по миграции. -> ⚠️ Если Вы не перенесете свои запросы до развертывания валидаций, они вернут ошибки и, возможно, повредят ваши интерфейсы/клиенты. 
+> ⚠️ Если Вы не перенесете свои запросы до развертывания валидаций, они будут возвращать ошибки и, возможно, повредят ваши интерфейсы/клиенты. ## Руководство по миграции @@ -464,7 +464,7 @@ API GraphQL вызовет ошибку, если используется ка Эти неизвестные ссылки необходимо исправить: - переименуйте, если это опечатка -- в противном случае удалить +- в противном случае удалите ### Фрагмент: недопустимый спред или определение diff --git a/website/pages/ru/substreams.mdx b/website/pages/ru/substreams.mdx index b9a131deb937..9a9549833e85 100644 --- a/website/pages/ru/substreams.mdx +++ b/website/pages/ru/substreams.mdx @@ -2,8 +2,43 @@ title: Подпотоки --- -Substreams (Подпотоки) — это новая технология, которая была разработана ключевыми разработчиками протокола The Graph и предназначена для чрезвычайно эффективного потребления и обработки индексированных данных в блокчейне. В настоящее время технология находится на стадии открытого бета-тестирования и доступна для тестирования и разработки на нескольких блокчейнах. +![Substreams Logo](/img/substreams-logo.png) -Перейдите к [документации](https://substreams.streamingfast.io/), чтобы узнать больше и приступить к их созданию. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Начало работы + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. 
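Returning to step 1 of the Substreams walkthrough above: the Rust snippet shown there is a fragment. Below is a slightly more complete sketch of what such a module typically looks like when built against the `substreams` and `substreams-ethereum` crates, reusing the same `Hex::encode` calls as the snippet above. The module name `map_my_block` and the `crate::pb::my::v1::MyBlock` path are assumptions for illustration; in a real project `MyBlock` would be generated (via prost) from the protobuf message declared as this module's output in the Substreams manifest.

```rust
use substreams::errors::Error;
use substreams::Hex;
use substreams_ethereum::pb::eth::v2::Block;

// Assumed to be the prost-generated type for the protobuf message that the
// manifest declares as this module's output (the path is hypothetical).
use crate::pb::my::v1::MyBlock;

// The attribute macro registers this function as a Substreams "map" module,
// so it can be packed into WASM (step 2) and executed by a Substreams endpoint (step 3).
#[substreams::handlers::map]
fn map_my_block(blk: Block) -> Result<MyBlock, Error> {
    let header = blk.header.as_ref().unwrap();

    Ok(MyBlock {
        number: blk.number,
        hash: Hex::encode(&blk.hash),
        parent_hash: Hex::encode(&header.parent_hash),
    })
}
```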
diff --git a/website/pages/ru/sunrise.mdx b/website/pages/ru/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ru/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. 
+ +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. 
However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ru/tokenomics.mdx b/website/pages/ru/tokenomics.mdx index 3a5be68b3dab..6a245723f89a 100644 --- a/website/pages/ru/tokenomics.mdx +++ b/website/pages/ru/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph - это децентрализованный протокол, кот Это похоже на модель B2B2C, за исключением того, что она основана на децентрализованной сети участников. Участники сети работают сообща, предоставляя данные конечным пользователям в обмен на вознаграждение в GRT. GRT - это токен рабочей утилиты, который координирует поставщиков данных и потребителей. 
Токен GRT служит средством координации поставщиков и потребителей данных в сети и стимулирует участников протокола к эффективной организации данных. -Используя The Graph, пользователи могут легко получить доступ к данным из блокчейна, платя только за ту конкретную информацию, которая им нужна. The Graph используется многими популярными на сегодняшний день [приложениями](https://thegraph.com/explorer) в экосистеме web3. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph индексирует данные блокчейна так же, как Google индексирует данные в интернете. Что интересно, Вы даже, возможно, уже пользовались The Graph, просто Вы этого не знаете. Если Вы взглянете на Front End dapp и увидите там subgraph, знайте - Вы получаете данные благодаря The Graph! @@ -75,7 +75,7 @@ The Graph играет решающую роль в том, чтобы сдел Индексаторы могут зарабатывать GRT двумя путями: -1. Комиссия за запросы: GRT, выплачиваемые разработчиками или пользователями за запросы к данным субграфа. Плата за запрос зачисляется в пул скидок и распределяется среди Индексаторов. +1. Плата за запросы: GRT, выплачиваемая разработчиками или пользователями за запросы данных субграфа. Плата за запрос напрямую распределяется между индексаторами в соответствии с экспоненциальной функцией скидки (см. GIP [здесь](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Вознаграждения за индексирование: ежегодная эмиссия в размере 3% распределяется между Индексаторами в зависимости от количества индексируемых ими субграфов. Эти вознаграждения стимулируют Индексаторов индексировать субграфы, иногда до начала сборов за запросы, чтобы накопить и отправить доказательства индексирования (POI), подтверждающие, что они точно проиндексировали данные. diff --git a/website/pages/sv/about.mdx b/website/pages/sv/about.mdx index c1f7c886900f..7d6022949987 100644 --- a/website/pages/sv/about.mdx +++ b/website/pages/sv/about.mdx @@ -1,47 +1,47 @@ --- -title: About The Graph +title: Om The Graph --- -This page will explain what The Graph is and how you can get started. +Denna sida kommer att förklara vad The Graph är och hur du kan komma igång. -## What is The Graph? +## Vad är The Graph? -The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. +The Graph är en decentraliserad protokoll för indexering och frågning av blockkedjedata. The Graph möjliggör frågor på data som är svår att fråga direkt. -Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. +Projekt med komplexa smarta kontrakt som [Uniswap](https://uniswap.org/) och NFT-initiativ som [Bored Ape Yacht Club](https://boredapeyachtclub.com/) lagrar data på Ethereum-blockkedjan, vilket gör det mycket svårt att läsa något annat än grundläggande data direkt från blockkedjan. 
-In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. +I fallet med Bored Ape Yacht Club kan vi utföra grundläggande läsfunktioner på [kontraktet](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) som att hämta ägaren av en viss Ape, hämta innehålls-URI:en för en Ape baserat på deras ID eller den totala tillgången, eftersom dessa läsfunktioner är programmerade direkt i det smarta kontraktet. Men mer avancerade frågor och operationer i den verkliga världen, som aggregering, sökning, relationer och icke trivial filtrering, är inte möjliga. Om vi till exempel ville fråga efter apor som ägs av en viss adress och filtrera efter en av deras egenskaper, skulle vi inte kunna få den informationen genom att interagera direkt med kontraktet självt. -To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. +För att få denna data skulle du behöva bearbeta varje enskilt [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746)-händelse som någonsin har emitterats, läsa metadata från IPFS med hjälp av Token-ID och IPFS-hash, och sedan aggregera den. Även för dessa typer av relativt enkla frågor skulle det ta **timmar eller till och med dagar** för en decentraliserad applikation (dapp) som körs i en webbläsare att få ett svar. -You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. +Du skulle också kunna bygga din egen server, bearbeta transaktionerna där, spara dem i en databas och skapa en API-slutpunkt ovanpå alltihop för att fråga data. Men den här möjligheten är [resurskrävande](/network/benefits/), kräver underhåll, utgör en enskild felkälla och bryter viktiga säkerhetsegenskaper som krävs för decentralisering. -**Indexing blockchain data is really, really hard.** +**Indexering av blockkedjedata är verkligen, verkligen svårt.** -Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. 
+Blockkedjeegenskaper som slutgiltighet, kedjereorganisationer eller "uncled blocks" komplicerar denna process ytterligare och gör det inte bara tidskrävande utan också konceptuellt svårt att hämta korrekta frågeresultat från blockkedjedata. -The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). +The Graf löser detta med ett decentraliserat protokoll som indexerar och möjliggör prestanda- och effektiv frågning av blockkedjedata. Dessa API:er (indexerade "subgrafer") kan sedan frågas med en standard GraphQL-API. Idag finns det en värdtjänst samt ett decentraliserat protokoll med samma funktioner. Båda stöds av den öppna källkoden för [Graf Node](https://github.com/graphprotocol/graph-node)-implementeringen. -## How The Graph Works +## Hur The Graph Fungerar -The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. +The Graf lär sig vad och hur man indexerar Ethereum-data baserat på subgrafbeskrivningar, kända som subgraf-manifestet. Subgrafbeskrivningen definierar de intressanta smarta kontrakten för en subgraf, händelserna i dessa kontrakt att vara uppmärksam på och hur man kartlägger händelsedata till data som The Graf kommer att lagra i sin databas. -Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. +När du har skrivit ett `subgraf-manifest`, använder du Graf CLI för att lagra definitionen i IPFS och talar om för indexeringen att börja indexera data för den subgrafen. -This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: +Denna diagram ger mer detaljer om datatillflödet när ett subgraf-manifest har distribuerats och hanterar Ethereum-transaktioner: -![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) +![En grafik som förklarar hur The Graf använder Graf Node för att servera frågor till datakonsumenter](/img/graph-dataflow.png) -The flow follows these steps: +Följande steg följs: -1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. -5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. 
The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. +1. En dapp lägger till data i Ethereum genom en transaktion på ett smart kontrakt. +2. Det smarta kontraktet sänder ut en eller flera händelser under bearbetningen av transaktionen. +3. Graf Node skannar kontinuerligt Ethereum efter nya block och den data för din subgraf de kan innehålla. +4. Graf Node hittar Ethereum-händelser för din subgraf i dessa block och kör de kartläggande hanterarna du tillhandahållit. Kartläggningen är en WASM-modul som skapar eller uppdaterar de dataenheter som Graph Node lagrar som svar på Ethereum-händelser. +5. Dappen frågar Graph Node om data som indexerats från blockkedjan med hjälp av nodens [GraphQL-slutpunkt](https://graphql.org/learn/). Graph Node översätter i sin tur GraphQL-frågorna till frågor för sin underliggande datalagring för att hämta dessa data, och använder lagrets indexeringsegenskaper. Dappen visar dessa data i ett användarvänligt gränssnitt för slutanvändare, som de använder för att utfärda nya transaktioner på Ethereum. Cykeln upprepas. -## Next Steps +## Nästa steg -In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. +I de följande avsnitten går vi in mer detaljerat på hur man definierar subgrafer, hur man distribuerar dem och hur man frågar data från indexen som Graph Node bygger. -Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. +Innan du börjar skriva din egen subgraf kan du vilja titta på Graph Explorer och utforska några av de subgrafer som redan har distribuerats. Sidan för varje subgraf innehåller en lekplats som låter dig fråga den subgrafens data med GraphQL. diff --git a/website/pages/sv/arbitrum/arbitrum-faq.mdx b/website/pages/sv/arbitrum/arbitrum-faq.mdx index 849d08c92b93..3978366c8f4e 100644 --- a/website/pages/sv/arbitrum/arbitrum-faq.mdx +++ b/website/pages/sv/arbitrum/arbitrum-faq.mdx @@ -1,78 +1,78 @@ --- -title: Arbitrum FAQ +title: Arbitrum Vanliga frågor --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Klicka [here](#billing-on-arbitrum-faqs) om du vill hoppa till Arbitrum Billing Vanliga frågor. -## Why is The Graph implementing an L2 Solution? +## Varför implementerar The Graf en L2 lösning? -By scaling The Graph on L2, network participants can expect: +Genom att skala The Graf på L2 kan nätverksdeltagare förvänta sig: -- Upwards of 26x savings on gas fees +- Uppemot 26x besparingar på gasavgifter -- Faster transaction speed +- Snabbare transaktionshastighet -- Security inherited from Ethereum +- Säkerhet ärvt från Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. 
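Step 4 of the data flow above describes mapping handlers as WASM modules that create or update entities in response to Ethereum events. As a hedged illustration (not taken from this page), a minimal AssemblyScript mapping handler might look like the sketch below; the `Transfer` event class, the `Token` entity, and the generated import paths are assumptions tied to a hypothetical subgraph schema and ABI.

```typescript
// Hypothetical mapping handler run by Graph Node (step 4 above).
// `Transfer` and `Token` would be generated by `graph codegen` for a specific
// subgraph schema/ABI, so these import paths are illustrative assumptions.
import { Transfer } from "../generated/MyContract/MyContract"
import { Token } from "../generated/schema"

export function handleTransfer(event: Transfer): void {
  // Use the token ID from the event as the entity ID.
  let id = event.params.tokenId.toString()
  let token = Token.load(id)
  if (token == null) {
    token = new Token(id)
  }
  // Record the new owner and persist the entity to Graph Node's store.
  token.owner = event.params.to.toHexString()
  token.save()
}
```

Handlers like this are what the subgraph manifest points to, and Graph Node invokes them for each matching event it finds while scanning new blocks.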
+Genom att skala protokollets smarta kontrakt till L2 kan nätverksdeltagare interagera oftare till en reducerad kostnad i gasavgifter. Till exempel kan indexerare öppna och stänga allokeringar för att indexera ett större antal subgrafer med högre frekvens, utvecklare kan distribuera och uppdatera subgrafer med större lätthet, delegatorer kan delegera GRT med ökad frekvens och curatorer kan lägga till eller ta bort signaler till ett större antal subgrafer – åtgärder som tidigare ansågs vara för kostsamma för att utföra ofta på grund av gas. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +Graph gemenskapen beslutade att gå vidare med Arbitrum förra året efter resultatet av diskussionen [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). -## What do I need to do to use The Graph on L2? +## Vad behöver jag göra för att använda The Graph på L2? -Users bridge their GRT and ETH  using one of the following methods: +Användare överbryggar sina GRT och ETH  med någon av följande metoder: -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Grafbron på Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +För att dra fördel av att använda The Graph på L2, använd den här rullgardinsmenyn för att växla mellan kedjor. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Dropdown-väljare för att växla Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## Som subgrafutvecklare, datakonsument, indexerare, curator eller delegator, vad behöver jag göra nu? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Det krävs inga omedelbara åtgärder, men nätverksdeltagare uppmuntras att börja flytta till Arbitrum för att dra nytta av fördelarna med L2. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Kärnutvecklarteam arbetar med att skapa L2 överföringsverktyg som kommer att göra det betydligt lättare att flytta delegering, kurering och subgrafer till Arbitrum. Nätverksdeltagare kan förvänta sig att L2 överföringsverktyg ska vara tillgängliga till sommaren 2023. -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +Från och med den 10 april 2023 präglas 5 % av alla indexeringsbelöningar på Arbitrum. När nätverksdeltagandet ökar, och när rådet godkänner det, kommer indexeringsbelöningar gradvis att flyttas från Ethereum till Arbitrum, och så småningom flyttas helt till Arbitrum. -## If I would like to participate in the network on L2, what should I do? +## Om jag skulle vilja delta i nätverket på L2, vad ska jag göra? 
-Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Vänligen hjälp [testa nätverket](https://testnet.thegraph.com/explorer) på L2 och rapportera feedback om din upplevelse på [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Finns det några risker med att skala nätverket till L2? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Alla smarta kontrakt har blivit grundligt [granskade](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Allt har testats noggrant och en beredskapsplan finns på plats för att säkerställa en säker och sömlös övergång. Detaljer finns [här](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Kommer befintliga subgrafer på Ethereum att fortsätta att fungera? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Ja, kontrakten för The Graph Network kommer att fungera parallellt på både Ethereum och Arbitrum tills de flyttas helt till Arbitrum vid ett senare tillfälle. -## Will GRT have a new smart contract deployed on Arbitrum? +## Kommer GRT att ha ett nytt smart kontrakt utplacerat på Arbitrum? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Ja, GRT har ytterligare ett [smart kontrakt på Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). Ethereums huvudnät [GRT-kontrakt](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) kommer dock att fortsätta att fungera. -## Billing on Arbitrum FAQs +## Fakturering på Arbitrum Vanliga frågor -## What do I need to do about the GRT in my billing balance? +## Vad behöver jag göra med GRT i mitt faktureringssaldo? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Ingenting! Din GRT har migrerats på ett säkert sätt till Arbitrum och används för att betala för frågor när du läser detta. -## How do I know my funds have migrated securely to Arbitrum? +## Hur vet jag att mina pengar har migrerats säkert till Arbitrum? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Alla GRT-faktureringssaldon har redan migrerats till Arbitrum. Du kan se faktureringskontraktet på Arbitrum [här](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure?
-The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Bron har blivit [hårt granskad](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) för att garantera trygghet och säkerhet för alla användare. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Vad behöver jag göra om jag lägger till färsk GRT från min Ethereum mainnet-plånbok? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Att lägga till GRT till ditt Arbitrum-faktureringssaldo kan göras med ett klick i [Subgraph Studio](https://thegraph.com/studio/). Du kommer enkelt att kunna broa din GRT till Arbitrum och fylla på dina API-nycklar i en transaktion. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Besök [Faktureringssidan](https://thegraph.com/docs/en/billing/) för mer detaljerade instruktioner om hur du lägger till, tar ut eller skaffar GRT. diff --git a/website/pages/sv/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/sv/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..e3f54470bf34 100644 --- a/website/pages/sv/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/sv/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: Vanliga frågor om L2 Överföringsverktyg --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## Allmänt -## What are L2 Transfer Tools? +### Vad är L2 Överföringsverktyg? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph har gjort det 26 gånger billigare för bidragsgivare att delta i nätverket genom att implementera protokollet på Arbitrum One. L2 Överföringsverktygen skapades av kärnutvecklarna för att göra det enkelt att flytta till L2. -## Can I use the same wallet I use on Ethereum mainnet? +För varje nätverksdeltagare finns en uppsättning L2 Överföringsverktyg tillgängliga för att göra upplevelsen smidig när du flyttar till L2, vilket undviker upptiningsperioder eller att manuellt ta ut och broa GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Dessa verktyg kommer att kräva att du följer en specifik uppsättning steg beroende på vilken roll du har inom The Graph och vad du överför till L2.
-## Subgraph Transfer +### Kan jag använda samma plånbok som jag använder på Ethereum huvudnätet? -## How do I transfer my subgraph? +Om du använder en [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account)-plånbok kan du använda samma adress. Om din Ethereum huvudnät plånbok är en kontrakt (t.ex. en multisig) måste du specificera en [Arbitrum plånboksadress](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) där din överföring kommer att skickas. Kontrollera adressen noggrant eftersom felaktiga överföringar till felaktig adress kan resultera i permanent förlust. Om du vill använda en multisig på L2 måste du se till att du implementerar en multisig-kontrakt på Arbitrum One. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Vad händer om jag inte avslutar min överföring på 7 dagar? -3. Confirm subgraph transfer on Arbitrum\* +L2 Överföringsverktygen använder Arbitrums nativa mekanism för att skicka meddelanden från L1 till L2. Denna mekanism kallas en "retryable ticket" och används av alla nativa token-broar, inklusive Arbitrum GRT-broen. Du kan läsa mer om retryable tickets i [Arbitrums dokumentation](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +När du överför dina tillgångar (subgraf, insats, delegation eller kurering) till L2 skickas ett meddelande genom Arbitrum GRT-broen, vilket skapar en retryable ticket i L2. Överföringsverktyget inkluderar ett visst ETH-värde i transaktionen, som används för att 1) betala för att skapa biljetten och 2) betala för gasen för att utföra biljetten i L2. Men eftersom gaspriserna kan variera fram till att biljetten är redo att utföras i L2 kan det hända att detta automatiska utförsel försöket misslyckas. När det händer kommer Arbitrum-broen att behålla retryable ticket i livet i upp till 7 dagar, och vem som helst kan försöka "inlösa" biljetten (vilket kräver en plånbok med en viss mängd ETH broad till Arbitrum). -5. Update Query URL (recommended) +Detta är vad vi kallar "Bekräfta"-steget i alla överföringsverktygen - det kommer att köras automatiskt i de flesta fall, eftersom den automatiska utförandet oftast är framgångsrikt, men det är viktigt att du kontrollerar att det gick igenom. Om det inte lyckas och det inte finns några framgångsrika försök på 7 dagar kommer Arbitrum-broen att kasta biljetten, och dina tillgångar (subgraf, insats, delegation eller kurering) kommer att gå förlorade och kan inte återvinnas. The Graphs kärnutvecklare har ett övervakningssystem på plats för att upptäcka dessa situationer och försöka lösa biljetterna innan det är för sent, men det är i slutändan ditt ansvar att se till att din överföring är klar i tid. 
Om du har svårt att bekräfta din transaktion, kontakta oss via [detta formulär](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms), och kärnutvecklarna kommer att vara där för att hjälpa dig. -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Jag startade min överföring av delegation/insats/kurering, och jag är osäker på om den lyckades komma till L2, hur kan jag bekräfta att den överfördes korrekt? -## Where should I initiate my transfer from? +Om du inte ser en banner på din profil som ber dig att slutföra överföringen är det troligt att transaktionen säkert nådde L2, och ingen ytterligare åtgärd behövs. Om du är osäker kan du kontrollera om Utforskaren visar din delegation, insats eller kurering på Arbitrum One. -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Om du har L1-transaktionshashen (som du kan hitta genom att titta på de senaste transaktionerna i din plånbok) kan du också bekräfta om "retryable ticket" som bar meddelandet till L2 blev inlöst här: https://retryable-dashboard.arbitrum.io/ - om det automatiska inlösandet misslyckades kan du också ansluta din plånbok där och lösa in det. Var säker på att kärnutvecklarna också övervakar meddelanden som fastnar och kommer att försöka lösa dem innan de går ut. -## How long do I need to wait until my subgraph is transferred +## Subgraf Överföring -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +### Hur överför jag min subgraf? -## Will my subgraph still be discoverable after I transfer it to L2? + -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +För att överföra din subgraf måste du slutföra följande steg: -## Does my subgraph need to be published to transfer it? +1. Initiera överföringen på Ethereum huvudnätet -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +2. Vänta 20 minuter på bekräftelse -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +3. 
Bekräfta subgraföverföringen på Arbitrum\* -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +4. Slutför publiceringen av subgraf på Arbitrum -## After I transfer, do I also need to re-publish on Arbitrum? +5. Uppdatera fråge-URL (rekommenderas) -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +\*Observera att du måste bekräfta överföringen inom 7 dagar, annars kan din subgraf gå förlorad. I de flesta fall kommer detta steg att köras automatiskt, men en manuell bekräftelse kan behövas om det finns en gasprisspike på Arbitrum. Om det uppstår några problem under denna process finns det resurser för att hjälpa: kontakta support på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). -## Will there be a down-time to my endpoint while re-publishing? +### Var ska jag initiera min överföring från? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Du kan initiera din överföring från [Subgraph Studio](https://thegraph.com/studio/), [Utforskaren,](https://thegraph.com/explorer) eller från vilken som helst subgrafsdetaljsida. Klicka på knappen "Överför subgraf" på subgrafsdetaljsidan för att starta överföringen. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Hur länge måste jag vänta tills min subgraf överförs? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Överföringstiden tar ungefär 20 minuter. Arbitrum-broen arbetar i bakgrunden för att slutföra broöverföringen automatiskt. I vissa fall kan gasavgifterna öka, och du måste bekräfta transaktionen igen. -## Will my subgraph's curation move with my subgraph? +### Kommer min subgraf fortfarande vara sökbar efter att jag har överfört den till L2? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Din subgraf kommer endast vara sökbar på det nätverk där den är publicerad. Till exempel, om din subgraf är på Arbitrum One, kan du endast hitta den i Utforskaren på Arbitrum One och kommer inte att kunna hitta den på Ethereum. Se till att du har valt Arbitrum One i nätverksväxlaren högst upp på sidan för att säkerställa att du är på rätt nätverk.  Efter överföringen kommer L1-subgrafen att visas som föråldrad. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### Måste min subgraf vara publicerad för att kunna överföra den? 
-## Can I move my subgraph back to Ethereum mainnet after I transfer? +För att dra nytta av subgraföverföringsverktyget måste din subgraf redan vara publicerad på Ethereum huvudnät och måste ha något kureringssignal ägt av plånboken som äger subgrafen. Om din subgraf inte är publicerad rekommenderas det att du helt enkelt publicerar direkt på Arbitrum One - de associerade gasavgifterna kommer att vara betydligt lägre. Om du vill överföra en publicerad subgraf men ägarplånboken inte har kuraterat något signal på den kan du signalera en liten mängd (t.ex. 1 GRT) från den plånboken; se till att välja "automigrering" signal. -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### Vad händer med Ethereum huvudnätversionen av min subgraf efter att jag har överfört till Arbitrum? -## Why do I need bridged ETH to complete my transfer? +Efter att ha överfört din subgraf till Arbitrum kommer Ethereum huvudnätversionen att föråldras. Vi rekommenderar att du uppdaterar din fråge-URL inom 48 timmar. Det finns dock en nådperiod som gör att din huvudnät-URL fungerar så att stöd från tredjeparts-dappar kan uppdateras. -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### Behöver jag också publicera om på Arbitrum efter överföringen? -## Curation Signal +Efter de 20 minuters överföringsfönstret måste du bekräfta överföringen med en transaktion i användargränssnittet för att slutföra överföringen, men överföringsverktyget kommer att guida dig genom detta. Din L1-endpunkt kommer att fortsätta att stödjas under överföringsfönstret och en nådperiod efter. Det rekommenderas att du uppdaterar din endpunkt när det passar dig. -## How do I transfer my curation? +### Kommer min endpunkt att ha nertid under ompubliceringen? -To transfer your curation, you will need to complete the following steps: +Det är osannolikt, men det är möjligt att uppleva en kort nertid beroende på vilka indexeringar som stöder subgrafen på L1 och om de fortsätter att indexera den tills subgrafen är fullt stödd på L2. -1. Initiate signal transfer on Ethereum mainnet +### Är publicering och versionering densamma på L2 som på Ethereum huvudnätet? -2. Specify an L2 Curator address\* +Ja. Välj Arbitrum One som ditt publicerade nätverk när du publicerar i Subgraph Studio. I studion kommer den senaste ändpunkt att vara tillgänglig, som pekar till den senaste uppdaterade versionen av subgrafen. -3. Wait 20 minutes for confirmation +### Kommer min subgrafs kurering att flyttas med min subgraf? -\*If necessary - i.e. you are using a contract address. +Om du har valt automatisk migreringssignal kommer 100% av din egen kurering att flyttas med din subgraf till Arbitrum One. All subgrafens kureringssignal kommer att konverteras till GRT vid överföringstillfället, och GRT som motsvarar din kureringssignal kommer att användas för att prägla signal på L2-subgrafen. -## How will I know if the subgraph I curated has moved to L2? +Andra kuratorer kan välja att ta tillbaka sin del av GRT eller också överföra den till L2 för att prägla signal på samma subgraf. 
-When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Kan jag flytta min subgraf tillbaka till Ethereum huvudnätet efter överföringen? -## What if I do not wish to move my curation to L2? +När den är överförd kommer din Ethereum huvudnätversion av denna subgraf att vara föråldrad. Om du vill flytta tillbaka till huvudnätet måste du omimplementera och publicera på huvudnätet igen. Dock avråds starkt från att flytta tillbaka till Ethereum huvudnätet eftersom indexbelöningar till sist kommer att fördelas helt på Arbitrum One. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Varför behöver jag bridged ETH för att slutföra min överföring? -## How do I know my curation successfully transferred? +Gasavgifter på Arbitrum One betalas med bridged ETH (dvs. ETH som har broats till Arbitrum One). Gasavgifterna är dock betydligt lägre jämfört med Ethereum huvudnätet. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +## Delegering -## Can I transfer my curation on more than one subgraph at a time? +### Hur överför jag min delegation? -There is no bulk transfer option at this time. + -## Indexer Stake +För att överföra din delegation måste du följa följande steg: -## How do I transfer my stake to Arbitrum? +1. Initiera överföring av delegationen på Ethereum huvudnät +2. Vänta 20 minuter på bekräftelse +3. Bekräfta överföring av delegationen på Arbitrum -To transfer your stake, you will need to complete the following steps: +\*\*\*\*Du måste bekräfta transaktionen för att slutföra överföringen av delegationen på Arbitrum. Detta steg måste slutföras inom 7 dagar, annars kan delegationen gå förlorad. I de flesta fall kommer detta steg att köras automatiskt, men manuell bekräftelse kan behövas om det uppstår en gasprisökning på Arbitrum. Om det uppstår problem under denna process finns det resurser för att hjälpa: kontakta support på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). -1. Initiate stake transfer on Ethereum mainnet +### Vad händer med mina belöningar om jag inleder en överföring med en öppen tilldelning på Ethereum huvudnät? -2. Wait 20 minutes for confirmation +Om Indexern som du delegerar till fortfarande är aktiv på L1, kommer du att förlora eventuella delegationsbelöningar från öppna tilldelningar på Ethereum huvudnät när du överför till Arbitrum. Det innebär att du kommer att förlora belöningar från högst den senaste 28-dagarsperioden. Om du tajmar överföringen precis efter att Indexern har stängt tilldelningarna kan du se till att detta är den minsta möjliga mängd. Om du har en kommunikationskanal med dina Indexer(s), överväg att diskutera med dem för att hitta den bästa tiden att göra din överföring. -3. Confirm stake transfer on Arbitrum +### Vad händer om Indexern som jag för närvarande delegerar till inte finns på Arbitrum One? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Överföringsverktyget för L2 kommer endast att aktiveras om Indexern som du har delegerat till har överfört sin egen insats till Arbitrum. -## Will all of my stake transfer? +### Har delegatörer möjlighet att delegera till en annan Indexer? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Om du vill delegera till en annan Indexerare kan du överföra till samma Indexerare på Arbitrum, sedan sluta delegera och vänta på upptiningstiden. Efter detta kan du välja en annan aktiv Indexerare att delegera till. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Vad gör jag om jag inte kan hitta Indexern som jag delegerar till på L2? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +Överföringsverktyget för L2 kommer automatiskt att upptäcka Indexern som du tidigare har delegerat till. -## How much time do I have to confirm my stake transfer to Arbitrum? +### Kan jag blanda min delegation över flera nya Indexers istället för den tidigare Indexern? -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +Överföringsverktyget för L2 kommer alltid att flytta din delegation till samma Indexer som du tidigare delegerat till. När du har flyttat till L2 kan du sluta delegera, vänta på upptiningstiden och bestämma om du vill dela upp din delegation. -## What if I have open allocations? +### Omfattas jag av en återhämtningsperiod eller kan jag ta ut direkt efter att ha använt överföringsverktyget för L2-delegering? -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +Överföringsverktyget låter dig omedelbart flytta till L2. Om du vill sluta delegera måste du vänta på upptiningstiden. Om en Indexer har överfört all sin insats till L2 kan du dock ta ut på Ethereum huvudnätet direkt. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Kan mina belöningar påverkas negativt om jag inte överför min delegation? -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +Det förväntas att all nätverksdeltagande kommer att flyttas till Arbitrum One i framtiden. -## How long will it take to transfer my stake? +### Hur lång tid tar det att slutföra överföringen av min delegering till L2? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +En 20-minuters bekräftelse krävs för delegationens överföring. 
Observera att efter de 20 minuterna måste du komma tillbaka och slutföra steg 3 i överföringsprocessen inom 7 dagar. Om du inte gör det kan din delegation gå förlorad. Notera att i de flesta fall kommer överföringsverktyget att slutföra detta steg åt dig automatiskt. Om det uppstår en misslyckad automatisk försök måste du slutföra det manuellt. Om några problem uppstår under denna process, oroa dig inte, vi kommer att vara här för att hjälpa: kontakta oss på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). -## Do I have to index on Arbitrum before I transfer my stake? +### Kan jag överföra min delegation om jag använder en GRT vestingkontrakt/token lock wallet? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +Ja! Processen är något annorlunda eftersom vestingkontrakt inte kan vidarebefordra ETH som behövs för att betala för L2-gasen, så du måste sätta in det i förväg. Om ditt vestingkontrakt inte är helt mogen måste du också först initialisera ett motsvarande vestingkontrakt på L2 och kommer endast att kunna överföra delegationen till detta L2-vestingkontrakt. Användargränssnittet på Utforskaren kan vägleda dig genom denna process när du har anslutit till Utforskaren med hjälp av vestinglåsningsplånboken. -## Can Delegators move their delegation before I move my indexing stake? +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Finns det någon delegatsskatt? -## Delegation +Nej. Mottagna tokens på L2 delegeras till den angivna indexeraren på uppdrag av den angivna delegeraren utan att ta ut en delegeringsskatt. -## How do I transfer my delegation? +### Kommer mina orealiserade belöningar att överföras när jag överför min delegation? -To transfer your delegation, you will need to complete the following steps: +Ja! De enda belöningarna som inte kan överföras är de som är för öppna tilldelningar, eftersom de inte kommer att finnas förrän Indexern stänger tilldelningarna (vanligtvis var 28:e dag). 
Om du har delegerat under en tid är detta troligen bara en liten del av belöningarna. -1. Initiate delegation transfer on Ethereum mainnet +På smart kontraktsnivå är orealiserade belöningar redan en del av din delegationsbalans, så de kommer att överföras när du överför din delegation till L2.​ -2. Wait 20 minutes for confirmation +### Är det obligatoriskt att flytta delegationer till L2? Finns det en deadline? -3. Confirm delegation transfer on Arbitrum +Det är inte obligatoriskt att flytta delegationen till L2, men indexbelöningarna ökar på L2 enligt tidslinjen som beskrivs i [GIP-0052](https://forum.thegraph.com/t/gip-0052-tidslinje-och-krav-för-att-öka-belöningarna-på-l2/4193). Till slut, om rådet fortsätter att godkänna ökningarna, kommer alla belöningar att distribueras på L2 och det kommer inte att finnas några indexbelöningar för Indexers och Delegators på L1.​ -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Om jag delegerar till en Indexer som redan har överfört insats till L2, slutar jag att få belöningar på L1? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Många Indexers överför insatsen successivt, så Indexers på L1 kommer fortfarande att tjäna belöningar och avgifter på L1, som sedan delas med Delegators. När en Indexer har överfört all sin insats kommer de att sluta agera på L1, så Delegators kommer inte längre att få några belöningar om de inte överför till L2. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +Till slut, om rådet fortsätter att godkänna ökningarna av indexbelöningar på L2, kommer alla belöningar att distribueras på L2 och det kommer inte att finnas några indexbelöningar för Indexers och Delegators på L1.​ -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +### Jag ser ingen knapp för att överföra min delegation. Varför är det så? -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +Din Indexer har förmodligen inte använt L2-överföringsverktygen för att överföra insatsen ännu. -## Do Delegators have the option to delegate to another Indexer? +Om du kan kontakta Indexern kan du uppmana dem att använda L2-överföringsverktygen så att Delegators kan överföra delegationer till deras L2-Indexer-adress.​ -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### Min Indexer finns också på Arbitrum, men jag ser ingen knapp för att överföra delegationen i mitt profil. Varför är det så? 
-## What if I can't find the Indexer I'm delegating to on L2? +Det är möjligt att Indexern har etablerat verksamhet på L2, men ännu inte har använt L2-överföringsverktygen för att överföra insatsen. L1-smartkontrakten kommer därför inte att känna till Indexerns L2-adress. Om du kan kontakta Indexern kan du uppmana dem att använda överföringsverktyget så att Delegators kan överföra delegationer till deras L2-Indexer-adress.​ -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Kan jag överföra min delegation till L2 om jag har påbörjat avdelegeringsprocessen och ännu inte har tagit ut den? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Nej. Om din delegation tinar måste du vänta 28 dagar och ta ut den. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +De tokens som avdelegeras är "låsta" och kan därför inte överföras till L2. -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +## Kureringssignal -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Hur överför jag min kurering? -## Can my rewards be negatively impacted if I do not transfer my delegation? +För att överföra din kurering måste du följa följande steg: -It is anticipated that all network participation will move to Arbitrum One in the future. +1. Initiera signalöverföring på Ethereum huvudnät -## How long does it take to complete the transfer of my delegation to L2? +2. Ange en L2-kuratorsadress\* -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +3. Vänta 20 minuter på bekräftelse -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +\*Om det behövs - dvs. du använder en kontraktadress. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Hur vet jag om den subgraph jag har kuraterat har flyttats till L2? -## Is there any delegation tax? +När du tittar på sidan med detaljer om subgraphen kommer en banner att meddela dig att denna subgraph har flyttats. Du kan följa uppmaningen för att överföra din kurering. Du kan också hitta denna information på sidan med detaljer om subgraphen som har flyttat. -No. 
Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Vad händer om jag inte vill flytta min kurering till L2? -## Vesting Contract Transfer +När en subgraph avvecklas har du möjlighet att ta tillbaka din signal. På samma sätt, om en subgraph har flyttats till L2, kan du välja att ta tillbaka din signal på Ethereum huvudnät eller skicka signalen till L2. -## How do I transfer my vesting contract? +### Hur vet jag att min kurering har överförts framgångsrikt? -To transfer your vesting, you will need to complete the following steps: +Signaldetaljer kommer att vara tillgängliga via Explorer ungefär 20 minuter efter att L2-överföringsverktyget har initierats. -1. Initiate the vesting transfer on Ethereum mainnet +### Kan jag överföra min kurering på fler än en subgraph samtidigt? -2. Wait 20 minutes for confirmation +Det finns för närvarande ingen möjlighet till bulköverföring. -3. Confirm vesting transfer on Arbitrum +## Indexer-insats -## How do I transfer my vesting contract if I am only partially vested? +### Hur överför jag min insats till Arbitrum? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +> Ansvarsfriskrivning: Om du för närvarande avstår från någon del av ditt GRT hos din Indexer kommer du inte att kunna använda L2 överföringsverktyg. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +För att överföra din insats måste du följa dessa steg: -4. Withdraw any remaining ETH from the transfer tool contract +1. Initiera insatsöverföring på Ethereum huvudnät -## How do I transfer my vesting contract if I am fully vested? +2. Vänta 20 minuter på bekräftelse -For those that are fully vested, the process is similar: +3. Bekräfta insatsöverföring på Arbitrum. -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +\*Observera att du måste bekräfta överföringen inom 7 dagar annars kan din insats gå förlorad. I de flesta fall kommer detta steg att köras automatiskt, men en manuell bekräftelse kan behövas om det är en gasprisspik på Arbitrum. Om det uppstår problem under processen finns det resurser för att hjälpa: kontakta support på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). -2. Set your L2 address with a call to the transfer tool contract +### Kommer hela min insats att överföras? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +Du kan välja hur mycket av din insats du vill överföra. Om du väljer att överföra hela din insats på en gång måste du stänga alla öppna tilldelningar först. -4. Withdraw any remaining ETH from the transfer tool contract +Om du planerar att överföra delar av din insats över flera transaktioner måste du alltid ange samma förmånstagaradress. -## Can I transfer my vesting contract to Arbitrum? +Observera: Du måste uppfylla minimikraven för insats på L2 första gången du använder överföringsverktyget. Indexers måste skicka minst 100 000 GRT (när de kallar på denna funktion första gången). Om du lämnar en del av insatsen på L1 måste den också överstiga det minimibelopp på 100 000 GRT och vara tillräcklig (tillsammans med dina delegationer) för att täcka dina öppna tilldelningar. 
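To make the minimum-stake rule in the answer above concrete, here is a small illustrative sketch. It only mirrors the checks described in this FAQ (the 100k GRT minimum, and remaining stake plus delegation covering open allocations); it is not the actual protocol contract logic, and all names and numbers are hypothetical.

```typescript
// Illustrative sketch of the stake-transfer checks described above; not the actual
// protocol contract logic. All GRT amounts are whole GRT and all values are hypothetical.
const MINIMUM_INDEXER_STAKE = 100_000; // the 100k GRT minimum mentioned in the FAQ

interface IndexerPosition {
  l1Stake: number;               // GRT currently staked on Ethereum mainnet
  delegatedToIndexer: number;    // GRT delegated to this Indexer on L1
  openAllocations: number;       // GRT tied up in open allocations on L1
  hasTransferredBefore: boolean; // has the Indexer already used the transfer tool?
}

function canTransferStake(pos: IndexerPosition, amountToTransfer: number): boolean {
  // The first transfer must move at least the 100k GRT minimum to L2.
  if (!pos.hasTransferredBefore && amountToTransfer < MINIMUM_INDEXER_STAKE) {
    return false;
  }

  const remainingOnL1 = pos.l1Stake - amountToTransfer;

  // Transferring everything requires closing all open allocations first.
  if (remainingOnL1 === 0) {
    return pos.openAllocations === 0;
  }

  // Stake left behind must itself stay above the minimum...
  if (remainingOnL1 < MINIMUM_INDEXER_STAKE) {
    return false;
  }

  // ...and, together with delegation, must still cover the open allocations.
  return remainingOnL1 + pos.delegatedToIndexer >= pos.openAllocations;
}

// Hypothetical example: 300k staked, 100k delegated, 250k in open allocations.
const ok = canTransferStake(
  { l1Stake: 300_000, delegatedToIndexer: 100_000, openAllocations: 250_000, hasTransferredBefore: false },
  150_000,
);
console.log(ok); // true: 150k stays on L1 (at least 100k) and 150k + 100k covers the 250k allocations
```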
-You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +### Hur lång tid har jag att bekräfta min insatsöverföring till Arbitrum? -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +\*\*\* Du måste bekräfta din transaktion för att slutföra insatsöverföringen på Arbitrum. Detta steg måste slutföras inom 7 dagar, annars kan insatsen gå förlorad. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +### Vad händer om jag har öppna tilldelningar? -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Om du inte skickar hela din insats kommer L2-överföringsverktyget att validera att minst 100 000 GRT finns kvar på Ethereum huvudnät och att din återstående insats och delegation är tillräcklig för att täcka öppna tilldelningar. Du kan behöva stänga öppna tilldelningar om din GRT-balans inte täcker minimierna + öppna tilldelningar. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### Måste jag vänta 28 dagar innan jag kan avsluta min insats på Ethereum huvudnät innan jag överför? -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +Nej, du kan överföra din insats till L2 omedelbart, det är ingen nödvändighet att avsluta och vänta innan du använder överföringsverktyget. Väntetiden på 28 dagar gäller endast om du vill ta tillbaka insatsen till din plånbok, på Ethereum huvudnät eller L2. -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Hur lång tid tar det att överföra min insats? -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +Det tar ungefär 20 minuter för L2-överföringsverktyget att slutföra överföringen av din insats. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. 
+### Måste jag indexer på Arbitrum innan jag överför min insats? -## Can I specify a different beneficiary for my vesting contract on L2? +Du kan effektivt överföra din insats först innan du sätter upp indexering, men du kommer inte att kunna hämta några belöningar på L2 förrän du allokerar till subgrapher på L2, indexerar dem och presenterar POI. -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### Kan Delegators flytta sin delegation innan jag flyttar min indexinsats? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +Nej, för att Delegators ska kunna överföra sina delegerade GRT till Arbitrum måste Indexern de delegerar till vara aktiv på L2. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### Kan jag överföra min insats om jag använder en GRT-vestingkontrakt / tokenspärrplånbok? -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Ja! Processen är något annorlunda, eftersom vestingkontrakt inte kan vidarebefordra ETH som behövs för att betala för L2-gasen, så du måste sätta in det i förväg. Om ditt vestingkontrakt inte är fullt utdelat måste du också först initialisera ett motsvarande vestingkontrakt på L2 och kommer endast att kunna överföra insatsen till detta L2-vestingkontrakt. UI på Explorer kan vägleda dig genom denna process när du har anslutit till Explorer med hjälp av vestingplånboken. -This allows you to transfer your stake or delegation to any L2 address. +### Jag har redan en insats på L2. Måste jag fortfarande skicka 100k GRT när jag använder överföringsverktygen första gången? -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +​Ja. L1-smartkontrakten kommer inte att vara medvetna om din L2-insats, så de kommer att kräva att du överför minst 100k GRT när du överför första gången. ​ -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### Kan jag överföra min insats till L2 om jag är mitt i processen att avsätta GRT? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +​Nej. Om någon del av din insats är på väg att tina måste du vänta 28 dagar och ta tillbaka den innan du kan överföra insatsen. De tokens som är insatta är "låsta" och kommer att förhindra alla överföringar eller insatser till L2. -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +## Överföring av vestingkontrakt -2. Revoke protocol access to the vesting contract (needed for the next step) +### Hur överför jag mitt vestingkontrakt? -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +För att överföra ditt vestingkontrakt måste du följa dessa steg: -4. 
Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +1. Initiera överföringen av vesting på Ethereum huvudnät -5. Wait 20 minutes for confirmation +2. Vänta 20 minuter på bekräftelse -6. Confirm the balance transfer on L2 +3. Bekräfta överföringen av vesting på Arbitrum -\*If necessary - i.e. you are using a contract address. +### Hur överför jag mitt vestingkontrakt om jag bara är delvis utdelad? -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + -## Can I move my vesting contract back to L1? +1. Sätt in lite ETH i överföringsverktygskontraktet (UI kan hjälpa till att uppskatta en rimlig summa) -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +2. Skicka lite låst GRT genom överföringsverktygskontraktet till L2 för att initialisera låsningsmekanismen för L2. Detta kommer också att ange deras L2-förmånstagareadress. -## Why do I need to move my vesting contract to begin with? +3. Skicka deras insats/delegering till L2 genom "låsta" överföringsverktygsfunktioner i L1Staking-kontraktet. -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +4. Ta ut eventuell kvarvarande ETH från överföringsverktygskontraktet -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Hur överför jag mitt vestingkontrakt om jag är fullständigt utdelad? -This is not a possibility. You can move funds back to L1 and withdraw them there. + -## What if I don't want to move my vesting contract to L2? +För de som är fullständigt utdelade är processen liknande: -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +1. Sätt in lite ETH i överföringsverktygskontraktet (UI kan hjälpa till att uppskatta en rimlig summa) + +2. Ange din L2-adress med ett anrop till överföringsverktygskontraktet + +3. Skicka din insats/delegering till L2 genom "låsta" överföringsverktygsfunktioner i L1-stake-kontraktet. + +4. Ta ut eventuell kvarvarande ETH från överföringsverktygskontraktet + +### Kan jag överföra mitt vestingkontrakt till Arbitrum? + +Du kan överföra ditt vestingkontrakt GRT-saldo till ett vestingkontrakt i L2. Detta är en förutsättning för att överföra insats eller delegering från ditt vestingkontrakt till L2. Vestingkontraktet måste innehålla en icke-noll mängd GRT (du kan överföra en liten mängd som 1 GRT om det behövs). 
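Because the vesting contract must hold a nonzero GRT balance before the transfer tool can be used, it can be worth checking that balance up front. The sketch below is only an illustration using ethers v5 and a standard ERC-20 `balanceOf` call; the token and contract addresses and the RPC URL are placeholders, not values taken from this document.

```typescript
// Illustrative only: check that a vesting contract holds a nonzero GRT balance
// before starting the L2 transfer. Addresses and the RPC URL are placeholders.
import { Contract, providers, utils } from "ethers"; // ethers v5

const ERC20_ABI = ["function balanceOf(address owner) view returns (uint256)"];
const GRT_TOKEN = "0x...";        // GRT token address on Ethereum mainnet (placeholder)
const VESTING_CONTRACT = "0x..."; // your token lock / vesting contract (placeholder)

async function vestingContractHoldsGrt(): Promise<boolean> {
  const provider = new providers.JsonRpcProvider("https://eth.example/rpc"); // placeholder RPC
  const grt = new Contract(GRT_TOKEN, ERC20_ABI, provider);
  const balance = await grt.balanceOf(VESTING_CONTRACT); // BigNumber in ethers v5
  console.log(`Vesting contract holds ${utils.formatUnits(balance, 18)} GRT`);
  // If this is zero, send a small amount (e.g. 1 GRT) to the vesting contract first,
  // as suggested later in this FAQ, before using the transfer tool.
  return !balance.isZero();
}

vestingContractHoldsGrt().catch(console.error);
```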
+ +När du överför GRT från ditt L1-vestingkontrakt till L2 kan du välja beloppet att skicka och du kan göra detta så många gånger du vill. L2-vestingkontraktet initialiseras första gången du överför GRT. + +Överföringarna utförs med hjälp av ett överföringsverktyg som kommer att vara synligt på din Explorer-profil när du ansluter med vestingkontraktskontot. + +Observera att du inte kommer att kunna släppa/ta ut GRT från L2-vestingkontraktet förrän vid slutet av din utdelningstid när ditt kontrakt är fullständigt utdelat. Om du behöver släppa GRT före det kan du överföra GRT tillbaka till L1-vestingkontraktet med ett annat överföringsverktyg som är tillgängligt för detta ändamål. + +Om du inte har överfört något vestingkontraktssaldo till L2 och ditt vestingkontrakt är fullständigt utdelat bör du inte överföra ditt vestingkontrakt till L2. Istället kan du använda överföringsverktygen för att ange en L2-plånboksadress och direkt överföra din insats eller delegering till denna vanliga plånbok på L2. + +### Jag använder mitt vestingkontrakt för att satsa på huvudnätet. Kan jag överföra min insats till Arbitrum? + +Ja, men om ditt kontrakt ännu inte är fullständigt utdelat kan du bara överföra insatsen så att den ägs av ditt L2-vestingkontrakt. Du måste först initialisera detta L2-kontrakt genom att överföra ett visst GRT-saldo med hjälp av överföringsverktyget för vestingkontrakt på Explorer. Om ditt kontrakt är fullständigt utdelat kan du överföra din insats till valfri adress i L2, men du måste ställa in den i förväg och sätta in lite ETH för överföringsverktyget på L2 för att betala för L2-gasen. + +### Jag använder mitt vestingkontrakt för att delegera på huvudnätet. Kan jag överföra mina delegationer till Arbitrum? + +Ja, men om ditt kontrakt ännu inte är fullständigt utdelat kan du bara överföra delegationen så att den ägs av ditt L2-vestingkontrakt. Du måste först initialisera detta L2-kontrakt genom att överföra ett visst GRT-saldo med hjälp av överföringsverktyget för vestingkontrakt på Explorer. Om ditt kontrakt är fullständigt utdelat kan du överföra din delegation till valfri adress i L2, men du måste ställa in den i förväg och sätta in lite ETH för överföringsverktyget på L2 för att betala för L2-gasen. + +### Kan jag ange en annan förmånstagare för mitt vestingkontrakt på L2? + +Ja, första gången du överför ett saldo och konfigurerar ditt L2-vestingkontrakt kan du ange en L2-förmånstagare. Se till att denna förmånstagare är en plånbok som kan utföra transaktioner på Arbitrum One, dvs. den måste vara en EOA eller en multisig som är distribuerad till Arbitrum One. + +Om ditt kontrakt är fullständigt utdelat kommer du inte att konfigurera ett vestingkontrakt på L2; istället kommer du att ställa in en L2-plånboksadress och detta kommer att vara den mottagande plånboken för din insats eller delegering på Arbitrum. + +### Mitt kontrakt är fullständigt utdelat. Kan jag överföra min insats eller delegering till en annan adress som inte är ett L2-vestingkontrakt? + +Ja. Om du inte har överfört något saldo från vestingkontraktet till L2 och ditt vestingkontrakt är fullständigt utdelat bör du inte överföra ditt vestingkontrakt till L2. Istället kan du använda överföringsverktygen för att ange en L2-plånboksadress och direkt överföra din insats eller delegering till denna vanliga plånbok på L2. + +Detta gör att du kan överföra din insats eller delegering till valfri L2-adress. + +### Mitt vestingkontrakt är ännu inte fullständigt utdelat. Hur överför jag mitt vestingkontrakts saldo till L2?
+ +Dessa steg gäller endast om ditt kontrakt ännu inte är fullständigt utdelat, eller om du har använt denna process tidigare när ditt kontrakt ännu inte var fullständigt utdelat. + +För att överföra ditt vestingkontrakt till L2 kommer du att skicka ett visst GRT-saldo till L2 med hjälp av överföringsverktygen, vilket kommer att initialisera ditt L2-vestingkontrakt: + +1. Sätt in lite ETH i överföringsverktygskontraktet (detta kommer att användas för att betala för L2-gas) + +2. Återkalla protokollåtkomst till vestingkontraktet (nödvändigt för nästa steg) + +3. Ge protokollåtkomst till vestingkontraktet (möjliggör för ditt kontrakt att interagera med överföringsverktyget) + +4. Ange en L2-förmånstagaradress\* och initiera saldoöverföringen på Ethereum huvudnät + +5. Vänta 20 minuter på bekräftelse + +6. Bekräfta saldoöverföringen på L2 + +\*Om det behövs - dvs. du använder en kontraktadress. + +\*\*\*\* Du måste bekräfta din transaktion för att slutföra saldoöverföringen på Arbitrum. Detta steg måste slutföras inom 7 dagar, annars kan saldot gå förlorat. I de flesta fall kommer detta steg att köras automatiskt, men en manuell bekräftelse kan behövas om det finns en gasprisspik på Arbitrum. Om det uppstår några problem under denna process kommer det att finnas resurser som kan hjälpa till: kontakta supporten på support@thegraph.com eller på [Discord](https://discord.gg/graphprotocol). + +### Mitt vestingkontrakt visar 0 GRT så jag kan inte överföra det, varför är det så och hur åtgärdar jag det? + +För att initialisera ditt L2-vestingkontrakt måste du överföra en icke-noll mängd GRT till L2. Detta krävs av Arbitrum GRT-bryggan som används av L2 Transfer Tools. GRT måste komma från vestingkontraktets saldo, så det inkluderar inte stakade eller delegerade GRT. + +Om du har stakat eller delegerat all din GRT från vestingkontraktet kan du manuellt skicka en liten mängd som 1 GRT till vestingkontraktets adress från någon annanstans (t.ex. från en annan plånbok eller en börs). + +### Jag använder ett vestingkontrakt för att satsa eller delegera, men jag ser inte någon knapp för att överföra min insats eller delegering till L2, vad gör jag? + +Om ditt vestingkontrakt inte har avslutat utdelningen måste du först skapa ett L2-vestingkontrakt som kommer att ta emot din insats eller delegering på L2. Detta vestingkontrakt tillåter inte att tokens släpps på L2 förrän i slutet av utdelningstiden, men det låter dig överföra GRT tillbaka till L1-vestingkontraktet för att släppas där. + +När du är ansluten till vestingkontraktet på Explorer bör du se en knapp för att initialisera ditt L2-vestingkontrakt. Följ den processen först, så kommer du sedan att se knapparna för att överföra din insats eller delegering i din profil. + +### Om jag initierar mitt L2-vestingkontrakt, kommer detta också att överföra min delegering till L2 automatiskt? + +Nej, att initiera ditt L2-vestingkontrakt är en förutsättning för att överföra insats eller delegering från vestingkontraktet, men du måste fortfarande överföra dessa separat. + +Du kommer att se en banner på din profil som uppmanar dig att överföra din insats eller delegering efter att du har initierat ditt L2-vestingkontrakt. + +### Kan jag flytta mitt vestingkontrakt tillbaka till L1? + +Det finns ingen anledning att göra det eftersom ditt vestingkontrakt fortfarande finns i L1. När du använder överföringsverktygen skapar du bara ett nytt kontrakt i L2 som är anslutet till ditt L1-vestingkontrakt, och du kan skicka GRT fram och tillbaka mellan de två.
+ +### Varför måste jag flytta mitt vestingkontrakt överhuvudtaget? + +Du måste konfigurera ett L2-vestingkontrakt så att detta konto kan äga din insats eller delegering på L2. Annars skulle det inte finnas något sätt för dig att överföra insatsen/delegeringen till L2 utan att "fly" vestingkontraktet. + +### Vad händer om jag försöker ta ut mitt kontrakt när det bara är delvis utdelat? Är det möjligt? + +Detta är inte möjligt. Du kan flytta pengar tillbaka till L1 och ta ut dem där. + +### Vad händer om jag inte vill flytta mitt vestingkontrakt till L2? + +Du kan fortsätta satsa/delegera på L1. Över tid kan du överväga att flytta till L2 för att möjliggöra belöningar där när protokollet skalar på Arbitrum. Observera att dessa överföringsverktyg är för vestingkontrakt som får satsa och delegera i protokollet. Om ditt kontrakt inte tillåter satsning eller delegering eller om det är återkallbart finns det inget överföringsverktyg tillgängligt. Du kommer fortfarande att kunna ta ut din GRT från L1 när det är tillgängligt. diff --git a/website/pages/sv/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/sv/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..e0ce37c88e07 100644 --- a/website/pages/sv/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/sv/arbitrum/l2-transfer-tools-guide.mdx @@ -1,165 +1,165 @@ --- -title: L2 Transfer Tools Guide +title: L2 Guide för överföringsverktyg --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +The Graph har gjort det enkelt att flytta till L2 på Arbitrum One. För varje protokolldeltagare finns det en uppsättning L2 överföringsverktyg som gör överföringen till L2 sömlös för alla nätverksdeltagare. Dessa verktyg kräver att du följer en specifik uppsättning steg beroende på vad du överför. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Några vanliga frågor om dessa verktyg besvaras i [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). De vanliga frågorna innehåller djupgående förklaringar av hur du använder verktygen, hur de fungerar och saker att tänka på när du använder dem. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Så här överför du din subgraf till Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Fördelar med att överföra dina subgrafer -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +The Graphs community och kärnutvecklare har [förberett sig](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) för att flytta till Arbitrum under det senaste året. Arbitrum, en blockkedja av lager 2 eller "L2", ärver säkerheten från Ethereum men ger drastiskt lägre gasavgifter. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. 
By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +När du publicerar eller uppgraderar din subgraf till The Graph Network, interagerar du med smarta kontrakt på protokollet och detta kräver att du betalar för gas med ETH. Genom att flytta dina subgrafer till Arbitrum kommer alla framtida uppdateringar av din subgraf att kräva mycket lägre gasavgifter. De lägre avgifterna, och det faktum att curation bonding-kurvorna på L2 är platta, gör det också lättare för andra curatorer att kurera på din subgraf, vilket ökar belöningarna för Indexers på din subgraf. Denna miljö med lägre kostnader gör det också billigare för indexerare att indexera och betjäna din subgraf. Indexeringsbelöningar kommer att öka på Arbitrum och minska på Ethereums mainnet under de kommande månaderna, så fler och fler indexerare kommer att överföra sin andel och sätta upp sin verksamhet på L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Förstå vad som händer med signal, din L1 subgraf och frågewebbadresser -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Att överföra en subgraf till Arbitrum använder Arbitrum GRT-bryggan, som i sin tur använder den inhemska Arbitrum-bryggan, för att skicka subgrafen till L2. "Överföringen" kommer att fasa ut subgrafen på mainnet och skicka informationen för att återskapa subgrafen på L2 med hjälp av bryggan. Den kommer också att inkludera subgrafägarens signalerade GRT, som måste vara mer än noll för att bryggan ska acceptera överföringen. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +När du väljer att överföra subgrafen kommer detta att konvertera hela subgrafens kurationssignal till GRT. Detta motsvarar att "avskriva" subgrafen på mainnet. GRT som motsvarar din kuration kommer att skickas till L2 tillsammans med subgrafen, där de kommer att användas för att skapa signaler å dina vägnar. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Andra kuratorer kan välja om de vill ta tillbaka sin del av GRT eller också överföra den till L2 för att få en signal på samma subgraf. 
Om en subgrafägare inte överför sin subgraf till L2 och manuellt fasar ut den via ett kontraktsanrop, kommer Curatorer att meddelas och kommer att kunna dra tillbaka sin curation. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Så snart subgrafen har överförts, eftersom all kuration konverteras till GRT, kommer indexerare inte längre att få belöningar för att indexera subgrafen. Det kommer dock att finnas indexerare som kommer 1) att fortsätta visa överförda subgrafer i 24 timmar och 2) omedelbart börja indexera subgrafen på L2. Eftersom dessa indexerare redan har subgrafen indexerad, borde det inte finnas något behov av att vänta på att subgrafen ska synkroniseras, och det kommer att vara möjligt att fråga L2-subgrafen nästan omedelbart. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Förfrågningar till L2-subgrafen kommer att behöva göras till en annan URL (på `arbitrum-gateway.thegraph.com`), men L1-URL:n fortsätter att fungera i minst 48 timmar. Efter det kommer L1-gatewayen att vidarebefordra frågor till L2-gatewayen (under en tid), men detta kommer att lägga till latens så det rekommenderas att byta alla dina frågor till den nya URL:en så snart som möjligt. -## Choosing your L2 wallet +## Välja din L2 plånbok -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +När du publicerade din subgraf på mainnet använde du en ansluten plånbok för att skapa subgrafen, och denna plånbok äger NFT som representerar denna subgraf och låter dig publicera uppdateringar. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +När du överför subgrafen till Arbitrum kan du välja en annan plånbok som kommer att äga denna subgraf NFT på L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Om du använder en "vanlig" plånbok som MetaMask (ett externt ägt konto eller EOA, d.v.s. en plånbok som inte är ett smart kontrakt), så är detta valfritt och det rekommenderas att behålla samma ägaradress som i L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +Om du använder en smart kontraktsplånbok, som en multisig (t.ex. 
ett kassaskåp), är det obligatoriskt att välja en annan L2-plånboksadress, eftersom det är mest troligt att det här kontot bara finns på mainnet och att du inte kommer att kunna göra transaktioner på Arbitrum med denna plånbok. Om du vill fortsätta använda en smart kontraktsplånbok eller multisig, skapa en ny plånbok på Arbitrum och använd dess adress som L2-ägare till din subgraf. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Det är mycket viktigt att använda en plånboksadress som du kontrollerar, och som kan göra transaktioner på Arbitrum. Annars kommer subgrafen att gå förlorad och kan inte återställas.** -## Preparing for the transfer: bridging some ETH +## Förbereder för överföringen: överbrygga lite ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Att överföra subgrafen innebär att man skickar en transaktion genom bryggan och sedan utför en annan transaktion på Arbitrum. Den första transaktionen använder ETH på huvudnätet och inkluderar en del ETH för att betala för gas när meddelandet tas emot på L2. Men om denna gas är otillräcklig måste du göra om transaktionen och betala för gasen direkt på L2 (detta är "Steg 3: Bekräfta överföringen" nedan). Detta steg **måste utföras inom 7 dagar efter att överföringen påbörjats**. Dessutom kommer den andra transaktionen ("Steg 4: Avsluta överföringen på L2") att göras direkt på Arbitrum. Av dessa skäl behöver du lite ETH på en Arbitrum-plånbok. Om du använder ett multisig- eller smart kontraktskonto måste ETH: en finnas i den vanliga (EOA) plånboken som du använder för att utföra transaktionerna, inte på själva multisig plånboken. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. +Du kan köpa ETH på vissa börser och ta ut den direkt till Arbitrum, eller så kan du använda Arbitrum-bryggan för att skicka ETH från en mainnet-plånbok till L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Eftersom gasavgifterna på Arbitrum är lägre bör du bara behöva en liten summa. Det rekommenderas att du börjar vid en låg tröskel (0.t.ex. 01 ETH) för att din transaktion ska godkännas. 
-## Finding the subgraph Transfer Tool +## Hitta subgrafen Överföringsverktyg -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Du kan hitta L2 Överföringsverktyg när du tittar på din subgrafs sida på Subgraf Studio: -![transfer tool](/img/L2-transfer-tool1.png) +![Överföringsverktyg](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +Den är också tillgänglig på Explorer om du är ansluten till plånboken som äger en subgraf och på den subgrafens sida på Explorer: -![Transferring to L2](/img/transferToL2.png) +![Överför till L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +Genom att klicka på knappen Överför till L2 öppnas överföringsverktyget där du kan starta överföringsprocessen. -## Step 1: Starting the transfer +## Steg 1: Starta överföringen -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Innan du påbörjar överföringen måste du bestämma vilken adress som ska äga subgrafen på L2 (se "Välja din L2 plånbok" ovan), och det rekommenderas starkt att ha lite ETH för gas som redan är överbryggad på Arbitrum (se "Förbereda för överföringen: brygga" lite ETH" ovan). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Observera också att överföring av subgrafen kräver att en signal som inte är noll på subgrafen med samma konto som äger subgrafen; om du inte har signalerat på subgrafen måste du lägga till lite curation (att lägga till en liten mängd som 1 GRT skulle räcka). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +Efter att ha öppnat överföringsverktyget kommer du att kunna ange L2-plånboksadressen i fältet "Mottagande plånboksadress" - **se till att du har angett rätt adress här**. Om du klickar på Transfer Subgraph kommer du att uppmana dig att utföra transaktionen på din plånbok (observera att ett ETH-värde ingår för att betala för L2-gas); detta kommer att initiera överföringen och fasa ut din L1-subgraf (se "Förstå vad som händer med signal, din L1-subgraf och sökadresser" ovan för mer information om vad som händer bakom kulisserna). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. 
+Om du utför det här steget, **se till att du fortsätter tills du har slutfört steg 3 om mindre än 7 dagar, annars försvinner subgrafen och din signal-GRT.** Detta beror på hur L1-L2-meddelanden fungerar på Arbitrum: meddelanden som skickas genom bryggan är "omförsökbara biljetter" som måste utföras inom 7 dagar, och det första utförandet kan behöva ett nytt försök om det finns toppar i gaspriset på Arbitrum. -![Start the trnasfer to L2](/img/startTransferL2.png) +![Starta överföringen till L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Steg 2: Väntar på att subgrafen ska komma till L2 -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +När du har startat överföringen måste meddelandet som skickar din L1 subgraf till L2 spridas genom Arbitrum bryggan. Detta tar cirka 20 minuter (bryggan väntar på att huvudnäts blocket som innehåller transaktionen är "säkert" från potentiella kedjereorganisationer). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +När denna väntetid är över kommer Arbitrum att försöka utföra överföringen automatiskt på L2 kontrakten. -![Wait screen](/img/screenshotOfWaitScreenL2.png) +![Vänteskärm](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## Steg 3: Bekräfta överföringen -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +I de flesta fall kommer detta steg att utföras automatiskt eftersom L2-gasen som ingår i steg 1 borde vara tillräcklig för att utföra transaktionen som tar emot subgrafen på Arbitrum-kontrakten. I vissa fall är det dock möjligt att en topp i gaspriserna på Arbitrum gör att denna autoexekvering misslyckas. I det här fallet kommer "biljetten" som skickar din subgraf till L2 att vara vilande och kräver ett nytt försök inom 7 dagar. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Om så är fallet måste du ansluta med en L2 plånbok som har lite ETH på Arbitrum, byta ditt plånboksnätverk till Arbitrum och klicka på "Bekräfta överföring" för att försöka genomföra transaktionen igen. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![Bekräfta överföringen till L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## Steg 4: Avsluta överföringen på L2 -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +Vid det här laget har din subgraf och GRT tagits emot på Arbitrum, men subgrafen är inte publicerad ännu. 
Du måste ansluta med L2 plånboken som du valde som mottagande plånbok, byta ditt plånboksnätverk till Arbitrum och klicka på "Publicera subgraf" -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![Publicera subgrafen](/img/publishSubgraphL2TransferTools.png) -![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) +![Vänta på att subgrafen ska publiceras](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Detta kommer att publicera subgrafen så att indexerare som är verksamma på Arbitrum kan börja servera den. Det kommer också att skapa kurations signaler med hjälp av GRT som överfördes från L1. -## Step 5: Updating the query URL +## Steg 5: Uppdatera sökfrågans URL -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Din subgraf har överförts till Arbitrum! För att fråga subgrafen kommer den nya webbadressen att vara: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Observera att subgraf-ID: t på Arbitrum kommer att vara ett annat än det du hade på mainnet, men du kan alltid hitta det på Explorer eller Studio. Som nämnts ovan (se "Förstå vad som händer med signal, dina L1-subgraf- och sökwebbadresser") kommer den gamla L1-URL: n att stödjas under en kort stund, men du bör byta dina frågor till den nya adressen så snart subgrafen har synkroniserats på L2. -## How to transfer your curation to Arbitrum (L2) +## Så här överför du din kuration till Arbitrum (L2) -## Understanding what happens to curation on subgraph transfers to L2 +## Förstå vad som händer med curation vid subgraf överföringar till L2 -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +När ägaren av en subgraf överför en subgraf till Arbitrum, omvandlas all subgrafs signal till GRT samtidigt. Detta gäller för "auto-migrerad" signal, det vill säga signal som inte är specifik för en subgraf version eller utbyggnad men som följer den senaste versionen av en subgraf. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +Denna omvandling från signal till GRT är densamma som vad som skulle hända om subgrafägaren avskaffade subgrafen i L1. 
När subgrafen föråldras eller överförs, "bränns" all curation-signal samtidigt (med hjälp av curation bonding-kurvan) och den resulterande GRT hålls av det smarta GNS-kontraktet (det är kontraktet som hanterar subgrafuppgraderingar och automatiskt migrerad signal). Varje kurator på den subgrafen har därför ett anspråk på den GRT som är proportionell mot antalet andelar de hade i subgrafen. Ett förenklat räkneexempel följer nedan. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +En bråkdel av dessa GRT som motsvarar subgrafägaren skickas till L2 tillsammans med subgrafen. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +Vid denna tidpunkt kommer den kurerade GRT inte att samla på sig några fler frågeavgifter, så kuratorer kan välja att dra tillbaka sin GRT eller överföra den till samma subgraf på L2, där den kan användas för att skapa en ny kurationssignal. Det är ingen brådska att göra detta eftersom GRT kan behållas på obestämd tid och alla får ett belopp som är proportionellt mot sina andelar, oavsett när de gör det. -## Choosing your L2 wallet +## Välja din L2 plånbok -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +Om du bestämmer dig för att överföra din kurerade GRT till L2 kan du välja en annan plånbok som kommer att äga kurationssignalen på L2. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +Om du använder en "vanlig" plånbok som Metamask (ett externt ägt konto eller EOA, d.v.s. en plånbok som inte är ett smart kontrakt), så är detta valfritt och det rekommenderas att behålla samma Curator-adress som i L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +Om du använder en smart kontraktsplånbok, som en multisig (t.ex. ett kassaskåp), är det obligatoriskt att välja en annan L2-plånboksadress, eftersom det är mest troligt att det här kontot bara finns på mainnet och att du inte kommer att kunna göra transaktioner på Arbitrum med denna plånbok. Om du vill fortsätta använda en smart kontraktsplånbok eller multisig, skapa en ny plånbok på Arbitrum och använd dess adress som L2-mottagande plånboksadress.
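The simplified worked example referenced earlier in this section: a rough sketch of the proportional claim Curators have on the GRT that results from burning the signal. All numbers are invented for illustration; the actual amount depends on the bonding curve at the time the signal is burned.

```typescript
// Hypothetical numbers illustrating the proportional claim described above.
const grtFromBurnedSignal = 10_000; // GRT held by the GNS contract after the signal is burned
const totalShares = 1_000;          // total curation shares that existed on the subgraph
const curatorShares = 200;          // shares held by one Curator

const curatorClaim = grtFromBurnedSignal * (curatorShares / totalShares);
console.log(curatorClaim); // 2000 GRT, proportional to the Curator's share of the signal
```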
-**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**Det är mycket viktigt att använda en plånboksadress som du kontrollerar och som kan göra transaktioner på Arbitrum, eftersom annars kurationen går förlorad och inte kan återställas.** -## Sending curation to L2: Step 1 +## Skicka kuration till L2: Steg 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +Innan du påbörjar överföringen måste du bestämma vilken adress som ska äga kurationen på L2 (se "Välja din L2-plånbok" ovan), och det rekommenderas att ha en del ETH för gas som redan är överbryggad på Arbitrum ifall du behöver försöka utföra exekveringen av meddelande på L2. Du kan köpa ETH på vissa börser och ta ut den direkt till Arbitrum, eller så kan du använda Arbitrum-bryggan för att skicka ETH från en mainnet-plånbok till L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - eftersom gasavgifterna på Arbitrum är så låga ska du bara behöva en liten summa, t.ex. 0,01 ETH kommer förmodligen att vara mer än tillräckligt. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +Om en subgraf som du kurerar till har överförts till L2 kommer du att se ett meddelande i Explorer som talar om att du kurerar till en överförd subgraf. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +När du tittar på subgraf sidan kan du välja att dra tillbaka eller överföra kurationen. Genom att klicka på "Överför signal till Arbitrum" öppnas överföringsverktyget. -![Transfer signal](/img/transferSignalL2TransferTools.png) +![Överföringssignal](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. +Efter att ha öppnat överföringsverktyget kan du bli ombedd att lägga till lite ETH i din plånbok om du inte har någon. Då kommer du att kunna ange L2 plånboksadressen i fältet "Mottagande plånboksadress" - **se till att du har angett rätt adress här**. Om du klickar på Överför signal kommer du att uppmana dig att utföra transaktionen på din plånbok (observera att ett ETH-värde ingår för att betala för L2-gas); detta kommer att initiera överföringen. 
-If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Om du utför det här steget, **se till att du fortsätter tills du har slutfört steg 3 om mindre än 7 dagar, annars försvinner din signal-GRT.** Detta beror på hur L1-L2-meddelanden fungerar på Arbitrum: meddelanden som skickas via bron är "återförsökbara biljetter" som måste utföras inom 7 dagar, och det första utförandet kan behöva ett nytt försök om det finns toppar i gaspriset på Arbitrum. -## Sending curation to L2: step 2 +## Skickar kuration till L2: steg 2 -Starting the transfer: +Starta överföringen: -![Send signal to L2](/img/sendingCurationToL2Step2First.png) +![Skicka signal till L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +När du har startat överföringen måste meddelandet som skickar din L1 kuration till L2 spridas genom Arbitrum bryggan. Detta tar cirka 20 minuter (bryggan väntar på att huvudnäts blocket som innehåller transaktionen är "säkert" från potentiella kedjereorganiseringar). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +När denna väntetid är över kommer Arbitrum att försöka utföra överföringen automatiskt på L2 kontrakten. -![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) +![Sänder kurations signal till L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## Skickar kuration till L2: steg 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +I de flesta fall kommer detta steg att utföras automatiskt eftersom L2-gasen som ingår i steg 1 borde vara tillräcklig för att utföra transaktionen som får kurationen på Arbitrum-kontrakten. I vissa fall är det dock möjligt att en topp i gaspriserna på Arbitrum gör att denna autoexekvering misslyckas. I det här fallet kommer "biljetten" som skickar din kuration till L2 att vara vilande och kräver ett nytt försök inom 7 dagar. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Om så är fallet måste du ansluta med en L2 plånbok som har lite ETH på Arbitrum, byta ditt plånboksnätverk till Arbitrum och klicka på "Bekräfta överföring" för att försöka genomföra transaktionen igen. 
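For readers who want to see what the "Confirm Transfer" retry does behind the scenes, the sketch below shows how a pending retryable ticket could be checked and redeemed programmatically. This is a minimal, illustrative sketch only: it assumes the `@arbitrum/sdk` and `ethers` v5 packages, hypothetical environment variables for the RPC endpoints and the L2 key, and the L1 transaction hash from step 1; the transfer tool UI normally performs this step for you.

```typescript
// Minimal sketch only — assumes `ethers` v5 and `@arbitrum/sdk` are installed, and that the
// environment variables below point at real RPC endpoints and a funded Arbitrum key.
import { providers, Wallet } from 'ethers'
import { L1TransactionReceipt, L1ToL2MessageStatus } from '@arbitrum/sdk'

const l1Provider = new providers.JsonRpcProvider(process.env.L1_RPC_URL)
const l2Provider = new providers.JsonRpcProvider(process.env.ARBITRUM_RPC_URL)
const l2Wallet = new Wallet(process.env.L2_PRIVATE_KEY!, l2Provider)

// l1TxHash is the hash of the step-1 transaction that created the retryable ticket.
async function redeemIfNeeded(l1TxHash: string): Promise<void> {
  const receipt = await l1Provider.getTransactionReceipt(l1TxHash)
  const l1Receipt = new L1TransactionReceipt(receipt)

  // A transfer normally produces a single L1 -> L2 message.
  const [message] = await l1Receipt.getL1ToL2Messages(l2Wallet)
  const status = await message.status()

  if (status === L1ToL2MessageStatus.REDEEMED) {
    console.log('Ticket was already executed on L2 — nothing to do.')
  } else if (status === L1ToL2MessageStatus.FUNDS_DEPOSITED_ON_L2) {
    // Auto-execution failed (for example due to an L2 gas spike); redeem within the 7-day window.
    const redeemTx = await message.redeem()
    await redeemTx.wait()
    console.log('Ticket redeemed on L2.')
  } else {
    console.log(`Unexpected ticket status: ${L1ToL2MessageStatus[status]}`)
  }
}
```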
-![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) +![Skicka signal till L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## Dra tillbaka din kuration på L1 -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +Om du föredrar att inte skicka din GRT till L2, eller om du hellre vill överbrygga GRT manuellt, kan du ta tillbaka din kurerade GRT på L1. På bannern på subgrafsidan väljer du "Ta tillbaka signal" och bekräftar transaktionen; GRT kommer att skickas till din kuratorsadress. diff --git a/website/pages/sv/billing.mdx b/website/pages/sv/billing.mdx index 3c21e5de1cdc..39b6d6ad2cda 100644 --- a/website/pages/sv/billing.mdx +++ b/website/pages/sv/billing.mdx @@ -1,158 +1,208 @@ --- -title: Billing +title: Fakturering --- -> Invoices are generated on a weekly basis. +> Fakturor genereras veckovis. -There are two options for paying for your query fees: +Det finns två alternativ för att betala dina frågeavgifter: -- [Paying with fiat currency with Banxa](#billing-with-banxa) -- [Paying with crypto wallet](#billing-on-arbitrum) +- [Betala med fiatvaluta med Banxa](#billing-with-banxa) +- [Betala med krypto-plånbok](#billing-on-arbitrum) -## Billing with Banxa +## Betala med Banxa -Banxa enables you to bypass the need for an exchange and pay for your query fees using the fiat currency of your choice. The fiat currency will be converted to GRT, added to your account balance on the billing contract, and used to pay for queries associated with your API keys. +Banxa gör det möjligt för dig att kringgå behovet av en börs och betala dina frågeavgifter med den fiatvaluta du väljer. Fiatvalutan kommer att konverteras till GRT, läggas till ditt kontosaldo på faktureringskontraktet och användas för att betala för frågor som är kopplade till dina API-nycklar. -There may be KYC requirements depending on the regulations in your country. For more information about KYC, please visit [Banxa's FAQ page](https://docs.banxa.com/docs/faqs). +Beroende på reglerna i ditt land kan det finnas krav på KYC (känn din kund). För mer information om KYC, besök [Banxas FAQ-sida](https://docs.banxa.com/docs/faqs). -You can learn more about Banxa by reading their [documentation](https://docs.banxa.com/docs). +Du kan lära dig mer om Banxa genom att läsa deras [dokumentation](https://docs.banxa.com/docs). -### Paying for query fees with Banxa +### Betala för frågeavgifter med Banxa -1. Select “Pay with Card” option in [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). -2. Enter the amount of GRT to be added to your account balance. -3. Click the 'Continue with Banxa' button. -4. Enter necessary banking information on Banxa including payment method & fiat currency of choice. -5. Finish the transaction. +1. Välj alternativet "Betala med kort" i [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). +2. Ange beloppet av GRT som ska läggas till ditt kontosaldo. +3. Klicka på knappen 'Fortsätt med Banxa'. +4. Ange nödvändig bankinformation på Banxa, inklusive betalningsmetod & önskad fiatvaluta. +5. Slutför transaktionen. -It may take up to 10 minutes to complete the transaction. Once the transaction is confirmed, the purchased GRT will automatically be added to your account balance on Arbitrum.
+Det kan ta upp till 10 minuter att slutföra transaktionen. När transaktionen är bekräftad kommer de köpta GRT automatiskt att läggas till ditt kontosaldo på Arbitrum. -## Billing on Arbitrum +## Betala på Arbitrum -While The Graph protocol operates on Ethereum Mainnet, [the billing contract](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) lives on the [Arbitrum](https://arbitrum.io/) network to reduce transaction times and cost. You'll be required to pay the query fees generated from your API keys. Using the billing contract, you'll be able to: +Medan The Graf-protokollet verkar på Ethereum Mainnet, [faktureringskontraktet](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) finns på [Arbitrum](https://arbitrum.io/) nätverket för att minska transaktionstider och kostnader. Du kommer att behöva betala de frågeavgifter som genereras från dina API-nycklar. Genom att använda faktureringskontraktet kommer du att kunna: -- Add and withdraw GRT from your account balance. -- Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. -- Automatically pay invoices based on query fees generated, as long as there is enough GRT in your account balance. +- Lägga till och ta ut GRT från ditt kontosaldo. +- Hålla koll på dina saldon baserat på hur mycket GRT du har lagt till på ditt kontosaldo, hur mycket du har tagit bort och dina fakturor. +- Automatiskt betala fakturor baserat på genererade frågeavgifter, så länge det finns tillräckligt med GRT på ditt kontosaldo. -### Adding GRT using a crypto wallet +### Lägga till GRT med en krypto-plånbok -> This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). + -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +> Detta avsnitt är skrivet under förutsättning att du redan har GRT i din krypto-plånbok och att du är på Ethereum Mainnet. Om du inte har GRT kan du lära dig hur du får GRT [här](#getting-grt). -2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". +För en videogenomgång av att lägga till GRT på ditt faktureringskonto med en krypto-plånbok, titta på den här [videon](https://youtu.be/4Bw2sh0FxCg). -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. +1. Gå till [Subgraf Studio Billing-sidan](https://thegraph.com/studio/billing/). -4. Enter the amount of GRT you want to add to your account balance. You can also select the maximum amount of GRT you want to add to your account balance by clicking on the "Max" button. +2. Klicka på knappen "Anslut plånbok" längst upp till höger på sidan. Du kommer att omdirigeras till sidan för plånboksval. Välj din plånbok och klicka på "Anslut". -5. Click 'Allow GRT Access' to allow the Subgraph Studio to access your GRT. Sign the associated transaction in your wallet. This will not cost any gas. +3. Klicka på knappen 'Lägg till GRT' i mitten av sidan. En sidopanel kommer att visas. -6. Click 'Add GRT to account balance' to add the GRT to your account balance. Sign the associated transaction in your wallet. This will cost gas. +4. Ange beloppet av GRT du vill lägga till på ditt kontosaldo. 
Du kan också välja det maximala beloppet av GRT du vill lägga till på ditt kontosaldo genom att klicka på knappen "Max". -7. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +5. Klicka på 'Tillåt GRT-åtkomst' för att tillåta Subgraf Studio att komma åt ditt GRT. Signera den associerade transaktionen i din plånbok. Detta kommer inte att kosta någon gas. -### Withdrawing GRT using a crypto wallet +6. Klicka på 'Lägg till GRT på kontosaldo' för att lägga till GRT på ditt kontosaldo. Signera den associerade transaktionen i din plånbok. Detta kommer att kosta gas. -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on the Arbitrum network. +7. När transaktionen är bekräftad kommer du att se GRT som har lagts till på ditt kontosaldo inom en timme. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +### Ta ut GRT med en krypto-plånbok -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +> Detta avsnitt är skrivet under förutsättning att du har satt in GRT på ditt kontosaldo på [Subgraf Studio](https://thegraph.com/studio/billing/) och att du är på Arbitrum-nätverket. -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +1. Gå till [Subgraph Studio Billing-sidan](https://thegraph.com/studio/billing/). -4. Enter the amount of GRT you would like to withdraw. +2. Klicka på knappen "Anslut plånbok" längst upp till höger på sidan. Välj din plånbok och klicka på "Anslut". -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. +3. Klicka på rullgardinsmenyn bredvid knappen 'Lägg till GRT' i mitten av sidan. Välj att ta ut GRT. En sidopanel kommer att visas. -6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. +4. Ange beloppet av GRT du vill ta ut. -### Adding GRT using a multisig wallet +5. Klicka på 'Ta ut GRT' för att ta ut GRT från ditt kontosaldo. Signera den associerade transaktionen i din plånbok. Detta kommer att kosta gas. GRT kommer att skickas till din Arbitrum-plånbok. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +6. När transaktionen är bekräftad kommer du att se att GRT har tagits ut från ditt kontosaldo på din Arbitrum-plånbok. -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. +### Lägga till GRT med en multisig-plånbok -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. + -4. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +1. Gå till [Subgraf Studios faktureringsida](https://thegraph.com/studio/billing/). -### Withdrawing GRT using a multisig wallet +2. Klicka på knappen "Anslut plånbok" längst upp till höger på sidan. Välj din plånbok och klicka på "Anslut". Om du använder [Gnosis-Safe](https://gnosis-safe.io/), kan du ansluta din multisig såväl som din signeringsplånbok. 
Signera sedan det associerade meddelandet. Detta kommer inte att kosta någon gas. -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on Ethereum mainnet. +3. Klicka på knappen 'Lägg till GRT' i mitten av sidan. En sidopanel kommer att visas. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +4. När transaktionen är bekräftad kommer du att se GRT som har lagts till på ditt kontosaldo inom en timme. -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +### Ta ut GRT med en multisig-plånbok -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +> Detta avsnitt är skrivet under förutsättning att du har satt in GRT på ditt kontosaldo på [Subgraf Studio](https://thegraph.com/studio/billing/) och att du är på Ethereum Mainnet. -4. Enter the amount of GRT you would like to withdraw. Specify the receiving wallet which will receive the GRT from this transaction. The GRT will be sent to the receiving wallet on Arbitrum. +1. Gå till [Subgraph Studios faktureringsida](https://thegraph.com/studio/billing/). -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. +2. Klicka på knappen "Anslut plånbok" längst upp till höger på sidan. Välj din plånbok och klicka på "Anslut". -6. Once the transaction is confirmed, you'll see the GRT added to your Arbitrum wallet within an hour. +3. Klicka på rullgardinsmenyn bredvid knappen 'Lägg till GRT' i mitten av sidan. Välj att ta ut GRT. En sidopanel kommer att visas. -## Getting GRT +4. Ange beloppet av GRT du vill ta ut. Ange den mottagande plånbok som kommer att ta emot GRT från denna transaktion. GRT kommer att skickas till den mottagande plånboken på Arbitrum. -This section will show you how to get GRT to pay for query fees. +5. Klicka på 'Ta ut GRT' för att ta ut GRT från ditt kontosaldo. Signera den associerade transaktionen i din plånbok. Detta kommer att kosta gas. + +6. När transaktionen är bekräftad kommer du att se GRT som har lagts till i din Arbitrum-plånbok inom en timme. + +## Att få GRT + +Detta avsnitt kommer att visa dig hur du får GRT för att betala frågeavgifter. ### Coinbase -This will be a step by step guide for purchasing GRT on Coinbase. +Detta kommer att vara en stegvis guide för att köpa GRT på Coinbase. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select GRT. -5. Select the payment method. Select your preferred payment method. -6. Select the amount of GRT you want to purchase. -7. Review your purchase. Review your purchase and click "Buy GRT". -8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. -9. You can transfer the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - To transfer the GRT to your crypto wallet, click on the "Accounts" button on the top right of the page. 
- - Click on the "Send" button next to the GRT account. - - Enter the amount of GRT you want to send and the wallet address you want to send it to. - - Click "Continue" and confirm your transaction. -Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a crypto wallet. +1. Gå till [Coinbase](https://www.coinbase.com/) och skapa ett konto. +2. När du har skapat ett konto måste du verifiera din identitet genom en process som kallas KYC (Känn Din Kund). Detta är en standardprocedur för alla centraliserade eller förvarande kryptobörser. +3. När du har verifierat din identitet kan du köpa GRT. Du kan göra detta genom att klicka på knappen "Köp/Sälj" längst upp till höger på sidan. +4. Välj den valuta du vill köpa. Välj GRT. +5. Välj betalningsmetod. Välj din föredragna betalningsmetod. +6. Välj beloppet av GRT du vill köpa. +7. Granska ditt köp och klicka på "Köp GRT". +8. Bekräfta ditt köp och du kommer att ha köpt GRT framgångsrikt. +9. Du kan överföra GRT från ditt konto till din krypto-plånbok, som [MetaMask](https://metamask.io/). + - För att överföra GRT till din krypto-plånbok, klicka på knappen "Konton" längst upp till höger på sidan. + - Klicka på "Skicka" bredvid GRT-kontot. + - Ange beloppet av GRT du vill skicka och plånboksadressen du vill skicka det till. + - Klicka på "Fortsätt" och bekräfta din transaktion. Observera att Coinbase vid större köpbelopp kan kräva att du väntar 7-10 dagar innan du kan överföra hela beloppet till en krypto-plånbok. -You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +Du kan lära dig mer om att få GRT på Coinbase [här](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -This will be a step by step guide for purchasing GRT on Binance. +Detta kommer att vara en stegvis guide för att köpa GRT på Binance. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. -4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. -5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. -6. Select the amount of GRT you want to purchase. -7. Review your purchase and click "Buy GRT". -8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. -9. You can withdraw the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your crypto wallet, add your crypto wallet's address to the withdrawel whitelist. - - Click on the "wallet" button, click withdraw, and select GRT. - - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. - - Click "Continue" and confirm your transaction. +1. Gå till [Binance](https://www.binance.com/en) och skapa ett konto. +2. 
När du har skapat ett konto måste du verifiera din identitet genom en process som kallas KYC (Känn Din Kund). Detta är en standardprocedur för alla centraliserade eller förvarande kryptobörser. +3. När du har verifierat din identitet kan du köpa GRT. Du kan göra detta genom att klicka på knappen "Köp nu" på startsidan. +4. Du kommer att tas till en sida där du kan välja den valuta du vill köpa. Välj GRT. +5. Välj din föredragna betalningsmetod. Du kommer att kunna betala med olika fiatvalutor som euro, amerikanska dollar och mer. +6. Välj beloppet av GRT du vill köpa. +7. Granska ditt köp och klicka på "Köp GRT". +8. Bekräfta ditt köp och du kommer att kunna se din GRT i din Binance Spot Wallet. +9. Du kan ta ut GRT från ditt konto till din krypto-plånbok, som [MetaMask](https://metamask.io/). + - [För att ta ut](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) GRT till din krypto-plånbok, lägg till din krypto-plånboks adress till uttagsvitslistan. + - Klicka på knappen "Plånbok", klicka på "Ta ut" och välj GRT. + - Ange beloppet av GRT du vill skicka och den vitlistade plånboksadressen du vill skicka det till. + - Klicka på "Fortsätt" och bekräfta din transaktion. -You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Du kan lära dig mer om att få GRT på Binance [här](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -This is how you can purchase GRT on Uniswap. +Detta är hur du kan köpa GRT på Uniswap. + +1. Gå till [Uniswap](https://app.uniswap.org/#/swap) och anslut din plånbok. +2. Välj den token du vill byta från. Välj ETH. +3. Välj den token du vill byta till. Välj GRT. + - Se till att du byter till rätt token. GRT-smartkontraktsadressen är: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +4. Ange det belopp av ETH du vill byta. +5. Klicka på "Byt". +6. Bekräfta transaktionen i din plånbok och vänta på att transaktionen behandlas. + +Du kan lära dig mer om att få GRT på Uniswap [här](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Få Ethereum + +Detta avsnitt kommer att visa dig hur du får Ethereum (ETH) för att betala transaktionsavgifter eller gas. ETH är nödvändigt för att utföra operationer på Ethereum-nätverket, såsom överföring av tokens eller interaktion med smarta kontrakt. + +### Coinbase + +Detta kommer att vara en stegvis guide för att köpa ETH på Coinbase. + +1. Gå till [Coinbase](https://www.coinbase.com/) och skapa ett konto. +2. När du har skapat ett konto, verifiera din identitet genom en process som kallas KYC (Känn Din Kund). Detta är en standardprocedur för alla centraliserade eller förvarande kryptobörser. +3. När du har verifierat din identitet kan du köpa ETH genom att klicka på knappen "Köp/Sälj" längst upp till höger på sidan. +4. Välj den valuta du vill köpa. Välj ETH. +5. Välj din föredragna betalningsmetod. +6. Ange det belopp av ETH du vill köpa. +7. Granska ditt köp och klicka på "Köp ETH". +8. Bekräfta ditt köp och du kommer att ha köpt ETH framgångsrikt. +9. Du kan överföra ETH från ditt Coinbase-konto till din krypto-plånbok, som [MetaMask](https://metamask.io/). + - För att överföra ETH till din krypto-plånbok, klicka på knappen "Konton" längst upp till höger på sidan. + - Klicka på knappen "Skicka" bredvid ETH-kontot. 
+ - Ange det belopp av ETH du vill skicka och plånboksadressen du vill skicka det till. + - Klicka på "Fortsätt" och bekräfta din transaktion. + +Du kan lära dig mer om att få ETH på Coinbase [här](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +Detta kommer att vara en stegvis guide för att köpa ETH på Binance. -1. Go to [Uniswap](https://app.uniswap.org/#/swap) and connect your wallet. -2. Select the token you want to swap from. Select ETH. -3. Select the token you want to swap to. Select GRT. - - Make sure you're swapping for the correct token. The GRT smart contract address is: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` -4. Enter the amount of ETH you want to swap. -5. Click "Swap". -6. Confirm the transaction in your wallet and you wait for the transaction to process. +1. Gå till [Binance](https://www.binance.com/en) och skapa ett konto. +2. När du har skapat ett konto, verifiera din identitet genom en process som kallas KYC (Känn Din Kund). Detta är en standardprocedur för alla centraliserade eller förvarande kryptobörser. +3. När du har verifierat din identitet kan du köpa ETH genom att klicka på knappen "Köp nu" på startsidan. +4. Välj den valuta du vill köpa. Välj ETH. +5. Välj din föredragna betalningsmetod. +6. Ange det belopp av ETH du vill köpa. +7. Granska ditt köp och klicka på "Köp ETH". +8. Bekräfta ditt köp och du kommer att se din ETH i din Binance Spot Wallet. +9. Du kan ta ut ETH från ditt konto till din krypto-plånbok, som [MetaMask](https://metamask.io/). + - För att ta ut ETH till din krypto-plånbok, lägg till din krypto-plånboks adress i uttagsvitslistan. + - Klicka på knappen "plånbok", klicka på "Ta ut" och välj ETH. + - Ange det belopp av ETH du vill skicka och den vitlistade plånboksadressen du vill skicka det till. + - Klicka på "Fortsätt" och bekräfta din transaktion. -You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +Du kan lära dig mer om att få ETH på Binance [här](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). -## Arbitrum Bridge +## Arbitrum-bron -The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Faktureringskontraktet är endast utformat för att överföra GRT från Ethereum-mainnet till Arbitrum-nätverket. Om du vill överföra din GRT från Arbitrum tillbaka till Ethereum-mainnet måste du använda [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/sv/chain-integration-overview.mdx b/website/pages/sv/chain-integration-overview.mdx new file mode 100644 index 000000000000..3511f1e5a650 --- /dev/null +++ b/website/pages/sv/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Översikt över Kedjeintegrationsprocessen +--- + +En transparent och styrbaserad integrationsprocess utformades för blockchain-team som söker [integration med The Graf-protokollet](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). Det är en process i 3 faser, sammanfattad nedan. + +## Fas 1. Teknisk Integration + +- Team arbetar med en Graph Node-integration och Firehose för icke-EVM-baserade kedjor. [Här är hur](/new-chain-integration/). 
+- Team startar protokollintegrationsprocessen genom att skapa en Forumtråd [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (Ny Datakällor underkategori under Governance & GIPs). Att använda standardforummallen är obligatoriskt. + +## Fas 2. Integrationsvalidering + +- Team samarbetar med kärnutvecklare, Graph Foundation och operatörer av GUI:er och nätverksportar, såsom [Subgraf Studio](https://thegraph.com/studio/), för att säkerställa en smidig integrationsprocess. Detta innebär att tillhandahålla nödvändig backend-infrastruktur, såsom den integrerande kedjans JSON RPC eller Firehose-endpoints. Team som vill undvika självhostning av sådan infrastruktur kan dra nytta av The Graphs gemenskap av nodoperatörer (Indexers) för att göra det, vilket Stiftelsen kan hjälpa till med. +- Graf Indexers testar integrationen på The Graphs testnät. +- Kärnutvecklare och Indexers övervakar stabilitet, prestanda och datadeterminism. + +## Fas 3. Mainnet-integration + +- Team föreslår mainnet-integration genom att skicka in en Graph Improvement Proposal (GIP) och starta en pull request (PR) på [funktionsstödsmatrisen](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (mer detaljer på länken). +- The Graf Council granskar begäran och godkänner mainnet-stöd, förutsatt en framgångsrik Fas 2 och positiv feedback från gemenskapen. + +--- + +Om processen verkar skrämmande, oroa dig inte! The Graph Foundation är engagerad i att stödja integratörer genom att främja samarbete, erbjuda väsentlig information och vägleda dem genom olika faser, inklusive att navigera genom styrdighetsprocesser som Graph Improvement Proposals (GIPs) och pull requests. Om du har frågor, var god kontakta [info@thegraph.foundation](mailto:info@thegraph.foundation) eller via Discord (antingen Pedro, The Graph Foundation-medlem, IndexerDAO eller andra kärnutvecklare). + +Redo att forma framtiden för The Graf Nätverk? [Börja ditt förslag](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) nu och bli en del av web3-revolutionen! + +--- + +## Vanliga frågor + +### 1. Hur förhåller sig detta till [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +Denna process är relaterad till Subgraf Data Service och gäller endast nya Subgraf `Data Källor`. + +### 2. Vad händer om stöd för Firehose & Substreams kommer efter det att nätverket stöds på mainnet? + +Detta skulle endast påverka protokollstödet för indexbelöningar på Substreams-drivna subgrafer. Den nya Firehose-implementeringen skulle behöva testas på testnätet, enligt den metodik som beskrivs för Fas 2 i detta GIP. På liknande sätt, förutsatt att implementationen är prestanda- och tillförlitlig, skulle en PR på [Funktionsstödsmatrisen](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) krävas (`Substreams data sources` Subgraf Feature), liksom en ny GIP för protokollstöd för indexbelöningar. Vem som helst kan skapa PR och GIP; Stiftelsen skulle hjälpa till med Rådets godkännande. + +### 3. Hur lång tid tar denna process? + +Tiden till mainnet förväntas vara flera veckor, varierande baserat på tidpunkten för integrationsutveckling, om ytterligare forskning krävs, testning och buggfixar, och, som alltid, timingen av styrdighetsprocessen som kräver gemenskapens återkoppling. 
+ +Protokollstöd för indexbelöningar beror på intressenternas bandbredd att fortsätta med testning, insamling av återkoppling och hantering av bidrag till kärnkodbasen, om det är tillämpligt. Detta är direkt kopplat till integrationens mognad och hur responsivt integreringsteamet är (som kan vara eller inte vara teamet bakom RPC/Firehose-implementeringen). Stiftelsen är här för att hjälpa till med stöd genom hela processen. + +### 4. Hur kommer prioriteringar att hanteras? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/sv/cookbook/arweave.mdx b/website/pages/sv/cookbook/arweave.mdx index 17ef88db5366..df6806b6cec1 100644 --- a/website/pages/sv/cookbook/arweave.mdx +++ b/website/pages/sv/cookbook/arweave.mdx @@ -2,60 +2,60 @@ title: Bygga subgrafer på Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! -In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. +I den här guiden kommer du att lära dig hur du bygger och distribuerar subgrafer för att indexera Arweave-blockkedjan. -## What is Arweave? +## Vad är Arweave? -The Arweave protocol allows developers to store data permanently and that is the main difference between Arweave and IPFS, where IPFS lacks the feature; permanence, and files stored on Arweave can't be changed or deleted. +Arweave-protokollet tillåter utvecklare att lagra data permanent och det är den största skillnaden mellan Arweave och IPFS, där IPFS saknar funktionen; beständighet och filer lagrade på Arweave kan inte ändras eller raderas. -Arweave already has built numerous libraries for integrating the protocol in a number of different programming languages. For more information you can check: +Arweave har redan byggt ett flertal bibliotek för att integrera protokollet i ett antal olika programmeringsspråk. För mer information kan du kolla: - [Arwiki](https://arwiki.wiki/#/en/main) - [Arweave Resources](https://www.arweave.org/build) -## What are Arweave Subgraphs? +## Vad är Arweave-subgrafer? -The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). +Grafen låter dig bygga anpassade öppna API:er som kallas "Subgraphs". Subgrafer används för att tala om för indexerare (serveroperatörer) vilka data som ska indexeras på en blockchain och sparas på deras servrar så att du när som helst ska kunna fråga dem med [GraphQL](https://graphql.org/). -[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet.
+[Graph Node](https://github.com/graphprotocol/graph-node) kan nu indexera data på Arweave-protokollet. Den nuvarande integrationen indexerar bara Arweave som en blockchain (block och transaktioner), den indexerar inte de lagrade filerna ännu. -## Building an Arweave Subgraph +## Bygga en Arweave-subgraf -To be able to build and deploy Arweave Subgraphs, you need two packages: +För att kunna bygga och distribuera Arweave Subgraphs behöver du två paket: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` version 0.30.2 eller senare - Detta är ett kommandoradsverktyg för att bygga och distribuera subgrafer. [Klicka här](https://www.npmjs.com/package/@graphprotocol/graph-cli) för att ladda ner med `npm`. +2. `@graphprotocol/graph-ts` version 0.27.0 eller senare - Detta är ett bibliotek med subgrafspecifika typer. [Klicka här](https://www.npmjs.com/package/@graphprotocol/graph-ts) för att ladda ner med `npm`. -## Subgraph's components +## Subgraphs komponenter -There are three components of a subgraph: +Det finns tre komponenter i en subgraf: -### 1. Manifest - `subgraph.yaml` +### 1. Manifest - `subgraph.yaml` -Defines the data sources of interest, and how they should be processed. Arweave is a new kind of data source. +Definierar datakällorna av intresse och hur de ska behandlas. Arweave är en ny typ av datakälla. ### 2. Schema - `schema.graphql` -Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. +Här definierar du vilken data du vill kunna fråga efter att du har indexerat din subgraf med GraphQL. Detta liknar faktiskt en modell för ett API, där modellen definierar strukturen för en begäran. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +Kraven för Arweave subgrafer täcks av den [befintliga dokumentationen](/developing/creating-a-subgraph/#the-graphql-schema). -### 3. AssemblyScript Mappings - `mapping.ts` +### 3. AssemblyScript mappningar - `mapping.ts` -This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. +Detta är logiken som avgör hur data ska hämtas och lagras när någon interagerar med datakällorna du lyssnar på. Data översätts och lagras utifrån det schema du har listat. -During subgraph development there are two key commands: +Under subgrafutveckling finns det två nyckelkommandon: ``` $ graph codegen # generates types from the schema file identified in the manifest $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -## Subgraph Manifest Definition +## Definition av subgrafmanifestet -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers.
See below for an example subgraph manifest for an Arweave subgraph: +Subgrafmanifestet `subgraph.yaml` identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest för en Arweave-subgraf: ```yaml specVersion: 0.0.5 @@ -82,30 +82,30 @@ dataSources: - handler: handleTx # the function name in the mapping file ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` -- Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet +- Arweave subgrafer introducerar en ny typ av datakälla (`arweave`) +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` +- Arweave datakällor introducerar ett valfritt source.owner fält, som är den publika nyckeln till en Arweave plånbok -Arweave data sources support two types of handlers: +Arweave datakällor stöder två typer av hanterare: -- `blockHandlers` - Run on every new Arweave block. No source.owner is required. -- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` +- `blockHandlers` - Kör på varje nytt Arweave block. Ingen source.owner krävs. +- `transactionHandlers` - Kör på varje transaktion där datakällans `source.owner` är ägare. För närvarande krävs en ägare för `transactionHandlers`, om användare vill bearbeta alla transaktioner ska de ange "" som `source.owner` -> The source.owner can be the owner's address, or their Public Key. +> De source.owner kan vara ägarens adress eller deras publika nyckel. -> Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. +> Transaktioner är byggstenarna i Arweave permaweb och de är objekt skapade av slutanvändare. -> Note: [Bundlr](https://bundlr.network/) transactions are not supported yet. +> Obs! [Bundlr](https://bundlr.network/)-transaktioner stöds inte ännu. ## Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). +Schema definition beskriver strukturen för den resulterande subgraf databasen och relationerna mellan enheter. Detta är agnostiskt för den ursprungliga datakällan. Det finns mer information om definitionen av subgraf schema [här](/developing/creating-a-subgraph/#the-graphql-schema). -## AssemblyScript Mappings +## AssemblyScript mappningar -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Arweave indexering introducerar Arweave-specifika datatyper till [AssemblyScript API](/developing/assemblyscript-api/). ```tsx class Block { @@ -146,51 +146,51 @@ class Transaction { } ``` -Block handlers receive a `Block`, while transactions receive a `Transaction`. 
+Blockhanterare får ett `Block`, medan transaktioner får en `Transaction`. -Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). +Att skriva mappningar för en Arweave-subgraf är mycket likt att skriva mappningar för en Ethereum-subgraf. För mer information, klicka [här](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -## Querying an Arweave Subgraph +## Fråga efter en Arweave-subgraf -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +GraphQL-slutpunkten för Arweave-subgrafer bestäms av schemadefinitionen, med det befintliga API-gränssnittet. Besök [GraphQL API dokumentationen](/querying/graphql-api/) för mer information. -## Example Subgraphs +## Exempel på subgrafer -Here is an example subgraph for reference: +Här är ett exempel på subgraf som referens: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Exempel på subgraf för Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) ## FAQ -### Can a subgraph index Arweave and other chains? +### Kan en subgraf indexera Arweave och andra kedjor? -No, a subgraph can only support data sources from one chain/network. +Nej, en subgraf kan bara stödja datakällor från en kedja/nätverk. -### Can I index the stored files on Arweave? +### Kan jag indexera de lagrade filerna på Arweave? -Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions).
Du kanske vill konvertera till ett `base64` eller `base64 URL`-säkert format i dina mappningar, för att matcha det som visas i blockutforskare som [Arweave Explorer](https://viewblock.io/arweave/). -The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: +Följande `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` hjälpfunktion kan användas och kommer att läggas till i `graph-ts`: ``` const base64Alphabet = [ diff --git a/website/pages/sv/cookbook/base-testnet.mdx b/website/pages/sv/cookbook/base-testnet.mdx index b1e3a4fc7c6d..575853d3afa4 100644 --- a/website/pages/sv/cookbook/base-testnet.mdx +++ b/website/pages/sv/cookbook/base-testnet.mdx @@ -1,19 +1,19 @@ --- -title: Building Subgraphs on Base +title: Bygga subgrafer på Base --- -This guide will quickly take you through how to initialize, create, and deploy your subgraph on Base testnet. +Den här guiden tar dig snabbt igenom hur du initierar, skapar och distribuerar din subgraf på Base testnet. -What you'll need: +Vad du behöver: -- A Base testnet contract address -- A crypto wallet (e.g. MetaMask or Coinbase Wallet) +- En Base testnet kontraktsadress +- En krypto-plånbok (t.ex. MetaMask eller Coinbase Wallet) -## Subgraph Studio +## Subgraf Studio -### 1. Install the Graph CLI +### 1. Installera Graph CLI -The Graph CLI (>=v0.41.0) is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. +Graph CLI (>=v0.41.0) är skriven i JavaScript och du måste ha antingen `npm` eller `yarn` installerad för att använda den. ```sh # NPM @@ -23,71 +23,71 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -### 2. Create your subgraph in the Subgraph Studio +### 2. Skapa din subgraf i Subgraf Studio -Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your crypto wallet. +Gå till [Subgraf Studio](https://thegraph.com/studio/) och anslut din kryptoplånbok. -Once connected, click "Create a Subgraph" and enter a name for your subgraph. +När du är ansluten klickar du på "Skapa en subgraf" och anger ett namn för din subgraf. -Select "Base (testnet)" as the indexed blockchain and click Create Subgraph. +Välj "Base (testnet)" som den indexerade blockkedjan och klicka på Skapa subgraf. -### 3. Initialize your Subgraph +### 3. Initiera din subgraf -> You can find specific commands for your subgraph in the Subgraph Studio. +> Du kan hitta specifika kommandon för din subgraf i Subgraf Studio. -Make sure that the graph-cli is updated to latest (above 0.41.0) +Se till att graph-cli är uppdaterad till den senaste versionen (över 0.41.0) ```sh graph --version ``` -Initialize your subgraph from an existing contract. +Initiera din subgraf från ett befintligt kontrakt. ```sh graph init --studio ``` -Your subgraph slug is an identifier for your subgraph. The CLI tool will walk you through the steps for creating a subgraph, including: +Din subgraf-slug är en identifierare för din subgraf.
CLI-verktyget leder dig genom stegen för att skapa en subgraf, inklusive: -- Protocol: ethereum -- Subgraph slug: `` -- Directory to create the subgraph in: `` -- Ethereum network: base-testnet \_ Contract address: `` -- Start block (optional) -- Contract name: `` -- Yes/no to indexing events (yes means your subgraph will be bootstrapped with entities in the schema and simple mappings for emitted events) +- Protokoll: ethereum +- Subgraf slug: `` +- Katalog för att skapa subgrafen i: `` +- Ethereum nätverk: base-testnet \_ Kontraktsadress: `` +- Startblock (valfritt) +- Kontraktsnamn: `` +- Ja/nej till indexeringshändelser (ja betyder att din subgraf kommer att vara bootstrappad med entiteter i schemat och enkla mappningar för emitterade händelser) -### 3. Write your Subgraph +### 3. Skriv din Subgraf -> If emitted events are the only thing you want to index, then no additional work is required, and you can skip to the next step. +> Om emitterade händelser är det enda du vill indexera, krävs inget ytterligare arbete, och du kan hoppa till nästa steg. -The previous command creates a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: +Det föregående kommandot skapar en grundstruktur för en subgraf som du kan använda som utgångspunkt för att bygga din subgraf. När du gör ändringar i subgrafen kommer du huvudsakligen att arbeta med tre filer: -- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in manifest file to deploy your subgraph on Base testnet. -- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retreive from the subgraph. -- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. +- Manifest (subgraph.yaml) - Manifestet definierar vilka datakällor dina subgrafer kommer att indexera. Se till att lägga till `base-testnet` som nätverksnamnet i manifestfilen för att distribuera din subgraf på Base testnet. +- Schema (schema.graphql) - GraphQL schemat definierar vilken data du vill hämta från subgrafen. +- AssemblyScript mappningar (mapping.ts) - Detta är koden som översätter data från dina datakällor till de enheter som definieras i schemat. -If you want to index additional data, you will need extend the manifest, schema and mappings. +Om du vill indexera ytterligare data behöver du utöka manifestet, schemat och mappningarna. -For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). +För mer information om hur du skriver din subgraf, se [Skapa en subgraf](/developing/creating-a-subgraph). -### 4. Deploy to the Subgraph Studio +### 4. Distribuera till Subgraf Studio -Before you can deploy your subgraph, you will need to authenticate with the Subgraph Studio. You can do this by running the following command: +Innan du kan distribuera din subgraf måste du autentisera dig med Subgraf Studio. Du kan göra detta genom att köra följande kommando: -Authenticate the subgraph on studio +Autentisera subgrafen på studion ``` graph auth --studio ``` -Next, enter your subgraph's directory. +Gå sedan in i din subgrafs katalog.
``` cd `` ``` -Build your subgraph with the following command: +Bygg din subgraf med följande kommando: ```` ``` @@ -95,7 +95,7 @@ graph codegen && graph build ``` ```` -Finally, you can deploy your subgraph using this command: +Slutligen kan du distribuera din subgraf med detta kommando: ```` ``` @@ -103,10 +103,10 @@ graph deploy --studio ``` ```` -### 5. Query your subgraph +### 5. Fråga din subgraf -Once your subgraph is deployed, you can query it from your dapp using the `Development Query URL` in the Subgraph Studio. +När din subgraf har distribuerats kan du fråga den från din dapp med hjälp av `Development Query URL` i Subgraf Studio. -Note - Studio API is rate-limited. Hence should preferably be used for development and testing. +Obs - Studio API är hastighetsbegränsad. Bör därför helst användas för utveckling och testning. To learn more about querying data from your subgraph, see the [Querying a Subgraph](/querying/querying-the-graph) page. diff --git a/website/pages/sv/cookbook/cosmos.mdx b/website/pages/sv/cookbook/cosmos.mdx index ef21e4bc0855..5e5f4db16222 100644 --- a/website/pages/sv/cookbook/cosmos.mdx +++ b/website/pages/sv/cookbook/cosmos.mdx @@ -1,51 +1,51 @@ --- -title: Building Subgraphs on Cosmos +title: Bygga subgrafer på Cosmos --- -This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains. +Den här guiden är en introduktion till att bygga subgrafer som indexerar [Cosmos](https://docs.cosmos.network/)-baserade blockkedjor. -## What are Cosmos subgraphs? +## Vad är Cosmos subgrafer? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +Graph tillåter utvecklare att bearbeta blockchain-händelser och göra den resulterande informationen lätt tillgänglig via ett öppet GraphQL API, känt som en subgraf. [Graph Node](https://github.com/graphprotocol/graph-node) kan nu bearbeta Cosmos-händelser, vilket innebär att Cosmos-utvecklare nu kan bygga subgrafer för att enkelt indexera händelser i kedjan. -There are four types of handlers supported in Cosmos subgraphs: +Det finns fyra typer av hanterare som stöds i Cosmos subgrafer: -- **Block handlers** run whenever a new block is appended to the chain. -- **Event handlers** run when a specific event is emitted. -- **Transaction handlers** run when a transaction occurs. -- **Message handlers** run when a specific message occurs. +- **Blockhanterare** körs när ett nytt block läggs till i kedjan. +- **Händelsehanterare** körs när en specifik händelse sänds ut. +- **Transaktionshanterare** körs när en transaktion inträffar. +- **Meddelandehanterare** körs när ett specifikt meddelande förekommer. -Based on the [official Cosmos documentation](https://docs.cosmos.network/): +Baserat på den [officiella Cosmos-dokumentationen](https://docs.cosmos.network/): -> [Events](https://docs.cosmos.network/main/core/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. +> [Händelser](https://docs.cosmos.network/main/core/events) är objekt som innehåller information om hur programmet körs.
De används främst av tjänsteleverantörer som blockutforskare och plånböcker för att spåra utförandet av olika meddelanden och indextransaktioner. -> [Transactions](https://docs.cosmos.network/main/core/transactions) are objects created by end-users to trigger state changes in the application. +> [Transaktioner](https://docs.cosmos.network/main/core/transactions) är objekt som skapas av slutanvändare för att utlösa tillståndsändringar i applikationen. -> [Messages](https://docs.cosmos.network/main/core/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. +> [Meddelanden](https://docs.cosmos.network/main/core/transactions#messages) är modul-specifika objekt som utlöser tillståndsövergångar inom ramen för modulen de tillhör. -Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. +Även om all data kan nås med en blockhanterare, gör andra hanterare det möjligt för subgraf utvecklare att behandla data på ett mycket mer detaljerat sätt. -## Building a Cosmos subgraph +## Bygga en Cosmos subgraf -### Subgraph Dependencies +### Subgraf beroenden -[graph-cli](https://github.com/graphprotocol/graph-cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. +[graph-cli](https://github.com/graphprotocol/graph-cli) är ett CLI-verktyg för att bygga och distribuera subgrafer, version `>=0.30.0` krävs för att arbeta med Cosmos subgrafer. -[graph-ts](https://github.com/graphprotocol/graph-ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. +[graph-ts](https://github.com/graphprotocol/graph-ts) är ett bibliotek med subgrafspecifika typer, version `>=0.27.0` krävs för att arbeta med Cosmos subgrafer. -### Subgraph Main Components +### Subgraf Huvudkomponenter -There are three key parts when it comes to defining a subgraph: +Det finns tre viktiga delar när det gäller att definiera en subgraf: -**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. +**subgraph.yaml**: en YAML fil som innehåller subgraf manifestet, som identifierar vilka händelser som ska spåras och hur de ska behandlas. -**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. +**schema.graphql**: ett GraphQL schema som definierar vilken data som lagras för din subgraf och hur du frågar efter den via GraphQL. -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. +**AssemblyScript mappningar**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript)-kod som översätter från blockchain data till de definierade enheterna i ditt schema. -### Subgraph Manifest Definition +### Definition av subgraf manifestet -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: +Subgrafmanifestet (`subgraph.yaml`) identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna (`hanterare`) som ska köras som svar på dessa utlösare. 
Se nedan för ett exempel på subgraf manifest för en Cosmos subgraf: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). -- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. +- Cosmos subgrafer introducerar en ny `typ` av datakälla (`cosmos`). +- `Nätverket` bör motsvara en kedja i Cosmos ekosystem. I exemplet används Cosmos Hub huvudnät. ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graph-ql-schema). +Schemadefinition beskriver strukturen för den resulterande subgraf databasen och relationerna mellan enheter. Detta är agnostiskt för den ursprungliga datakällan. Det finns mer information om definition av subgraf schema [här](/developing/creating-a-subgraph/#the-graph-ql-schema). -### AssemblyScript Mappings +### AssemblyScript mappningar -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Cosmos indexering introducerar Cosmos specifika datatyper till [AssemblyScript API](/developing/assemblyscript-api/). ```tsx class Block { @@ -163,40 +163,40 @@ class Any { } ``` -Each handler type comes with its own data structure that is passed as an argument to a mapping function. +Varje hanterartyp kommer med sin egen datastruktur som skickas som ett argument till en mappningsfunktion. - Block handlers receive the `Block` type. - Event handlers receive the `EventData` type. - Transaction handlers receive the `TransactionData` type. - Message handlers receive the `MessageData` type. -As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). +Som en del av `MessageData` får meddelandehanteraren en transaktionskontext, som innehåller den viktigaste informationen om en transaktion som omfattar ett meddelande. Transaktionskontexten är också tillgänglig i typen `EventData`, men bara när motsvarande händelse är associerad med en transaktion. Dessutom får alla hanterare en referens till ett block (`HeaderOnlyBlock`). -You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Du hittar hela listan över typer för Cosmos integrationen [här](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). -### Message decoding +### Meddelan avkodning -It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://developers.google.com/protocol-buffers/) payload. 
As a result, the message data needs to be decoded in a mapping function before it can be processed. +Det är viktigt att notera att Cosmos-meddelanden är kedjespecifika och de skickas till en subgraf i form av en serialiserad [Protocol Buffers](https://developers.google.com/protocol-buffers/) nyttolast. Som ett resultat måste meddelandedata avkodas i en mappningsfunktion innan det kan bearbetas. -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Ett exempel på hur man avkodar meddelandedata i en subgraf finns [här](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). -## Creating and building a Cosmos subgraph +## Skapa och bygga en Cosmos subgraf -The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: +Det första steget innan du börjar skriva subgrafmappningarna är att generera typbindningarna baserat på de entiteter som har definierats i subgrafschemafilen (`schema.graphql`). Detta gör det möjligt för mappningsfunktionerna att skapa nya objekt av de typerna och spara dem i butiken. Detta görs genom att använda CLI-kommandot `codegen`: ```bash $ graph codegen ``` -Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: +När mappningarna är klara måste subgrafen byggas. Det här steget kommer att markera eventuella fel som manifestet eller mappningarna kan ha. En subgraf måste byggas framgångsrikt för att kunna distribueras till Graph Node. Det kan göras med CLI-kommandot `build`: ```bash $ graph build ``` -## Deploying a Cosmos subgraph +## Distribuera en Cosmos subgraf -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: +När din subgraf har skapats kan du distribuera din subgraf genom att använda CLI-kommandot `graph deploy` efter att ha kört CLI-kommandot `graph create`: **Hosted Service** @@ -208,52 +208,52 @@ graph create account/subgraph-name --product hosted-service graph deploy account/subgraph-name --product hosted-service ``` -**Local Graph Node (based on default configuration):** +**Lokal Graf Nod (baserat på standardkonfiguration):** ```bash graph create subgraph-name --node http://localhost:8020 ``` ```bash -graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 +graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 -## Querying a Cosmos subgraph +## Fråga efter en Cosmos subgraf -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +GraphQL slutpunkten för Cosmos subgrafer bestäms av schemadefinitionen, med det befintliga API gränssnittet. Besök [GraphQL API dokumentationen](/querying/graphql-api/) för mer information.
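As a quick, hedged illustration of the querying step above (not part of the original docs or this diff): the TypeScript sketch below sends a GraphQL query to a deployed subgraph's endpoint over HTTP. The endpoint URL and the `blocks` entity with its `id` and `height` fields are placeholder assumptions — substitute the names defined in your own subgraph's schema and the URL shown for your deployment.

```typescript
// Minimal sketch of querying a deployed subgraph's GraphQL endpoint.
// Assumes Node 18+ (global fetch). The endpoint and entity/field names
// below are placeholders, not values taken from the documentation above.
const ENDPOINT = "https://api.thegraph.com/subgraphs/name/<account>/<subgraph-name>";

const query = /* GraphQL */ `
  {
    blocks(first: 5, orderBy: height, orderDirection: desc) {
      id
      height
    }
  }
`;

async function main(): Promise<void> {
  const response = await fetch(ENDPOINT, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await response.json();
  if (errors) {
    // Surface schema mismatches (e.g. an entity name that does not exist).
    console.error("GraphQL errors:", errors);
    return;
  }
  console.log(JSON.stringify(data, null, 2));
}

main().catch(console.error);
```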
-## Supported Cosmos Blockchains +## Cosmos blockkedjor som stöds ### Cosmos Hub -#### What is Cosmos Hub? +#### Vad är Cosmos Hub? -The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. +[Cosmos Hub blockchain](https://hub.cosmos.network/) är den första blockkedjan i [Cosmos](https://cosmos.network/) ekosystemet. Du kan besöka den [officiella dokumentationen](https://docs.cosmos.network/) för mer information. -#### Networks +#### Nätverk -Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
    Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. +Cosmos Hubs huvudnät är `cosmoshub-4`. Cosmos Hub nuvarande testnät är `theta-testnet-001`.
    Andra Cosmos Hub nätverk, d.v.s. `cosmoshub-3`, är stoppade, därför tillhandahålls ingen data för dem. ### Osmosis -> Osmosis support in Graph Node and on the Hosted Service is in beta: please contact the graph team with any questions about building Osmosis subgraphs! +> Osmosis stöd i Graph Node och på Hosted Service är i beta: kontakta graf teamet om du har frågor om att bygga Osmosis subgrafer! -#### What is Osmosis? +#### Vad är Osmosis? -[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. +[Osmosis](https://osmosis.zone/) är ett decentraliserat, kedjeöverskridande AMM-protokoll (automated market maker) byggt ovanpå Cosmos SDK. Det tillåter användare att skapa anpassade likviditetspooler och handla IBC-aktiverade tokens. Du kan besöka den [officiella dokumentationen](https://docs.osmosis.zone/) för mer information. -#### Networks +#### Nätverk -Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. +Osmosis huvudnät är `osmosis-1`. Osmosis nuvarande testnät är `osmo-test-4`. -## Example Subgraphs +## Exempel på subgrafer -Here are some example subgraphs for reference: +Här är några exempel på subgrafer som referens: -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Exempel på blockfiltrering](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Exempel på validatorbelöningar](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Exempel på validatordelegationer](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Exempel på Osmosis Token Swaps](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/sv/cookbook/grafting.mdx b/website/pages/sv/cookbook/grafting.mdx index 54ad7a0eaff8..c68416f8cebf 100644 --- a/website/pages/sv/cookbook/grafting.mdx +++ b/website/pages/sv/cookbook/grafting.mdx @@ -1,40 +1,56 @@ --- -title: Replace a Contract and Keep its History With Grafting +title: Byt ut ett kontrakt och behåll dess historia med ympning --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +I den här guiden kommer du att lära dig hur du bygger och distribuerar nya subgrafer genom att ympa befintliga subgrafer. -## What is Grafting? +## Vad är ympning? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. +Ympning återanvänder data från en befintlig subgraf och börjar indexera den vid ett senare block.
Detta är användbart under utveckling för att snabbt komma förbi enkla fel i mappningarna eller för att tillfälligt få en befintlig subgraf att fungera igen efter att den har misslyckats. Det kan också användas när du lägger till en funktion till en subgraf som tar lång tid att indexera från början. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Den ympade subgrafen kan använda ett GraphQL-schema som inte är identiskt med det i bas subgrafen, utan bara är kompatibelt med det. Det måste vara ett giltigt subgraf schema i sig, men kan avvika från bas subgrafens schema på följande sätt: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Den lägger till eller tar bort entitetstyper +- Det tar bort attribut från entitetstyper +- Det lägger till nullbara attribut till entitetstyper +- Det förvandlar icke-nullbara attribut till nullbara attribut +- Det lägger till värden till enums +- Den lägger till eller tar bort gränssnitt +- Det ändrar för vilka entitetstyper ett gränssnitt implementeras -For more information, you can check: +För mer information kan du kontrollera: -- [Grafting](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) +- [Ympning](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +I den här handledningen kommer vi att täcka ett grundläggande användningsfall. Vi kommer att ersätta ett befintligt kontrakt med ett identiskt kontrakt (med en ny adress, men samma kod). Ympa sedan den befintliga subgrafen på "bas"-subgrafen som spårar det nya kontraktet. -## Building an Existing Subgraph +## Viktig anmärkning om ympning vid uppgradering till nätverket -Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +> **Akta dig**: Om du uppgraderar din subgraf från Subgraf Studio eller den hostade tjänsten till det decentraliserade nätverket, rekommenderas det starkt att undvika att använda grafting under uppgraderingsprocessen. -- [Subgraph example repo](https://github.com/t-proctor/grafting-tutorial) +### Varför är detta viktigt? -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +Ympning är en kraftfull funktion som gör det möjligt att "transplantera" en subgraph till en annan, och överföra historisk data från den befintliga subgraphen till en ny version. Även om detta är ett effektivt sätt att bevara data och spara tid på indexering, kan grafting introducera komplexiteter och potentiella problem vid övergången från en hostad miljö till det decentraliserade nätverket.
Det är inte möjligt att använda grafting för att föra tillbaka en subgraph från The Graf Nätverk till den hostade tjänsten eller Subgraf Studio. -## Subgraph Manifest Definition +### Bästa praxis -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +**Inledande Migration**: När du först distribuerar din subgraph till det decentraliserade nätverket, gör det utan grafting. Se till att subgraphen är stabil och fungerar som förväntat. + +**Senare Uppdateringar**: När din subgraph är aktiv och stabil på det decentraliserade nätverket kan du använda grafting för framtida versioner för att göra övergången smidigare och bevara historisk data. + +Genom att följa dessa riktlinjer minimerar du riskerna och säkerställer en smidigare migreringsprocess. + +## Bygga en befintlig subgraf + +Att bygga subgrafer är en viktig del av The Graph, som beskrivs mer ingående [här](http://localhost:3000/en/cookbook/quick-start/). För att kunna bygga och distribuera den befintliga subgrafen som används i denna handledning tillhandahålls följande repo: + +- [Subgraf exempel repo](https://github.com/t-proctor/grafting-tutorial) + +> Obs: Kontraktet som används i subgrafen togs från följande [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). + +## Definition av subgraf manifestet + +Subgrafmanifestet `subgraph.yaml` identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. Se nedan för ett exempel på subgraf manifest som du kommer att använda: ```yaml specVersion: 0.0.4 @@ -63,13 +79,13 @@ dataSources: file: ./src/lock.ts ``` -- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract -- The network should correspond to a indexed network being queried. Since we're running on Goerli testnet, the network is `goerli` -- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. +- `Lock`-datakällan är abi- och kontraktsadressen vi får när vi kompilerar och distribuerar kontraktet +- Nätverket bör motsvara ett indexerat nätverk som efterfrågas. Eftersom vi kör på Goerli testnet är nätverket `goerli` +- Avsnittet `mappning` definierar utlösare av intresse och de funktioner som ska köras som svar på dessa utlösare. I det här fallet lyssnar vi efter händelsen `Withdrawal` och anropar funktionen `handleWithdrawal` när den sänds. -## Grafting Manifest Definition +## Ympnings manifest Definition -Grafting requires adding two new items to the original subgraph manifest: +Ympning kräver att två nya objekt läggs till i det ursprungliga subgraf manifestet: ```yaml --- @@ -80,16 +96,16 @@ graft: block: 1502122 # block number ``` -- `features:` is a list of all used [feature names](developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `funktioner:` är en lista över alla använda [funktionsnamn](developing/creating-a-subgraph/#experimental-features). 
+- `graft:` är en karta över subgrafen `base` och blocket att ympa på. `block` är blocknumret att börja indexera från. Grafen kopierar data från bas subgrafen till och med det givna blocket och fortsätter sedan att indexera den nya subgrafen från och med det blocket. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +Värdena `base` och `block` kan hittas genom att distribuera två subgrafer: en för basindexering och en med ympning -## Deploying the Base Subgraph +## Distribuera Bas Subgraf -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Gå till [Graf Studio UI](https://thegraph.com/studio/) och skapa en subgraf på Goerli testnet som heter `graft-example` +2. Följ anvisningarna i `AUTH & Sektionen DEPLOY` på din subgraf sida i mappen `graft-example` från repo +3. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Playground ```graphql { @@ -101,37 +117,37 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -It returns something like this: +Den returnerar ungefär så här: ``` { - "data": { - "withdrawals": [ - { - "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5", - "amount": "0", - "when": "1664367528" - }, - { - "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3", - "amount": "0", - "when": "1664367648" - } - ] - } + "data": { + "uttag": [ + { + "id": "0x13098b538a61837e9f29b32fb40527bbbe63c9120c250242b02b69bb42c287e5-5", + "amount": "0", + "när": "1664367528" + }, + { + "id": "0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498-3", + "amount": "0", + "när": "1664367648" + } + ] + } } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +När du har verifierat att subgrafen indexerar korrekt kan du snabbt uppdatera subgrafen med ympning. -## Deploying the Grafting Subgraph +## Utplacering av ympning subgraf -The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. +Transplantatersättningen subgraph.yaml kommer att ha en ny kontraktsadress. Detta kan hända när du uppdaterar din dapp, omdisponerar ett kontrakt, etc. -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in The Graph Studio UI. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. 
Gå till [Graf Studio UI](https://thegraph.com/studio/) och skapa en subgraf på Goerli testnet som heter `graft-replacement` +2. Skapa ett nytt manifest. `subgraph.yaml` för `graph-replacement` innehåller en annan kontraktsadress och ny information om hur den ska ympa. Dessa är `block` av den [senaste händelsen](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) du bryr dig om det gamla kontraktet och `base` i den gamla subgrafen. `bas` subgraf-ID är `Implementerings-ID` för ditt ursprungliga `graph-example` subgraf. Du hittar detta i The Graph Studio UI. +3. Följ anvisningarna i `AUTH & DEPLOY`-avsnittet på din subgraf sida i mappen `graft-replacement` från repo +4. När du är klar kontrollerar du att subgrafen indexerar korrekt. Om du kör följande kommando i The Graph Lekplats ```graphql { @@ -143,7 +159,7 @@ The graft replacement subgraph.yaml will have a new contract address. This could } ``` -It should return the following: +Det bör returnera följande: ``` { @@ -169,18 +185,18 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) and [Event 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). The new contract emitted one `Withdrawal` after, [Event 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +Du kan se att subgrafen `graft-replacement` indexerar från äldre `graph-example` data och nyare data från den nya kontraktsadressen. Det ursprungliga kontraktet utsände två `Uttag`-händelser, [händelse 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) och [ händelse 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). Det nya kontraktet gav ett `Withdrawal` efter, [ händelse 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). De två tidigare indexerade transaktionerna (händelse 1 och 2) och den nya transaktionen (händelse 3) kombinerades tillsammans i subgrafen `graft-replacement`. -Congrats! You have succesfully grafted a subgraph onto another subgraph. +Grattis! Du har framgångsrikt ympat en subgraf på en annan subgraf. -## Additional Resources +## Ytterligare resurser -If you want more experience with grafting, here's a few examples for popular contracts: +Om du vill ha mer erfarenhet av ympning, här är några exempel på populära kontrakt: - [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3/protocols/uniswap-v3/config/templates/uniswap.v3.template.yaml), -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. 
Alternatives like [Data Source Templates](developing/creating-a-subgraph/#data-source-templates) can achieve similar results +För att bli ännu mer av en Graph-expert, överväg att lära dig om andra sätt att hantera förändringar i underliggande datakällor. Alternativ som [Datakällmallar](developing/creating-a-subgraph/#data-source-templates) kan ge liknande resultat -> Note: A lot of material from this article was taken from the previously published [Arweave article](/cookbook/arweave/) +> Obs! Mycket material från den här artikeln togs från den tidigare publicerade [Arweave-artikeln](/cookbook/arweave/) diff --git a/website/pages/sv/cookbook/near.mdx b/website/pages/sv/cookbook/near.mdx index 879e8e5c15aa..8be252607366 100644 --- a/website/pages/sv/cookbook/near.mdx +++ b/website/pages/sv/cookbook/near.mdx @@ -1,56 +1,56 @@ --- -title: Building Subgraphs on NEAR +title: Bygger subgrafer på NEAR --- -> NEAR support in Graph Node and on the Hosted Service is in beta: please contact near@thegraph.com with any questions about building NEAR subgraphs! +> NEAR support i Graph Node och på Hosted Service är i beta: kontakta near@thegraph.com om du har frågor om att bygga NEAR subgrafer! -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +Den här guiden är en introduktion till att bygga subgrafer som indexerar smarta kontrakt på [NEAR blockchain](https://docs.near.org/). -## What is NEAR? +## Vad är NEAR? -[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/docs/concepts/new-to-near) for more information. +[NEAR](https://near.org/) är en smart kontraktsplattform för att bygga decentraliserade applikationer. Besök den [officiella dokumentationen](https://docs.near.org/docs/concepts/new-to-near) för mer information. -## What are NEAR subgraphs? +## Vad är NEAR subgrafer? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +The Graph ger utvecklare verktyg för att bearbeta blockchain-händelser och göra den resulterande informationen lätt tillgänglig via ett GraphQL API, individuellt känt som en subgraf. [Graph Node](https://github.com/graphprotocol/graph-node) kan nu bearbeta NEAR-händelser, vilket innebär att NEAR-utvecklare nu kan bygga subgrafer för att indexera sina smarta kontrakt. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgrafer är händelsebaserade, vilket innebär att de lyssnar efter och sedan bearbetar händelser i kedjan. Det finns för närvarande två typer av hanterare som stöds för NEAR subgrafer: -- Block handlers: these are run on every new block -- Receipt handlers: run every time a message is executed at a specified account +- Blockhanterare: dessa körs på varje nytt block +- Kvittohanterare: körs varje gång ett meddelande körs på ett angivet konto -[From the NEAR documentation](https://docs.near.org/docs/concepts/transaction#receipt): +[Från NEAR dokumentationen](https://docs.near.org/docs/concepts/transaction#receipt): -> A Receipt is the only actionable object in the system. 
When we talk about "processing a transaction" on the NEAR platform, this eventually means "applying receipts" at some point. +> Ett kvitto är det enda handlingsbara objektet i systemet. När vi pratar om att "bearbeta en transaktion" på NEAR plattformen betyder det så småningom att "tillämpa kvitton" någon gång. -## Building a NEAR Subgraph +## Att bygga en NEAR Subgraf -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli` är ett kommandoradsverktyg för att bygga och distribuera subgrafer. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts` är ett bibliotek med subgrafspecifika typer. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR subgrafutveckling kräver `graph-cli` ovan version `0.23.0` och `graph-ts` ovan version `0.23.0`. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Att bygga en NEAR subgraf är mycket lik att bygga en subgraf som indexerar Ethereum. -There are three aspects of subgraph definition: +Det finns tre aspekter av subgraf definition: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** undergraf manifestet, som definierar datakällorna av intresse och hur de ska behandlas. NEAR är en ny `typ` av datakälla. -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph#the-graphql-schema). +**schema.graphql:** en schema fil som definierar vilken data som lagras för din subgraf, och hur man frågar den via GraphQL. Kraven för NEAR undergrafer täcks av [den befintliga dokumentationen](/developing/creating-a-subgraph#the-graphql-schema). -**AssemblyScript Mappings:** [AssemblyScript code](/developing/assemblyscript-api) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. +**AssemblyScript Mappings:**[AssemblyScript kod](/developing/assemblyscript-api) som översätter från händelsedata till de enheter som definieras i ditt schema. NEAR stöd introducerar NEAR specifika datatyper och ny JSON parsnings funktion. -During subgraph development there are two key commands: +Under subgrafutveckling finns det två nyckelkommandon: ```bash -$ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph codegen # genererar typer från schema filen som identifieras i manifestet +$ graph build # genererar Web Assembly från AssemblyScript filerna och förbereder alla subgraffiler i en /build-mapp ``` -### Subgraph Manifest Definition +### Definition av subgraf manifestet -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +Subgrafmanifestet (`subgraph.yaml`) identifierar datakällorna för subgrafen, utlösare av intresse och funktionerna som ska köras som svar på dessa utlösare. 
Se nedan för ett exempel på subgraf manifest för en NEAR subgraf: ```yaml specVersion: 0.0.2 @@ -72,10 +72,10 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) -- The `network` should correspond to a network on the hosting Graph Node. On the Hosted Service, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` -- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/docs/concepts/account). This can be an account or a sub-account. -- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. +- NEAR subgrafer introducerar en ny `kind` av datakälla (`near`) +- `network` bör motsvara ett nätverk på den Graph Node som agerar värd. På värdtjänsten är NEARs huvudnät `near-mainnet` och NEARs testnät är `near-testnet` +- NEAR datakällor introducerar ett valfritt `source.account`-fält, som är ett läsbart ID som motsvarar ett [NEAR-konto](https://docs.near.org/docs/concepts/account). Detta kan vara ett konto eller ett underkonto. +- NEAR datakällor introducerar ett alternativt valfritt `source.accounts`-fält, som innehåller valfria suffix och prefix. Minst prefix eller suffix måste anges, de kommer att matcha alla konton som börjar respektive slutar med värdena i listan. Exemplet nedan skulle matcha: `[app|good].*[morning.near|morning.testnet]`. Om endast en lista med prefix eller suffix är nödvändig kan det andra fältet utelämnas. ```yaml accounts: @@ -87,20 +87,20 @@ accounts: - morning.testnet ``` -NEAR data sources support two types of handlers: +NEAR datakällor stöder två typer av hanterare: -- `blockHandlers`: run on every new NEAR block. No `source.account` is required. -- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/docs/concepts/account#subaccounts) must be added as independent data sources). +- `blockHandlers`: kör på varje nytt NEAR-block. Inget `source.account` krävs. +- `receiptHandlers`: körs på varje kvitto där datakällans `source.account` är mottagaren. Observera att endast exakta matchningar behandlas ([underkonton](https://docs.near.org/docs/concepts/account#subaccounts) måste läggas till som oberoende datakällor). ### Schema Definition -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). +Schemadefinition beskriver strukturen för den resulterande subgraf databasen och relationerna mellan enheter. Detta är agnostiskt för den ursprungliga datakällan. Det finns mer information om definition av subgraf schema [här](/developing/creating-a-subgraph#the-graphql-schema). -### AssemblyScript Mappings +### AssemblyScript mappningar -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/).
+Hanterarna för bearbetning av händelser är skrivna i [AssemblyScript](https://www.assemblyscript.org/). -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/assemblyscript-api). +NEAR indexering introducerar NEAR specifika datatyper till [AssemblyScript API](/developing/assemblyscript-api). ```typescript @@ -162,50 +162,50 @@ class ReceiptWithOutcome { } ``` -These types are passed to block & receipt handlers: +Dessa typer skickas till block & kvittohanterare: -- Block handlers will receive a `Block` -- Receipt handlers will receive a `ReceiptWithOutcome` +- Blockhanterare kommer att få ett `Block` +- Kvittohanterare kommer att få ett `ReceiptWithOutcome` -Otherwise, the rest of the [AssemblyScript API](/developing/assemblyscript-api) is available to NEAR subgraph developers during mapping execution. +Annars är resten av [AssemblyScript API](/developing/assemblyscript-api) tillgänglig för NEAR subgraf utvecklare under körning av mappning. -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/developing/assemblyscript-api#json-api) to allow developers to easily process these logs. +Detta inkluderar en ny JSON parsnings funktion - loggar på NEAR sänds ofta ut som strängade JSON. En ny funktion `json.fromString(...)` är tillgänglig som en del av [JSON API](/developing/assemblyscript-api#json-api) för att tillåta utvecklare för att enkelt bearbeta dessa loggar. -## Deploying a NEAR Subgraph +## Utplacera en NEAR Subgraf -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +När du har en byggd subgraf är det dags att distribuera den till Graph Node för indexering. NEAR undergrafer kan distribueras till alla Graph Node `>=v0.26.x` (den här versionen har ännu inte taggats & släppts). -The Graph's Hosted Service currently supports indexing NEAR mainnet and testnet in beta, with the following network names: +The Graph's Hosted Service stöder för närvarande indexering av NEAR mainnet och testnet i beta, med följande nätverks namn: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on the Hosted Service can be found [here](/deploying/deploying-a-subgraph-to-hosted). +Mer information om att skapa och distribuera subgrafer på värdtjänsten finns [här](/deploying/deploying-a-subgraph-to-hosted). -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On the Hosted Service, this can be done from [your Dashboard](https://thegraph.com/hosted-service/dashboard): "Add Subgraph". +Som en snabb primer - det första steget är att "skapa" din subgraf - detta behöver bara göras en gång. På värdtjänsten kan detta göras från [din instrumentpanel](https://thegraph.com/hosted-service/dashboard): "Lägg till subgraf". 
-Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +När din subgraf har skapats kan du distribuera din subgraf genom att använda `graph deploy` CLI kommandot: ```sh $ graph create --node subgraph/name # creates a subgraph on a local Graph Node (on the Hosted Service, this is done via the UI) $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash ``` -The node configuration will depend on where the subgraph is being deployed. +Nodkonfigurationen beror på var subgrafen distribueras. -### Hosted Service +### Värdtjänster ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -### Local Graph Node (based on default configuration) +### Lokal graf nod (baserat på standardkonfiguration) ```sh graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +När din subgraf har distribuerats kommer den att indexeras av Graph Node. Du kan kontrollera dess framsteg genom att fråga själva subgrafen: ```graphql { @@ -217,45 +217,45 @@ Once your subgraph has been deployed, it will be indexed by Graph Node. You can } ``` -### Indexing NEAR with a Local Graph Node +### Indexering av NEAR med en lokal grafnod -Running a Graph Node that indexes NEAR has the following operational requirements: +Att köra en Graph Node som indexerar NEAR har följande operativa krav: -- NEAR Indexer Framework with Firehose instrumentation -- NEAR Firehose Component(s) -- Graph Node with Firehose endpoint configured +- NEAR Indexer Framework med Firehose-instrumentering +- NEAR Brandslangskomponent(er) +- Graf Nod med Firehose ändpunkt konfigurerad -We will provide more information on running the above components soon. +Vi kommer snart att ge mer information om hur du kör ovanstående komponenter. -## Querying a NEAR Subgraph +## Fråga efter en NEAR subgraf -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. +GraphQL slutpunkten för NEAR undergrafer bestäms av schemadefinitionen, med det befintliga API gränssnittet. Besök [GraphQL API-dokumentationen](/querying/graphql-api) för mer information. -## Example Subgraphs +## Exempel på subgrafer -Here are some example subgraphs for reference: +Här är några exempel på subgrafer som referens: -[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Block](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR kvitton](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) ## FAQ -### How does the beta work? +### Hur fungerar betan? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR stödet är i beta, vilket innebär att det kan bli ändringar i API:t när vi fortsätter att arbeta med att förbättra integrationen. 
Skicka ett e-postmeddelande till near@thegraph.com så att vi kan hjälpa dig att bygga NEAR subgrafer och hålla dig uppdaterad om den senaste utvecklingen! -### Can a subgraph index both NEAR and EVM chains? +### Kan en subgraf indexera både NEAR och EVM kedjor? -No, a subgraph can only support data sources from one chain/network. +Nej, en subgraf kan bara stödja datakällor från en kedja/nätverk. -### Can subgraphs react to more specific triggers? +### Kan subgrafer reagera på mer specifika triggers? -Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. +För närvarande stöds endast blockerings- och kvittoutlösare. Vi undersöker utlösare för funktionsanrop till ett specificerat konto. Vi är också intresserade av att stödja eventutlösare, när NEAR har inbyggt eventsupport. -### Will receipt handlers trigger for accounts and their sub-accounts? +### Kommer kvittohanterare att utlösa för konton och deras underkonton? -If an `account` is specified, that will only match the exact account name. It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: +Om ett `account` anges kommer det bara att matcha det exakta kontonamnet. Det är möjligt att matcha underkonton genom att ange ett `accounts`-fält, med `suffixes` och `prefixes` angivna för att matcha konton och underkonton, till exempel följande skulle matcha alla `mintbase1.near` underkonton: ```yaml accounts: @@ -263,22 +263,22 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### Kan NEAR subgrafer göra visningsanrop till NEAR konton under mappningar? -This is not supported. We are evaluating whether this functionality is required for indexing. +Detta stöds inte. Vi utvärderar om denna funktionalitet krävs för indexering. -### Can I use data source templates in my NEAR subgraph? +### Kan jag använda data källmallar i min NEAR subgraf? -This is not currently supported. We are evaluating whether this functionality is required for indexing. +Detta stöds inte för närvarande. Vi utvärderar om denna funktionalitet krävs för indexering. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum subgrafer stöder "väntande" och "nuvarande" versioner, hur kan jag distribuera en "väntande" version av en NEAR subgraf? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Väntande funktionalitet stöds ännu inte för NEAR subgrafer. Under tiden kan du distribuera en ny version till en annan "namngiven" subgraf, och när den sedan synkroniseras med kedjehuvudet kan du distribuera om till din primära "namngivna" subgraf, som kommer att använda samma underliggande implementerings-ID, så huvudsubgrafen synkroniseras omedelbart. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### Min fråga har inte besvarats, var kan jag få mer hjälp med att bygga NEAR subgrafer? 
-If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +Om det är en generell fråga om subgraffutveckling finns det mycket mer information i resten av [Utvecklardokumentationen](/quick-start). Annars, var vänlig och anslut dig till [The Graph Protocol Discord](https://discord.gg/graphprotocol) och ställ din fråga i kanalen #near eller skicka ett e-postmeddelande till near@thegraph.com. -## References +## Referenser -- [NEAR developer documentation](https://docs.near.org/docs/develop/basics/getting-started) +- [NEAR utvecklar dokumentation](https://docs.near.org/docs/develop/basics/getting-started) diff --git a/website/pages/sv/cookbook/subgraph-debug-forking.mdx b/website/pages/sv/cookbook/subgraph-debug-forking.mdx index 7ac3bf96ca10..0fdaaf7411c0 100644 --- a/website/pages/sv/cookbook/subgraph-debug-forking.mdx +++ b/website/pages/sv/cookbook/subgraph-debug-forking.mdx @@ -1,26 +1,26 @@ --- -title: Quick and Easy Subgraph Debugging Using Forks +title: Snabb och enkel subgraf felsökning med gafflar --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +Som med många system som behandlar stora mängder data, kan det ta ganska lång tid för The Graphs indexerare (Graph-noder) att synkronisera din subgraf med målblockkedjan. Diskrepansen mellan snabba ändringar med syftet att felsöka och långa väntetider som behövs för indexering är extremt kontraproduktiv och vi är väl medvetna om det. Det är därför vi introducerar **subgraf forking**, utvecklad av [LimeChain](https://limechain.tech/), och i den här artikeln Jag kommer att visa dig hur den här funktionen kan användas för att avsevärt påskynda subgraffelsökning! -## Ok, what is it? +## Ok, vad är det? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraf forking** är processen att lätt hämta entiteter från _en annan_ subgrafs butik (vanligtvis en avlägsen sådan). -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +I samband med felsökning låter **subgraf forking** dig felsöka din misslyckade subgraf i block _X_ utan att behöva vänta för att synkronisera för att blockera _X_. -## What?! How? +## Vad?! Hur? -When you deploy a subgraph to a remote Graph node for indexing and it fails at block _X_, the good news is that the Graph node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. 
+När du distribuerar en subgraf till en avlägsen Graph-nod för indexering och den misslyckas vid block _X_, är den goda nyheten att Graph-noden fortfarande kommer att betjäna GraphQL-frågor med hjälp av sitt lager, som synkroniseras för att blockera _X_. Toppen! Det betyder att vi kan dra nytta av denna "up-to-date" butik för att fixa de buggar som uppstår vid indexering av block _X_. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state. +I ett nötskal, vi ska _gaffla den misslyckade subgrafen_ från en avlägsen Graph-nod som garanterat har subgrafen indexerad upp till blocket _X_ för att ge den lokalt distribuerade subgrafen som felsöks i blocket _X_ en uppdaterad vy av indexeringstillståndet. -## Please, show me some code! +## Snälla, visa mig lite kod! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +För att behålla fokus på subgraffelsökning, låt oss hålla saker och ting enkla och köra tillsammans med [exempel-undergraf](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexera Ethereum Gravity smarta kontrakt. -Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: +Här är hanterarna definierade för att indexera `Gravatar`s, utan några som helst buggar: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,43 +44,43 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) it fails with the _"Gravatar not found!"_ error. +Hoppsan, vad olyckligt, när jag distribuerar min perfekt snygga subgraf till [värdtjänsten](https://thegraph.com/hosted-service/) misslyckas den med _ "Gravatar inte hittat!"_ fel. -The usual way to attempt a fix is: +Det vanliga sättet att försöka fixa är: -1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) (or another remote Graph node). -3. Wait for it to sync-up. -4. If it breaks again go back to 1, otherwise: Hooray! +1. Gör en förändring i mappningskällan, som du tror kommer att lösa problemet (även om jag vet att det inte kommer att göra det). +2. Distribuera om subgrafen till [värdtjänsten](https://thegraph.com/hosted-service/) (eller en annan fjärrnod för Graph). +3. Vänta tills det synkroniseras. +4. Om den går sönder igen gå tillbaka till 1, annars: Hurra! -It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ +Det är faktiskt ganska bekant med en vanlig felsökningsprocess, men det finns ett steg som saktar ner processen fruktansvärt: _3. Vänta tills det synkroniseras._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Genom att använda **subgraf forking** kan vi i princip eliminera detta steg. Så här ser det ut: -0. Spin-up a local Graph node with the **_appropriate fork-base_** set. -1. 
Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph node, **_forking the failing subgraph_** and **_starting from the problematic block_**. -3. If it breaks again, go back to 1, otherwise: Hooray! +0. Snurra upp en lokal Graf nod med **_lämplig gaffelbas_**. +1. Gör en ändring i mappningskällan som du tror kommer att lösa problemet. +2. Distribuera till den lokala Graf-noden, **_fördelning av den misslyckade subgrafen_** och ** _med början från det problematiska blocket_**. +3. Om den går sönder igen, gå tillbaka till 1, annars: Hurra! -Now, you may have 2 questions: +Nu kanske du har 2 frågor: -1. fork-base what??? -2. Forking who?! +1. gaffelbas vad??? +2. Forking vem?! -And I answer: +Och jag svarar: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. -2. Forking is easy, no need to sweat: +1. `fork-base` är "bas"-URL, så att när _subgraf id_ läggs till den resulterande URL-adressen (`/`) är en giltig GraphQL slutpunkt för subgrafens arkiv. +2. Gaffling är lätt, du behöver inte svettas: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Glöm inte heller att ställa in `dataSources.source.startBlock`-fältet i undergraf manifestet till numret på det problematiska blocket, så att du kan hoppa över indexering av onödiga block och dra fördel av gaffeln! -So, here is what I do: +Så här är vad jag gör: -0. I spin-up a local graph node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from the [HostedService](https://thegraph.com/hosted-service/). +0. Jag skapar en lokal grafnod ([så här gör du](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) med alternativet `fork-base` inställt på: `https://api.thegraph.com/subgraphs/id/`, eftersom jag kommer att dela en subgraf, den buggy jag distribuerade tidigare, från [HostedService](https://thegraph.com/hosted-service/). ``` $ cargo run -p graph-node --release -- \ @@ -90,13 +90,13 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -1. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -2. After I made the changes I deploy my subgraph to the local Graph node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +1. Efter noggrann inspektion märker jag att det finns en oöverensstämmelse i `id`-representationerna som används vid indexering av `Gravatar`s i mina två hanterare. Medan `handleNewGravatar` konverterar den till en hex (`event.params.id.toHex()`), använder `handleUpdatedGravatar` en int32 (`händelse. 
params.id.toI32()`) vilket gör att `handleUpdatedGravatar` får panik med "Gravatar not found!". Jag får dem båda att konvertera `id` till en hexadecimal. +2. Efter att jag gjort ändringarna distribuerar jag min subgraf till den lokala Graf noden, **_fördelar den misslyckade subgrafen_** och ställer in `dataSources.source.startBlock` till `6190343` i `subgraph.yaml`: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -3. I inspect the logs produced by the local Graph node and, Hooray!, everything seems to be working. -4. I deploy my now bug-free subgraph to a remote Graph node and live happily ever after! (no potatoes tho) -5. The end... +3. Jag inspekterar loggarna som produceras av den lokala Graf-noden och, hurra!, allt verkar fungera. +4. Jag distribuerar min nu felfria subgraf till en avlägsen Graf-nod och lever lycklig i alla sina dagar! (ingen potatis dock) +5. Slutet... diff --git a/website/pages/sv/cookbook/subgraph-uncrashable.mdx b/website/pages/sv/cookbook/subgraph-uncrashable.mdx index 989310a3f9a0..e6ef7ed8cc76 100644 --- a/website/pages/sv/cookbook/subgraph-uncrashable.mdx +++ b/website/pages/sv/cookbook/subgraph-uncrashable.mdx @@ -1,29 +1,29 @@ --- -title: Safe Subgraph Code Generator +title: Säker subgraf kodgenerator --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) är ett kodgenereringsverktyg som genererar en uppsättning hjälpfunktioner från ett projekts graphql schema. Det säkerställer att alla interaktioner med enheter i din subgraf är helt säkra och konsekventa. -## Why integrate with Subgraph Uncrashable? +## Varför integrera med Subgraf Uncrashable? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Kontinuerlig drifttid**. Misshandlade enheter kan få subgrafer att krascha, vilket kan vara störande för projekt som är beroende av The Graph. Ställ in hjälpfunktioner för att göra dina subgrafer "ofullständig" och säkerställa kontinuitet i verksamheten. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Helt säkert**. Vanliga problem som ses vid subgrafutveckling är problem med att ladda odefinierade enheter, att inte ställa in eller initiera alla värden på entiteter och tävlingsförhållanden för att ladda och spara enheter. Se till att alla interaktioner med enheter är helt atomära. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **Användarkonfigurerbar** Ställ in standardvärden och konfigurera nivån för säkerhetskontroller som passar ditt individuella projekts behov. 
Varningsloggar registreras och indikerar var det finns ett brott mot subgraflogik för att hjälpa till att korrigera problemet för att säkerställa datanoggrannhet. -**Key Features** +**Viktiga egenskaper** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- Kodgenereringsverktyget rymmer **alla** subgraftyper och är konfigurerbart för användare att ställa in sunda standardvärden för värden. Kodgenereringen kommer att använda denna konfiguration för att generera hjälpfunktioner enligt användarens specifikation. -- The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. +- Ramverket innehåller också ett sätt (via konfigurationsfilen) att skapa anpassade, men säkra, sätterfunktioner för grupper av entitetsvariabler. På så sätt är det omöjligt för användaren att ladda/använda en inaktuell grafenhet och det är också omöjligt att glömma att spara eller ställa in en variabel som krävs av funktionen. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. These logs can be viewed in the The Graph's hosted service under the 'Logs' section. +- Varningsloggar registreras som loggar som indikerar var det finns ett brott mot subgraflogik för att hjälpa till att korrigera problemet för att säkerställa datanoggrannhet. Dessa loggar kan ses i The Graphs värdtjänst under avsnittet "Loggar". -Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. +Subgraph Uncrashable kan köras som en valfri flagga med kommandot Graph CLI codegen. ```sh graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Besök [Subgraph Uncrashable-dokumentationen](https://float-capital.github.io/float-subgraph-uncrashable/docs/) eller titta på denna [självstudievideo](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) för att lära dig mer och komma igång med att utveckla säkrare subgrafer. diff --git a/website/pages/sv/cookbook/substreams-powered-subgraphs.mdx b/website/pages/sv/cookbook/substreams-powered-subgraphs.mdx index 6b84c84358c8..086b76abadf1 100644 --- a/website/pages/sv/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/sv/cookbook/substreams-powered-subgraphs.mdx @@ -1,30 +1,30 @@ --- -title: Substreams-powered subgraphs +title: Substreams-drivna subgrafer --- -[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. +[Substreams](/substreams) är ett nytt ramverk för att behandla blockkedjedata som utvecklats av StreamingFast för The Graph Network.
En Substreams-modul kan generera ändringar i entiteter som är kompatibla med Subgraph-entiteter. En subgraf kan använda en sådan Substreams-modul som en datakälla och därmed ta del av Substreams indexeringshastighet och ytterligare data för subgrafutvecklare. -## Requirements +## Krav -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): +Den här kokboken kräver [yarn](https://yarnpkg.com/), [de beroenden som krävs för lokal Substreams-utveckling](https://substreams.streamingfast.io/developers-guide/installation-requirements) och den senaste versionen av Graph CLI (>=0.52.0): ``` npm install -g @graphprotocol/graph-cli ``` -## Get the cookbook +## Skaffa kokboken -> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). +> Den här kokboken använder denna [Substreams-drivna subgraf som referens](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). ``` graph init --from-example substreams-powered-subgraph ``` -## Defining a Substreams package +## Definiera ett Substreams-paket -A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. +Ett Substreams-paket består av typer (definierade som [Protocol Buffers](https://protobuf.dev/)), moduler (skrivna i Rust) och en `substreams.yaml`-fil som refererar till typerna och specificerar hur modulerna utlöses. [Besök Substreams-dokumentationen för att lära dig mer om Substreams-utveckling](/substreams), och kolla in [awesome-substreams](https://github.com/pinax-network/awesome-substreams) och [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) för fler exempel. -The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): +Substreams-paketet i fråga upptäcker kontraktsdistributioner på Mainnet Ethereum och spårar skapandeblocket och tidsstämpeln för alla nyligen distribuerade kontrakt.
För att göra detta finns det en dedikerad `Contract`-typ i `/proto/example.proto` ([läs mer om att definiera protokollbuffertar](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: +Kärnlogiken i Substreams-paketet är en `map_contract`-modul i `lib.rs`, som bearbetar varje block, filtrerar efter Create-anrop som inte reverterade och returnerar `Contracts`: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. +> `substreams_entity_change`-lådan har också en dedikerad `Tables`-funktion för att helt enkelt generera entitetsändringar ([dokumentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). Entitetsändringarna som genereras måste vara kompatibla med `schema.graphql`-entiteterna som definieras i `subgraph.graphql` i motsvarande subgraf. ``` #[substreams::handlers::map] @@ -88,7 +88,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: +För att förbereda detta Substreams-paket för konsumtion av en subgraf måste du köra följande kommandon: ```bash -yarn substreams:protogen # generates types in /src/pb -yarn substreams:build # builds the substreams -yarn substreams:package # packages the substreams in a .spkg file +yarn substreams:protogen # genererar typer i /src/pb +yarn substreams:build # bygger substreams +yarn substreams:package # paketerar substreams i en .spkg-fil -# alternatively, yarn substreams:prepare calls all of the above commands +# alternativt anropar yarn substreams:prepare alla ovanstående kommandon ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> Dessa skript definieras i filen `package.json` om du vill förstå de underliggande substreams-kommandona -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +Detta genererar en `spkg`-fil baserat på paketnamnet och versionen från `substreams.yaml`. `spkg`-filen har all information som Graph Node behöver för att mata in detta Substreams-paket. -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. +> Om du uppdaterar Substreams-paketet, beroende på de ändringar du gör, kan du behöva köra några eller alla ovanstående kommandon så att `spkg` är uppdaterad. -## Defining a Substreams-powered subgraph +## Definiera en Substreams-driven subgraf -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. +Substreams-drivna subgrafer introducerar en ny `kind` av datakälla, "substreams". Sådana subgrafer kan bara ha en datakälla.
-This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. +Denna datakälla måste ange det indexerade nätverket, Substreams-paketet (`spkg`) som en relativ filplats och modulen i det Substreams-paketet som producerar subgrafkompatibla entitetsändringar (i detta fall `map_entity_changes`, från Substreams-paketet ovan). Mappningen är specificerad, men identifierar helt enkelt mappningstypen ("substreams/graph-entities") och apiVersion. -> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). +> För närvarande stöder Subgraph Studio och The Graph Network Substreams-drivna subgrafer som indexerar `mainnet` (Mainnet Ethereum). ```yaml specVersion: 0.0.4 @@ -180,35 +180,35 @@ dataSources: apiVersion: 0.0.5 ``` -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. +`subgraph.yaml` refererar också till en schemafil. Kraven för denna fil är oförändrade, men de angivna entiteterna måste vara kompatibla med entitetsändringarna som produceras av Substreams-modulen som refereras till i `subgraph.yaml`. ```graphql type Contract @entity { id: ID! - "The timestamp when the contract was deployed" + "Tidsstämpeln när kontraktet distribuerades" timestamp: String! - "The block number of the contract deployment" + "Blocknumret för kontraktsdistributionen" blockNumber: BigInt! } ``` -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. +Med tanke på ovanstående kan subgrafutvecklare använda Graph CLI för att distribuera denna Substreams-drivna subgraf. -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). +> Substreams-drivna subgrafer som indexerar mainnet Ethereum kan distribueras till [Subgraph Studio](https://thegraph.com/studio/). ```bash yarn install # install graph-cli -yarn subgraph:build # build the subgraph +yarn subgraph:build # bygg subgrafen yarn subgraph:deploy # deploy the subgraph ``` -That's it! You have built and deployed a Substreams-powered subgraph. +Det är allt! Du har byggt och distribuerat en Substreams-driven subgraf. -## Serving Substreams-powered subgraphs +## Betjäna Substreams-drivna subgrafer -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. These providers can be configured via a `config.toml` file: +För att kunna betjäna Substreams-drivna subgrafer måste Graph Node konfigureras med en Substreams-leverantör för det relevanta nätverket, samt en Firehose eller RPC för att spåra kedjehuvudet.
Dessa leverantörer kan konfigureras via en `config.toml`-fil: ```toml [chains.mainnet] diff --git a/website/pages/sv/cookbook/upgrading-a-subgraph.mdx b/website/pages/sv/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..b70d353ae1c9 100644 --- a/website/pages/sv/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/sv/cookbook/upgrading-a-subgraph.mdx @@ -1,25 +1,25 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: Uppgradera en befintlig subgraf till The Graph Nätverk --- -## Introduction +## Introduktion -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +Det här är en guide för hur du uppgraderar din subgraf från värdtjänsten till The Graphs decentraliserade nätverk. Över 1 000 subgrafer har framgångsrikt uppgraderat till The Graph Nätverk inklusive projekt som Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido och många fler! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. +Processen att uppgradera är snabb och dina subgrafer kommer för alltid att dra nytta av den tillförlitlighet och prestanda som du bara kan få på The Graph Nätverk. -### Prerequisites +### Förutsättningar -- You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. -- You have a wallet with ETH to publish your subgraph on-chain. -- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. +- Du har redan distribuerat en subgraf på värdtjänsten. +- The subgraph is indexing a chain available on The Graph Network. +- Du har en plånbok med ETH för att publicera din subgraf i kedjan. +- Du har ~10 000 GRT för att kurera din subgraf så att indexerare kan börja indexera den. -## Upgrading an Existing Subgraph to The Graph Network +## Uppgradera en befintlig subgraf till The Graph Nätverk -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> Du kan hitta specifika kommandon för din subgraf i [Subgraf Studio](https://thegraph.com/studio/). -1. Get the latest version of the graph-cli installed: +1. Få den senaste versionen av graph-cli installerad: ```sh npm install -g @graphprotocol/graph-cli @@ -29,29 +29,29 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +Se till att din `apiVersion` i subgraph.yaml är `0.0.5` eller högre. -2. Inside the subgraph's main project repository, authenticate the subgraph to deploy and build on the studio: +2. Inuti subgrafens huvudprojektförråd, autentisera subgrafen för att distribuera och bygga i studion: ```sh graph auth --studio ``` -3. Generate files and build the subgraph: +3. Generera filer och bygg subgrafen: ```sh graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +Om din subgraf har byggfel, se [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph.
You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. Logga in på [Subgraf Studio](https://thegraph.com/studio/) med din plånbok och distribuera subgrafen. Du kan hitta din `` i Studio UI, som baseras på namnet på din subgraf. ```sh graph deploy --studio ``` -5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): +5. Testa frågor på Studions lekplats. Här är några exempel för [Sushi - Mainnet Exchange Subgraf](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): ```sh { @@ -68,69 +68,69 @@ graph deploy --studio } ``` -6. At this point, your subgraph is now deployed on Subgraph Studio, but not yet published to the decentralized network. You can now test the subgraph to make sure it is working as intended using the temporary query URL as seen on top of the right column above. As this name already suggests, this is a temporary URL and should not be used in production. +6. Vid det här laget är din subgraf nu distribuerad på Subgraf Studio, men ännu inte publicerad i det decentraliserade nätverket. Du kan nu testa subgrafen för att se till att den fungerar som avsett med hjälp av den tillfälliga fråge-URL:en som visas överst i den högra kolumnen ovan. Som detta namn redan antyder är detta en tillfällig URL och bör inte användas i produktionen. -- Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. -- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. +- Uppdatering är bara att publicera en annan version av din befintliga subgraf i kedjan. +- Eftersom detta medför en kostnad rekommenderas det starkt att du distribuerar och testar din subgraf i Subgraph Studio, med hjälp av "Utvecklingsfrågans URL" innan du publicerar. Se ett exempel på en transaktion [här](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Priserna är ungefär runt 0,0425 ETH vid 100 gwei. +- Varje gång du behöver uppdatera din subgraf kommer du att debiteras en uppdateringsavgift. Eftersom detta medför en kostnad, rekommenderas det starkt att distribuera och testa din subgraf på Goerli innan du distribuerar till mainnet. Det kan i vissa fall också kräva viss GRT om det inte finns någon signal på den subgrafen. Om det finns signal/kuration på den subgrafversionen (med automigrera), kommer skatterna att delas upp. -7. Publish the subgraph on The Graph's decentralized network by hitting the "Publish" button. +7. Publicera subgrafen på The Graphs decentraliserade nätverk genom att trycka på knappen "Publicera". -You should curate your subgraph with GRT to ensure that it is indexed by Indexers.
To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. +Du bör kurera din subgraf med GRT för att säkerställa att den indexeras av indexerare. För att spara på gaskostnaderna kan du kurera din subgraf i samma transaktion som du publicerar den i nätverket. Det rekommenderas att kurera din subgraf med minst 10 000 GRT för hög servicekvalitet. -And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +Och det är allt! När du är klar med publiceringen kommer du att kunna se dina subgrafer live på det decentraliserade nätverket via [The Graph Explorer](https://thegraph.com/explorer). -Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. +Tveka inte att utnyttja kanalen [#Curators](https://discord.gg/s5HfGMXmbW) på Discord för att informera Curators om att din subgraf är redo att signaleras. Det skulle också vara till hjälp om du delar din förväntade frågevolym med dem. På så sätt kan de uppskatta hur mycket GRT de bör signalera på din subgraf. -### Create an API key +### Skapa en API-nyckel -You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). +Du kan generera en API-nyckel i Subgraf Studio [här](https://thegraph.com/studio/apikeys/). -![API key creation page](/img/api-image.png) +![Sida för att skapa API-nyckel](/img/api-image.png) -At the end of each week, an invoice will be generated based on the query fees that have been incurred during this period. This invoice will be paid automatically using the GRT available in your balance. Your balance will be updated after the cost of your query fees are withdrawn. Query fees are paid in GRT via the Arbitrum network. You will need to add GRT to the Arbitrum billing contract to enable your API key via the following steps: +I slutet av varje vecka kommer en faktura att genereras baserat på de frågeavgifter som har uppkommit under denna period. Denna faktura kommer att betalas automatiskt med den GRT som finns på ditt saldo. Ditt saldo kommer att uppdateras efter att kostnaden för dina frågeavgifter har dragits av. Frågeavgifter betalas i GRT via Arbitrum-nätverket. Du måste lägga till GRT i Arbitrum-faktureringskontraktet för att aktivera din API-nyckel via följande steg: -- Purchase GRT on an exchange of your choice. -- Send the GRT to your wallet. -- On the Billing page in Studio, click on Add GRT. +- Köp GRT på en valfri börs. +- Skicka GRT till din plånbok. +- På sidan Fakturering i Studio klickar du på Lägg till GRT. -![Add GRT in billing](/img/Add-GRT-New-Page.png) +![Lägg till GRT i fakturering](/img/Add-GRT-New-Page.png) -- Follow the steps to add your GRT to your billing balance. -- Your GRT will be automatically bridged to the Arbitrum network and added to your billing balance. +- Följ stegen för att lägga till din GRT till ditt faktureringssaldo. +- Din GRT kommer automatiskt att bryggas till Arbitrum-nätverket och läggas till ditt faktureringssaldo.
-![Billing pane](/img/New-Billing-Pane.png) +![Faktureringspanel](/img/New-Billing-Pane.png) -> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. +> Obs! Se den [officiella faktureringssidan](../billing.mdx) för fullständiga instruktioner om hur du lägger till GRT till ditt faktureringssaldo. -### Securing your API key +### Säkra din API nyckel -It is recommended that you secure the API by limiting its usage in two ways: +Det rekommenderas att du säkrar API: et genom att begränsa dess användning på två sätt: -1. Authorized Subgraphs -2. Authorized Domain +1. Auktoriserade subgrafer +2. Auktoriserad Domän -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +Du kan säkra din API-nyckel [here](https://thegraph.com/studio/apikeys/test/). -![Subgraph lockdown page](/img/subgraph-lockdown.png) +![Subgraf lockdown sida](/img/subgraph-lockdown.png) -### Querying your subgraph on the decentralized network +### Fråga din subgraf på det decentraliserade nätverket -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. +Nu kan du kontrollera indexeringsstatusen för indexerarna på nätverket i Graph Explorer (exempel [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). Den gröna linjen överst indikerar att 8 indexerare lyckades indexera den subgrafen vid tidpunkten för inlägget. Även på fliken Indexerare kan du se vilka Indexerare som plockade upp din subgraf. -![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) +![Rocket Pool subgraf](/img/rocket-pool-subgraph.png) -As soon as the first Indexer has fully indexed your subgraph you can start to query the subgraph on the decentralized network. In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. You will see something like this: +Så snart den första indexeraren har indexerat din subgraf helt och hållet kan du börja fråga subgrafen på det decentraliserade nätverket. För att hämta fråge URL för din subgraf kan du kopiera/klistra in den genom att klicka på symbolen bredvid fråge URL. Du kommer att se något sånt här: `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. +Viktigt: Se till att ersätta `[api-key]` med en faktisk API-nyckel som genereras i avsnittet ovan. -You can now use that Query URL in your dapp to send your GraphQL requests to. +Du kan nu använda den fråge URL i din dapp för att skicka dina GraphQL förfrågningar till. -Congratulations! You are now a pioneer of decentralization! +Grattis! Du är nu en pionjär inom decentralisering! -> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. 
In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: +> Obs: På grund av nätverkets distribuerade karaktär kan det vara så att olika indexerare har indexerat upp till olika block. För att bara ta emot färska data kan du ange det minsta block som en indexerare måste ha indexerat för att kunna betjäna din fråga med blocket: `{ number_gte: $minBlock }` field argument som visas i exemplet nedan: ```graphql { @@ -140,86 +140,86 @@ Congratulations! You are now a pioneer of decentralization! } ``` -More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). +Mer information om nätverkets karaktär och hur man hanterar omorganisationer beskrivs i dokumentationsartikeln [Distribuerade system](/querying/distributed-systems/). -## Updating a Subgraph on the Network +## Uppdatera en subgraf i nätverket -If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. +Om du vill uppdatera en befintlig subgraf på nätverket kan du göra detta genom att distribuera en ny version av din subgraf till Subgraf Studio med hjälp av Graph CLI. -1. Make changes to your current subgraph. A good idea is to test small fixes on the Subgraph Studio by publishing to Goerli. -2. Deploy the following and specify the new version in the command (eg. v0.0.1, v0.0.2, etc): +1. Gör ändringar i din nuvarande subgraf. En bra idé är att testa små fixar på Subgraf Studio genom att publicera till Goerli. +2. Distribuera följande och ange den nya versionen i kommandot (t. ex. v0.0.1, v0.0.2, etc): ```sh graph deploy --studio ``` -3. Test the new version in the Subgraph Studio by querying in the playground -4. Publish the new version on The Graph Network. Remember that this requires gas (as described in the section above). +3. Testa den nya versionen i Subgraf Studio genom att fråga på lekplatsen +4. Publicera den nya versionen på The Graph Nätverk. Kom ihåg att detta kräver gas (som beskrivs i avsnittet ovan). -### Owner Update Fee: Deep Dive +### Ägaruppdateringsavgift: Djupdykning -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> Obs: Curation på Arbitrum använder inte bindningskurvor. Läs mer om Arbitrum [here](/arbitrum/arbitrum-faq/). -An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). +En uppdatering kräver att GRT migreras från den gamla versionen av subgrafen till den nya versionen. Detta innebär att för varje uppdatering kommer en ny bindningskurva att skapas (mer om bindningskurvor [here](/network/curating#bonding-curve-101)). -The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. 
+Den nya bindningskurvan tar ut 1% kurationsskatt på all GRT som migreras till den nya versionen. Ägaren måste betala 50% av detta eller 1,25 %. De övriga 1,25 % tas upp av alla kuratorer som en avgift. Denna incitamentsdesign är på plats för att förhindra att en ägare av en subgraf kan tömma alla sina curatorers medel med rekursiva uppdateringsanrop. Om det inte finns någon kurationsaktivitet måste du betala minst 100 GRT för att signalera din egen subgraf. -Let's make an example, this is only the case if your subgraph is being actively curated on: +Låt oss ta ett exempel, detta är bara fallet om din subgraf aktivt kureras på: -- 100,000 GRT is signaled using auto-migrate on v1 of a subgraph -- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned -- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. +- 100 000 GRT signaleras med automigrera på v1 av en subgraf +- Ägaren uppdaterar till v2. 100 000 GRT migreras till en ny bindningskurva, där 97 500 GRT sätts in i den nya kurvan och 2 500 GRT bränns +- Ägaren låter sedan bränna 1 250 GRT för att betala halva avgiften. Ägaren måste ha detta i sin plånbok innan uppdateringen, annars kommer uppdateringen inte att lyckas. Detta sker i samma transaktion som uppdateringen. -_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ +_Medan den här mekanismen för närvarande är aktiv på nätverket diskuterar communityn för närvarande sätt att minska kostnaderna för uppdateringar för subgrafutvecklare._ -### Maintaining a Stable Version of a Subgraph +### Upprätthålla en stabil version av en subgraf -If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. +Om du gör många ändringar i din subgraf är det inte en bra idé att kontinuerligt uppdatera den och stå för uppdateringskostnaderna. Att upprätthålla en stabil och konsistent version av din subgraf är avgörande, inte bara ur kostnadsperspektiv, utan också så att Indexers kan känna sig trygga med sina synkroniseringstider. Indexers bör informeras när du planerar en uppdatering så att deras synkroniseringstider inte påverkas. Tveka inte att utnyttja kanalen [#Indexers](https://discord.gg/JexvtHa7dq) på Discord för att låta Indexers veta när du versionerar dina subgrafer. -Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. +Subgrafer är öppna API: er som externa utvecklare utnyttjar.
Öppna API: er måste följa strikta standarder så att de inte bryter mot externa utvecklares applikationer. I The Graph Nätverk måste en subgrafutvecklare överväga indexerare och hur lång tid det tar för dem att synkronisera en ny subgraf **liksom** andra utvecklare som använder deras subgrafer. -### Updating the Metadata of a Subgraph +### Uppdatera metadata för en subgraf -You can update the metadata of your subgraphs without having to publish a new version. The metadata includes the subgraph name, image, description, website URL, source code URL, and categories. Developers can do this by updating their subgraph details in the Subgraph Studio where you can edit all applicable fields. +Du kan uppdatera metadata för dina subgrafer utan att behöva publicera en ny version. Metadata inkluderar subgrafnamn, bild, beskrivning, webbadress, källkods-URL och kategorier. Utvecklare kan göra detta genom att uppdatera sina subgrafdetaljer i Subgraph Studio där du kan redigera alla tillämpliga fält. -Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +Se till att **Uppdatera subgraf detaljer i Utforskaren** är markerad och klicka på **Spara**. Om detta är markerat kommer en transaktion i kedjan att genereras som uppdaterar subgraf detaljer i Utforskaren utan att behöva publicera en ny version med en ny distribution. -## Best Practices for Deploying a Subgraph to The Graph Network +## Bästa metoder för att distribuera en subgraf till Graph Nätverk -1. Leveraging an ENS name for Subgraph Development: +1. Utnyttja ett ENS namn för subgraf utveckling: -- Set up your ENS [here](https://app.ens.domains/) -- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). +- Konfigurera din ENS [here](https://app.ens.domains/) +- Lägg till ditt ENS namn i dina inställningar [here](https://thegraph.com/explorer/settings?view=display-name). -2. The more filled out your profiles are, the better the chances for your subgraphs to be indexed and curated. +2. Ju mer kompletta dina profiler är, desto bättre är chansen att dina subgrafer indexeras och kureras. -## Deprecating a Subgraph on The Graph Network +## Avskrivning av en subgraf i The Graph Nätverk -Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. +Följ stegen [here](/managing/deprecating-a-subgraph) för att depreciera din subgraph och ta bort den från The Graph Nätverk. -## Querying a Subgraph + Billing on The Graph Network +## Förfrågan om en undergraf + fakturering på The Graph Nätverk -The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. +Den hostade tjänsten skapades för att låta utvecklare distribuera sina subgrafer utan några begränsningar. -In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). +För att The Graph Nätverk verkligen ska vara decentraliserat måste förfrågningsavgifter betalas som en central del av protokollets incitament. Mer information om hur man prenumererar på API: er och betalar förfrågningsavgifterna finns i faktureringsdokumentationen [here](/billing/). 
-### Estimate Query Fees on the Network +### Uppskatta avgifter för förfrågningar på nätverket -While this is not a live feature in the product UI, you can set your maximum budget per query by taking the amount you're willing to pay per month and dividing it by your expected query volume. +Även om detta inte är en live-funktion i produktgränssnittet kan du ställa in din maximala budget per fråga genom att ta det belopp du är villig att betala per månad och dividera det med din förväntade frågevolym. -While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. +Även om du får bestämma din förfrågningsbudget, finns det ingen garanti för att en indexerare är villig att betjäna förfrågningar till det priset. Om en Gateway kan matcha dig med en Indexerare som är villig att hantera en förfrågan till, eller lägre än, det pris du är villig att betala, kommer du att betala delta/skillnaden mellan din budget **och** deras pris. Som en konsekvens av ett lägre pris för frågan minskar poolen av indexerare som är tillgängliga för dig, vilket kan påverka kvaliteten på den tjänst du får. Det är fördelaktigt att ha höga förfrågningsavgifter, eftersom det kan locka kuratering och stora, välkända indexerare till din subgraf. -Remember that it's a dynamic and growing market, but how you interact with it is in your control. There is no maximum or minimum price specified in the protocol or the Gateways. For example, you can look at the price paid by a few of the dapps on the network (on a per-week basis), below. See the last column, which shows query fees in GRT. +Kom ihåg att det är en dynamisk och växande marknad, men hur du interagerar med den är i din kontroll. Det finns inget max- eller lägsta pris specificerat i protokollet eller Gateways. Till exempel kan du titta på priset som betalas av några av dapparna på nätverket (per vecka), nedan. Se den sista kolumnen, som visar frågeavgifter i GRT. ![QueryFee](/img/QueryFee.png) -## Additional Resources +## Ytterligare resurser -If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: +Om du fortfarande är förvirrad, var inte rädd!
Kolla in följande resurser eller se vår videoguide om uppgradering av subgrafer till det decentraliserade nätverket nedan: -- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) -- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around - - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [Subgraph Studio documentation](/deploying/subgraph-studio) +- [The Graph Nätverk Kontrakt](https://github.com/graphprotocol/contracts) +- [Kuration Kontrakt](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - det underliggande kontraktet som GNS omsluter + - Adress - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` +- [Dokumentation för Subgraf Studio](/deploying/subgraph-studio) diff --git a/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx index 621f6321a0d4..07d5bc4775ec 100644 --- a/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx +++ b/website/pages/sv/deploying/deploying-a-subgraph-to-hosted.mdx @@ -1,83 +1,83 @@ --- -title: Deploying a Subgraph to the Hosted Service +title: Distribuera en subgraf till värdtjänsten --- -> If a network is not supported on the Hosted Service, you can run your own [graph-node](https://github.com/graphprotocol/graph-node) to index it. +> Om ett nätverk inte stöds på värdtjänsten kan du köra din egen [grafnod](https://github.com/graphprotocol/graph-node) för att indexera den. -This page explains how to deploy a subgraph to the Hosted Service. To deploy a subgraph you need to first install the [Graph CLI](https://github.com/graphprotocol/graph-cli). If you have not created a subgraph already, see [creating a subgraph](/developing/creating-a-subgraph). +Den här sidan förklarar hur man distribuerar en subgraf till värdtjänsten. För att distribuera en subgraf måste du först installera [Graph CLI](https://github.com/graphprotocol/graph-cli). Om du inte redan har skapat en subgraf, se [skapa en subgraf](/developing/creating-a-subgraph). -## Create a Hosted Service account +## Skapa ett värdtjänstkonto -Before using the Hosted Service, create an account in our Hosted Service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [Hosted Service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. +Innan du använder värdtjänsten, skapa ett konto i vår värdtjänst. Du behöver ett [Github](https://github.com/)-konto för det; om du inte har ett måste du skapa det först. Navigera sedan till [värdtjänsten](https://thegraph.com/hosted-service/), klicka på _"Registrera dig med Github"_-knappen och slutför Githubs auktoriseringsflöde. -## Store the Access Token +## Lagra åtkomsttoken -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. +När du har skapat ett konto navigerar du till din [instrumentpanel](https://thegraph.com/hosted-service/dashboard). Kopiera åtkomsttoken som visas på instrumentpanelen och kör `graph auth --product hosted-service `. Detta kommer att lagra åtkomsttoken på din dator.
Du behöver bara göra detta en gång, eller om du någonsin återskapar åtkomsttoken. -## Create a Subgraph on the Hosted Service +## Skapa en subgraf på värdtjänsten -Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate: +Innan du distribuerar subgrafen måste du skapa den i The Graph Explorer. Gå till [instrumentpanelen](https://thegraph.com/hosted-service/dashboard) och klicka på _"Lägg till subgraf"_ knappen och fyll i informationen nedan efter behov: -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. +**Bild** – Välj en bild som ska användas som förhandsgranskningsbild och miniatyrbild för subgrafen. -**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ +**Subgraf namn** - Tillsammans med kontonamnet som subgrafen skapas under kommer detta också att definiera `kontonamn/undergrafnamn`-stilen namn som används för distributioner och GraphQL slutpunkter. _Det här fältet kan inte ändras senare._ -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ +**Konto** - Kontot som subgrafen skapas under. Detta kan vara kontot för en individ eller organisation. _Subgrafer kan inte flyttas mellan konton senare._ -**Subtitle** - Text that will appear in subgraph cards. +**Underrubrik** – Text som kommer att visas på kort för delgrafer. -**Description** - Description of the subgraph, visible on the subgraph details page. +**Beskrivning** – Beskrivning av subgrafen, synlig på sidan med subgraf detaljer. -**GitHub URL** - Link to the subgraph repository on GitHub. +**GitHub URL** – Länk till subgraf förrådet på GitHub. -**Hide** - Switching this on hides the subgraph in the Graph Explorer. +**Göm** - Om du slår på detta döljs subgrafen i Graph Explorer. -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Defining a Subgraph section](/developing/defining-a-subgraph). +När du har sparat den nya subgrafen visas en skärm med hjälp om hur du installerar Graph CLI, hur du skapar ställningen för en ny subgraf och hur du distribuerar din subgraf. De två första stegen behandlades i avsnittet [Definiera en subgraf sektion](/developing/defining-a-subgraph). -## Deploy a Subgraph on the Hosted Service +## Distribuera en subgraf på värdtjänsten -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell the Graph Explorer to start indexing your subgraph using these files. +Genom att distribuera din subgraf laddar du upp subgraf filerna som du har byggt med `yarn build` till IPFS och säger till Graph Explorer att börja indexera din subgraf med dessa filer. -You deploy the subgraph by running `yarn deploy` +Du distribuerar subgrafen genom att köra `yarn deploy` -After deploying the subgraph, the Graph Explorer will switch to showing the synchronization status of your subgraph. 
Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. +Efter att ha distribuerat subgrafen kommer Graph Explorer att växla till att visa synkroniseringsstatusen för din subgraf. Beroende på mängden data och antalet händelser som måste extraheras från historiska block, från och med genesisblocket, kan synkronisering ta från några minuter till flera timmar. -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. +Subgrafens status växlar till `Synced` när Graph Node har extraherat alla data från historiska block. Graph Node fortsätter att inspektera block för din subgraf när dessa block minas. -## Redeploying a Subgraph +## Omdistribuera en subgraf -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. +När du gör ändringar i din subgrafdefinition, t. ex. för att åtgärda ett problem i entitetsmappningarna, kör du kommandot `yarn deploy` ovan igen för att distribuera den uppdaterade versionen av din subgraf. Varje uppdatering av en subgraf kräver att Graph Node indexerar om hela subgrafen, återigen med början i genesis-blocket. -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. +Om din tidigare distribuerade subgraf fortfarande har status `Syncing`, kommer den omedelbart att ersättas med den nyligen distribuerade versionen. Om den tidigare distribuerade subgrafen redan är helt synkroniserad, kommer Graph Node att markera den nyligen distribuerade versionen som `Pending Version`, synkronisera den i bakgrunden och bara ersätta den för närvarande distribuerade versionen med den nya när synkroniseringen av den nya versionen är klar. Detta säkerställer att du har en subgraf att arbeta med medan den nya versionen synkroniseras. -## Deploying the subgraph to multiple networks +## Distribuera subgrafen till flera nätverk -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +I vissa fall vill du distribuera samma subgraf till flera nätverk utan att duplicera all dess kod. Den största utmaningen med detta är att kontraktsadresserna på dessa nätverk är olika. -### Using graph-cli +### Använda graph-cli -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: +Både `graph build` (sedan `v0.29.0`) och `graph deploy` (sedan `v0.32.0`) accepterar två nya alternativ: ```sh -Options: +Alternativ: ...
- --network Network configuration to use from the networks config file - --network-file Networks config file path (default: "./networks.json") + --network Nätverkskonfiguration som ska användas från nätverkens konfigurationsfil + --network-file Sökväg till konfigurationsfil för nätverk (standard: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +Du kan använda alternativet `--network` för att ange en nätverkskonfiguration från en `json`-standardfil (som standard `networks.json`) för att enkelt uppdatera din subgraf under utvecklingen. -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. +**Obs!** Kommandot `init` kommer nu att automatiskt generera en `networks.json` baserat på den tillhandahållna informationen. Du kommer då att kunna uppdatera befintliga eller lägga till ytterligare nätverk. -If you don't have a `networks.json` file, you'll need to manually create one with the following structure: +Om du inte har en `networks.json`-fil måste du skapa en manuellt med följande struktur: ```json { - "network1": { // the network name - "dataSource1": { // the dataSource name - "address": "0xabc...", // the contract address (optional) - "startBlock": 123456 // the startBlock (optional) + "network1": { // nätverkets namn + "dataSource1": { // namn på datakällan + "address": "0xabc...", // kontraktsadressen (valfritt) + "startBlock": 123456 // startBlock (valfritt) }, "dataSource2": { "address": "0x123...", @@ -98,9 +98,9 @@ If you don't have a `networks.json` file, you'll need to manually create one wit } ``` -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. +**Obs!** Du behöver inte ange någon av `templates` (om du har några) i konfigurationsfilen, bara `dataSources`. Om det finns några `templates` deklarerade i filen `subgraph.yaml`, kommer deras nätverk automatiskt att uppdateras till det som anges med alternativet `--network`. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `goerli` networks, and this is your `subgraph.yaml`: +Låt oss nu anta att du vill kunna distribuera din subgraf till `mainnet`- och `goerli`-nätverken, och det här är din `subgraph.yaml`: ```yaml # ... @@ -115,7 +115,7 @@ dataSources: kind: ethereum/events ``` -This is what your networks config file should look like: +Så här ska nätverkets konfigurationsfil se ut: ```json { @@ -132,17 +132,17 @@ This is what your networks config file should look like: } ``` -Now we can run one of the following commands: +Nu kan vi köra något av följande kommandon: ```sh -# Using default networks.json file +# Använda standardfilen networks.json yarn build --network goerli -# Using custom named file +# Använda fil med eget namn yarn build --network goerli --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `goerli` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +Kommandot `build` uppdaterar din `subgraph.yaml` med `goerli`-konfigurationen och kompilerar sedan om subgrafen.
Din `subgraph.yaml`-fil bör nu se ut så här: ```yaml # ... @@ -157,23 +157,23 @@ dataSources: kind: ethereum/events ``` -Now you are ready to `yarn deploy`. +Nu är du redo att `yarn deploy`. -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: +**Obs:** Som nämnts tidigare, sedan `graph-cli 0.32.0` kan du direkt köra `yarn deploy` med `--network`-alternativet: ```sh -# Using default networks.json file -yarn deploy --network goerli +# Använda standardfilen networks.json +yarn deploy --network goerli -# Using custom named file -yarn deploy --network goerli --network-file path/to/config +# Använda fil med eget namn +yarn deploy --network goerli --network-file path/to/config ``` -### Using subgraph.yaml template +### Använda subgraph.yaml-mallen -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). +En lösning för äldre graph-cli-versioner som tillåter parameterisering av aspekter som kontraktsadresser är att generera delar av den med hjälp av ett mallsystem som [Mustache](https://mustache.github.io/) eller [Handlebars](https://handlebarsjs.com/). -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Goerli using different contract addresses. You could then define two config files providing the addresses for each network: +För att illustrera detta tillvägagångssätt, låt oss anta att en subgraf ska distribueras till mainnet och Goerli med hjälp av olika kontraktsadresser. Du kan då definiera två konfigurationsfiler som innehåller adresserna för varje nätverk: ```json { @@ -182,7 +182,7 @@ To illustrate this approach, let's assume a subgraph should be deployed to mainn } ``` -and +och ```json { @@ -191,7 +191,7 @@ and } ``` -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: +Tillsammans med det skulle du ersätta nätverksnamnet och adresserna i manifestet med variabla platshållare `{{network}}` och `{{address}}` och byta namn på manifestet till t.ex. `subgraph.template.yaml`: ```yaml # ... @@ -208,7 +208,7 @@ dataSources: kind: ethereum/events ``` -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: +För att generera ett manifest till något av nätverken kan du lägga till ytterligare två kommandon till `package.json` tillsammans med ett beroende av `mustache`: ```json { @@ -225,7 +225,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Goerli you would now simply run one of the two following commands: +För att distribuera denna subgraf för mainnet eller Goerli behöver du nu bara köra ett av följande två kommandon: ```sh # Mainnet: @@ -235,15 +235,15 @@ yarn prepare:mainnet && yarn deploy yarn prepare:goerli && yarn deploy ``` -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). +Ett fungerande exempel på detta hittar du [här](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759).
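For context on the `prepare:mainnet` and `prepare:goerli` scripts used above: the actual `package.json` entries are unchanged context in this diff, so they are not visible here. A minimal sketch of what they might look like, assuming the template is named `subgraph.template.yaml` and the per-network config files are `config/mainnet.json` and `config/goerli.json` (all names assumed for illustration, not taken from this PR):

```json
{
  "scripts": {
    "prepare:mainnet": "mustache config/mainnet.json subgraph.template.yaml > subgraph.yaml",
    "prepare:goerli": "mustache config/goerli.json subgraph.template.yaml > subgraph.yaml"
  },
  "devDependencies": {
    "mustache": "^4.0.0"
  }
}
```

Each `prepare:*` script renders the Mustache template with the chosen network's values and writes the result to `subgraph.yaml`, which `yarn deploy` then picks up.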
-**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. +**Obs:** Detta tillvägagångssätt kan också tillämpas i mer komplexa situationer, där det är nödvändigt att ersätta mer än kontraktsadresser och nätverksnamn eller där mappningar eller ABI:er genereras från mallar. -## Checking subgraph health +## Kontroll av undergrafens hälsa -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +Om en subgraf synkroniseras framgångsrikt är det ett gott tecken på att den kommer att fortsätta att fungera bra för alltid. Nya triggers i nätverket kan dock göra att din subgraf stöter på ett otestat feltillstånd eller så kan den börja halka efter på grund av prestandaproblem eller problem med nodoperatörerna. -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the Hosted Service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph Node exponerar en graphql-slutpunkt som du kan fråga för att kontrollera statusen för din subgraf. På värdtjänsten är den tillgänglig på `https://api.thegraph.com/index-node/graphql`. På en lokal nod är den tillgänglig på port `8030/graphql` som standard. Det fullständiga schemat för denna slutpunkt finns [här](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Här är ett exempel på en fråga som kontrollerar statusen för den aktuella versionen av en subgraf: ```graphql { @@ -270,22 +270,22 @@ Graph Node exposes a graphql endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +Detta kommer att ge dig `chainHeadBlock` som du kan jämföra med `latestBlock` på din subgraf för att kontrollera om den ligger efter. `synced` informerar om subgrafen någonsin har kommit ikapp kedjan. `health` kan för närvarande ta värdena `healthy` om inga fel inträffade, eller `failed` om det fanns ett fel som stoppade subgrafens framsteg. I det här fallet kan du kontrollera fältet `fatalError` för detaljer om detta fel. -## Hosted service subgraph archive policy +## Policy för arkivering av undergrafer på värdtjänsten -The Hosted Service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. +Värdtjänsten är en kostnadsfri Graph Node-indexerare. Utvecklare kan distribuera undergrafer som indexerar en rad nätverk, som kommer att indexeras och göras tillgängliga för frågor via graphQL.
-To improve the performance of the service for active subgraphs, the Hosted Service will archive subgraphs that are inactive. +För att förbättra tjänstens prestanda för aktiva undergrafer kommer värdtjänsten att arkivera undergrafer som är inaktiva. -**A subgraph is defined as "inactive" if it was deployed to the Hosted Service more than 45 days ago, and if it has received 0 queries in the last 45 days.** +**En undergraf definieras som "inaktiv" om den distribuerades till värdtjänsten för mer än 45 dagar sedan och om den har fått 0 förfrågningar under de senaste 45 dagarna.** -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's Hosted Service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. +Utvecklare kommer att meddelas via e-post om en av deras subgrafer har markerats som inaktiv 7 dagar innan den tas bort. Om de vill "aktivera" sin subgraf kan de göra det genom att ställa en fråga i sin subgrafs Hosted Service graphQL playground. Utvecklare kan alltid distribuera om en arkiverad undergraf om den behövs igen. -## Subgraph Studio subgraph archive policy +## Arkiveringspolicy för undergrafer i Subgraph Studio -When a new version of a subgraph is deployed, the previous version is archived (deleted from the graph-node DB). This only happens if the previous version is not published to The Graph's decentralized network. +När en ny version av en undergraf distribueras arkiveras den tidigare versionen (raderas från graph-node-databasen). Detta sker endast om den tidigare versionen inte har publicerats i The Graphs decentraliserade nätverk. -When a subgraph version isn't queried for over 45 days, that version is archived. +När en version av en undergraf inte har efterfrågats på över 45 dagar arkiveras den versionen. -Every subgraph affected with this policy has an option to bring the version in question back. +Varje subgraf som påverkas av denna policy har en möjlighet att ta tillbaka versionen i fråga. diff --git a/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..66c01e909c75 100644 --- a/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/sv/deploying/deploying-a-subgraph-to-studio.mdx @@ -1,70 +1,70 @@ --- -title: Deploying a Subgraph to the Subgraph Studio +title: Distribuera en undergraf till Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Se till att nätverket som din subgraf indexerar data från är ett av de nätverk som [stöds](/developing/supported-chains) på det decentraliserade nätverket.
-These are the steps to deploy your subgraph to the Subgraph Studio: +Detta är stegen för att distribuera din subgraf till Subgraph Studio: -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in the Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to the Subgraph Studio +- Installera Graph CLI (med antingen yarn eller npm) +- Skapa din subgraf i Subgraph Studio +- Autentisera ditt konto från CLI +- Distribuera en undergraf till Subgraph Studio -## Installing Graph CLI +## Installera Graph CLI -We are using the same CLI to deploy subgraphs to our [hosted service](https://thegraph.com/hosted-service/) and to the [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install graph-cli. This can be done using npm or yarn. +Vi använder samma CLI för att distribuera undergrafer till vår [hostade tjänst](https://thegraph.com/hosted-service/) och till [Subgraph Studio](https://thegraph.com/studio/). Här är kommandona för att installera graph-cli. Detta kan göras med hjälp av npm eller yarn. -**Install with yarn:** +**Installera med yarn:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**Installera med npm:** ```bash npm install -g @graphprotocol/graph-cli ``` -## Create your Subgraph in Subgraph Studio +## Skapa din subgraf i Subgraph Studio -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. +Innan du distribuerar din faktiska subgraf måste du skapa en subgraf i [Subgraph Studio](https://thegraph.com/studio/). Vi rekommenderar att du läser vår [Studiodokumentation](/deploying/subgraph-studio) för att lära dig mer om detta. -## Initialize your Subgraph +## Initiera din Subgraph -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: +När din subgraf har skapats i Subgraph Studio kan du initiera subgrafkoden med detta kommando: ```bash graph init --studio <SUBGRAPH_SLUG> ``` -The `<SUBGRAPH_SLUG>` value can be found on your subgraph details page in Subgraph Studio: +Värdet `<SUBGRAPH_SLUG>` finns på sidan med information om din subgraf i Subgraph Studio: -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) +![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +När du har kört `graph init` kommer du att bli ombedd att ange kontraktsadressen, nätverket och ABI som du vill fråga efter. Om du gör detta genereras en ny mapp på din lokala dator med grundläggande kod för att börja arbeta med din subgraf. Du kan sedan slutföra din subgraf för att se till att den fungerar som förväntat. -## Graph Auth +## Graph Auth -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. +Innan du kan distribuera din undergraf till Subgraph Studio måste du logga in på ditt konto i CLI. För att göra detta behöver du din deploy key som du hittar på sidan "My Subgraphs" eller på sidan med information om din subgraf.
-Here is the command that you need to use to authenticate from the CLI: +Här är kommandot som du behöver använda för att autentisera dig från CLI: ```bash graph auth --studio <DEPLOY_KEY> ``` -## Deploying a Subgraph to Subgraph Studio +## Distribuera en undergraf till Subgraph Studio -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. +När du är redo kan du distribuera din subgraf till Subgraph Studio. Detta innebär inte att din subgraf publiceras i det decentraliserade nätverket, utan endast att den distribueras till ditt Studio-konto där du kan testa den och uppdatera metadata. -Here is the CLI command that you need to use to deploy your subgraph. +Här är CLI-kommandot som du behöver använda för att distribuera din subgraf. ```bash graph deploy --studio <SUBGRAPH_SLUG> ``` -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. +Efter att du har kört detta kommando frågar CLI efter en versionsetikett. Du kan namnge den hur du vill och använda etiketter som `0.1` och `0.2`, eller även bokstäver, till exempel `uniswap-v2-0.1`. Dessa etiketter kommer att vara synliga i Graph Explorer och kan användas av kuratorer för att bestämma om de vill signalera på den här versionen eller inte, så välj dem med omtanke. -Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. +När du har distribuerat kan du testa din subgraf i Subgraph Studio med hjälp av lekplatsen, distribuera en annan version om det behövs, uppdatera metadata och när du är klar kan du publicera din subgraf i Graph Explorer. diff --git a/website/pages/sv/deploying/hosted-service.mdx b/website/pages/sv/deploying/hosted-service.mdx index 2e6093531110..62ca46568cfd 100644 --- a/website/pages/sv/deploying/hosted-service.mdx +++ b/website/pages/sv/deploying/hosted-service.mdx @@ -1,24 +1,24 @@ --- -title: What is the Hosted Service? +title: Vad är värdtjänsten? --- -> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> Observera att den hostade tjänsten kommer att börja avvecklas 2023, men den kommer att förbli tillgänglig för nätverk som inte stöds av det decentraliserade nätverket. Utvecklare uppmuntras att [uppgradera sina undergrafer till The Graph Network](/cookbook/upgrading-a-subgraph) när fler nätverk stöds. Varje nätverks motsvarighet i den hostade tjänsten kommer gradvis att avvecklas för att säkerställa att utvecklare har tillräckligt med tid att uppgradera undergrafer till det decentraliserade nätverket.
Läs mer om avvecklingen av den hostade tjänsten [här](https://thegraph.com/blog/sunsetting-hosted-service). -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). +I det här avsnittet går vi igenom hur du distribuerar en undergraf till [hostad tjänst](https://thegraph.com/hosted-service/). -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +Om du inte har ett konto på den hostade tjänsten kan du registrera dig med ditt GitHub-konto. När du har autentiserat dig kan du börja skapa undergrafer via användargränssnittet och distribuera dem från din terminal. Värdtjänsten stöder ett antal nätverk, till exempel Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum med flera. -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). +För en fullständig lista, se [Nätverk som stöds](/developing/supported-networks/#hosted-service). -## Create a Subgraph +## Skapa en Subgraf -First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` +Följ först instruktionerna [ här](/developing/defining-a-subgraph) för att installera Graph CLI. Skapa en subgraf genom att skicka in `graph init --product hosted-service` -### From an Existing Contract +### Från ett befintligt avtal -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. +Om du redan har ett smart kontrakt distribuerat till ditt nätverk kan det vara ett bra sätt att komma igång med värdtjänsten genom att starta upp en ny subgraf från detta kontrakt. -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from [Etherscan](https://etherscan.io/). +Du kan använda detta kommando för att skapa en subgraf som indexerar alla händelser från ett befintligt kontrakt. Detta kommer att försöka hämta kontraktets ABI från [Etherscan](https://etherscan.io/). ```sh graph init \ @@ -27,25 +27,36 @@ graph init \ / [] ``` -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from Etherscan, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. +Dessutom kan du använda följande valfria argument. Om ABI inte kan hämtas från Etherscan återgår kommandot till att begära en lokal filsökväg. Om något valfritt argument saknas i kommandot kommer du att gå igenom ett interaktivt formulär. ```sh --network \ --abi \ ``` -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. 
**Both `--network` and `--abi` are optional.** +`` är i det här fallet din GitHub-användare eller organisationsnamn, `` är namnet på din subgraf och `` är det valfria namnet på katalogen där `graph init` kommer att placera exemplets subgrafmanifest. `` är adressen till ditt befintliga kontrakt. `` är namnet på nätverket som kontraktet lever på. `` är en lokal sökväg till en kontrakts ABI-fil. **Både `--network` och `--abi` är valfria.** -### From an Example Subgraph +### Från ett exempel på en undergraf -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: +Det andra läget som `graph init` stöder är att skapa ett nytt projekt från ett exempel på en undergraf. Följande kommando gör detta: ``` graph init --from-example --product hosted-service / [] ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +Exempelsubgrafen är baserad på Gravity-kontraktet av Dani Grant som hanterar användaravatarer och sänder ut `NewGravatar` eller `UpdateGravatar`-händelser närhelst avatarer skapas eller uppdateras. Subgrafen hanterar dessa händelser genom att skriva `Gravatar`-entiteter till Graph Node-arkivet och se till att dessa uppdateras i enlighet med händelserna. Fortsätt till [undergrafmanifestet](/developing/creating-a-subgraph#the-subgraph-manifest) för att bättre förstå vilka händelser från dina smarta kontrakt du ska uppmärksamma, mappningar och mer. -## Supported Networks on the hosted service -You can find the list of the supported networks [Here](/developing/supported-networks). +### From a Proxy Contract +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the contract address in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service \ + --from-contract \ + / [] +``` + +## Nätverk som stöds av den hostade tjänsten + +Du hittar listan över de nätverk som stöds [här](/developing/supported-networks). diff --git a/website/pages/sv/deploying/subgraph-studio-faqs.mdx b/website/pages/sv/deploying/subgraph-studio-faqs.mdx index 65217d4b7741..bf66a51169eb 100644 --- a/website/pages/sv/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/sv/deploying/subgraph-studio-faqs.mdx @@ -1,31 +1,31 @@ --- -title: Subgraph Studio FAQs +title: Vanliga frågor om Subgraph Studio --- -## 1. What is Subgraph Studio? +## 1. Vad är Subgraph Studio? -[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. +[Subgraph Studio](https://thegraph.com/studio/) är en dapp för att skapa, hantera och publicera undergrafer och API-nycklar. -## 2. How do I create an API Key? +## 2. Hur skapar jag en API-nyckel? -To create an API, navigate to the Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key.
+För att skapa ett API, navigera till Subgraph Studio och anslut din plånbok. Du kommer att kunna klicka på fliken API-nycklar högst upp. Där kommer du att kunna skapa en API-nyckel. -## 3. Can I create multiple API Keys? +## 3. Kan jag skapa flera API-nycklar? -Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). +Ja, du kan skapa flera API-nycklar som du kan använda i olika projekt. Kolla in länken[ här](https://thegraph.com/studio/apikeys/). -## 4. How do I restrict a domain for an API Key? +## 4. Hur begränsar jag en domän för en API-nyckel? -After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. +När du har skapat en API-nyckel kan du i avsnittet Säkerhet definiera vilka domäner som kan ställa frågor till en specifik API-nyckel. -## 5. Can I transfer my subgraph to another owner? +## 5. Kan jag överföra min subgraf till en annan ägare? -Yes, subgraphs that have been published to Mainnet can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. +Ja, undergrafer som har publicerats på Mainnet kan överföras till en ny plånbok eller en Multisig. Du kan göra det genom att klicka på de tre punkterna bredvid knappen "Publicera" på undergrafens detaljsida och välja "Överför ägande". -Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. +Observera att du inte längre kommer att kunna se eller redigera undergrafen i Studio när den har överförts. -## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? +## 6. Hur hittar jag fråge-URL: er för undergrafer om jag inte är utvecklaren av den undergraf jag vill använda? -You can find the query URL of each subgraph in the Subgraph Details section of The Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in the Subgraph Studio. +Du kan hitta sökadressen för varje subgraf i avsnittet Subgraph Details i The Graph Explorer. När du klickar på knappen "Fråga" kommer du att dirigeras till en ruta där du kan se sökadressen till subgrafen du är intresserad av. Du kan sedan ersätta platshållaren `` med API-nyckeln du vill använda i Subgraph Studio. -Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. +Kom ihåg att du kan skapa en API-nyckel och ställa frågor till alla undergrafer som publicerats i nätverket, även om du själv har byggt en undergraf. Dessa förfrågningar via den nya API-nyckeln är betalda förfrågningar som alla andra i nätverket. 
diff --git a/website/pages/sv/deploying/subgraph-studio.mdx b/website/pages/sv/deploying/subgraph-studio.mdx index 1406065463d4..f2b421b9abce 100644 --- a/website/pages/sv/deploying/subgraph-studio.mdx +++ b/website/pages/sv/deploying/subgraph-studio.mdx @@ -1,95 +1,89 @@ --- -title: How to Use the Subgraph Studio +title: Hur man använder Subgraph Studio --- -Welcome to your new launchpad 👩🏽‍🚀 +Välkommen till din nya startplats 👩🏽‍🚀 -The Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). +I Subgraf Studio kan du bygga och skapa undergrafer, lägga till metadata och publicera dem i den nya decentraliserade Explorer (mer om det [här](/network/explorer)). -What you can do in the Subgraph Studio: +Vad du kan göra i Subgraf Studio: -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs +- Skapa en undergraf via Studio-gränssnittet +- Distribuera en undergraf med hjälp av CLI +- Publicera en subgraf med Studio-gränssnittet +- Testa den på lekplatsen +- Integrera den i staging med hjälp av fråge-URL: en +- Skapa och hantera API nycklar för specifika undergrafer -Here in the Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. +Här i Subgraf Studio har du full kontroll över dina undergrafer. Du kan inte bara testa dina subgrafer innan du publicerar dem, utan du kan också begränsa dina API nycklar till specifika domäner och bara tillåta vissa indexerare att göra förfrågningar från deras API nycklar. -Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: +Förfrågningar i undergrafer genererar förfrågningsavgifter som används för att belöna [Indexers](/network/indexing) på Graf-nätverket. Om du är en dapp-utvecklare eller subgrafutvecklare kommer studion att ge dig möjlighet att bygga bättre subgrafer för att driva dina eller din gemenskaps förfrågningar. Studion består av fem huvuddelar: -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing +- Kontroller för ditt användarkonto +- En lista över undergrafer som du har skapat +- En sektion för att hantera, visa detaljer och visualisera status för en specifik undergraf +- Ett avsnitt för att hantera dina API nycklar som du behöver för att ställa frågor till en subgraf +- Ett avsnitt för att hantera din fakturering -## How to Create Your Account +## Så här Skapar du ditt Konto -1. Sign in with your wallet - you can do this via MetaMask or WalletConnect -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. 
You will have a unique deploy key that can be re-generated if you think it has been compromised. +1. Logga in med din plånbok - du kan göra detta via MetaMask eller WalletConnect +1. När du har loggat in ser du din unika implementeringsnyckel på startsidan för ditt konto. Detta gör att du antingen kan publicera dina undergrafer eller hantera dina API nycklar + fakturering. Du kommer att ha en unik deployerings nyckel som kan genereras på nytt om du tror att den har äventyrats. -## How to Create your Subgraph in Subgraph Studio +## Hur man skapar en subgraf i Subgraf Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: + -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website +## Kompatibilitet mellan undergrafer och grafnätet -## Subgraph Compatibility with The Graph Network +The Graph Nätverk kan ännu inte stödja alla datakällor & funktioner som finns tillgängliga på värdstjänsten. För att kunna stödjas av indexerare i nätverket måste undergrafer: -The Graph Network is not yet able to support all of the data-sources & features available on the Hosted Service. In order to be supported by Indexers on the network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: +- Indexera ett [stött nätverk](/developing/supported-networks) +- Får inte använda någon av följande egenskaper: - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting + - Icke dödliga fel + - Ympning -More features & networks will be added to The Graph Network incrementally. +Fler funktioner & nätverk kommer att läggas till i The Graph Nätverk stegvis. -### Subgraph lifecycle flow +### Livscykelflöde för undergraf -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) +![Livscykel för undergrafer](/img/subgraph-lifecycle.png) -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (pst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. +När du har skapat din subgraf kommer du att kunna distribuera den med hjälp av [CLI](https://github.com/graphprotocol/graph-cli), eller kommandoradsgränssnittet. Om du distribuerar en undergraf med CLI flyttas undergrafen till Studio där du kan testa undergrafer med hjälp av lekplatsen. Detta kommer så småningom att göra det möjligt för dig att publicera till Graph Nätverk. För mer information om CLI-installationen, [kolla in detta](/developing/defining-a-subgraph#install-the-graph-cli) (pst, se till att du har din deploy-nyckel till hands). Kom ihåg att distribuera är **inte samma sak som** publicera. När du distribuerar en undergraf flyttar du den bara till studion där du kan testa den. När du däremot publicerar en undergraf publicerar du den i kedjan. 
-## Testing your Subgraph in Subgraph Studio +## Testa din subgraf i Subgraph Studio -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. +Om du vill testa din subgraf innan du publicerar den i nätverket kan du göra det i Subgraf **Lekplats** eller titta på dina loggar. Subgrafloggarna talar om för dig **var** din subgraf misslyckas i de fall den gör det. -## Publish your Subgraph in Subgraph Studio +## Publicera din subgraf i Subgraf Studio -You’ve made it this far - congrats! +Du har klarat dig så här långt - grattis! -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [blog](https://thegraph.com/blog/building-with-subgraph-studio). +För att kunna publicera din subgraf framgångsrikt måste du gå igenom följande steg som beskrivs i denna [blogg](https://thegraph.com/blog/building-with-subgraph-studio). -Check out the video overview below as well: +Kolla också in videoöversikten nedan: -Remember, while you’re going through your publishing flow, you’ll be able to push to either mainnet or Goerli. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Goerli, which is free to do. This will allow you to see how the subgraph will work in The Graph Explorer and will allow you to test curation elements. +Kom ihåg att när du går igenom ditt publiceringsflöde kan du välja att publicera till antingen mainnet eller Goerli. Om du är en förstagångsutvecklare av undergrafer rekommenderar vi starkt att du börjar med att publicera till Goerli, vilket är gratis. På så sätt kan du se hur subgrafen fungerar i The Graf Explorer och testa av kurationselement. -Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! +Indexerare måste skicka in obligatoriska bevis på indexeringsposter från och med en specifik blockhash. Eftersom publicering av en subgraf är en åtgärd som vidtas på kedjan, kom ihåg att transaktionen kan ta upp till några minuter att gå igenom. Den adress som du använder för att publicera kontraktet kommer att vara den enda som kan publicera framtida versioner. Välj klokt! -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. +Undergrafer med kureringssignal visas för indexerare så att de kan indexeras i det decentraliserade nätverket. Du kan publicera undergrafer och signal i en transaktion, vilket gör att du kan mynta den första curation-signalen på undergrafen och spara på bensinkostnader. Genom att lägga till din signal till den signal som senare tillhandahålls av Curators, kommer din subgraf också att ha en större chans att i slutändan betjäna frågor. 
-**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! +**Nu när du har publicerat din undergraf, låt oss gå in på hur du hanterar dem regelbundet.** Observera att du inte kan publicera din undergraf till nätverket om den har misslyckats med synkroniseringen. Detta beror vanligtvis på att undergrafen har buggar - loggarna kommer att berätta var dessa problem finns! -## Versioning your Subgraph with the CLI +## Versionera din Subgraf med CLI -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to The Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. +Utvecklare kan vilja uppdatera sin undergraf av olika anledningar. När så är fallet kan du distribuera en ny version av din subgraf till Studio med hjälp av CLI (den kommer bara att vara privat vid denna tidpunkt) och om du är nöjd med den kan du publicera den nya distributionen till The Graph Explorer. Detta kommer att skapa en ny version av din subgraf som kuratorer kan börja signalera på och indexerare kommer att kunna indexera den nya versionen. -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**. Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in The Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +Fram till nyligen var utvecklare tvungna att distribuera och publicera en ny version av sin undergraf till Explorer för att uppdatera metadata för sina undergrafer. Nu kan utvecklare uppdatera metadata för sina undergrafer **utan att behöva publicera en ny version**. Utvecklare kan uppdatera sina undergrafdetaljer i Studio (under profilbild, namn, beskrivning, etc) genom att markera ett alternativ som heter **Uppdatera Detaljer** i The Graf Explorer. Om detta är markerat kommer en transaktion på kedjan att genereras som uppdaterar undergrafdetaljer i Utforskaren utan att behöva publicera en ny version med en ny distribution. -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). +Observera att det är förenat med kostnader att publicera en ny version av en undergraf i nätverket. Förutom transaktionsavgifterna måste utvecklarna också finansiera en del av kurationsskatten på den automatiskt migrerande signalen. Du kan inte publicera en ny version av din subgraf om kuratorer inte har signalerat om den. 
För mer information om riskerna med kuratering, läs mer [här](/network/curating). -### Automatic Archiving of Subgraph Versions +### Automatisk arkivering av versioner av undergrafer -Whenever you deploy a new subgraph version in the Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. +När du distribuerar en ny version av en undergraf i Subgraf Studio arkiveras den tidigare versionen. Arkiverade versioner kommer inte att indexeras/synkroniseras och kan därför inte efterfrågas. Du kan avarkivera en arkiverad version av din undergraf i Studio UI. Observera att tidigare versioner av icke-publicerade undergrafer som distribuerats till Studio arkiveras automatiskt. -![Subgraph Studio - Unarchive](/img/Unarchive.png) +![Subgraf Studio - Avarkivera](/img/Unarchive.png) diff --git a/website/pages/sv/developing/creating-a-subgraph.mdx b/website/pages/sv/developing/creating-a-subgraph.mdx index 1fc288833c35..a0ea1f3d0fdc 100644 --- a/website/pages/sv/developing/creating-a-subgraph.mdx +++ b/website/pages/sv/developing/creating-a-subgraph.mdx @@ -1,46 +1,46 @@ --- -title: Creating a Subgraph +title: Skapa en Subgraph --- -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. +En subgraph extraherar data från en blockchain, bearbetar den och lagrar den så att den kan frågas enkelt via GraphQL. -![Defining a Subgraph](/img/defining-a-subgraph.png) +![Definiera en Subgraph](/img/defining-a-subgraph.png) -The subgraph definition consists of a few files: +Subgraph-definitionen består av några filer: -- `subgraph.yaml`: a YAML file containing the subgraph manifest +- `subgraph.yaml`: en YAML-fil som innehåller subgraph-manifestet -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL +- `schema.graphql`: ett GraphQL-schema som definierar vilka data som lagras för din subgraph och hur man frågar efter det via GraphQL -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) +- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kod som översätter från händelsedata till de enheter som är definierade i ditt schema (t.ex. `mapping.ts` i den här handledningen) -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). +> För att använda din subgraph på The Graphs decentraliserade nätverk måste du [skapa en API-nyckel](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). Det rekommenderas att du [lägger till signal](/network/curating/#how-to-signal) till din subgraph med minst [10 000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). 
-Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. +Innan du går in på detaljer om manifest filens innehåll måste du installera [Graph CLI](https://github.com/graphprotocol/graph-cli), som du kommer att behöva för att bygga och distribuera en subgraph. -## Install the Graph CLI +## Installera Graph CLI -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. +Graph CLI är skrivet i JavaScript, och du måste installera antingen `yarn` eller `npm` för att använda det; det antas att du har yarn i det följande. -Once you have `yarn`, install the Graph CLI by running +När du har `yarn`, installera Graph CLI genom att köra -**Install with yarn:** +**Installera med yarn:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**Installera med npm:** ```bash npm install -g @graphprotocol/graph-cli ``` -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. +Efter installationen kan kommandot `graph init` användas för att skapa ett nytt subgrafprojekt, antingen från ett befintligt kontrakt eller från en exempelsubgraf. Detta kommando kan användas för att skapa en subgraf på Subgraph Studio genom att skicka in `graph init --product subgraph-studio`. Om du redan har ett smart kontrakt distribuerat till ditt föredragna nätverk kan det vara ett bra sätt att komma igång med att starta en ny subgraf från det kontraktet. -## From An Existing Contract +## Från ett Befintligt kontrakt -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. +Följande kommando skapar en subgraf som indexerar alla händelser i ett befintligt kontrakt. Det försöker hämta kontraktets ABI från Etherscan och faller tillbaka till att begära en lokal filsökväg. Om något av de valfria argumenten saknas tar det dig genom ett interaktivt formulär. ```sh graph init \ @@ -51,49 +51,49 @@ graph init \ [] ``` -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. +`` är ID för din subgraf i Subgraf Studio, det kan hittas på din subgraf detaljsida. -## From An Example Subgraph +## Från ett Exempel Subgraph -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: +Det andra läget som `graph init` stöder är att skapa ett nytt projekt från ett exempel på en undergraf. Följande kommando gör detta: ```sh graph init --studio ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. 
The following sections will go over the files that make up the subgraph manifest for this example. +Exempelsubgrafen är baserad på Gravity-kontraktet av Dani Grant som hanterar användares avatarer och avger händelserna `NewGravatar` eller `UpdateGravatar` när avatarer skapas eller uppdateras. Subgrafen hanterar dessa händelser genom att skriva `Gravatar`-entiteter till Graph Node-förvaringen och säkerställer att dessa uppdateras enligt händelserna. Följande avsnitt kommer att gå igenom filerna som utgör subgrafens manifest för detta exempel. -## Add New dataSources To An Existing Subgraph +## Lägg till nya datakällor i en befintlig Subgraf -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. +Från och med `v0.31.0` stöder `graph-cli` att lägga till nya datakällor i en befintlig subgraf genom kommandot `graph add`. ```sh graph add
    [] Options: - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") + --abi Sökväg till kontraktets ABI (standard: nedladdning från Etherscan) + --contract-name Kontraktets namn (standard: Kontrakt) + --merge-entities Om enheter med samma namn ska slås samman (standard: false) + --network-file Sökväg till konfigurationsfil för nätverk (standard: "./networks.json") ``` -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. +Kommandot `add` hämtar ABI: en från Etherscan (om inte en ABI-sökväg anges med alternativet `--abi`) och skapar en ny `dataSource` på samma sätt som kommandot `graph init` skapar en `dataSource` `--from-contract`, och uppdaterar schemat och mappningarna därefter. -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: +Alternativet `--merge-entities` identifierar hur utvecklaren vill hantera konflikter med `entity`- och `event`-namn: -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. +- Om `true`: den nya `dataSource` ska använda befintliga `eventHandlers` & `entities`. +- Om `false`: en ny entitet och händelsehanterare ska skapas med `${dataSourceName}{EventName}`. -The contract `address` will be written to the `networks.json` for the relevant network. +Kontraktsadressen kommer att skrivas till `networks.json` för den relevanta nätverket. -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. +> **Obs:** När du använder det interaktiva kommandoraden, efter att ha kört `graph init` framgångsrikt, kommer du att bli ombedd att lägga till en ny `dataSource`. -## The Subgraph Manifest +## Subgrafens manifest -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +Subgrafens manifest `subgraph.yaml` definierar de smarta kontrakten som din subgraf indexerar, vilka händelser från dessa kontrakt som ska uppmärksammas och hur man kartlägger händelsedata till entiteter som Graph Node lagrar och tillåter att fråga. Den fullständiga specifikationen för subgrafens manifest finns [här](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). 
-For the example subgraph, `subgraph.yaml` is: +För exempelsubgrafen är `subgraph.yaml`: ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -134,59 +142,63 @@ dataSources: file: ./src/mapping.ts ``` -The important entries to update for the manifest are: +De viktiga posterna att uppdatera för manifestet är: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. +- `repository`: URL till lagringsplatsen där subgrafens manifest kan hittas. Detta visas också av The Graph Explorer. -- `features`: a list of all used [feature](#experimental-features) names. +- `features`: en lista över alla använda [funktions](#experimentella-funktioner) namn. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: adressen till det smarta kontraktet som subgrafen hämtar data från, och ABI för det smarta kontraktet att använda. Adressen är valfri; att utelämna den gör det möjligt att indexera matchande händelser från alla kontrakt. -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.startBlock`: det valfria blocknummer som datakällan börjar indexera från. I de flesta fall föreslår vi att du använder det block där kontraktet skapades. -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.entities`: de entiteter som datakällan skriver till lagringsplatsen. Schemat för varje entitet definieras i filen schema.graphql. 
-- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.abis`: en eller flera namngivna ABI-filer för källkontraktet samt eventuella andra smarta kontrakt som du interagerar med från inom mappningarna. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.eventHandlers`: listar de smarta kontraktshändelser som denna subgraf reagerar på och hanterare i mappningen—./src/mapping.ts i exemplet - som omvandlar dessa händelser till entiteter i lagringsplatsen. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +- `dataSources.mapping.callHandlers`: listar de smarta kontraktsfunktioner som denna subgraf reagerar på och hanterare i mappningen som omvandlar in- och utdata till funktionsanrop till entiteter i lagringsplatsen. -The triggers for a data source within a block are ordered using the following process: +- `dataSources.mapping.blockHandlers`: listar de block som denna subgraf reagerar på och hanterare i mappningen som körs när ett block läggs till i kedjan. Utan ett filter körs blockhanteraren varje block. En valfri anropsfiltrering kan tillhandahållas genom att lägga till en `filter`-fält med `kind: call` till hanteraren. Detta körs bara om blocket innehåller minst ett anrop till datakällan. -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. +En enskild subgraf kan indexera data från flera smarta kontrakt. Lägg till en post för varje kontrakt från vilket data behöver indexeras i `dataSources`-matrisen. -These ordering rules are subject to change. +Utlösarna för en datakälla inom ett block ordnas med hjälp av följande process: -### Getting The ABIs +1. Händelse- och anropsutlösare ordnas först efter transaktionsindex inom blocket. +2. Händelse- och anropsutlösare inom samma transaktion ordnas med hjälp av en konvention: händelseutlösare först, sedan anropsutlösare, varje typ respekterar ordningen de definieras i manifestet. +3. Blockutlösare körs efter händelse- och anropsutlösare, i den ordning de definieras i manifestet. -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: +Dessa ordningsregler kan komma att ändras. -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. 
-- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. +### Hämta ABI: erna -## The GraphQL Schema +ABI-filerna måste matcha ditt/dina kontrakt. Det finns några olika sätt att få ABI-filer: -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. +- Om du bygger ditt eget projekt har du förmodligen tillgång till dina senaste ABIs. +- Om du bygger en subgraf för ett offentligt projekt kan du ladda ner det projektet till din dator och få ABI:n genom att använda [`truffle compile`](https://truffleframework.com/docs/truffle/overview) eller använda solc för att kompilera. +- Du kan också hitta ABI:n på [Etherscan](https://etherscan.io/), men detta är inte alltid pålitligt, eftersom ABI:n som laddas upp där kan vara föråldrad. Se till att du har rätt ABI, annars kommer din subgraf att misslyckas när den körs. -## Defining Entities +## GraphQL-schemat -Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. +Schemat för din subgraf finns i filen `schema.graphql`. GraphQL-scheman definieras med hjälp av gränssnittsdefinitionsspråket för GraphQL. Om du aldrig har skrivit ett GraphQL-schema rekommenderas det att du kollar in denna introduktion till GraphQL-typsystemet. Referensdokumentation för GraphQL-scheman finns i avsnittet [GraphQL API](/querying/graphql-api). -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. +## Definition av entiteter -### Good Example +Innan du definierar entiteter är det viktigt att ta ett steg tillbaka och tänka på hur din data är strukturerad och länkad. Alla frågor kommer att göras mot datamodellen som definieras i subgrafens schema och de entiteter som indexerats av subgraf. Därför är det bra att definiera subgrafens schema på ett sätt som matchar din dapp's behov. 
Det kan vara användbart att tänka på entiteter som "objekt som innehåller data", snarare än som händelser eller funktioner. -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. +Med The Graph definierar du helt enkelt entitetstyper i `schema.graphql`, och Graph Node kommer att generera toppnivåfält för att fråga enskilda instanser och samlingar av den entitetstypen. Varje typ som ska vara en entitet måste vara annoterad med en `@entity`-direktiv. Som standard är entiteter muterbara, vilket innebär att mappningar kan ladda befintliga entiteter, ändra dem och lagra en ny version av den entiteten. Mutabilitet har ett pris, och för entitetstyper där det är känt att de aldrig kommer att ändras, till exempel eftersom de helt enkelt innehåller data som extraherats ordagrant från kedjan, rekommenderas att markera dem som omutbara med `@entity(immutable: true)`. Mappningar kan göra ändringar i omutbara entiteter så länge dessa ändringar sker i samma block som entiteten skapades. Omutebara entiteter är mycket snabbare att skriva och att fråga, och bör därför användas när det är möjligt. + +### Bra exempel + +Entiteten `Gravatar` nedan är strukturerad kring ett Gravatar-objekt och är ett bra exempel på hur en entitet kan definieras. ```graphql type Gravatar @entity(immutable: true) { @@ -198,9 +210,9 @@ type Gravatar @entity(immutable: true) { } ``` -### Bad Example +### Dåligt exempel -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. +Exemplen `GravatarAccepted` och `GravatarDeclined` nedan är baserade på händelser. Det rekommenderas inte att mappa händelser eller funktionsanrop till entiteter 1:1. ```graphql type GravatarAccepted @entity { @@ -218,36 +230,37 @@ type GravatarDeclined @entity { } ``` -### Optional and Required Fields +### Valfria och obligatoriska fält -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: +Entitetsfält kan definieras som obligatoriska eller valfria. Obligatoriska fält anges med `!` i schemat. Om ett obligatoriskt fält inte har angetts i mappningen får du det här felmeddelandet när du frågar efter fältet: ``` Null value resolved for non-null field 'name' ``` -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. +Varje entitet måste ha ett `id`-fält, som måste vara av typen `Bytes!` eller `String!`. Det rekommenderas generellt att använda `Bytes!`, om inte `id` innehåller läsbar text, eftersom entiteter med `Bytes!`-id kommer att vara snabbare att skriva och fråga än de med ett `String!` `id`. `id`-fältet fungerar som primärnyckel och måste vara unikt bland alla entiteter av samma typ. Av historiska skäl accepteras också typen `ID!` och är en synonym för `String!`. 
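As a minimal illustration of the `Bytes!` recommendation above (not part of the Gravatar example), the sketch below builds an entity id from the transaction hash and log index instead of using a string. The `Transfer` entity and the import paths are hypothetical placeholders; `concat`/`concatI32` are the graph-ts helpers discussed in the next paragraph.

```typescript
// Hypothetical sketch only: assumes a `Transfer` entity declared with `id: Bytes!`
// and a `Transfer` event on the indexed contract (aliased to avoid a name clash).
import { Transfer as TransferEvent } from '../generated/Token/Token'
import { Transfer } from '../generated/schema'

export function handleTransfer(event: TransferEvent): void {
  // Transaction hash + log index is unique per event and stays a compact Bytes value
  let id = event.transaction.hash.concatI32(event.logIndex.toI32())
  let transfer = new Transfer(id)
  transfer.save()
}
```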
-For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. +För vissa entitetstyper konstrueras `id` från id:erna hos två andra entiteter; det är möjligt med `concat`, t.ex. `let id = left.id.concat(right.id)` för att bilda id från id:erna hos `left` och `right`. På liknande sätt kan för att konstruera ett id från id:et hos en befintlig entitet och en räknare `count` användas `let id = left.id.concatI32(count)`. Konkatineringen garanterar att producera unika id:er så länge längden av `left` är densamma för alla sådana entiteter, till exempel eftersom `left.id` är en `Address`. -### Built-In Scalar Types +### Inbyggda Skalartyper -#### GraphQL Supported Scalars +#### GraphQL-Stödda Skalartyper -We support the following scalars in our GraphQL API: +Vi stödjer följande skalartyper i vår GraphQL API: -| Type | Description | +| Typ | Beskrivning | | --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Bytes` | Bytematris, representerad som en hexadecimal sträng. Vanligt används för Ethereum-hashar och adresser. | +| `String` | Skalär för `string`-värden. Nolltecken stöds inte och tas automatiskt bort. | +| `Boolean` | Skalär för `boolean`-värden. | +| `Int` | Enligt GraphQL specifikationen har `Int` en storlek på 32 byte. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Stora heltal. Används för Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` typer. Observera: Allt under `uint32`, som `int32`, `uint24` eller `int8` representeras som `i32`. | +| `BigDecimal` | `BigDecimal` Högprecisionsdecimaler representerade som en signifikant och en exponent. Exponentområdet är från −6143 till +6144. Avrundat till 34 signifikanta siffror. | #### Enums -You can also create enums within a schema. Enums have the following syntax: +Du kan också skapa enums inom ett schema. Enums har följande syntax: ```graphql enum TokenStatus { @@ -257,19 +270,19 @@ enum TokenStatus { } ``` -Once the enum is defined in the schema, you can use the string representation of the enum value to set an enum field on an entity. For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. 
The example below demonstrates what the Token entity would look like with an enum field: +När enumet är definierat i schemat kan du använda enumvärdenas strängrepresentation för att ställa in ett enumfält på en entitet. Till exempel kan du ställa in `tokenStatus` till `SecondOwner` genom att först definiera din entitet och sedan ställa in fältet med `entity.tokenStatus = "SecondOwner"`. Exemplet nedan visar hur Token-entiteten skulle se ut med ett enumfält: -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). +Mer detaljer om att skriva enums finns i [GraphQL-dokumentationen](https://graphql.org/learn/schema/). -#### Entity Relationships +#### Entitetsrelationer -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. +En entitet kan ha en relation till en eller flera andra entiteter i ditt schema. Dessa relationer kan traverseras i dina frågor. Relationer i The Graph är enriktade. Det är möjligt att simulera dubbelriktade relationer genom att definiera en enriktad relation på antingen den ena "änden" av relationen. -Relationships are defined on entities just like any other field except that the type specified is that of another entity. +Relationer definieras på entiteter precis som vilket annat fält som helst, förutom att den specificerade typen är en annan entitet. -#### One-To-One Relationships +#### En-till-en-relationer -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: +Definiera en entitetstyp `Transaction` med en valfri en-till-en-relation till en entitetstyp `TransactionReceipt`: ```graphql type Transaction @entity(immutable: true) { @@ -283,9 +296,9 @@ type TransactionReceipt @entity(immutable: true) { } ``` -#### One-To-Many Relationships +#### En-till-många-relationer -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: +Definiera en entitetstyp `TokenBalance` med ett obligatoriskt en-till-many förhållande med en entitetstyp Token: ```graphql type Token @entity(immutable: true) { @@ -299,15 +312,15 @@ type TokenBalance @entity { } ``` -#### Reverse Lookups +#### Omvända sökningar -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. +Omvända sökningar kan definieras på en entitet genom fältet `@derivedFrom`. Det skapar ett virtuellt fält på entiteten som kan frågas, men som inte kan ställas in manuellt via mappings API. Istället härleds det från den relation som är definierad på den andra entiteten. För sådana relationer är det sällan meningsfullt att lagra båda sidor av relationen, och både indexering och frågeprestanda blir bättre när bara en sida lagras och den andra härleds. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. 
Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +För en-till-många-relationer bör relationen alltid lagras på 'en'-sidan, och 'många'-sidan bör alltid härledas. Att lagra relationen på detta sätt, istället för att lagra en array av entiteter på 'många'-sidan, kommer att resultera i dramatiskt bättre prestanda både för indexering och för frågning av subgraphen. Generellt sett bör lagring av arrayer av entiteter undvikas så mycket som är praktiskt möjligt. -#### Example +#### Exempel -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: +Vi kan göra balanserna för en token åtkomliga från token genom att härleda ett fält `tokenBalances`: ```graphql type Token @entity(immutable: true) { @@ -322,13 +335,13 @@ type TokenBalance @entity { } ``` -#### Many-To-Many Relationships +#### Många-till-många-relationer -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. +För många-till-många-relationer, som till exempel användare som var och en kan tillhöra ett antal organisationer, är det mest raka, men generellt sett inte den mest prestanda-optimerade, sättet att modellera relationen som en array i vardera av de två entiteter som är involverade. Om relationen är symmetrisk behöver bara ena sidan av relationen lagras och den andra sidan kan härledas. -#### Example +#### Exempel -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. +Definiera en omvänd sökning från en entitet av typen `Användare` till en entitet av typen `Organisation`. I exemplet nedan uppnås detta genom att söka upp attributet `medlemmar` inom entiteten `Organisation`. I frågor kommer fältet `organisationer` på `Användare` att lösas genom att hitta alla `Organisations`-entiteter som inkluderar användarens ID. ```graphql type Organization @entity { @@ -344,7 +357,7 @@ type User @entity { } ``` -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like +Ett mer effektivt sätt att lagra denna relation är genom en mappningstabell som har en post för varje `User` / `Organization`-par med ett schema som ```graphql type Organization @entity { @@ -366,7 +379,7 @@ type UserOrganization @entity { } ``` -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: +Detta tillvägagångssätt kräver att frågorna går ner till ytterligare en nivå för att hämta t. ex. 
organisationer för användare: ```graphql query usersWithOrganizations { @@ -381,11 +394,11 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +Detta mer avancerade sätt att lagra många-till-många-relationer kommer att leda till att mindre data lagras för subgrafen, och därför till en subgraf som ofta är dramatiskt snabbare att indexera och att fråga. -#### Adding comments to the schema +#### Lägga till kommentarer i schemat -As per GraphQL spec, comments can be added above schema entity attributes using double quotations `""`. This is illustrated in the example below: +Enligt GraphQL-specifikationen kan kommentarer läggas till ovanför entitetsattribut i schemat med hjälp av dubbla citattecken `""`. Detta illustreras i exemplet nedan: ```graphql type MyFirstEntity @entity { @@ -395,13 +408,13 @@ type MyFirstEntity @entity { } ``` -## Defining Fulltext Search Fields +## Definiera fält för fulltextsökning -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. +Fulltextsökningar filtrerar och rangordnar entiteter baserat på en textinmatning för sökning. Fulltextförfrågningar kan returnera träffar för liknande ord genom att bearbeta söktexten till stammar innan de jämförs med den indexerade textdata. -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. +En fulltextförfrågningsdefinition inkluderar förfrågningsnamnet, ordboken som används för att bearbeta textfälten, rangordningsalgoritmen som används för att ordna resultaten och fälten som ingår i sökningen. Varje fulltextförfrågan kan omfatta flera fält, men alla inkluderade fält måste vara från en enda entitetstyp. -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. +För att lägga till en fulltextförfrågan inkludera en typ `_Schema_` med en fulltextdirektiv i GraphQL-schemat. ```graphql type _Schema_ @@ -424,7 +437,7 @@ type Band @entity { } ``` -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. +Exempelfältet `bandSearch` kan användas i frågor för att filtrera `Band`-entiteter baserat på textdokumenten i fälten `name`, `description` och `bio`. Gå till [GraphQL API - Frågor](/querying/graphql-api#queries) för en beskrivning av API:et för fulltextsökning och fler exempel på användning. ```graphql query { @@ -437,100 +450,100 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Funktionshantering](#experimental-features):** Från `specVersion` `0.0.4` och framåt måste `fullTextSearch` deklareras under avsnittet `features` i subgraph-manifestet. 
-### Languages supported +### Stödda språk -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". +Att välja ett annat språk kommer att ha en definitiv, om än ibland subtil, effekt på fulltext-sök-API:en. Fält som omfattas av en fulltextförfrågningsfunktion granskas i kontexten av det valda språket, så lexem som produceras av analys och sökfrågor varierar från språk till språk. Till exempel: när det används det stödda turkiska ordboken "token" så avstamsas det till "toke", medan engelska ordboken självklart avstammar det till "token". -Supported language dictionaries: +Stödda språkordböcker: -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Kod | Ordbok | +| ----- | ------------ | +| enkel | Allmän | +| da | Danska | +| nl | Holländska | +| en | Engelska | +| fi | Finska | +| fr | Franska | +| de | Tyska | +| hu | Ungerska | +| it | Italienska | +| no | Norska | +| pt | Portugisiska | +| ro | Rumänska | +| ru | Ryska | +| es | Spanska | +| sv | Svenska | +| tr | Turkiska | -### Ranking Algorithms +### Rankningsalgoritmer -Supported algorithms for ordering results: +Stödda algoritmer för att ordna resultat: -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algoritm | Beskrivning | +| ------------- | ---------------------------------------------------------------------------------- | +| rank | Använd matchningskvaliteten (0-1) från fulltextförfrågan för att ordna resultaten. | +| proximityRank | Liknande rank, men inkluderar också närheten av träffarna. | -## Writing Mappings +## Skriv Mappningar -The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. +Mappningar tar data från en specifik källa och omvandlar den till entiteter som är definierade i din schema. Mappningar skrivs i en delmängd av [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) som kallas [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) som kan kompileras till WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript är strängare än vanlig TypeScript, men erbjuder en bekant syntax. -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. 
Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. +För varje händelsehanterare som är definierad i `subgraph.yaml` under `mapping.eventHandlers`, skapa en exporterad funktion med samma namn. Varje hanterare måste acceptera en enda parameter med namnet `event` med en typ som motsvarar namnet på händelsen som hanteras. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +I det här exempelsubgraphet innehåller `src/mapping.ts` hanterare för händelserna `NewGravatar` och `UpdatedGravatar`: ```javascript -import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' -import { Gravatar } from '../generated/schema' +import { NewGravatar, UpdatedGravatar } from "../generated/Gravity/Gravity"; +import { Gravatar } from "../generated/schema"; export function handleNewGravatar(event: NewGravatar): void { - let gravatar = new Gravatar(event.params.id) - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() + let gravatar = new Gravatar(event.params.id); + gravatar.owner = event.params.owner; + gravatar.displayName = event.params.displayName; + gravatar.imageUrl = event.params.imageUrl; + gravatar.save(); } export function handleUpdatedGravatar(event: UpdatedGravatar): void { - let id = event.params.id - let gravatar = Gravatar.load(id) + let id = event.params.id; + let gravatar = Gravatar.load(id); if (gravatar == null) { - gravatar = new Gravatar(id) + gravatar = new Gravatar(id); } - gravatar.owner = event.params.owner - gravatar.displayName = event.params.displayName - gravatar.imageUrl = event.params.imageUrl - gravatar.save() + gravatar.owner = event.params.owner; + gravatar.displayName = event.params.displayName; + gravatar.imageUrl = event.params.imageUrl; + gravatar.save(); } ``` -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. +Den första hanteraren tar en `NewGravatar`-händelse och skapar en ny `Gravatar`-entitet med `new Gravatar(event.params.id.toHex())`, fyller i entitetsfälten med hjälp av motsvarande händelseparametrar. Denna entitetsinstans representeras av variabeln `gravatar`, med ett id-värde av `event.params.id.toHex()`. -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. +Den andra hanteraren försöker ladda den befintliga `Gravatar` från Graph Node-lagringen. Om den inte finns ännu skapas den på begäran. Entiteten uppdateras sedan för att matcha de nya händelseparametrarna innan den sparas tillbaka till lagringen med `gravatar.save()`. -### Recommended IDs for Creating New Entities +### Rekommenderade ID:n för att skapa nya entiteter -Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. 
+Varje entitet måste ha ett `id` som är unikt bland alla entiteter av samma typ. En entitets `id`-värde sätts när entiteten skapas. Nedan finns några rekommenderade `id`-värden att överväga när du skapar nya entiteter. OBS: Värdet på `id` måste vara en `string`. - `event.params.id.toHex()` - `event.transaction.from.toHex()` - `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` -We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilies for interacting with the Graph Node store and conveniences for handling smart contract data and entities. You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. +Vi tillhandahåller [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) som innehåller verktyg för att interagera med Graph Node-lagringen och bekvämligheter för att hantera smart kontraktsdata och entiteter. Du kan använda denna bibliotek i dina mappningar genom att importera `@graphprotocol/graph-ts` i `mapping.ts`. -## Code Generation +## Kodgenerering -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +För att göra det enkelt och typsäkert att arbeta med smarta kontrakt, händelser och entiteter kan Graph CLI generera AssemblyScript-typer från subgrafens GraphQL-schema och kontrakts-ABIn som ingår i datakällorna. -This is done with +Detta görs med ```sh graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +men i de flesta fall är undergrafer redan förkonfigurerade via `package.json` så att du helt enkelt kan köra en av följande för att uppnå samma sak: ```sh # Yarn @@ -540,7 +553,7 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +Detta genererar en AssemblyScript-klass för varje smart kontrakt i ABI-filerna som nämns i `subgraph.yaml`, så att du kan binda dessa kontrakt till specifika adresser i mappningarna och anropa skrivskyddade kontraktsmetoder mot det block som bearbetas. Den kommer också att generera en klass för varje kontraktshändelse för att ge enkel åtkomst till händelseparametrar, samt blocket och transaktionen som händelsen härstammar från. Alla dessa typer skrivs till `//.ts`. I undergrafen i exemplet skulle detta vara `generated/Gravity/Gravity.ts`, vilket gör att mappningar kan importera dessa typer med. ```javascript import { @@ -552,25 +565,25 @@ import { } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. 
All entity classes are written to `/schema.ts`, allowing mappings to import them with +Utöver detta genereras en klass för varje entitetstyp i subgrafens GraphQL-schema. Dessa klasser tillhandahåller typsäker entitetsladdning, läs- och skrivåtkomst till entitetsfält samt en `save()`-metod för att skriva entiteter till lagret. Alla entitetsklasser skrivs till `/schema.ts`, vilket gör att mappningar kan importera dem med ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Observera:** Kodgenerering måste utföras igen efter varje ändring av GraphQL-schemat eller ABIn som ingår i manifestet. Det måste också utföras minst en gång innan du bygger eller distribuerar subgrafet. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Kodgenerering kontrollerar inte din mappningskod i `src/mapping.ts`. Om du vill kontrollera det innan du försöker distribuera din subgraf till Graph Explorer kan du köra `yarn build` och åtgärda eventuella syntaxfel som TypeScript-kompilatorn kan hitta. -## Data Source Templates +## Datakällmallar -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. +En vanlig mönster i EVM-kompatibla smarta kontrakt är användningen av register- eller fabrikskontrakt, där ett kontrakt skapar, hanterar eller hänvisar till ett godtyckligt antal andra kontrakt som var och en har sin egen stat och händelser. -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. +Adresserna till dessa underkontrakt kan eller kanske inte vara kända på förhand, och många av dessa kontrakt kan skapas och/eller läggas till över tid. Det är därför, i sådana fall, som det är omöjligt att definiera en enda datakälla eller ett fast antal datakällor och en mer dynamisk metod behövs: _datakällmallar_. -### Data Source for the Main Contract +### Datakälla för huvudkontraktet -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +Först definierar du en vanlig datakälla för huvudkontraktet. Snutten nedan visar ett förenklat exempel på en datakälla för [Uniswap](https://uniswap.org) utbytesfabrikskontrakt. Observera `NewExchange(address,address)` händelsehanteraren. Denna händelse emitteras när en ny utbyteskontrakt skapas på kedjan av fabrikskontraktet. ```yaml dataSources: @@ -595,9 +608,9 @@ dataSources: handler: handleNewExchange ``` -### Data Source Templates for Dynamically Created Contracts +### Datakällmallar för dynamiskt skapade kontrakt -Then, you add _data source templates_ to the manifest. 
These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. +Sedan lägger du till _datakällmallar_ i manifestet. Dessa är identiska med vanliga datakällor, förutom att de saknar en fördefinierad avtalsadress under `source`. Vanligtvis definierar du en mall för varje typ av underkontrakt som hanteras eller refereras till av det överordnade kontraktet. ```yaml dataSources: @@ -631,9 +644,9 @@ templates: handler: handleRemoveLiquidity ``` -### Instantiating a Data Source Template +### Instansiering av en mall för datakälla -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. +I det sista steget uppdaterar du mappningen av huvudkontraktet för att skapa en dynamisk datakällinstans från en av mallarna. I det här exemplet ändrar du mappningen av huvudkontraktet för att importera mallen `Exchange` och anropar metoden `Exchange.create(address)` för att börja indexera det nya växlingskontraktet. ```typescript import { Exchange } from '../generated/templates' @@ -645,13 +658,13 @@ export function handleNewExchange(event: NewExchange): void { } ``` -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> ** Notera:** En ny datakälla bearbetar endast anrop och händelser för det block där den skapades och alla efterföljande block, men bearbetar inte historiska data, dvs. data som finns i tidigare block. > -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. +> Om tidigare block innehåller data som är relevanta för den nya datakällan, är det bäst att indexera dessa data genom att läsa kontraktets aktuella status och skapa enheter som representerar denna status vid den tidpunkt då den nya datakällan skapas. -### Data Source Context +### Kontext för datakälla -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: +Datakällans kontext gör det möjligt att skicka extra konfiguration när en mall instansieras. I vårt exempel kan vi säga att börser är associerade med ett visst handelspar, vilket ingår i händelsen `NewExchange`. 
Den informationen kan skickas till den instansierade datakällan, så här: ```typescript import { Exchange } from '../generated/templates' @@ -663,7 +676,7 @@ export function handleNewExchange(event: NewExchange): void { } ``` -Inside a mapping of the `Exchange` template, the context can then be accessed: +Inuti en mappning av mallen `Exchange` kan kontexten sedan nås: ```typescript import { dataSource } from '@graphprotocol/graph-ts' @@ -672,11 +685,11 @@ let context = dataSource.context() let tradingPair = context.getString('tradingPair') ``` -There are setters and getters like `setString` and `getString` for all value types. +Det finns sättare och hämtare som `setString` och `getString` för alla värdestyper. -## Start Blocks +## Startblock -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +`startBlock` är en valfri inställning som låter dig definiera från vilken block i kedjan datakällan ska börja indexera. Genom att ställa in startblocket kan datakällan hoppa över potentiellt miljontals block som är irrelevanta. Vanligtvis kommer en subgrafutvecklare att ställa in `startBlock` till blocket där datakällans smarta kontrakt skapades. ```yaml dataSources: @@ -702,23 +715,23 @@ dataSources: handler: handleNewEvent ``` -> **Note:** The contract creation block can be quickly looked up on Etherscan: +> **Observera:** Blocket där kontraktet skapades kan snabbt sökas upp på Etherscan: > -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. Load the transaction details page where you'll find the start block for that contract. +> 1. Sök efter kontraktet genom att ange dess adress i sökfältet. +> 2. Klicka på transaktionshashen för skapandet i avsnittet `Kontraktsskapare`. +> 3. Ladda sidan med transaktionsdetaljer där du hittar startblocket för det kontraktet. -## Call Handlers +## Anropsbehandlare -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +Medan händelser ger ett effektivt sätt att samla in relevanta ändringar av ett kontrakts tillstånd, undviker många kontrakt att generera loggar för att optimera gasavgifterna. I dessa fall kan en subgraf prenumerera på anrop som görs till datakällans kontrakt. Detta uppnås genom att definiera anropsbehandlare som refererar till funktions signaturen och hanteraren som kommer att bearbeta anrop till denna funktion. För att bearbeta dessa anrop kommer hanteraren att ta emot ett `ethereum.Call` som ett argument med de typade in- och utdata från anropet. 
Anrop som görs på vilken djupnivå som helst i en transaktions anropskedja kommer att utlösa kartläggningen, vilket gör det möjligt att fånga aktivitet med datakällan genom proxykontrakt. -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. +Anropsbehandlare utlöses endast i ett av två fall: när den specificerade funktionen anropas av ett konto som inte är kontraktet självt eller när den är markerad som extern i Solidity och anropas som en del av en annan funktion i samma kontrakt. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Observera:** Anropsbehandlare är för närvarande beroende av Paritys spårnings-API. Vissa nätverk, som BNB-kedjan och Arbitrum, stöder inte denna API. Om en subgraf som indexerar ett av dessa nätverk innehåller en eller flera anropsbehandlare kommer den inte att börja synkroniseras. Subgrafutvecklare bör istället använda händelsehanterare. Dessa är mycket mer prestandaoptimerade än anropsbehandlare och stöds på alla evm-nätverk. -### Defining a Call Handler +### Definiera en Anropsbehandlare -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. +För att definiera en anropsbehandlare i din manifest, lägg helt enkelt till en `callHandlers`-array under den datakälla du vill prenumerera på. ```yaml dataSources: @@ -743,11 +756,11 @@ dataSources: handler: handleCreateGravatar ``` -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. +`function` är den normaliserade funktions signaturen för att filtrera anrop efter. Egenskapen `handler` är namnet på funktionen i din kartläggning som du vill utföra när målfunktionen anropas i datakällans kontrakt. -### Mapping Function +### Kartläggningsfunktion -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Varje anropsbehandlare tar en enda parameter med en typ som motsvarar namnet på den kallade funktionen. I det ovanstående exempelsubgrafet innehåller kartläggningen en hanterare för när funktionen `createGravatar` anropas och tar emot en `CreateGravatarCall`-parameter som ett argument: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -762,24 +775,26 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { } ``` -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
+Funktionen `handleCreateGravatar` tar emot ett nytt `CreateGravatarCall`, som är en underklass av `ethereum.Call`, tillhandahållen av `@graphprotocol/graph-ts`, som inkluderar de typade in- och utmatningarna från anropet. Typen `CreateGravatarCall` genereras för dig när du kör `graph codegen`. -## Block Handlers +## Blockbehandlare -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +Förutom att prenumerera på kontrakts händelser eller funktionsanrop kan en subgraf vilja uppdatera sina data när nya block läggs till i kedjan. För att uppnå detta kan en subgraf köra en funktion efter varje block eller efter block som matchar en fördefinierad filter. -### Supported Filters +### Stödda filter + +#### Anropsfilter ```yaml filter: kind: call ``` -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ +_Den definierade hanteraren kommer att anropas en gång för varje block som innehåller ett anrop till det kontrakt (datakälla) som hanteraren är definierad under._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Observera:** `call`-filtret är för närvarande beroende av Parity-tracing-API: et. Vissa nätverk, som BNB-kedjan och Arbitrum, stöder inte detta API. Om en subgraf som indexerar ett av dessa nätverk innehåller en eller flera blockhanterare med ett `call`-filter, kommer den inte att börja synkronisera. -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. +Avsaknaden av ett filter för en blockhanterare kommer att säkerställa att hanteraren kallas för varje block. En datakälla kan endast innehålla en blockhanterare för varje filttyp. ```yaml dataSources: @@ -806,9 +821,48 @@ dataSources: kind: call ``` -### Mapping Function +#### Undersökningsfilter + +> **Requires `specVersion` >= 0.0.8** + +> **Observera:** Undersökningsfilter är endast tillgängliga på datakällor av typen `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +Den definierade hanteraren kommer att kallas en gång för varje `n` block, där `n` är värdet som anges i fältet `every`. Denna konfiguration möjliggör för delgrafer att utföra specifika operationer med regelbundna blockintervall. + +#### En Gång Filter + +> **Requires `specVersion` >= 0.0.8** -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. +> **Observera:** En gång-filtrar är endast tillgängliga på datakällor av typen `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +Den definierade hanteraren med filtret once kommer att anropas endast en gång innan alla andra hanterare körs. Denna konfiguration gör det möjligt för subgrafen att använda hanteraren som en initialiseringshanterare, som utför specifika uppgifter i början av indexeringen. 
+ +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Kartläggningsfunktion + +Mappningsfunktionen tar emot ett `ethereum.Block` som sitt enda argument. Liksom mappningsfunktioner för händelser kan denna funktion komma åt befintliga subgrafiska enheter i lagret, anropa smarta kontrakt och skapa eller uppdatera enheter. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -820,9 +874,9 @@ export function handleBlock(block: ethereum.Block): void { } ``` -## Anonymous Events +## Anonyma händelser -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: +Om du behöver behandla anonyma händelser i Solidity kan du göra det genom att ange händelsens ämne 0, som i exemplet: ```yaml eventHandlers: @@ -831,13 +885,13 @@ eventHandlers: handler: handleGive ``` -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. +En händelse utlöses endast när både signaturen och topic0 matchar. Som standard är `topic0` lika med hashtillståndet för händelsesignaturen. -## Transaction Receipts in Event Handlers +## Transaktionskvitton i Händelsehanterare -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. +Från och med `specVersion` `0.0.5` och `apiVersion` `0.0.7` kan händelsehanterare få tillgång till kvittot för den transaktion som emitterade dem. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +För att göra detta måste händelsehanterare deklareras i delgrafmanifestet med den nya nyckeln `receipt: true`, vilket är valfritt och som standard är falskt. ```yaml eventHandlers: @@ -846,20 +900,20 @@ eventHandlers: receipt: true ``` -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. +Inuti hanterarfunktionen kan kvittot nås i fältet `Event.receipt`. När nyckeln `receipt` är inställd som `false` eller utelämnad i manifestet, kommer istället ett `null`-värde att returneras. 
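A rough sketch of what the paragraph above describes, reusing the `NewGravatar` handler from the example subgraph: assuming the handler is declared with `receipt: true` in the manifest, receipt fields such as `gasUsed` become available on `event.receipt`, and the handler should still guard against the `null` case.

```typescript
import { log } from '@graphprotocol/graph-ts'
import { NewGravatar } from '../generated/Gravity/Gravity'

export function handleNewGravatar(event: NewGravatar): void {
  let receipt = event.receipt
  if (receipt == null) {
    // `receipt: true` was not declared for this handler in the manifest
    return
  }
  // The receipt exposes fields such as gasUsed, status and logs
  log.info('NewGravatar transaction consumed {} gas', [receipt.gasUsed.toString()])
}
```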
-## Experimental features +## Experimentella funktioner -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +Från och med `specVersion` `0.0.4` måste delgrafsfunktioner deklareras explicit i avsnittet `features` högst upp i manifestfilen, med deras `camelCase`-namn, som listas i tabellen nedan: -| Feature | Name | -| --------------------------------------------------------- | --------------------------------------------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | +| Funktion | Namn | +| -------------------------------------------------------- | ------------------------------------------------------ | +| [Icke dödliga fel](#non-fatal-errors) | `nonFatalErrors` | +| [Fulltextssökning](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Ympning](#grafting-onto-existing-subgraphs) | `grafting` | +| [IPFS på Ethereum-kontrakt](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` eller `nonDeterministicIpfs` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +Till exempel, om en delgraf använder funktionerna **Fulltextssökning** och **Icke dödliga fel**, ska fältet `features` i manifestet vara: ```yaml specVersion: 0.0.4 @@ -870,27 +924,27 @@ features: dataSources: ... ``` -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +Observera att om en funktion används utan att deklareras kommer det att resultera i ett **valideringsfel** under delgrafens distribution, men inga fel uppstår om en funktion deklareras men inte används. -### IPFS on Ethereum Contracts +### IPFS på Ethereum-kontrakt -A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts. +En vanlig användning av att kombinera IPFS med Ethereum är att lagra data på IPFS som skulle vara för dyra att underhålla on-chain och referera till IPFS-hashen i Ethereum-kontrakt. -Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing. +Med sådana IPFS-hashar kan delgrafer läsa de motsvarande filerna från IPFS med hjälp av `ipfs.cat` och `ipfs.map`. För att göra detta på ett pålitligt sätt krävs det att dessa filer är fastnålade till en IPFS-nod med hög tillgänglighet, så att [den värdtjänst](https://thegraph.com/hosted-service) som använder IPFS-noden kan hitta dem under indexeringen. -> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio. 
+> **Observera:** The Graph Nätverk stöder ännu inte `ipfs.cat` och `ipfs.map`, och utvecklare bör inte distribuera delgrafer med den funktionaliteten till nätverket via Studio. -> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose. +> **[Funktionshantering](#experimental-features):** `ipfsOnEthereumContracts` måste deklareras under `features` i delgrafens manifest. För icke-EVM-kedjor kan aliaset `nonDeterministicIpfs` också användas för samma ändamål. -When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality. +När du kör en lokal Graph Node måste miljövariabeln `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` sättas för att indexera delgrafer med denna experimentella funktionalitet. -### Non-fatal errors +### Icke dödliga fel -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Indexeringsfel på redan synkroniserade delgrafer kommer, som standard, att få delgrafen att misslyckas och sluta synkronisera. Delgrafer kan istället konfigureras för att fortsätta synkroniseringen i närvaro av fel, genom att ignorera ändringarna som orsakades av hanteraren som provocerade felet. Det ger delgrafsförfattare tid att korrigera sina delgrafer medan förfrågningar fortsätter att behandlas mot det senaste blocket, även om resultaten kan vara inkonsekventa på grund av felet som orsakade felet. Observera att vissa fel alltid är dödliga. För att vara icke-dödliga måste felet vara känt för att vara deterministiskt. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Observera:** The Graph Nätverk stöder ännu inte icke-dödliga fel, och utvecklare bör inte distribuera delgrafer med den funktionaliteten till nätverket via Studio. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Aktivering av icke-dödliga fel kräver att följande funktionsflagga sätts i delgrafens manifest: ```yaml specVersion: 0.0.4 @@ -900,7 +954,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +Frågan måste också välja att fråga efter data med potentiella inkonsekvenser genom argumentet `subgraphError`. 
Det rekommenderas också att fråga `_meta` för att kontrollera om subgrafen har hoppat över fel, som i exemplet: ```graphql foos(first: 100, subgraphError: allow) { @@ -912,7 +966,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +Om subgrafen stöter på ett fel returnerar frågan både data och ett graphql-fel med meddelandet `"indexing_error"`, som i detta exempelsvar: ```graphql "data": { @@ -932,11 +986,13 @@ If the subgraph encounters an error, that query will return both the data and a ] ``` -### Grafting onto Existing Subgraphs +### Ympning på befintliga delgrafer -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +> **Observera:** Det rekommenderas inte att använda ympning vid initial uppgradering till The Graph Nätverk. Läs mer [här](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +När en delgraf först distribueras börjar den indexera händelser från genesisblocket på den motsvarande kedjan (eller på `startBlock` som är definierat för varje datakälla). I vissa situationer kan det vara fördelaktigt att återanvända data från en befintlig delgraf och börja indexera vid en mycket senare block. Denna indexeringsläge kallas _Ympning_. Ympning är exempelvis användbart under utvecklingen för att snabbt komma förbi enkla fel i mappningarna eller tillfälligt få en befintlig delgraf att fungera igen efter att den har misslyckats. + +En delgraf ympas på en grunddelgraf när delgrafmanifestet i `subgraph.yaml` innehåller en `graft`-block högst upp: ```yaml description: ... @@ -945,49 +1001,49 @@ graft: block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +När en delgraf vars manifest innehåller en `graft`-sektion distribueras kommer Graph Node att kopiera data från den `base` delgrafen upp till och inklusive det angivna `block` och sedan fortsätta indexera den nya delgrafen från det blocket. Basdelgrafen måste finnas på målnoden Graph Node och måste ha indexerats upp till minst det angivna blocket. På grund av denna begränsning bör ympning endast användas under utveckling eller i en nödsituation för att snabba upp produktionen av en motsvarande icke-ympad delgraf. 
-Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Eftersom ympning kopierar data istället för att indexera basdata går det mycket snabbare att få delgrafen till det önskade blocket än att indexera från början, även om den initiala datorkopieringen fortfarande kan ta flera timmar för mycket stora delgrafer. Medan den ympade delgrafen initialiseras kommer Graph Node att logga information om de entitetstyper som redan har kopierats. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Den ympade delgrafen kan använda ett GraphQL-schema som inte är identiskt med basdelgrafens, men bara kompatibelt med den. Det måste vara ett giltigt delgrafschema i sig själv, men kan avvika från basdelgrafens schema på följande sätt: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Den lägger till eller tar bort entitetstyper +- Den tar bort attribut från entitetstyper +- Den lägger till nollställbara attribut till entitetstyper +- Den gör icke-nollställbara attribut till nollställbara attribut +- Den lägger till värden till enum +- Den lägger till eller tar bort gränssnitt +- Den ändrar vilka entitetstyper som ett gränssnitt är implementerat för -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Funktionshantering](#experimental-features):** `grafting` måste deklareras under `features` i delgrafens manifest. -## File Data Sources +## Fildatakällor -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +Filbaserade datakällor är en ny delgrafsfunktion för att få tillgång till data utanför kedjan under indexering på ett robust, utökat sätt. Filbaserade datakällor stödjer hämtning av filer från IPFS och från Arweave. -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. +> Detta lägger också grunden för deterministisk indexering av data utanför kedjan, samt möjligheten att introducera godtycklig data som hämtas via HTTP. -### Overview +### Översikt -Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. +Istället för att hämta filer "i linje" under hanterarens exekvering introducerar detta mallar som kan skapas som nya datakällor för en given filidentifikator. Dessa nya datakällor hämtar filerna och försöker igen om de inte lyckas, och kör en dedikerad hanterare när filen hittas. 
-This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. +Detta liknar [befintliga datorkällmalar](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), som används för att dynamiskt skapa nya kedjebaserade datakällor. -> This replaces the existing `ipfs.cat` API +> Detta ersätter den befintliga `ipfs.cat` API -### Upgrade guide +### Uppgraderingsguide -#### Update `graph-ts` and `graph-cli` +#### Uppdatera `graph-ts` och `graph-cli` -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 +Filbaserade datakällor kräver graph-ts >=0.29.0 och graph-cli >=0.33.1 -#### Add a new entity type which will be updated when files are found +#### Lägg till en ny entitetstyp som kommer att uppdateras när filer hittas -File data sources cannot access or update chain-based entities, but must update file specific entities. +Filbaserade datakällor kan inte komma åt eller uppdatera kedjebaserade entiteter, utan måste uppdatera filspecifika entiteter. -This may mean splitting out fields from existing entities into separate entities, linked together. +Detta kan innebära att fält från befintliga entiteter separeras i separata entiteter som är kopplade ihop. -Original combined entity: +Ursprunglig kombinerad entitet: ```graphql type Token @entity { @@ -1005,7 +1061,7 @@ type Token @entity { } ``` -New, split entity: +Ny, delad enhet: ```graphql type Token @entity { @@ -1026,13 +1082,13 @@ type TokenMetadata @entity { } ``` -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! +Om relationen är 1:1 mellan föräldraentiteten och den resulterande filbaserade datakälla entiteten är det enklaste mönstret att länka föräldraentiteten till en resulterande filbaserad entitet genom att använda IPFS CID som söknyckel. Kontakta oss på Discord om du har svårt att modellera dina nya filbaserade entiteter! -> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. +> Du kan använda [inbäddade filter](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) för att filtrera föräldraentiteter baserat på dessa inbäddade entiteter. -#### Add a new templated data source with `kind: file/ipfs` +#### Lägg till en ny mallbaserad datakälla med `kind: file/ipfs` eller `kind: file/arweave` -This is the data source which will be spawned when a file of interest is identified. +Detta är datakällan som skapas när en intressant fil identifieras. ```yaml templates: @@ -1050,21 +1106,21 @@ templates: file: ./abis/Token.json ``` -> Currently `abis` are required, though it is not possible to call contracts from within file data sources +> För närvarande krävs `abis`, även om det inte är möjligt att anropa kontrakt från filbaserade datakällor -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details. +Filbaserade datakällor måste specifikt nämna alla entitetstyper som de kommer att interagera med under `entities`. 
Se [begränsningar](#Begränsningar) för mer information. -#### Create a new handler to process files +#### Skapa en ny hanterare för att bearbeta filer -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). +Denna hanterare bör acceptera en `Bytes`-parameter, som kommer att vara innehållet i filen när den hittas, och som sedan kan bearbetas. Detta är ofta en JSON-fil, som kan bearbetas med hjälp av `graph-ts` hjälpfunktioner ([dokumentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). -The CID of the file as a readable string can be accessed via the `dataSource` as follows: +CID för filen som en läsbar sträng kan nås via `dataSource` enligt följande: ```typescript const cid = dataSource.stringParam() ``` -Example handler: +Exempel på hanterare: ```typescript import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' @@ -1091,22 +1147,24 @@ export function handleMetadata(content: Bytes): void { } ``` -#### Spawn file data sources when required +#### Skapa filbaserade datakällor vid behov + +Nu kan du skapa filbaserade datakällor under utförandet av kedjebaserade hanterare: -You can now create file data sources during execution of chain-based handlers: +- Importera mallen från den automatiskt genererade `templates` +- anropa `TemplateName.create(cid: string)` från en mappning, där cid är en giltig innehållsidentifierare för IPFS eller Arweave -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +För IPFS stöder Graph Node [v0 och v1 innehållsidentifierare](https://docs.ipfs.tech/concepts/content-addressing/), och innehållsidentifierare med kataloger (t.ex. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +För Arweave, från och med version 0.33.0 kan Graph Node hämta filer som är lagrade på Arweave baserat på deras [transaktions-ID](https://docs.arweave.org/developers/server/http-api#transactions) från en Arweave-gateway ([exempelfil](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave stöder transaktioner som laddats upp via Bundlr, och Graph Node kan också hämta filer baserat på [Bundlr-manifest](https://docs.bundlr.network/learn/gateways#indexing). -Example: +Exempel: ```typescript import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//Denna exempelkod är för en undergraf för kryptosamverkan. Ovanstående ipfs-hash är en katalog med tokenmetadata för alla kryptosamverkande NFT:er. 
export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -1116,7 +1174,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + //Detta skapar en sökväg till metadata för en enskild Crypto coven NFT. Den konkaterar katalogen med "/" + filnamn + ".json" token.ipfsURI = tokenIpfsHash @@ -1129,50 +1187,50 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +Detta kommer att skapa en ny filbaserad datakälla som kommer att övervaka Graph Nodes konfigurerade IPFS- eller Arweave-slutpunkt och försöka igen om den inte hittas. När filen hittas kommer filbaserad datakälla hanteraren att köras. -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. +I det här exemplet används CID som koppling mellan förälderentiteten `Token` och den resulterande entiteten `TokenMetadata`. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Tidigare är detta det punkt där en delgrafutvecklare skulle ha anropat `ipfs.cat(CID)` för att hämta filen -Congratulations, you are using file data sources! +Grattis, du använder filbaserade datakällor! -#### Deploying your subgraphs +#### Distribuera dina delgrafer -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +Du kan nu `bygga` och `distribuera` dina delgrafer till en Graph Node >=v0.30.0-rc.0. -#### Limitations +#### Begränsningar -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +Filbaserade datakällahanterare och entiteter är isolerade från andra delgrafentiteter, vilket säkerställer att de är deterministiska när de körs och att ingen förorening av kedjebaserade datakällor sker. För att vara specifik: -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers +- Entiteter skapade av Filbaserade datakällor är oföränderliga och kan inte uppdateras +- Filbaserade datakällahanterare kan inte komma åt entiteter från andra filbaserade datakällor +- Entiteter associerade med filbaserade datakällor kan inte nås av kedjebaserade hanterare -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> Även om denna begränsning inte bör vara problematisk för de flesta användningsfall kan den införa komplexitet för vissa. Var god kontakta oss via Discord om du har problem med att modellera din data baserad på fil i en delgraf! -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. 
+Dessutom är det inte möjligt att skapa datakällor från en filbaserad datakälla, vare sig det är en datakälla på kedjan eller en annan filbaserad datakälla. Denna begränsning kan komma att hävas i framtiden. -#### Best practices -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. +#### Bästa praxis +Om du länkar NFT-metadata till motsvarande token, använd metadatans IPFS-hash för att referera till en Metadata-entitet från Token-entiteten. Spara Metadata-entiteten med IPFS-hashen som ID. -You can use [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. +Du kan använda [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) när du skapar filbaserade datakällor för att skicka med extra information som blir tillgänglig för hanteraren för den filbaserade datakällan. -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. +Om du har entiteter som uppdateras flera gånger, skapa unika filbaserade entiteter med IPFS-hashen & entitets-ID:t, och referera till dem med hjälp av ett härlett fält i den kedjebaserade entiteten. -> We are working to improve the above recommendation, so queries only return the "most recent" version +> Vi arbetar med att förbättra rekommendationen ovan så att förfrågningar endast returnerar den "senaste" versionen -#### Known issues +#### Kända problem -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. +Filbaserade datakällor kräver för närvarande ABI:er, även om dessa inte används ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). En tillfällig lösning är att lägga till vilken ABI som helst. -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. +Hanterare för filbaserade datakällor kan inte finnas i filer som importerar `eth_call`-kontraktsbindningar, vilket misslyckas med "okänd import: `ethereum::ethereum.call` har inte definierats" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). En tillfällig lösning är att skapa hanterare för filbaserade datakällor i en dedikerad fil. 
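Som en skiss på den lösningen kan hanteraren ligga i en egen mappningsfil som inte importerar några genererade kontraktsklasser. Filnamnet och fältet `name` nedan är bara exempel, och hanteraren återanvänder `TokenMetadata`-entiteten från avsnittet ovan:

```typescript
// src/token-metadata.ts: en dedikerad mappningsfil utan kontraktsimporter,
// så att inga `eth_call`-bindningar dras in och importfelet undviks
import { json, Bytes, dataSource } from '@graphprotocol/graph-ts'
import { TokenMetadata } from '../generated/schema'

export function handleMetadata(content: Bytes): void {
  // Filens CID (som läsbar sträng) används som entitets-ID
  let tokenMetadata = new TokenMetadata(dataSource.stringParam())

  const value = json.fromBytes(content).toObject()
  const name = value.get('name')
  if (name) {
    tokenMetadata.name = name.toString()
  }

  tokenMetadata.save()
}
```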
-#### Examples +#### Exempel -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) +[Crypto Coven Migration av undergrafer](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) -#### References +#### Referenser -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) +[GIP Filbaserade datakällor](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/sv/developing/developer-faqs.mdx b/website/pages/sv/developing/developer-faqs.mdx index 0b925a79dce2..85dc3131826b 100644 --- a/website/pages/sv/developing/developer-faqs.mdx +++ b/website/pages/sv/developing/developer-faqs.mdx @@ -1,74 +1,74 @@ --- -title: Developer FAQs +title: Vanliga frågor för utvecklare --- -## 1. What is a subgraph? +## 1. Vad är en subgraf? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using the Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available to be queried by subgraph consumers. +En subgraf är en anpassad API byggd på blockkedjedata. Subgrafer frågas med hjälp av GraphQL-frågespråket och distribueras till en Graph Node med hjälp av Graph CLI. När de har distribuerats och publicerats till The Graphs decentraliserade nätverk, bearbetar Indexers subgrafer och gör dem tillgängliga för frågekonsumenter av subgrafer. -## 2. Can I delete my subgraph? +## 2. Kan jag ta bort min subgraf? -It is not possible to delete subgraphs once they are created. +Det är inte möjligt att ta bort subgrafer efter att de har skapats. -## 3. Can I change my subgraph name? +## 3. Kan jag ändra namnet på min subgraf? -No. Once a subgraph is created, the name cannot be changed. Make sure to think of this carefully before you create your subgraph so it is easily searchable and identifiable by other dapps. +Nej. När en subgraf har skapats kan namnet inte ändras. Se till att tänka noga på detta innan du skapar din subgraf så att den är lätt sökbar och identifierbar av andra dappar. -## 4. Can I change the GitHub account associated with my subgraph? +## 4. Kan jag ändra det GitHub-konto som är kopplat till min subgraf? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Make sure to think of this carefully before you create your subgraph. +Nej. När en subgraf har skapats kan det associerade GitHub-kontot inte ändras. Tänk noggrant på detta innan du skapar din subgraf. -## 5. Am I still able to create a subgraph if my smart contracts don't have events? +## 5. Kan jag fortfarande skapa en subgraf om mina smarta kontrakt inte har händelser? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are by far the fastest way to retrieve useful data. +Det rekommenderas starkt att strukturera dina smarta kontrakt så att de har händelser som är kopplade till data du är intresserad av att fråga. Händelsehanterare i subgrafen utlöses av kontrakthändelser och är överlägset det snabbaste sättet att hämta användbar data. -If the contracts you are working with do not contain events, your subgraph can use call and block handlers to trigger indexing. Although this is not recommended, as performance will be significantly slower. 
+Om de kontrakt du arbetar med inte innehåller händelser kan din subgraf använda sig av uppropshanterare och blockhanterare för att utlösa indexering. Detta rekommenderas dock inte, eftersom prestandan kommer att vara betydligt långsammare. -## 6. Is it possible to deploy one subgraph with the same name for multiple networks? +## 6. Är det möjligt att distribuera en subgraf med samma namn för flera nätverk? -You will need separate names for multiple networks. While you can't have different subgraphs under the same name, there are convenient ways of having a single codebase for multiple networks. Find more on this in our documentation: [Redeploying a Subgraph](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) +Du behöver separata namn för flera nätverk. Även om du inte kan ha olika subgrafer under samma namn finns det bekväma sätt att ha en enda kodbas för flera nätverk. Läs mer om detta i vår dokumentation: [Omdistribuera en subgraf](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) -## 7. How are templates different from data sources? +## 7. Hur skiljer sig mallar från datakällor? -Templates allow you to create data sources on the fly, while your subgraph is indexing. It might be the case that your contract will spawn new contracts as people interact with it, and since you know the shape of those contracts (ABI, events, etc) upfront you can define how you want to index them in a template and when they are spawned your subgraph will create a dynamic data source by supplying the contract address. +Mallar låter dig skapa datakällor på flykt medan din subgraf indexerar. Det kan vara så att ditt kontrakt kommer att skapa nya kontrakt när människor interagerar med det, och eftersom du känner till formen av dessa kontrakt (ABI, händelser osv.) i förväg kan du definiera hur du vill indexera dem i en mall och när de skapas kommer din subgraf att skapa en dynamisk datakälla genom att tillhandahålla kontraktsadressen. -Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph#data-source-templates). +Kolla in avsnittet "Instansiera en mall för datakälla" på: [Mallar för datakällor](/developing/creating-a-subgraph#data-source-templates). -## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? +## 8. Hur ser jag till att jag använder den senaste versionen av graph-node för mina lokala distributioner? -You can run the following command: +Du kan köra följande kommando: ```sh docker pull graphprotocol/graph-node:latest ``` -**NOTE:** docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so it is important to do this to make sure you are up to date with the latest version of graph-node. +**OBS:** Docker / docker-compose kommer alltid att använda den graph-node-version som hämtades första gången du körde det, så det är viktigt att göra detta för att se till att du är uppdaterad med den senaste versionen av graph-node. -## 9. How do I call a contract function or access a public state variable from my subgraph mappings? +## 9. Hur anropar jag en kontraktsfunktion eller får åtkomst till en offentlig statisk variabel från mina subgraf-mappningar? -Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/developing/assemblyscript-api). +Ta en titt på `Åtkomst till smart kontrakts`-stat i avsnittet [AssemblyScript API](/developing/assemblyscript-api). -## 10. 
Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another datasource in `subgraph.yaml` after running `graph init`? +## 10. Är det möjligt att ställa in en subgraf med hjälp av `graph init` från `graph-cli` med två kontrakt? Eller bör jag manuellt lägga till en annan datakälla i `subgraph.yaml` efter att jag har kört `graph init`? -Unfortunately, this is currently not possible. `graph init` is intended as a basic starting point, from which you can then add more data sources manually. +Tyvärr är detta för närvarande inte möjligt. `graph init` är avsett som en grundläggande startpunkt, från vilken du sedan manuellt kan lägga till fler datakällor. -## 11. I want to contribute or add a GitHub issue. Where can I find the open source repositories? +## 11. Jag vill bidra eller lägga till ett GitHub-ärende. Var hittar jag öppna källkodsrepository? - [graph-node](https://github.com/graphprotocol/graph-node) - [graph-cli](https://github.com/graphprotocol/graph-cli) - [graph-ts](https://github.com/graphprotocol/graph-ts) -## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? +## 12. Vad är det rekommenderade sättet att bygga "automatiskt genererade" ID för en entitet när man hanterar händelser? -If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. +Om endast en entitet skapas under händelsen och om inget bättre är tillgängligt, skulle transaktionshashen + loggindexet vara unikt. Du kan förvränga dessa genom att konvertera dem till bytes och sedan skicka dem genom `crypto.keccak256`, men detta kommer inte att göra dem mer unika. -## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? +## 13. När du lyssnar på flera kontrakt, är det möjligt att välja kontraktsordningen för att lyssna på händelser? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Inom en subgraf behandlas händelser alltid i den ordning de visas i blocken, oavsett om det är över flera kontrakt eller inte. -## 14. Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? +## 14. Är det möjligt att särskilja mellan nätverk (mainnet, Goerli, lokalt) från händelsehanterare? -Yes. You can do this by importing `graph-ts` as per the example below: +Ja, du kan göra detta genom att importera `graph-ts` enligt exemplet nedan: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -77,66 +77,62 @@ dataSource.network() dataSource.address() ``` -## 15. Do you support block and call handlers on Goerli? +## 15. Stödjer ni block- och uppropshanterare på Goerli? -Yes. Goerli supports block handlers, call handlers and event handlers. It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. +Ja. Goerli stödjer blockhanterare, uppropshanterare och händelsehanterare. Det bör noteras att händelsehanterare är betydligt mer prestandaeffektiva än de andra två hanterarna, och de stöds på varje EVM-kompatibelt nätverk. -## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? +## 16. 
Kan jag importera ethers.js eller andra JS-bibliotek i mina subgraf-mappningar? -Not currently, as mappings are written in AssemblyScript. One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. +För närvarande inte, eftersom mappningar är skrivna i AssemblyScript. En möjlig alternativ lösning på detta är att lagra rådata i enheter och utföra logik som kräver JS-bibliotek på klienten. -## 17. Is it possible to specify what block to start indexing on? +## 17. Är det möjligt att specificera vilket block som ska börja indexera? -Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created: Start blocks +Ja. `dataSources.source.startBlock` i filen `subgraph.yaml` anger numret på det block som datakällan börjar indexera från. I de flesta fall föreslår vi att använda det block där kontraktet skapades: Start blocks -## 18. Are there some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +## 18. Finns det några tips för att öka prestandan vid indexering? Min subgraf tar väldigt lång tid att synkronisera -Yes, you should take a look at the optional start block feature to start indexing from the block that the contract was deployed: [Start blocks](/developing/creating-a-subgraph#start-blocks) +Ja, du bör titta på den valfria funktionen för startblock för att börja indexera från det block där kontraktet distribuerades: [Start blocks](/developing/creating-a-subgraph#start-blocks) -## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +## 19. Finns det ett sätt att direkt fråga subgrafen för att ta reda på det senaste blocknumret den har indexerat? -Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: +Ja! Prova följande kommando och ersätt "organization/subgraphName" med organisationen under vilken den är publicerad och namnet på din subgraf: ```sh curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql ``` -## 20. What networks are supported by The Graph? +## 20. Vilka nätverk stöds av The Graph? -You can find the list of the supported networks [here](/developing/supported-networks). +Du kan hitta listan över de stödda nätverken [här](/developing/supported-networks). -## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +## 21. Är det möjligt att duplicera en subgraf till ett annat konto eller en annan slutpunkt utan att distribuera om? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +Du måste distribuera om subgrafen, men om subgrafens ID (IPFS-hash) inte ändras behöver den inte synkroniseras från början. -## 22. Is this possible to use Apollo Federation on top of graph-node? +## 22. Är det möjligt att använda Apollo Federation ovanpå graph-node? -Federation is not supported yet, although we do want to support it in the future. At the moment, something you can do is use schema stitching, either on the client or via a proxy service. +Federation stöds ännu inte, även om vi planerar att stödja det i framtiden. 
För närvarande kan du använda schema stitching, antingen på klienten eller via en proxytjänst. -## 23. Is there a limit to how many objects The Graph can return per query? +## 23. Finns det en begränsning för hur många objekt The Graph kan returnera per fråga? -By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: +Som standard är frågesvar begränsade till 100 objekt per samling. Om du vill ha fler kan du gå upp till 1000 objekt per samling och bortom det kan du paginera med: ```graphql someCollection(first: 1000, skip: ) { ... } ``` -## 24. If my dapp frontend uses The Graph for querying, do I need to write my query key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? +## 24. Om min dapp-frontänd använder The Graph för frågor, måste jag skriva in min frågenyckel direkt i frontänden? Vad händer om vi betalar frågeavgifter för användare – kommer skadliga användare att orsaka mycket höga frågeavgifter? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +För närvarande är det rekommenderade tillvägagångssättet för en dapp att lägga till nyckeln i frontänden och exponera den för slutanvändare. Med det sagt kan du begränsa den nyckeln till en värdnamn, som _yourdapp.io_ och subgraphen. Gatewayen drivs för närvarande av Edge & Node. En del av gatewayens ansvar är att övervaka missbruk och blockera trafik från skadliga klienter. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? -Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). +Gå till hosted service för att hitta subgrafer som du eller andra har distribuerat till hosted service. Du hittar den [här](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? -The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. +The Graph kommer aldrig ta ut avgifter för hosted service. The Graph är en decentraliserad protokoll, och att ta ut avgifter för en centraliserad tjänst är inte i linje med The Graphs värderingar. Hosted service var alltid ett tillfälligt steg för att hjälpa till att nå det decentraliserade nätverket. Utvecklare kommer att ha tillräckligt med tid att uppgradera till det decentraliserade nätverket när de är bekväma med det. -## 27. When will the Hosted Service be shut down? +## 27. How do I update a subgraph on mainnet? -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). 
All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? - -If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +Om du är en subgraf-utvecklare kan du distribuera en ny version av din subgraf till Subgraf Studio med hjälp av CLI. Den kommer att vara privat vid den punkten, men om du är nöjd med den kan du publicera den till den decentraliserade Graph Explorer. Detta kommer att skapa en ny version av din subgraf som Curators kan börja signalera på. diff --git a/website/pages/sv/developing/graph-ts/api.mdx b/website/pages/sv/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..7b12c7bb5e49 --- /dev/null +++ b/website/pages/sv/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: API för AssemblyScript +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +Denna sida dokumenterar vilka inbyggda API: er som kan användas när man skriver mappningar av undergrafer. Två typer av API: er är tillgängliga från start: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API-referens + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Primitiver på låg nivå för att översätta mellan olika typsystem som Ethereum, JSON, GraphQL och AssemblyScript. + +### Versioner + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Versionsanteckningar | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Inbyggda typer + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Skapa entiteter + +Följande är ett vanligt mönster för att skapa entiteter från Ethereum-händelser. + +```typescript +// Importera händelseklassen Transfer som genererats från ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Importera entitetstypen Transfer som genererats från GraphQL-schemat +import { Transfer } from '../generated/schema' + +// Händelsehanterare för överföring +export function handleTransfer(event: TransferEvent): void { + // Skapa en Transfer-entitet, med transaktionshash som enhets-ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Ange egenskaper för entiteten med hjälp av händelseparametrarna + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Spara entiteten till lagret + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Varje entitet måste ha en unik ID för att undvika kollisioner med andra entiteter. 
Det är ganska vanligt att händelsens parametrar inkluderar en unik identifierare som kan användas. Observera: Att använda transaktionshashen som ID förutsätter att inga andra händelser i samma transaktion skapar entiteter med denna hash som ID. + +#### Ladda entiteter från lagret + +Om en entitet redan finns kan den laddas från lagret med följande: + +```typescript +let id = event.transaction.hash // eller hur ID konstrueras +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Använd överföringsenheten som tidigare +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Sökning av entiteter skapade inom ett block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +API:et för lagret underlättar hämtningen av entiteter som skapades eller uppdaterades i det aktuella blocket. En vanlig situation för detta är att en hanterare skapar en transaktion från någon händelse på kedjan, och en senare hanterare vill komma åt denna transaktion om den finns. I det fall då transaktionen inte finns, måste subgraphen gå till databasen bara för att ta reda på att entiteten inte finns; om subgraphförfattaren redan vet att entiteten måste ha skapats i samma block, undviker man detta databasbesök genom att använda loadInBlock. För vissa subgrapher kan dessa missade sökningar bidra avsevärt till indexeringstiden. + +```typescript +let id = event.transaction.hash // eller hur ID konstrueras +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Använd överföringsenheten som tidigare +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Sökning av härledda entiteter + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +Detta möjliggör laddning av härledda entitetsfält från inom en händelsehanterare. Till exempel, med följande schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Ladda de Token-enheter som är associerade med en given innehavare +let tokens = holder.tokens.load() +``` + +#### Uppdatering av befintliga entiteter + +Det finns två sätt att uppdatera en befintlig entitet: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Att ändra egenskaper är rakt fram i de flesta fall, tack vare de genererade egenskapsinställarna: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +Det är också möjligt att avaktivera egenskaper med en av följande två instruktioner: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// Detta kommer inte att fungera +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// Detta kommer att fungera +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Ta bort entiteter från lagret + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +Ethereum API ger tillgång till smarta kontrakt, offentliga tillståndsvariabler, kontrakt funktioner, händelser, transaktioner, block och kodning/avkodning av Ethereum data. + +#### Stöd för Ethereum-typer + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +Följande exempel illustrerar detta. Med en subgraph-schema som + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Händelser och Block/Transaktionsdata + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Åtkomst till Smart Contract-tillstånd + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +Ett vanligt mönster är att komma åt kontraktet från vilket en händelse härstammar. Detta uppnås med följande kod: + +```typescript +// Importera den genererade kontraktsklassen och den genererade klassen för överföringshändelser +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Importera den genererade entitetsklassen +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind kontraktet till den adress som skickade händelsen + let contract = ERC20Contract.bind(event.address) + + // Åtkomst till tillståndsvariabler och funktioner genom att anropa dem + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Andra kontrakt som är en del av subgrafen kan importeras från den genererade koden och bindas till en giltig adress. + +#### Hantering av återkallade anrop + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Observera att en Graph Node som är ansluten till en Geth- eller Infura-klient kanske inte upptäcker alla återkallade anrop. 
Om du förlitar dig på detta rekommenderar vi att du använder en Graph nod som är ansluten till en Parity klient. + +#### Kodning/Dekodning av ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +För ytterligare information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### API för loggning + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Loggning av ett eller flera värden + +##### Logga ett enskilt värde + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logga en enskild post från en befintlig array + +I exemplet nedan loggas endast det första värdet i argument arrayen, trots att arrayen innehåller tre värden. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Visar : "Mitt värde är: A" (Även om tre värden skickas till `log.info`) + log.info('Mitt värde är: {}', myArray) +} +``` + +#### Logga flera poster från en befintlig array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Visar: "Mitt första värde är: A, andra värdet är: B, tredje värdet är: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Logga en specifik post från en befintlig array
+
+För att visa ett specifikt värde i arrayen måste det indexeras och tillhandahållas.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Visar: "Mitt tredje värde är C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Loggning av händelseinformation
+
+I exemplet nedan loggas blocknummer, blockhash och transaktionshash från en händelse:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+För att läsa en fil från IPFS med en given IPFS-hash eller sökväg görs följande:
+
+```typescript
+// Placera detta i en händelsehanterare i mappningen
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Sökvägar som `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// som inkluderar filer i kataloger stöds också
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+let data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // Se JSONValue-dokumentationen för mer information om hur man
+  // hanterar JSON-värden
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks kan också skapa enheter
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Ange parent till "parentId"
+  newItem.save()
+}
+
+// Placera detta i en händelsehanterare i mappningen
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternativt kan du använda `ipfs.mapJSON`.
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them.
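+For illustration, a file consumed with the `json` flag could look like the following (hypothetical contents shaped to match the `processItem` callback above, one JSON value per line):
+
+```
+{ "id": "0", "title": "First item" }
+{ "id": "1", "title": "Second item" }
+```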
The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Typkonverteringsreferens + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ------------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | Sträng (hexadecimal) | s.toHexString() eller s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | Sträng (hexadecimal) | s.toHexString() eller s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | 
BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| Sträng (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Metadata för datakälla + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entitet och DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/sv/developing/graph-ts/common-issues.mdx b/website/pages/sv/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..71cdf70beba8 --- /dev/null +++ b/website/pages/sv/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Vanliga problem med AssemblyScript +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/sv/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/sv/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..2b6f1d5f67d9 100644 --- a/website/pages/sv/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/sv/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: Vanliga frågor om Substreams-drivna subgrafer --- -## What are Substreams? +## Vad är Substreams? -Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. +Utvecklad av [StreamingFast](https://www.streamingfast.io/), är Substreams en exceptionellt kraftfull bearbetningsmotor som kan konsumera rika strömmar av blockkedjedata. Substreams tillåter dig att förädla och forma blockkedjedata för snabb och sömlös användning av slutanvändarprogram. Mer specifikt är Substreams en blockkedjeoberoende, paralleliserad och strömningsfokuserad motor som fungerar som ett blockkedjedata-transformationslager. Driven av [Firehose](https://firehose.streamingfast.io/), gör det det möjligt för utvecklare att skriva Rust-moduler, bygga på gemenskapsmoduler, erbjuda extremt högpresterande indexering och [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) sina data var som helst. -Go to the [Substreams Documentation](/substreams) to learn more about Substreams. +Gå till [Substreams Documentation](/substreams) för att lära dig mer om Substreams. -## What are Substreams-powered subgraphs? +## Vad är Substreams-drivna subgrafer? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +[Substreams-drivna subgrafer](/cookbook/substreams-powered-subgraphs/) kombinerar kraften hos Substreams med subgrafiernas möjlighet att frågas. När en Substreams-driven subgraf publiceras kan datan som produceras av Substreams omvandlingar [utdata ändringar av enheter](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), vilket är kompatibelt med subgrafiernas enheter. 
-If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +Om du redan är bekant med utveckling av subgrafer kan du notera att Substreams-drivna subgrafer sedan kan frågas på samma sätt som om de hade producerats av AssemblyScript omvandlingslagret, med alla fördelar med subgrafer, som att tillhandahålla en dynamisk och flexibel GraphQL API. -## How are Substreams-powered subgraphs different from subgraphs? +## Hur skiljer sig Substreams-drivna subgrafer från subgrafer? -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. +Subgrafer består av datakällor som specificerar händelser på blockkedjan och hur dessa händelser ska omvandlas via hanterare skrivna i AssemblyScript. Dessa händelser bearbetas sekventiellt, baserat på ordningen som händelserna inträffar på blockkedjan. -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. +Å andra sidan har Substreams-drivna subgrafer en enda datakälla som refererar till ett Substreams-paket, vilket bearbetas av Graph Node. Substreams har tillgång till ytterligare detaljerade on-chain data jämfört med konventionella subgrafer och kan även dra nytta av massivt parallell bearbetning, vilket kan leda till betydligt snabbare bearbetningstider. -## What are the benefits of using Substreams-powered subgraphs? +## Vilka fördelar har användning av Substreams-drivna subgrafer? -Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. +Substreams-drivna subgrafer kombinerar alla fördelar med Substreams med subgrafernas frågebarhet. De ger The Graph större komponerbarhet och högpresterande indexering. De möjliggör också nya dataanvändningsfall; till exempel, när du har byggt din Substreams-drivna Subgraph kan du återanvända dina [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) för att mata ut till olika [sinks]\(https:/ /substreams.streamingfast.io/developers-guide/sink-targets) som PostgreSQL, MongoDB och Kafka. -## What are the benefits of Substreams? +## Vilka fördelar har Substreams? -There are many benefits to using Substreams, including: +Det finns många fördelar med att använda Substreams, inklusive: -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. +- Sammansättbarhet: Du kan stapla Substreams-moduler som LEGO-block och bygga på gemenskapsmoduler för att ytterligare förädla offentliga data. 
-- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- Högpresterande indexering: Ordervärden snabbare indexering genom storskaliga kluster av parallella operationer (tänk BigQuery). -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- Utdata var som helst: Du kan sänka dina data var som helst du vill: PostgreSQL, MongoDB, Kafka, subgrafer, platta filer, Google Sheets. -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- Programmerbarhet: Använd kod för att anpassa extrahering, utföra transformationsbaserade aggregeringar och modellera din utdata för flera sänkar. -- Access to additional data which is not available as part of the JSON RPC +- Tillgång till ytterligare data som inte är tillgänglig som en del av JSON RPC -- All the benefits of the Firehose. +- Alla fördelar med Firehose. -## What is the Firehose? +## Vad är Firehose? -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +Utvecklat av [StreamingFast](https://www.streamingfast.io/), är Firehose ett blockkedjedata-extraktionslager som är utformat från grunden för att bearbeta blockkedjans fullständiga historik med hastigheter som tidigare inte var skådade. Genom att erbjuda en filbaserad och strömningsorienterad metod är det en kärnkomponent i StreamingFasts svit med öppen källkodstekniker och grunden för Substreams. -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Gå till [documentation](https://firehose.streamingfast.io/) för att lära dig mer om Firehose. -## What are the benefits of the Firehose? +## Vilka fördelar har Firehose? -There are many benefits to using Firehose, including: +Det finns många fördelar med att använda Firehose, inklusive: -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. +- Lägsta latens och ingen avfrågning: I en strömningsorienterad stil är Firehose-noderna utformade för att snabbt skicka ut blockdata. -- Prevents downtimes: Designed from the ground up for High Availability. +- Förebygger driftstopp: Designat från grunden för hög tillgänglighet. -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. +- Missa aldrig en händelse: Firehose-strömmens markör är utformad för att hantera gafflar och att fortsätta där du avslutade under alla förhållanden. -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- Rikaste datamodell:  Bästa datamodell som inkluderar balansändringar, hela anropsträdet, interna transaktioner, loggar, lagringsändringar, gasavgifter och mer. -- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- Använder platta filer: Blockkedjedata extraheras till platta filer, den billigaste och mest optimerade datorkällan som finns tillgänglig. 
-## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## Var kan utvecklare få mer information om Substreams-drivna subgrafer och Substreams? -The [Substreams documentation](/substreams) will teach you how to build Substreams modules. +[Substreams-dokumentationen](/substreams) kommer att lära dig hur du bygger Substreams-moduler. -The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +[Substreams-drivna subgrafer dokumentationen](/cookbook/substreams-powered-subgraphs/) kommer att visa dig hur man paketerar dem för distribution på The Graph. -## What is the role of Rust modules in Substreams? +## Vad är rollen för Rust-moduler i Substreams? -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +Rust-moduler är motsvarigheten till AssemblyScript-mappers i subgrafer. De kompileras till WASM på ett liknande sätt, men programmeringsmodellen tillåter parallell körning. De definierar vilken typ av omvandlingar och aggregeringar du vill tillämpa på råblockkedjedata. -See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. +Se [moduldokumentation](https://substreams.streamingfast.io/developers-guide/modules) för mer information. -## What makes Substreams composable? +## Vad gör Substreams sammansättbart? -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +Vid användning av Substreams sker sammansättningen på omvandlingsnivån, vilket gör att cachade moduler kan återanvändas. -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. +Som exempel kan Alice bygga en DEX-prismodul, Bob kan använda den för att bygga en volymaggregator för vissa intressanta tokens, och Lisa kan kombinera fyra individuella DEX-prismoduler för att skapa en prisoracle. En enda Substreams-begäran kommer att paketera alla dessa individuella moduler, länka dem samman, för att erbjuda en mycket mer förädlad dataström. Den strömmen kan sedan användas för att fylla i en subgraf och frågas av användare. -## How can you build and deploy a Substreams-powered Subgraph? +## Hur kan man bygga och distribuera en Substreams-drivna subgraf? -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). +Efter att ha [defining](/cookbook/substreams-powered-subgraphs/) en Substreams-drivna subgraf kan du använda Graph CLI för att distribuera den i [Subgraph Studio](https://thegraph.com/studio/). -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## Var kan jag hitta exempel på Substreams och Substreams-drivna subgrafer? 
-You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. +Du kan besöka [detta Github-repo](https://github.com/pinax-network/awesome-substreams) för att hitta exempel på Substreams och Substreams-drivna subgrafer. -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? +## Vad innebär Substreams och Substreams-drivna subgrafer för The Graph Network? -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +Integrationen lovar många fördelar, inklusive extremt högpresterande indexering och ökad sammansättbarhet genom att dra nytta av gemenskapsmoduler och bygga vidare på dem. diff --git a/website/pages/sv/developing/supported-networks.json b/website/pages/sv/developing/supported-networks.json index 5e12392b8c7d..b3b8866abbae 100644 --- a/website/pages/sv/developing/supported-networks.json +++ b/website/pages/sv/developing/supported-networks.json @@ -1,9 +1,9 @@ { - "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "network": "Nätverk", + "cliName": "Kommandoradsgränssnitt namn", + "chainId": "Kedje-ID", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "Decentraliserat Nätverk", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/sv/developing/supported-networks.mdx b/website/pages/sv/developing/supported-networks.mdx index 58ce56345f7c..83d9fdfc287f 100644 --- a/website/pages/sv/developing/supported-networks.mdx +++ b/website/pages/sv/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Stödda Nätverk --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,16 +9,16 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). 
+För en fullständig lista över vilka funktioner som stöds på det decentraliserade nätverket, se [den här sidan](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). -Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. +Substreams-drivna subgrafer som indexerar Ethereum `mainnet` stöds i Subgraf Studio och det decentraliserade nätverket. -## Graph Node +## Graf Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +Om ditt föredragna nätverk inte stöds på The Graph's decentraliserade nätverk kan du köra din egen [Graf Node](https://github.com/graphprotocol/graph-node) för att indexera vilket EVM-kompatibelt nätverk som helst. Se till att [versionen](https://github.com/graphprotocol/graph-node/releases) du använder stöder nätverket och att du har den nödvändiga konfigurationen. -Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. +Graf Node kan också indexera andra protokoll via en Firehose-integration. Firehose-integrationer har skapats för NEAR, Arweave och Cosmos-baserade nätverk. diff --git a/website/pages/sv/developing/unit-testing-framework.mdx b/website/pages/sv/developing/unit-testing-framework.mdx index 8ffc66465e3a..1496196edc7a 100644 --- a/website/pages/sv/developing/unit-testing-framework.mdx +++ b/website/pages/sv/developing/unit-testing-framework.mdx @@ -1,30 +1,30 @@ --- -title: Unit Testing Framework +title: Enhetsprovningsramverk --- -Matchstick is a unit testing framework, developed by [LimeChain](https://limechain.tech/), that enables subgraph developers to test their mapping logic in a sandboxed environment and deploy their subgraphs with confidence! +Matchstick är ett enhetsprovningsramverk utvecklat av [LimeChain](https://limechain.tech/) som möjliggör för subgraph-utvecklare att testa sin kartläggningslogik i en avskärmad miljö och distribuera sina subgrapher med förtroende! -## Getting Started +## Komma igång -### Install dependencies +### Installera beroenden -In order to use the test helper methods and run the tests, you will need to install the following dependencies: +För att använda testhjälpmedlen och köra testerna måste du installera följande beroenden: ```sh yarn add --dev matchstick-as ``` -❗ `graph-node` depends on PostgreSQL, so if you don't already have it, you will need to install it. We highly advise using the commands below as adding it in any other way may cause unexpected errors! +❗ graph-node är beroende av PostgreSQL, så om du inte redan har det måste du installera det. Vi rekommenderar starkt att du använder följande kommandon eftersom att lägga till det på något annat sätt kan orsaka oväntade fel! 
#### MacOS -Postgres installation command: +Kommando för installation av Postgres: ```sh brew install postgresql ``` -Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` +Skapa en symbolisk länk till den senaste libpq.5.lib._ Du kanske behöver skapa den här mappen först: _`/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -32,7 +32,7 @@ ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/o #### Linux -Postgres installation command (depends on your distro): +Kommando för Postgres installation (beroende på din distribution): ```sh sudo apt install postgresql @@ -40,25 +40,25 @@ sudo apt install postgresql ### WSL (Windows Subsystem for Linux) -You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like +Du kan använda Matchstick i WSL både med Docker-metoden och binärmetoden. Eftersom WSL kan vara lite knepigt, här är några tips om du stöter på problem som ``` static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = ``` -or +eller ``` /node_modules/gluegun/build/index.js:13 throw up; ``` -Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running +Se till att du använder en nyare version av Node.js eftersom graph-cli inte längre stöder **v10.19.0**, och det är fortfarande standardversionen för nya Ubuntu-bilder på WSL. Till exempel är Matchstick bekräftat fungerande på WSL med **v18.1.0**. Du kan byta till den antingen via** nvm ** eller genom att uppdatera din globala Node.js. Glöm inte att ta bort `node_modules` och köra `npm install`igen efter att du har uppdaterat Node.js! Sedan, se till att du har **libpq** installerat, du kan göra det genom att köra ``` sudo apt-get install libpq-dev ``` -And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as +Och till sist, använd inte `graph test` (som använder din globala installation av graph-cli och av någon anledning ser ut som om det är trasig på WSL för närvarande), istället använd `yarn test` eller `npm run test` (det kommer att använda den lokala projektbaserade instansen av graph-cli, som fungerar utmärkt). 
För detta behöver du självklart ha ett `"test"`-skript i din `package.json`-fil, vilket kan vara något så enkelt som ```json { @@ -76,57 +76,57 @@ And finally, do not use `graph test` (which uses your global installation of gra } ``` -### Usage +### Användning -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +För att använda **Matchstick** i ditt subgrafprojekt öppnar du bara en terminal, navigerar till rotmappen för ditt projekt och kör helt enkelt `graftest [options] ` - den laddar ner den senaste **Matchstick**-binären och kör det angivna testet eller alla tester i en testmapp (eller alla befintliga tester om ingen datakällasflagga är angiven). -### CLI options +### CLI alternativ -This will run all tests in the test folder: +Detta kommer att köra alla tester i testmappen: ```sh graph test ``` -This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: +Detta kommer att köra en test med namnet gravity.test.ts och/eller alla tester inuti en mapp med namnet gravity: ```sh graph test gravity ``` -This will run only that specific test file: +Då körs endast den specifika testfilen: ```sh graph test path/to/file.test.ts ``` -**Options:** +**Alternativ:** ```sh --c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. --h, --help Show usage information --l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) --r, --recompile Forces tests to be recompiled --v, --version Choose the version of the rust binary that you want to be downloaded/used +-c, --coverage Kör testerna i täckningsläge +-d, --docker Kör testerna i en docker-container (Observera: Kör från rotmappen för subgraph) +-f, --force Binär: Hämtar om binären. Docker: Hämtar om Dockerfilen och bygger om dockerbilden. +-h, --help Visar användningsinformation +-l, --logs Loggar till konsolen information om OS, CPU-modell och nedladdnings-URL (för felsökningssyften) +-r, --recompile Tvingar testerna att kompileras om +-v, --version Välj versionen av den rust binära som du vill att den ska hämtas/användas ``` ### Docker -From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. +Från `graph-cli 0.25.2` stöder kommandot `graph test` att köra `matchstick` i en Docker-behållare med flaggan `-d`. Docker-implementeringen använder [bind mount](https://docs.docker.com/storage/bind-mounts/) så att den inte behöver bygga om dockerbilden varje gång kommandot `graph test -d` körs. Alternativt kan du följa instruktionerna från [matchstick](https://github.com/LimeChain/matchstick#docker-) repository för att köra Docker manuellt. 
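+For example, to run only the tests in the `gravity` folder inside the Docker container while forcing the image to be rebuilt, the flags listed above can be combined (a usage sketch; exact behavior depends on your `graph-cli` version):
+
+```sh
+graph test gravity -d -f
+```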
-❗ If you have previously ran `graph test` you may encounter the following error during docker build: +❗ Om du tidigare har kört `graph test` kan du stöta på följande fel under docker build: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` +I det här fallet skapar du en `.dockerignore` i rotmappen och lägger till `node_modules/binary-install-raw/bin`. -### Configuration +### Konfiguration -Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: +Matchstick kan konfigureras att använda en anpassad sökväg för tester, libs och manifest via konfigurationsfilen `matchstick.yaml`: ```yaml testsFolder: path/to/tests @@ -134,27 +134,27 @@ libsFolder: path/to/libs manifestPath: path/to/subgraph.yaml ``` -### Demo subgraph +### Demo undergraf -You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) +Du kan prova och leka med exemplen från den här guiden genom att klona [Demo Subgraph-repot](https://github.com/LimeChain/demo-subgraph) -### Video tutorials +### Handledning för video -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Du kan också kolla på videoserien om ["Hur man använder Matchstick för att skriva enhetstester för dina subgraph"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) -## Tests structure (>=0.5.0) +## Teststruktur (>=0.5.0) -_**IMPORTANT: Requires matchstick-as >=0.5.0**_ +_**VIKTIGT: Kräver matchstick-as >=0.5.0**_ ### describe() -`describe(name: String , () => {})` - Defines a test group. +`describe(name: String , () => {})` - Definierar en testgrupp. -**_Notes:_** +**_Noteringar:_** -- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ +- _Describes är inte obligatoriska. Du kan fortfarande använda test() på det gamla sättet, utanför describe() blocken_ -Example: +Exempel: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -167,7 +167,7 @@ describe("handleNewGravatar()", () => { }) ``` -Nested `describe()` example: +Nästat `describe()` exempel: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -192,9 +192,9 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. +`test(name: String, () =>, should_fail: bool)` - Definierar ett testfall. Du kan använda test() inuti describe()-block eller fristående. -Example: +Exempel: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -207,7 +207,7 @@ describe("handleNewGravatar()", () => { }) ``` -or +eller ```typescript test("handleNewGravatar() should create a new entity", () => { @@ -221,11 +221,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. +Kör en kodblock före något av testen i filen. Om `beforeAll` deklareras inuti en `describe`-block körs den i början av det `describe`-blocket. 
-Examples: +Exempel: -Code inside `beforeAll` will execute once before _all_ tests in the file. +Kod inuti `beforeAll` kommer att utföras en gång före _alla_ tester i filen. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -239,20 +239,20 @@ beforeAll(() => { ... }) -describe("When the entity does not exist", () => { - test("it should create a new Gravatar with id 0x1", () => { +describe("När enheten inte existerar", () => { + test("det bör skapa en ny Gravatar med id 0x1", () => { ... }) }) -describe("When entity already exists", () => { - test("it should update the Gravatar with id 0x0", () => { +describe("När enheten redan existerar", () => { + test("det bör uppdatera Gravatar med id 0x0", () => { ... }) }) ``` -Code inside `beforeAll` will execute once before all tests in the first describe block +Kod inuti `beforeAll` kommer att exekveras en gång före alla tester i det första beskrivningsblocket ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -267,11 +267,11 @@ describe("handleUpdatedGravatar()", () => { ... }) - test("updates Gravatar with id 0x0", () => { + test("uppdaterar Gravatar med id 0x0", () => { ... }) - test("creates new Gravatar with id 0x1", () => { + test("skapar ny Gravatar med id 0x1", () => { ... }) }) @@ -281,11 +281,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. +Kör en kodblock efter alla test i filen. Om `afterAll` deklareras inuti en `describe`-block körs den i slutet av det `describe`-blocket. -Example: +Exempel: -Code inside `afterAll` will execute once after _all_ tests in the file. +Kod inuti `afterAll` kommer att utföras en gång efter _alla_ tester i filen. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -298,19 +298,19 @@ afterAll(() => { }) describe("handleNewGravatar, () => { - test("creates Gravatar with id 0x0", () => { + test("skapar Gravatar med id 0x0", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("updates Gravatar with id 0x0", () => { + test("uppdaterar Gravatar med id 0x0", () => { ... }) }) ``` -Code inside `afterAll` will execute once after all tests in the first describe block +Kod inuti `afterAll` kommer att exekveras en gång efter alla tester i det första beskrivna blocket ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" @@ -322,17 +322,17 @@ describe("handleNewGravatar", () => { ... }) - test("It creates a new entity with Id 0x0", () => { + test("Det skapar en ny enhet med id 0x0", () => { ... }) - test("It creates a new entity with Id 0x1", () => { + test("Det skapar en ny enhet med id 0x1", () => { ... }) }) describe("handleUpdatedGravatar", () => { - test("updates Gravatar with id 0x0", () => { + test("uppdaterar Gravatar med id 0x0", () => { ... }) }) @@ -342,24 +342,24 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. +Kör en kodblock före varje test. Om `beforeEach` deklareras inuti en `describe`-block körs den före varje test i det `describe`-blocket. -Examples: Code inside `beforeEach` will execute before each tests. +Exempel: Koden inuti `beforeEach` kommer att utföras före varje test. 
```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" import { handleNewGravatars } from "./utils" beforeEach(() => { - clearStore() // <-- clear the store before each test in the file + clearStore() // <-- rensa butiken före varje test i filen }) describe("handleNewGravatars, () => { - test("A test that requires a clean store", () => { + test("Ett test som kräver en ren butik", () => { ... }) - test("Second that requires a clean store", () => { + test("Andra som kräver en ren butik", () => { ... }) }) @@ -367,7 +367,7 @@ describe("handleNewGravatars, () => { ... ``` -Code inside `beforeEach` will execute only before each test in the that describe +Kod inuti `beforeEach` kommer att exekveras endast före varje test i den som beskriver ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -376,7 +376,7 @@ import { handleUpdatedGravatar, handleNewGravatar } from '../../src/gravity' describe('handleUpdatedGravatars', () => { beforeEach(() => { let gravatar = new Gravatar('0x0') - gravatar.displayName = 'First Gravatar' + gravatar.displayName = 'Första Gravatar' gravatar.imageUrl = '' gravatar.save() }) @@ -384,7 +384,7 @@ describe('handleUpdatedGravatars', () => { test('Upates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // code that should update the displayName to 1st Gravatar + // kod som ska uppdatera displayName till 1st Gravatar assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -393,7 +393,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -405,11 +405,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. +Kör en kodblock efter varje test. Om `afterEach` deklareras inuti en `describe`-block körs den efter varje test i det `describe`-blocket. -Examples: +Exempel: -Code inside `afterEach` will execute after every test. +Kod inuti `afterEach` kommer att utföras efter varje test. 
```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -433,7 +433,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // kod som ska uppdatera displayName till 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -441,14 +441,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Code inside `afterEach` will execute after each test in that describe +Kod i `afterEach` kommer att exekveras efter varje test i den beskrivningen ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -473,7 +473,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // kod som ska uppdatera displayName till 1st Gravatar assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -481,7 +481,7 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // kod som ska ändra imageUrl till https://www.gravatar.com/avatar/0x0 assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) @@ -522,11 +522,11 @@ assertNotNull(value: T) entityCount(entityType: string, expectedCount: i32) ``` -## Write a Unit Test +## Skriv en enhetstest -Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). +Låt oss se hur ett enkelt enhetstest skulle se ut med hjälp av Gravatar-exemplen i [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts). -Assuming we have the following handler function (along with two helper functions to make our life easier): +Antag att vi har följande hanteringsfunktion (tillsammans med två hjälpfunktioner för att göra vårt liv enklare): ```typescript export function handleNewGravatar(event: NewGravatar): void { @@ -577,7 +577,7 @@ export function createNewGravatarEvent( } ``` -We first have to create a test file in our project. This is an example of how that might look like: +Vi måste först skapa en testfil i vårt projekt. 
Det här är ett exempel på hur det kan se ut: ```typescript import { clearStore, test, assert } from 'matchstick-as/assembly/index' @@ -586,23 +586,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Create a test entity and save it in the store as initial state (optional) + // Skapa en testenhet och spara den i arkivet som initialtillstånd (valfritt) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Create mock events + // Skapa låtsashändelser let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Call mapping functions passing the events we just created + // Anropa mappningsfunktioner som skickar händelserna vi just skapade handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Assert the state of the store + // Bekräfta butikens tillstånd assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Clear the store in order to start the next test off on a clean slate + // Rensa lagret för att starta nästa test med en ny start clearStore() }) @@ -611,38 +611,38 @@ test('Next test', () => { }) ``` -That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: +Det är mycket att ta in! Först och främst är det viktigt att notera att vi importerar saker från `matchstick-as`, vår AssemblyScript hjälpbibliotek (distribuerat som ett npm-paket). Du kan hitta lagringsplatsen [här](https://github.com/LimeChain/matchstick-as). `matchstick-as` förser oss med användbara testmetoder och definierar också funktionen `test()` som vi kommer att använda för att bygga våra testblock. Resten är ganska självförklarande - här är vad som händer: -- We're setting up our initial state and adding one custom Gravatar entity; -- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; -- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; -- We assert the state of the store. How does that work? - We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; -- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. 
+- Vi ställer in vår inledande status och lägger till en anpassad Gravatar-entitet; +- Vi definierar två `NewGravatar` händelseobjekt tillsammans med deras data, med hjälp av funktionen `createNewGravatarEvent()`. +- Vi kallar på våra hanteringsmetoder för dessa händelser - `handleNewGravatars()` och skickar in listan med våra anpassade händelser; +- Vi försäkrar oss om statusen för lagringen. Hur fungerar det? - Vi skickar en unik kombination av entitetstyp och id. Sedan kontrollerar vi ett specifikt fält på den entiteten och försäkrar oss om att det har det värde vi förväntar oss. Vi gör detta både för den ursprungliga Gravatar-entiteten vi lade till i lagringen och de två Gravatar-entiteterna som läggs till när hanteringsfunktionen anropas; +- Och sist men inte minst - vi rensar lagringen med hjälp av `clearStore()` så att vårt nästa test kan börja med en fräsch och tom lagringsobjekt. Vi kan definiera så många testblock som vi vill. -There we go - we've created our first test! 👏 +Så där har vi skapat vårt första test! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +För att köra våra tester behöver du helt enkelt köra följande i din subgrafs rotmapp: `graph test Gravity` -And if all goes well you should be greeted with the following: +Och om allt går bra bör du hälsas av följande: -![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) +![Matchstick säger Alla tester har passerat](/img/matchstick-tests-passed.png) -## Common test scenarios +## Vanliga testscenarier -### Hydrating the store with a certain state +### Fylla på lagringen med en viss status -Users are able to hydrate the store with a known set of entities. Here's an example to initialise the store with a Gravatar entity: +Användare kan fylla på lagringen med en känd uppsättning entiteter. Här är ett exempel på att initialisera lagringen med en Gravatar-entitet: ```typescript let gravatar = new Gravatar('entryId') gravatar.save() ``` -### Calling a mapping function with an event +### Anropa en mappnings funktion med en händelse -A user can create a custom event and pass it to a mapping function that is bound to the store: +En användare kan skapa en anpassad händelse och skicka den till en mappningsfunktion som är bunden till butiken: ```typescript import { store } from 'matchstick-as/assembly/store' @@ -654,9 +654,9 @@ let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01 handleNewGravatar(newGravatarEvent) ``` -### Calling all of the mappings with event fixtures +### Anropar alla mappningar med händelsefixturer -Users can call the mappings with test fixtures. +Användare kan kalla mappningarna med testfixturer. ```typescript import { NewGravatar } from '../../generated/Gravity/Gravity' @@ -678,9 +678,9 @@ export function handleNewGravatars(events: NewGravatar[]): void { } ``` -### Mocking contract calls +### Mocka kontraktsanrop -Users can mock contract calls: +Användare kan simulera kontraktssamtal: ```typescript import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' @@ -700,9 +700,9 @@ let result = gravity.gravatarToOwner(bigIntParam) assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) ``` -As demonstrated, in order to mock a contract call and hardcore a return value, the user must provide a contract address, function name, function signature, an array of arguments, and of course - the return value. 
+För att kunna simulera ett kontraktsanrop och hårdkoda ett returvärde måste användaren tillhandahålla en kontraktsadress, funktionsnamn, funktionssignatur, en uppsättning argument och naturligtvis - returvärdet. -Users can also mock function reverts: +Användare kan också simulera funktionsåtergångar: ```typescript let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') @@ -711,20 +711,20 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri .reverts() ``` -### Mocking IPFS files (from matchstick 0.4.1) +### Simulering av IPFS-filer (från matchstick 0.4.1) -Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. +Användare kan simulera IPFS-filer genom att använda funktionen `mockIpfsFile(hash, filePath)`. Funktionen accepterar två argument, det första är IPFS-filens hash/sökväg och det andra är sökvägen till en lokal fil. -NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: +OBS: När du testar `ipfs.map/ipfs.mapJSON` måste callback-funktionen exporteras från testfilen för att matchstick ska upptäcka den, precis som funktionen `processGravatar()` i testexemplet nedan: -`.test.ts` file: +`.test.ts`-fil: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Export ipfs.map() callback in order for matchstck to detect it +// Exportera ipfs.map()-callbacken så att matchstick kan upptäcka den export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -754,7 +754,7 @@ test('ipfs.map', () => { }) ``` -`utils.ts` file: +`utils.ts`-fil: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -764,8 +764,8 @@ import { Gravatar } from "../../generated/schema" // ipfs.map callback export function processGravatar(value: JSONValue, userData: Value): void { - // See the JSONValue documentation for details on dealing - // with JSON values + // Se JSONValue-dokumentationen för mer information om hur man + // hanterar JSON-värden let obj = value.toObject() let id = obj.get('id') @@ -773,13 +773,13 @@ export function processGravatar(value: JSONValue, userData: Value): void { return } - // Callbacks can also created entities + // Callbacks kan också skapa entiteter let gravatar = new Gravatar(id.toString()) gravatar.displayName = userData.toString() + id.toString() gravatar.save() } -// function that calls ipfs.cat +// funktion som anropar ipfs.cat export function gravatarFromIpfs(): void { let rawData = ipfs.cat("ipfsCatfileHash") @@ -802,9 +802,9 @@ export function gravatarFromIpfs(): void { } ``` -### Asserting the state of the store +### Kontrollera tillståndet för lagret -Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, a name of a field on that Entity, and the expected value of the field. Here's a quick example: +Användare kan kontrollera det slutgiltiga (eller delvisa) tillståndet för lagret genom att verifiera entiteter.
För att göra detta måste användaren ange en entitetstyp, det specifika ID:t för en entitet, namnet på ett fält på den entiteten och det förväntade värdet på fältet. Här är ett snabbt exempel: ```typescript import { assert } from 'matchstick-as/assembly/index' @@ -816,38 +816,38 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. +Körning av funktionen assert.fieldEquals() kommer att kontrollera om det angivna fältet är lika med det förväntade värdet. Testet kommer att misslyckas och ett felmeddelande kommer att visas om värdena **INTE** är lika. Annars kommer testet att godkännas. -### Interacting with Event metadata +### Interagera med händelsemetadata -Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: +Användare kan använda standardtransaktionsmetadata, som kan returneras som en ethereum.Event genom att använda funktionen `newMockEvent()`. Följande exempel visar hur du kan läsa/skriva till de fälten på Event-objektet: ```typescript -// Read +// Läs let logType = newGravatarEvent.logType -// Write +// Skriv let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) ``` -### Asserting variable equality +### Verifiera att variabler är lika ```typescript assert.equals(ethereum.Value.fromString("hello"), ethereum.Value.fromString("hello")); ``` -### Asserting that an Entity is **not** in the store +### Verifiera att en entitet **inte** finns i lagret -Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message. Here's a quick example of how to use this functionality: +Användare kan verifiera att en entitet inte finns i lagret. Funktionen tar en entitetstyp och ett id. Om entiteten faktiskt finns i lagret kommer testet att misslyckas med ett relevant felmeddelande. Här är ett snabbt exempel på hur du använder den här funktionen: ```typescript assert.notInStore('Gravatar', '23') ``` -### Printing the whole store (for debug purposes) +### Skriva ut hela lagret (för felsökningsändamål) -You can print the whole store to the console using this helper function: +Du kan skriva ut hela lagret till konsolen med hjälp av denna hjälpfunktion: ```typescript import { logStore } from 'matchstick-as/assembly/store' @@ -855,9 +855,9 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -### Expected failure +### Förväntat misslyckande -Users can have expected test failures, using the shouldFail flag on the test() functions: +Användare kan ha förväntade testfel genom att använda flaggan shouldFail på test()-funktionerna: ```typescript test( @@ -869,11 +869,11 @@ test( ) ``` -If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash.
+Om testet är markerat med shouldFail = true men INTE misslyckas, kommer det att visas som ett fel i loggarna och testblocket kommer att misslyckas. Om testet är markerat med shouldFail = false (standardtillståndet) kommer testköraren dessutom att krascha. -### Logging +### Loggning -Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: +Att ha anpassade loggar i enhetstesterna är exakt samma sak som att logga i mappningarna. Skillnaden är att loggobjektet måste importeras från matchstick-as snarare än graph-ts. Här är ett enkelt exempel med alla icke-kritiska loggtyper: ```typescript import { test } from "matchstick-as/assembly/index"; @@ -896,7 +896,7 @@ test("Warning", () => { }); ``` -Users can also simulate a critical failure, like so: +Användare kan också simulera ett kritiskt fel, t.ex: ```typescript test('Blow everything up', () => { @@ -904,11 +904,11 @@ test('Blow everything up', () => { }) ``` -Logging critical errors will stop the execution of the tests and blow everything up. After all - we want to make sure you're code doesn't have critical logs in deployment, and you should notice right away if that were to happen. +Loggning av kritiska fel kommer att stoppa utförandet av testerna och orsaka total krasch. Trots allt vill vi säkerställa att din kod inte har kritiska loggar i produktion, och du bör märka det omedelbart om det skulle inträffa. -### Testing derived fields +### Testning av härledda fält -Testing derived fields is a feature which (as the example below shows) allows the user to set a field in a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. Important thing to note is that the first entity needs to be reloaded as the automatic update happens in the store in rust of which the AS code is agnostic. +Testning av härledda fält är en funktion som (som exemplet nedan visar) tillåter användaren att ställa in ett fält i en viss entitet och få en annan entitet att uppdateras automatiskt om den härleder ett av sina fält från den första entiteten. Det viktiga att notera är att den första entiteten måste laddas om eftersom den automatiska uppdateringen sker i lagringen i Rust, som AS-koden är omedveten om. ```typescript test('Derived fields example test', () => { @@ -931,13 +931,13 @@ test('Derived fields example test', () => { }) ``` -### Testing dynamic data sources +### Testning av dynamiska datakällor -Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). +Testning av dynamiska datakällor kan göras genom att moka returvärdena för funktionerna `context()`, `address()` och `network()` i dataSource-namespace. 
Dessa funktioner returnerar för närvarande följande: `context()` - returnerar en tom entitet (DataSourceContext), `address()` - returnerar `0x0000000000000000000000000000000000000000`, `network()` - returnerar `mainnet`. Funktionerna `create(...)` och `createWithContext(...)` mockas så att de inte gör något, så de behöver inte anropas i testerna alls. Ändringar av returvärden kan göras genom funktionerna i namespace `dataSourceMock` i `matchstick-as` (version 0.3.0+). -Example below: +Exempel nedan: -First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): +Först har vi följande händelsehanterare (som medvetet har ändrats för att visa datasourcemockning): ```typescript export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { @@ -953,7 +953,7 @@ export function handleApproveTokenDestinations(event: ApproveTokenDestinations): } ``` -And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: +Och sedan har vi testet som använder en av metoderna i namespace dataSourceMock för att ställa in ett nytt returvärde för alla dataSource-funktioner: ```typescript import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' @@ -986,41 +986,41 @@ test('Data source simple mocking example', () => { }) ``` -Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. +Observera att dataSourceMock.resetValues() anropas i slutet. Det beror på att värdena sparas när de ändras och måste återställas om du vill återgå till standardvärdena. -## Test Coverage +## Testtäckning -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Med **Matchstick** kan subgraph-utvecklare köra ett skript som beräknar täckningen av de skrivna enhetstesterna. -The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +Verktyget för testtäckning tar de kompilerade `wasm`-testbinärerna och omvandlar dem till `wat`-filer, som sedan enkelt kan inspekteras för att se om hanterarna som är definierade i `subgraph.yaml` har anropats eller inte. Eftersom kodtäckning (och tester som helhet) är i mycket tidiga stadier i AssemblyScript och WebAssembly kan **Matchstick** inte kontrollera grentäckning. Istället förlitar vi oss på antagandet att om en given hanterare har anropats, så har händelsen/funktionen för den hanteraren blivit korrekt mockad.
-### Prerequisites +### Förutsättningar -To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: +För att köra testtäckningsfunktionaliteten som tillhandahålls i **Matchstick** måste du förbereda några saker i förväg: -#### Export your handlers +#### Exportera dina hanterare -In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: +För att **Matchstick** ska kunna kontrollera vilka hanterare som körs måste dessa hanterare exporteras från **testfilen**. Till exempel i vårt exempel, i vår fil gravity.test.ts, har vi följande hanterare som importeras: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: +För att denna funktion skall vara synlig (för att den skall ingå i `wat`-filen **med namn**) måste vi också exportera den, så här: ```typescript export { handleNewGravatar } ``` -### Usage +### Användning -Once that's all set up, to run the test coverage tool, simply run: +När allt är klart kör du bara testtäckningsverktyget: ```sh graph test -- -c ``` -You could also add a custom `coverage` command to your `package.json` file, like so: +Du kan också lägga till ett anpassat `coverage`-kommando i din `package.json`-fil, så här: ```typescript "scripts": { @@ -1029,7 +1029,7 @@ You could also add a custom `coverage` command to your `package.json` file, like }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +Det kommer att köra täckningsverktyget och du bör se något liknande i terminalen: ```sh $ graph test -c @@ -1068,17 +1068,17 @@ Test coverage: 0.0% (0/6 handlers). Global test coverage: 22.2% (2/9 handlers). ``` -### Test run time duration in the log output +### Testkörningens varaktighet i loggutmatningen -The log output includes the test run duration. Here's an example: +Loggutmatningen innehåller testkörningens varaktighet. Här är ett exempel: `[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` -## Common compiler errors +## Vanliga kompilatorfel > Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined -This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/assemblyscript-api/#logging-api) +Det betyder att du har använt `console.log` i din kod, som inte stöds av AssemblyScript. Överväg att använda [Logging API](/developing/assemblyscript-api/#logging-api) > ERROR TS2554: Expected ? arguments, but got ?. > @@ -1092,8 +1092,8 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. +Motsägelsen i argumenten beror på en motsägelse i `graph-ts` och `matchstick-as`. Det bästa sättet att åtgärda problem som detta är att uppdatera allt till den senaste utgivna versionen. 
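+Ett sätt att göra det är att uppdatera båda paketen tillsammans. Nedan följer en minimal skiss; pakethanteraren (yarn) och `latest`-taggen är bara antaganden, lås exakta versioner om ditt projekt kräver det:

```sh
# Endast en skiss: anpassa till din pakethanterare och önskade versioner.
yarn add --dev matchstick-as@latest
yarn add @graphprotocol/graph-ts@latest
```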
-## Feedback +## Respons -If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. +Om du har några frågor, feedback, funktionsförfrågningar eller bara vill nå ut, är det bästa stället The Graph Discord där vi har en dedikerad kanal för Matchstick, kallad 🔥| unit-testing. diff --git a/website/pages/sv/docsearch.json b/website/pages/sv/docsearch.json index 8cfff967936d..f05bd1cb46b0 100644 --- a/website/pages/sv/docsearch.json +++ b/website/pages/sv/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Sök", + "buttonAriaLabel": "Sök" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", - "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "resetButtonTitle": "Rensa frågan", + "resetButtonAriaLabel": "Rensa frågan", + "cancelButtonText": "Avbryt", + "cancelButtonAriaLabel": "Avbryt" }, "startScreen": { - "recentSearchesTitle": "Recent", - "noRecentSearchesText": "No recent searches", - "saveRecentSearchButtonTitle": "Save this search", - "removeRecentSearchButtonTitle": "Remove this search from history", - "favoriteSearchesTitle": "Favorite", - "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + "recentSearchesTitle": "Nyligen", + "noRecentSearchesText": "Inga senaste sökningar", + "saveRecentSearchButtonTitle": "Spara denna sökning", + "removeRecentSearchButtonTitle": "Ta bort den här sökningen från historiken", + "favoriteSearchesTitle": "Favorit", + "removeFavoriteSearchButtonTitle": "Ta bort denna sökning från favoriter" }, "errorScreen": { - "titleText": "Unable to fetch results", - "helpText": "You might want to check your network connection." + "titleText": "Det gick inte att hämta resultat", + "helpText": "Du kanske vill kontrollera din nätverksanslutning." }, "footer": { - "selectText": "to select", - "selectKeyAriaLabel": "Enter key", - "navigateText": "to navigate", - "navigateUpKeyAriaLabel": "Arrow up", - "navigateDownKeyAriaLabel": "Arrow down", - "closeText": "to close", - "closeKeyAriaLabel": "Escape key", - "searchByText": "Search by" + "selectText": "för att välja", + "selectKeyAriaLabel": "Enter-tangenten", + "navigateText": "för att navigera", + "navigateUpKeyAriaLabel": "Pil upp", + "navigateDownKeyAriaLabel": "Pil ner", + "closeText": "för att stänga", + "closeKeyAriaLabel": "Escape-tangenten", + "searchByText": "Sök med" }, "noResultsScreen": { - "noResultsText": "No results for", - "suggestedQueryText": "Try searching for", - "reportMissingResultsText": "Believe this query should return results?", - "reportMissingResultsLinkText": "Let us know." + "noResultsText": "Inga resultat för", + "suggestedQueryText": "Försök att söka efter", + "reportMissingResultsText": "Tror du att den här frågan borde ge resultat?", + "reportMissingResultsLinkText": "Låt oss veta." } } } diff --git a/website/pages/sv/firehose.mdx b/website/pages/sv/firehose.mdx index 5e2b37ee4bb6..421d75a41a66 100644 --- a/website/pages/sv/firehose.mdx +++ b/website/pages/sv/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works.
+Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Komma igång + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/sv/global.json b/website/pages/sv/global.json index 6a3eb234bfce..8a35bb090097 100644 --- a/website/pages/sv/global.json +++ b/website/pages/sv/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "collapse": "Fäll ihop", + "expand": "Expandera", + "previous": "Föregående", + "next": "Nästa", + "editPage": "Redigera sida", + "pageSections": "Sektioner på sidan", + "linkToThisSection": "Länk till detta avsnitt", + "technicalLevelRequired": "Teknisk nivå krävs", + "notFoundTitle": "Hoppsan! Den här sidan försvann i rymden...", + "notFoundSubtitle": "Kontrollera om du använder rätt adress eller utforska vår webbplats genom att klicka på länken nedan.", + "goHome": "Gå hem", "video": "Video" } diff --git a/website/pages/sv/glossary.mdx b/website/pages/sv/glossary.mdx index 2e840513f1ea..229067f7c910 100644 --- a/website/pages/sv/glossary.mdx +++ b/website/pages/sv/glossary.mdx @@ -1,18 +1,18 @@ --- -title: Glossary +title: Ordlista --- -- **The Graph**: A decentralized protocol for indexing and querying data. +- **The Graph**: Ett decentraliserat protokoll för indexering och sökning av data. - **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: Ett frågespråk för API:er och en körningsmiljö för att uppfylla dessa frågor med befintlig data. The Graph använder GraphQL för att fråga subgrafer.
- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. -- **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. +- **Subgraf**: Ett anpassat API som bygger på blockchain-data och som kan frågas med hjälp av [GraphQL](https://graphql.org/). Utvecklare kan bygga, distribuera och publicera subgrafer på The Graph's decentraliserade nätverk. Därefter kan Indexer börja indexera subgrafer för att göra dem tillgängliga att fråga av subgrafkonsumenter. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,9 +24,11 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. -- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. +- **Delegationsavgift **: En avgift på 0,5% som betalas av Delegatorer när de delegerar GRT till Indexers. Det GRT som används för att betala avgiften bränns. - **Curators**: Network participants that identify high-quality subgraphs, and “curate” them (i.e., signal GRT on them) in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. Indexers earn indexing rewards proportional to the signal on a subgraph. We see a correlation between the amount of GRT signalled and the number of Indexers indexing a subgraph. @@ -38,52 +40,46 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. 
- -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. + 1. **Aktiv**: En allokering anses vara aktiv när den skapas på kedjan. Detta kallas att öppna en allokering och indikerar för nätverket att Indexer aktivt indexerar och betjänar frågor för en särskild subgraf. Aktiva allokeringar ackumulerar indexbelöningar proportionellt mot signalen på subgrafen och mängden GRT som allokerats. - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **SubGraf Studio**: En kraftfull dapp för att bygga, distribuera och publicera subgrafer. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. 
Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **Indexbelöningar**: De belöningar som Indexers får för att indexera subgrafer. Indexbelöningar distribueras i GRT. -- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. +- **Delegationsbelöningar**: De belöningar som Delegatorer får för att delegera GRT till Indexers. Delegationsbelöningar distribueras i GRT. -- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. +- **GRT**: The Graph's arbetsnytto-token. GRT tillhandahåller ekonomiska incitament för nätverksdeltagare att bidra till nätverket. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +- **Grafnod**: Graph Node är komponenten som indexerar subgrafer och gör den resulterande datan tillgänglig för frågor via ett GraphQL API.
Som sådan är den central för indexeringsstacken och korrekt drift av Graph Node är avgörande för att köra en framgångsrik Indexer. -- **Indexer agent**: The Indexer agent is part of the indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Indexer-agent**: Indexer-agenten är en del av indexeringsstacken. Den underlättar Indexers interaktioner på kedjan, inklusive registrering på nätverket, hantering av subgrafers distributioner till dess Graph Node(s), och hantering av allokeringar. -- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. +- **The Graph Klient**: Ett bibliotek för att bygga decentraliserade dappar baserade på GraphQL. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Graf Explorer**: En dapp utformad för nätverksdeltagare att utforska subgrafer och interagera med protokollet. -- **Graph CLI**: A command line interface tool for building and deploying to The Graph. +- **Graf CLI**: Ett kommandoradsgränssnitt för att bygga och distribuera till The Graph. -- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. +- **Cooldown-period**: Den återstående tiden tills en Indexer som ändrade sina delegationsparametrar kan göra det igen. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. -- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. +- **_Uppgradering_ av en subgraf till The Graf Nätverk**: Processen att flytta en subgraf från hosted service till The Graph Nätverk. -- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. +- **_Uppdatering_ av en subgraf**: Processen att släppa en ny subgrafversion med uppdateringar av subgrafens manifest, schema eller avbildning. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/sv/graphcast.mdx b/website/pages/sv/graphcast.mdx index e397aad36e43..497216fc39b9 100644 --- a/website/pages/sv/graphcast.mdx +++ b/website/pages/sv/graphcast.mdx @@ -2,20 +2,20 @@ title: Graphcast --- -## Introduction +## Introduktion -Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? +Finns det något du skulle vilja lära dig från eller dela med dina medindexare på ett automatiserat sätt, men det är för mycket besvär eller kostar för mycket gas? 
-Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models. +För närvarande avgörs kostnaden för att sända information till andra nätverksdeltagare av gasavgifter på Ethereum-blockkedjan. Graphcast löser detta problem genom att agera som ett frivilligt decentraliserat, distribuerat peer-to-peer (P2P) kommunikationsverktyg som möjliggör utbyte av information i realtid mellan indexare över nätverket. Kostnaden för att utbyta P2P-meddelanden är nära noll, med avvägningen av att det inte finns några garantier för datintegritet. Trots detta strävar Graphcast efter att tillhandahålla garantier för meddelandevaliditet (dvs. att meddelandet är giltigt och signerat av en känd protokolldeltagare) med ett öppet utrymme för design av ryktmodeller. -The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: +Graphcast SDK (Utrustning för programvaruutveckling) gör det möjligt för utvecklare att bygga Radios, vilka är applikationer som drivs av gossipeffekt och som indexare kan köra för att tjäna ett visst syfte. Vi avser också att skapa några Radios (eller ge stöd åt andra utvecklare/team som önskar bygga Radios) för följande användningsområden: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. -- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. +- Realtidskorskontroll av datintegritet för delgrafer ([Subgraf Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). +- Genomföra auktioner och koordinering för warp-synkronisering av delgrafer, delströmmar och Firehose-data från andra indexare. +- Självrapportering om aktiv frågeanalys, inklusive delgrafförfrågningsvolym, avgiftsvolym etc. +- Självrapportering om indexeringanalys, inklusive tid för delgrafindexering, gasavgifter för handler, påträffade indexeringsfel etc. +- Självrapportering om stackinformation inklusive graph-node-version, Postgres-version, Ethereum-klientversion etc. 
-### Learn More +### Lär dig mer -If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) +Om du vill lära dig mer om Graphcast, [kolla in dokumentationen här.](https://docs.graphops.xyz/graphcast/intro) diff --git a/website/pages/sv/index.json b/website/pages/sv/index.json index 9e28e13d5001..47fa636b0961 100644 --- a/website/pages/sv/index.json +++ b/website/pages/sv/index.json @@ -1,77 +1,76 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "title": "Komma igång", + "intro": "Lär dig om The Graph, ett decentraliserat protokoll för indexering och sökning av data från blockkedjor.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Om The Graph", + "description": "Läs mer om The Graph" }, "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "title": "Snabbstart", + "description": "Hoppa in och börja med The Graph" }, "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "title": "Vanliga frågor för utvecklare", + "description": "Vanliga frågor" }, "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "title": "Fråga från en applikation", + "description": "Lär dig att fråga från en applikation" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Skapa en Subgraf", + "description": "Använd Studio för att skapa subgrafer" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", + "title": "Nätverks roller", + "description": "Lär dig om The Graph: s nätverks roller.", "roles": { "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "title": "Utvecklare", + "description": "Skapa en subgraf eller använd befintliga subgrafer i en dapp" }, "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "title": "Indexerare", + "description": "Använd en nod för att indexera data och betjäna frågor" }, "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "title": "Kurator", + "description": "Organisera data genom att signalera på subgrafer" }, "delegator": { - "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "title": "Delegater", + "description": "Säkra nätverket genom att delegera GRT till indexerare" } } }, - "readMore": "Read more", + "readMore": "Läs mer", "products": { - "title": "Products", + "title": "Produkter", "products": { "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" + "title": "Subgraf Studion", + "description": "Skapa, hantera och publicera subgrafer och API nycklar" }, "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" + "title": "Graf Utforskaren", + "description": "Utforska subgrafer och interagera med protokollet" }, 
"hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "Värdtjänster", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Nätverk som stöds", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/sv/managing/deprecating-a-subgraph.mdx b/website/pages/sv/managing/deprecating-a-subgraph.mdx index e6adfccad368..952b76f2445d 100644 --- a/website/pages/sv/managing/deprecating-a-subgraph.mdx +++ b/website/pages/sv/managing/deprecating-a-subgraph.mdx @@ -1,18 +1,18 @@ --- -title: Deprecating a Subgraph +title: Avveckla en Subgraf --- -So you'd like to deprecate your subgraph on The Graph Explorer. You've come to the right place! Follow the steps below: +Om du vill avveckla din subgraf på The Graph Explorer har du kommit rätt! Följ stegen nedan: -1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. +1. Besök kontraktsadressen [här](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) +2. Anropa `deprecateSubgraph` med din `SubgraphID` som argument. +3. Voilà! Din subgraf kommer inte längre att visas vid sökningar på The Graph Explorer. -Please note the following: +Observera följande: -- The `deprecateSubgraph` function should be called by the owner's wallet. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph will be able to withdraw their signal at an average share price. -- Deprecated subgraphs will be indicated with an error message. +- Funktionen `deprecateSubgraph` bör anropas av ägarens plånbok. +- Kuratorer kommer inte längre kunna signalera på subgrafet. +- Kuratorer som redan har signalerat på subgrafet kommer att kunna dra tillbaka sin signal till ett genomsnittligt andelspris. +- Avvecklade subgrafer kommer att markeras med ett felmeddelande. -If you interacted with the deprecated subgraph, you'll be able to find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. +Om du har interagerat med den avvecklade subgrafet kommer du att kunna hitta den i din användarprofil under flikarna "Subgrafer", "Indexering" eller "Kurat" respektive. diff --git a/website/pages/sv/managing/transferring-subgraph-ownership.mdx b/website/pages/sv/managing/transferring-subgraph-ownership.mdx index 1ca1c621a9c9..2b97d8279c28 100644 --- a/website/pages/sv/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/sv/managing/transferring-subgraph-ownership.mdx @@ -1,39 +1,39 @@ --- -title: Transferring Subgraph Ownership +title: Överföring av Ägande för Subgraf --- -The Graph supports the transfer of the ownership of a subgraph. +The Graph stödjer överföring av äganderätten för en subgraf. 
-When you deploy a subgraph to mainnet, an NFT will be minted to the address that deployed the subgraph. The NFT is based on a standard ERC721, so it can be easily transferred to different accounts. +När du distribuerar en subgraf till huvudnätet kommer en NFT att skapas till adressen som distribuerade subgrafen. NFT:n bygger på standarden ERC721, vilket gör det enkelt att överföra den till olika konton. -Whoever owns the NFT controls the subgraph. If the owner decides to sell the NFT, or transfer it, they will no longer be able to make edits or updates to that subgraph on the network. +Den som äger NFT:n kontrollerar subgrafen. Om ägaren bestämmer sig för att sälja NFT:n eller överföra den kommer de inte längre kunna göra redigeringar eller uppdateringar av subgrafen på nätverket. -In addition to adding more flexibility to the development lifecycle, this functionality makes certain use cases more convenient, such as moving your control to a multisig or a community member creating it on behalf of a DAO. +Utöver att ge mer flexibilitet till utvecklingslivscykeln, gör den här funktionaliteten vissa användningsfall mer bekväma, som att flytta kontrollen till en multisig eller att en medlem i gemenskapen skapar den på uppdrag av en DAO. -## Viewing your subgraph as an NFT +## Se din subgraf som en NFT -To view your subgraph as an NFT, you can visit an NFT marketplace like OpenSea: +För att se din subgraf som en NFT kan du besöka en NFT-marknadsplats som OpenSea: ``` https://opensea.io/your-wallet-address ``` -Or a wallet explorer like **Rainbow.me**: +Eller en plånboksutforskare som **Rainbow.me**: ``` https://rainbow.me/your-wallet-addres ``` -## Transferring ownership of a subgraph +## Överföring av ägande för en subgraf -To transfer ownership of a subgraph, you can use the UI built into Subgraph Studio: +För att överföra äganderätten för en subgraf kan du använda det inbyggda gränssnittet i Subgraph Studio: -![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) +![Överföring av Ägande för Subgraf](/img/subgraph-ownership-transfer-1.png) -And then choose the address that you would like to transfer the subgraph to: +Välj sedan adressen som du vill överföra subgrafen till: -![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) +![Överföring av Ägande för Subgraf](/img/subgraph-ownership-transfer-2.png) -You can also use the built-in UI of NFT marketplaces like OpenSea: +Du kan också använda det inbyggda gränssnittet i NFT-marknadsplatser som OpenSea: -![Subgraph Ownership Trasfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) +![Överföring av Ägande för Subgraf från NFT-marknadsplats](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/sv/mips-faqs.mdx b/website/pages/sv/mips-faqs.mdx index 73efe82662cb..0613c6c3d7c1 100644 --- a/website/pages/sv/mips-faqs.mdx +++ b/website/pages/sv/mips-faqs.mdx @@ -1,125 +1,127 @@ --- -title: MIPs FAQs +title: Vanliga Frågor om MIPs --- -## Introduction +## Introduktion -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. +> Observera: MIPs-programmet är avslutat sedan maj 2023. Tack till alla Indexers som deltog! 
-To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). +Det är en spännande tid att delta i The Graph-ekosystemet! Under [Graph Day 2022](https://thegraph.com/graph-day/2022/) tillkännagav Yaniv Tal [avslutningen av den hostade tjänsten](https://thegraph.com/blog/sunsetting-hosted-service/), ett ögonblick som The Graph-ekosystemet har arbetat mot i många år. -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. +För att stödja avslutningen av den hostade tjänsten och migrationen av all dess aktivitet till det decentraliserade nätverket har The Graph Foundation tillkännagivit [Migration Infrastructure Providers (MIPs) programmet](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. +MIPs-programmet är ett incitamentsprogram för Indexers för att stödja dem med resurser att indexera kedjor bortom Ethereum-huvudnätet och hjälpa The Graph-protokollet att expandera det decentraliserade nätverket till ett infrastrukturlager för flera kedjor. -### Useful Resources +MIPs-programmet har allokerat 0,75% av GRT-försörjningen (75M GRT), med 0,5% för att belöna Indexers som bidrar till att starta nätverket och 0,25% som tilldelats Network Grants för subgraph-utvecklare som använder subgraphs på flera kedjor. -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +### Användbara resurser + +- [Indexer 2ools från Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [Så blir du en effektiv Indexer på The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) - [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +- [Tilldelningsoptimerare](https://github.com/graphprotocol/allocationopt.jl) +- [Verktyg för Tilldelningsoptimering](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? +### 1. Är det möjligt att generera ett giltigt bevis för indexering (POI) även om en subgraph har misslyckats? -Yes, it is indeed. +Ja, det är faktiskt möjligt. -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. +För sammanhang specificerar skiljedomstolsstadgan [läs mer om stadgan här](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) metoden för att generera ett POI för en misslyckad subgraph.
-A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). +En medlem av communityn, [SunTzu](https://github.com/suntzu93), har skapat ett skript för att automatisera denna process i enlighet med stadgans metodik. Kolla in repositoriet [here](https://github.com/suntzu93/get_valid_poi_subgraph). -### 2. Which chain will the MIPs program incentivise first? +### 2. Vilken kedja kommer MIPs-programmet att incitamenta först? -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. +Den första kedjan som kommer att stödjas på det decentraliserade nätverket är Gnosis Chain! Tidigare känd som xDAI är Gnosis Chain en EVM-baserad kedja. Gnosis Chain valdes som den första med tanke på användarvänlighet för att köra noder, Indexer-readiness, överensstämmelse med The Graph och användning inom web3. -### 3. How will new chains be added to the MIPs program? +### 3. Hur kommer nya kedjor att läggas till i MIPs-programmet? -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. +Nya kedjor kommer att tillkännages under MIPs-programmet, baserat på Indexer-readiness, efterfrågan och communityns inställning. Kedjor kommer först att stödjas på testnätet och sedan kommer en GIP att antas för att stödja den kedjan på huvudnätet. Indexers som deltar i MIPs-programmet kommer att välja vilka kedjor de är intresserade av att stödja och kommer att tjäna belöningar per kedja, utöver att tjäna frågeavgifter och indexbelöningar på nätverket för att betjäna subgraphs. MIPs-deltagare kommer att poängsättas baserat på sin prestation, förmåga att betjäna nätverksbehoven och communitystöd. -### 4. How will we know when the network is ready for a new chain? +### 4. Hur vet vi när nätverket är redo för en ny kedja? -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. +The Graph Foundation kommer att övervaka QoS-prestandamätningar, nätverksprestanda och communitykanaler för att bäst bedöma beredskapen. Prioriteten är att säkerställa att nätverket uppfyller prestandakraven för de multi-chain dapps att kunna migrera sina subgraphs. -### 5. How are rewards divided per chain? +### 5. Hur fördelas belöningar per kedja? -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. 
However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. +Eftersom kedjor varierar i sina krav för synkronisering av noder och de skiljer sig åt i fråga om frågevolym och användning, kommer belöningar per kedja att beslutas i slutet av den kedjans cykel för att säkerställa att all feedback och lärdomar fångas upp. Dock kommer Indexers när som helst också att kunna tjäna frågeavgifter och indexbelöningar när kedjan stöds på nätverket. -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? +### 6. Behöver vi indexera alla kedjor i MIPs-programmet eller kan vi välja bara en kedja och indexera den? -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. +Du är välkommen att indexera vilken kedja du vill! Målet med MIPs-programmet är att rusta Indexers med verktyg och kunskap att indexera de kedjor de önskar och stödja de web3-ekosystem de är intresserade av. Men för varje kedja finns det faser från testnätet till huvudnätet. Se [MIPs-notionssidan](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) för att lära dig mer om faserna. -### 7. When will rewards be distributed? +### 7. När kommer belöningarna att distribueras? -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. +MIPs-belöningar kommer att distribueras per kedja när prestandamätningarna uppfylls och migrerade subgraphs stöds av de Indexers. Håll utkik efter information om totala belöningar per kedja halvvägs genom den kedjans cykel. -### 8. How does scoring work? +### 8. Hur fungerar poängsättning? -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: +Indexers kommer att tävla om belöningar baserat på poängsättning under hela programmet på ledartavlan. Programmets poängsättning kommer att baseras på: -**Subgraph Coverage** +**Täckning av Subgraph** -- Are you providing maximal support for subgraphs per chain? +- Ger du maximalt stöd för subgraphs per kedja? -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. +- Under MIPs förväntas stora Indexers satsa 50%+ av subgraphs per kedja de stöder. -**Quality Of Service** +**Tjänstekvalitet** -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? +- Tjänar Indexern kedjan med god tjänstekvalitet (latens, färsk data, drifttid, osv.)? -- Is the Indexer supporting dapp developers being reactive to their needs? +- Stöder Indexern dapp-utvecklare genom att vara lyhörd för deras behov? -Is Indexer allocating efficiently, contributing to the overall health of the network? +Allokerar Indexer effektivt och bidrar till nätverkets övergripande hälsa? 
-**Community Support** +**Stöd till gemenskapen** -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? +- Samarbetar indexeraren med andra indexerare för att hjälpa dem att förbereda sig för multikedjor? -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? +- Ger Indexer feedback till kärnutvecklare genom hela programmet eller delar information med Indexerare i forumet? -### 9. How will the Discord role be assigned? +### 9. Hur kommer Discord-rollen att tilldelas? -Moderators will assign the roles in the next few days. +Moderatorer kommer att tilldela rollerna under de närmaste dagarna. -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? +### 10. Är det okej att starta programmet på ett testnät och sedan byta till Mainnet? Kommer du att kunna identifiera min nod och ta hänsyn till den när du delar ut belöningar? -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. +Ja, det förväntas faktiskt av dig att göra det. Flera faser är på Görli och en är på mainnet. -### 11. At what point do you expect participants to add a mainnet deployment? +### 11. När förväntar du dig att deltagarna lägger till en distribution av ett mainnet? -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +Det kommer att finnas ett krav på att ha en mainnet-indexerare under fas 3. Mer information om detta kommer att [delas på denna Notion-sida inom kort.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -### 12. Will rewards be subject to vesting? +### 12. Kommer belöningar att bli föremål för intjänande? -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. +Den procentandel som ska distribueras i slutet av programmet kommer att vara föremål för intjänande. Mer information om detta kommer att ges i indexeringsavtalet. -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? +### 13. För lag med mer än en medlem, kommer alla lagmedlemmar att få en MIPs Discord-roll? -Yes +Ja -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? +### 14. Är det möjligt att använda de låsta tokens från grafkuratorprogrammet för att delta i MIPs testnät? -Yes +Ja -### 15. During the MIPs program, will there be a period to dispute invalid POI? +### 15. Kommer det att finnas en period för att bestrida ogiltiga POI under MIPs-programmet? -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation +Det är ännu inte beslutat. Vänligen återvänd till den här sidan med jämna mellanrum för mer information om detta eller om din förfrågan är brådskande, vänligen maila info@thegraph.foundation -### 17. Can we combine two vesting contracts? +### 17. Kan vi kombinera två intjänandeavtal? -No. The options are: you can delegate one to the other one or run two separate indexers. +Nej. Alternativen är: du kan delegera en till den andra eller köra två separata indexerare. -### 18. KYC Questions? +### 18. KYC-frågor?
-Please email info@thegraph.foundation +Vänligen skicka e-post till info@thegraph.foundation -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? +### 19. Jag är inte redo att indexera Gnosis-kedjan, kan jag hoppa in och börja indexera från en annan kedja när jag är redo? -Yes +Ja -### 20. Are there recommended regions to run the servers? +### 20. Finns det rekommenderade regioner för att köra servrarna? -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. +Vi ger inga rekommendationer om regioner. När du väljer platser kanske du vill tänka på var de stora marknaderna finns för kryptovalutor. -### 21. What is “handler gas cost”? +### 21. Vad är "gaskostnad för hanterare" ("handler gas cost")? -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. +Det är det deterministiska måttet på kostnaden för att utföra en handler. I motsats till vad namnet kanske antyder är det inte relaterat till gaskostnaden på blockkedjor. diff --git a/website/pages/sv/network/benefits.mdx b/website/pages/sv/network/benefits.mdx index 839a0a7b9cf7..c94c62e684dc 100644 --- a/website/pages/sv/network/benefits.mdx +++ b/website/pages/sv/network/benefits.mdx @@ -1,96 +1,96 @@ --- -title: The Graph Network vs. Self Hosting +title: The Graph Nätverk vs. Egen Värd socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +The Graphs decentraliserade nätverk har utformats och finslipats för att skapa en robust indexering och frågeupplevelse - och det blir bättre för varje dag tack vare tusentals bidragsgivare runt om i världen. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +Fördelarna med detta decentraliserade protokoll kan inte replikeras genom att köra en `graph-node` lokalt. The Graph Nätverk är mer pålitligt, mer effektivt och mindre kostsamt. -Here is an analysis: +Här är en analys: -## Why You Should Use The Graph Network +## Varför Du Bör Använda The Graph Nätverk -- 60-98% lower monthly cost -- $0 infrastructure setup costs -- Superior uptime -- Access to 438 Indexers (and counting) -- 24/7 technical support by global community +- 60-98% lägre månadskostnad +- $0 infrastrukturinstallationskostnader +- Överlägsen drifttid +- Tillgång till hundratals oberoende indexerare runt om i världen +- Teknisk support dygnet runt via global community -## The Benefits Explained +## Fördelarna förklarade -### Lower & more Flexible Cost Structure +### Lägre & Mer Flexibel Kostnadsstruktur -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. +Inga avtal. Inga månadsavgifter. Du betalar bara för de sökningar du använder - med en genomsnittlig kostnad per sökning på 0,0002 USD. Förfrågningar prissätts i USD och betalas i GRT. -Query costs may vary; the quoted cost is the average at time of publication (December 2022). +Frågekostnaderna kan variera; den citerade kostnaden är genomsnittet vid publiceringstidpunkten (december 2022).
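To make the per-query arithmetic behind the cost tables below easy to reproduce, here is a minimal TypeScript sketch. The per-query rates are the illustrative figures quoted in the tables that follow; real spend depends on the query budget you configure and on GRT pricing, so treat this as back-of-the-envelope math only.

```typescript
// Back-of-the-envelope estimate of monthly query spend on The Graph Network.
// The rates below are the illustrative per-query figures from the tables on
// this page; actual costs vary with your configured query budget and GRT price.
function monthlyQueryCost(queriesPerMonth: number, usdPerQuery: number): number {
  return queriesPerMonth * usdPerQuery;
}

console.log(monthlyQueryCost(30_000, 0.0005)); // ~$15 per month (low volume tier)
console.log(monthlyQueryCost(3_000_000, 0.00025)); // ~$750 per month (medium volume tier)
console.log(monthlyQueryCost(30_000_000, 0.00015)); // ~$4,500 per month (high volume tier)
```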
-## Low Volume User (less than 30,000 queries per month) +## Användare Med Låg Volym (färre än 30,000 frågor per månad) -| Cost Comparison | Self Hosted | Graph Network | +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | ~$15 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | -| Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | ~$15 | - -## Medium Volume User (3,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Månatlig kostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $0+ | ~$15 per månad | +| Konstruktionstid | $400 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | 30 000 (automatisk skalning) | +| Kostnad per fråga | $0 | $0.0005 | +| Infrastruktur | Centraliserad | Decentraliserad | +| Geografisk redundans | $750+ per extra nod | Inkluderat | +| Drifttid | Varierande | 99.9%+ | +| Total Månadskostnad | $750+ | ~$15 | + +## Medium volym användare (3,000,000+ förfrågningar per månad) + +| Kostnadsjämförelse | Egen Värd | Graph Nätverk | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $750 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 3,000,000+ | -| Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $750 | - -## High Volume User (30,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Månadskostnad för server\* | $350 per månad | $0 | +| Kostnad för frågor | $500 per månad | $750 per månad | +| Ingenjörstid | $800 per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | 3,000,000+ | +| Kostnad per fråga | $0 | $0.00025 | +| Infrastruktur | Centraliserad | Decentraliserad | +| Kostnader för ingenjörsarbete | $200 per timme | Inkluderat | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierar | 99.9%+ | +| Total Månadskostnad | $1,650+ | $750 | + +## Användare Med Hög Volym (30,000,000+ frågor per månad) + +| Kostnadsjämförelse | Egen Värd | The Graph Nätverk | | :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $4,500 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000,000+ | -| Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $4,500 | +| Månadskostnad för server\* | $1100 per månad, per nod | 
$0 | +| Kostnad för frågor | $4000 | $4,500 per månad | +| Antal noder som behövs | 10 | Ej tillämpligt | +| Ingenjörstid | $6,000 eller mer per månad | Ingen, inbyggd i nätverket med globalt distribuerade Indexers | +| Frågor per månad | Begränsad till infra kapacitet | 30,000,000+ | +| Kostnad per fråga | $0 | $0.00015 | +| Infrastruktur | Centraliserad | Decentraliserad | +| Geografisk redundans | $1,200 i totala kostnader per extra nod | Inkluderat | +| Drifttid | Varierar | 99.9%+ | +| Total Månadskostnad | $11,000+ | $4,500 | -\*including costs for backup: $50-$100 per month +\*inklusive kostnader för backup: $50-$100 per månad -Engineering time based on $200 per hour assumption +Ingenjörstid baserad på antagandet $200 per timme -using the max query budget function in the budget billing tab, while maintaining high quality of service +genom att använda funktionen för maximal frågebudget på fliken för budgetfakturering, samtidigt som hög servicekvalitet bibehålls -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. +Uppskattade kostnader gäller endast för Ethereum Mainnet subgrafer - kostnaderna är ännu högre när man själv hostar en `graph-node` på andra nätverk. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Att kurera signal på en subgraf är en valfri engångskostnad med noll nettokostnad (t.ex. $1k i signal kan kureras på en subgraf och senare dras tillbaka - med potential att tjäna avkastning i processen). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Vissa användare kan behöva uppdatera sin subgraf till en ny version. På grund av Ethereum gasavgifter kostar en uppdatering ~$50 vid skrivande stund. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +Observera att gasavgifterna på [Arbitrum](/arbitrum/arbitrum-faq) är betydligt lägre än på Ethereum-mainnet. -## No Setup Costs & Greater Operational Efficiency +## Inga Installationskostnader & Ökad Driftseffektivitet -Zero setup fees. Get started immediately with no setup or overhead costs. No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources. +Inga installationsavgifter. Kom igång direkt utan installations- eller overheadkostnader. Inga hårdvarukrav. Inga avbrott på grund av centraliserad infrastruktur och mer tid att fokusera på din kärnprodukt. Ingen nödvändighet för backup-servrar, felsökning eller dyra ingenjörsresurser. -## Reliability & Resiliency +## Tillförlitlighet & Motståndskraft -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graphs decentraliserade nätverk ger användare tillgång till geografisk redundans som inte existerar när man själv-hostar en `graph-node`. Förfrågningar betjänas pålitligt tack vare en drifttid på 99,9% eller mer, uppnådd av hundratals oberoende Indexers som säkrar nätverket globalt. -Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally.
+Sammanfattningsvis är The Graph Nätverk mindre kostsamt, enklare att använda och ger överlägsna resultat jämfört med att köra en `graph-node` lokalt. -Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph). +Börja använda The Graph Network idag och lär dig hur du [uppgraderar din subgraf till The Graphs decentraliserade nätverk](/cookbook/upgrading-a-subgraph). diff --git a/website/pages/sv/network/curating.mdx b/website/pages/sv/network/curating.mdx index 797d9b9dd896..9f408a71eb4d 100644 --- a/website/pages/sv/network/curating.mdx +++ b/website/pages/sv/network/curating.mdx @@ -1,96 +1,96 @@ --- -title: Curating +title: Kuratering --- -Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs. +Kuratorer är avgörande för The Graphs decentraliserade ekonomi. De använder sin kunskap om web3-ekosystemet för att bedöma och signalera vilka subgrafer som bör indexeras av The Graph Network. Genom Explorer kan kuratorer se nätverksdata för att fatta signaleringsbeslut. The Graph Network belönar kuratorer som signalerar på subgrafer av god kvalitet med en del av de avgifter för frågor som subgrafer genererar. Kuratorer har ekonomiska incitament att signalera tidigt. Dessa signaler från kuratorer är viktiga för Indexers, som sedan kan bearbeta eller indexera data från dessa signalerade subgrafer. -When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. If you decide to signal on a specific version instead, shares will always stay on this specific version. +Vid signalering kan kuratorer välja att signalera på en specifik version av subgrafen eller signalera med automatisk migrering. Vid signalering med automatisk migrering kommer en kurators andelar alltid att migreras till den senaste versionen som utvecklaren publicerar. Om du väljer att signalera på en specifik version kommer andelarna alltid att stanna på denna specifika version. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Kom ihåg att kurering är riskabelt. Vänligen gör din flit för att se till att du kurerar på subgrafer du litar på. Att skapa en subgraf är utan tillåtelse, så människor kan skapa subgrafer och kalla dem vilket namn de vill. För mer vägledning om kurationsrisker, kolla in [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) -## Bonding Curve 101 +## Bindningskurva 101 -First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. 
Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. +Först tar vi ett steg tillbaka. Varje subgraf har en bindningskurva på vilken kuratorandelar präglas när en användare lägger in signal **i** kurvan. Varje subgrafs bindningskurva är unik. Bindningskurvorna är utformade så att priset för att prägla en kuratorandel på en subgraf ökar linjärt, över antalet präglade andelar. -![Price per shares](/img/price-per-share.png) +![Pris per andel](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +Som ett resultat ökar priset linjärt, vilket innebär att det blir dyrare att köpa en andel över tiden. Här är ett exempel på vad vi menar, se bindningskurvan nedan: -![Bonding curve](/img/bonding-curve.png) +![Bindningskurva](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Låt oss säga att vi har två kuratorer som präglar andelar för en subgraf: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. -- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. +- Kurator A är den första att signalera på subgrafen. Genom att lägga till 120 000 GRT i kurvan kan de prägla 2000 andelar. +- Kurator B signalerar på subgrafen vid någon tidpunkt senare. För att få samma antal andelar som Kurator A skulle de behöva lägga till 360 000 GRT i kurvan. +- Eftersom båda kuratorerna har hälften av det totala antalet kuratorandelar skulle de få lika mycket kuratorersättning. +- Om någon av kuratorerna nu skulle bränna sina 2000 kuratorandelar skulle de få 360 000 GRT. +- Den återstående kuratorn skulle nu få all kuratorersättning för den subgrafen. Om de brände sina andelar för att ta ut GRT skulle de få 120 000 GRT. +- **TLDR:** GRT-värderingen av kuratorandelar bestäms av bindningskurvan och kan vara volatil. Det finns potential att ådra sig stora förluster. Att signalera tidigt innebär att du satsar mindre GRT för varje andel. Detta innebär i förlängningen att du tjänar mer kuratorersättning per GRT än senare kuratorer för samma subgraf. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. 
In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +I allmänhet är en bindningskurva en matematisk kurva som definierar förhållandet mellan tokensupply och tillgångspris. I fallet med subgrafkurering **ökar priset på varje subgrafandel med varje investerad token** och **priset på varje andel minskar med varje såld token.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +I fallet med The Graph används [Bancors implementation av en bindningskurvformel](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA). -## How to Signal +## Hur man Signaliserar -Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) +Nu när vi har täckt grunderna om hur bindningskurvan fungerar, här är hur du går tillväga för att signalera på en subgraf. Inom Kuratorfliken på Graf Explorer kan kuratorer signalera och osignalera på vissa subgrafer baserat på nätverksstatistik. För en steg-för-steg-översikt över hur du gör detta i Explorer, [klicka här.](/network/explorer) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +En kurator kan välja att signalera på en specifik subgrafversion, eller så kan de välja att ha sin signal automatiskt migrerad till den nyaste produktionsversionen av den subgrafen. Båda är giltiga strategier och har sina egna för- och nackdelar. -Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Att signalera på en specifik version är särskilt användbart när en subgraf används av flera dApps. En dApp kan behöva regelbundet uppdatera subgrafen med nya funktioner. En annan dApp kan föredra att använda en äldre, väldokumenterad subgrafversion. Vid initial kurering åläggs en standardavgift på 1%. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +Att ha din signal automatiskt migrerad till den nyaste produktionsversionen kan vara värdefullt för att säkerställa att du fortsätter att ackumulera frågeavgifter. Varje gång du signalerar åläggs en kuratoravgift på 1%. Du kommer också att betala en kuratoravgift på 0,5% vid varje migration. Subgrafutvecklare uppmanas att inte publicera nya versioner för ofta - de måste betala en kuratoravgift på 0,5% på alla automatiskt migrerade kuratorandelar. 
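To make the Curator A / Curator B example above easier to follow, here is a minimal TypeScript sketch of a linear bonding curve. It is a simplification for illustration only: the slope constant is chosen so the numbers reproduce the example (120,000 GRT mints the first 2,000 shares), the 1% curation tax is ignored, and this is not the exact Bancor formula used on-chain.

```typescript
// Simplified linear bonding curve: the price of the next share grows linearly
// with the number of shares already minted, priceAt(s) = SLOPE * s.
// Minting from supply s0 up to s1 therefore costs the area under the curve,
// SLOPE * (s1^2 - s0^2) / 2, and burning refunds the corresponding area.
const SLOPE = 0.06; // hypothetical constant, picked to match the example above

function costToMint(currentShares: number, sharesToMint: number): number {
  const newSupply = currentShares + sharesToMint;
  return (SLOPE * (newSupply ** 2 - currentShares ** 2)) / 2;
}

function refundOnBurn(currentShares: number, sharesToBurn: number): number {
  const newSupply = currentShares - sharesToBurn;
  return (SLOPE * (currentShares ** 2 - newSupply ** 2)) / 2;
}

console.log(costToMint(0, 2000)); // 120,000 GRT: Curator A mints the first 2,000 shares
console.log(costToMint(2000, 2000)); // 360,000 GRT: Curator B pays 3x for the same number of shares
console.log(refundOnBurn(4000, 2000)); // 360,000 GRT: whichever curator burns 2,000 shares first
console.log(refundOnBurn(2000, 2000)); // 120,000 GRT: what the remaining curator's shares then fetch
```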
-> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Observera**: Den första adressen som signalerar en viss subgraf betraktas som den första kuratorn och kommer att behöva utföra mycket mer gasintensivt arbete än de övriga följande kuratorerna eftersom den första kuratorn initierar kuratorandelstoken, initierar bindningskurvan och överför även tokens till Graf-proxy. -## What does Signaling mean for The Graph Network? +## Vad innebär Signalering för The Graf Nätverk? -For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +För att slutanvändare ska kunna ställa frågor till en subgraf måste subgrafen först indexeras. Indexering är en process där filer, data och metadata granskas och sedan indexeras så att resultat kan hittas snabbare. För att en subgrafs data ska kunna sökas behöver den organiseras. -And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. +Så om Indexers skulle behöva gissa vilka subgrafer de ska indexera skulle det vara en låg chans att de skulle tjäna pålitliga frågeavgifter eftersom de inte skulle ha något sätt att validera vilka subgrafer som är av god kvalitet. Där kommer kureringen in. -Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! +Kuratorer gör The Graf-nätverket effektivt och signalering är det förfarande som kuratorer använder för att låta Indexers veta att en subgraf är bra att indexera, där GRT läggs till en bindningskurva för en subgraf. Indexers kan intrinsiskt lita på signalen från en kurator eftersom kuratorer vid signalering präglar en kuratorandel för subgrafen, vilket ger dem rätt till en del av de framtida frågeavgifterna som subgrafen genererar. Kuratorsignalen representeras som ERC20-tokens kallade Graph Curation Shares (GCS). Kuratorer som vill tjäna mer frågeavgifter bör signalera sitt GRT till subgrafer som de förutspår kommer att generera en stark ström av avgifter till nätverket. 
Kuratorer kan inte drabbas av negativ påverkan för dåligt beteende, men det finns en insättningsavgift för kuratorer för att motverka dåliga beslut som kan skada nätverkets integritet. Kuratorer tjänar också färre frågeavgifter om de väljer att kurera på en subgraf av låg kvalitet eftersom det kommer att finnas färre frågor att behandla eller färre Indexers att behandla dessa frågor. Se diagrammet nedan! -![Signaling diagram](/img/curator-signaling.png) +![Signaleringsschema](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +Indexers kan hitta subgrafer att indexera baserat på kuratorsignaler de ser i The Graph Explorer (screenshot nedan). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Utforska subgrafer](/img/explorer-subgraphs.png) -## Risks +## Risker -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. -3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. Frågemarknaden är i grunden ung på The Graph och det finns en risk att din %APY kan vara lägre än du förväntar dig på grund av tidiga marknadsmekanik. +2. Kuratoravgift - när en kurator signalerar GRT på en subgraf åläggs de en kuratoravgift på 1%. Denna avgift bränns och resten sätts in i reservförsörjningen för bindningskurvan. +3. När kuratorer bränner sina andelar för att ta ut GRT minskas GRT-värderingen av de återstående andelarna. Var medveten om att i vissa fall kan kuratorer besluta att bränna sina andelar **på en gång**. Denna situation kan vara vanlig om en dApp-utvecklare slutar versionera/förbättra och ställa frågor på sin subgraf eller om en subgraf misslyckas. Som ett resultat kan återstående kuratorer bara ta ut en bråkdel av sitt ursprungliga GRT. För en nätverksroll med en lägre riskprofil, se [Delegatorer](/network/delegating). +4. En subgraf kan misslyckas på grund av en bugg. En misslyckad subgraf genererar inte frågeavgifter. Som ett resultat måste du vänta tills utvecklaren rättar felet och distribuerar en ny version. 
+ - Om du prenumererar på den nyaste versionen av en subgraf kommer dina andelar automatiskt att migreras till den nya versionen. Detta kommer att medföra en kuratoravgift på 0,5%. + - Om du har signalerat på en specifik subgrafversion och den misslyckas måste du manuellt bränna dina kuratorandelar. Observera att du kan få mer eller mindre GRT än du ursprungligen satt in i kuraturkurvan, vilket är en risk som är förknippad med att vara kurator. Du kan sedan signalera på den nya subgrafversionen och därmed åläggs en kuratoravgift på 1%. -## Curation FAQs +## Kurations-FAQ -### 1. What % of query fees do Curators earn? +### 1. Vilken % av frågeavgifterna tjänar Kuratorer? -By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Genom att signalera på en subgraf kommer du att tjäna en del av alla de frågeavgifter som denna subgraf genererar. 10% av alla frågeavgifter går till Kuratorerna pro rata till deras kuratorandelar. Dessa 10% är föremål för styrning. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Hur bestämmer jag vilka subgrafer av hög kvalitet att signalera på? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: +Att hitta högkvalitativa subgrafer är en komplex uppgift, men den kan närmas på många olika sätt. Som kurator vill du leta efter pålitliga subgrafer som genererar frågevolym. En pålitlig subgraf kan vara värdefull om den är komplett, korrekt och stöder en dApps datamässiga behov. En dåligt utformad subgraf kan behöva revideras eller publiceras på nytt och kan också misslyckas. Det är avgörande för kuratorer att granska en subgrafs arkitektur eller kod för att bedöma om en subgraf är värdefull. Som ett resultat: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Kuratorer kan använda sin förståelse för nätverket för att försöka förutsäga hur en enskild subgraf kan generera en högre eller lägre frågevolym i framtiden +- Kuratorer bör också förstå de metriker som finns tillgängliga via The Graph Explorer. Metriker som tidigare frågevolym och vem subgrafutvecklaren är kan hjälpa till att avgöra om en subgraf är värd att signalera på. -### 3. What’s the cost of updating a subgraph? +### 3. Vad kostar det att uppdatera en subgraf? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. 
+Att migrera dina kuratorandelar till en ny subgrafversion åläggs en kuratoravgift på 1%. Kuratorer kan välja att prenumerera på den nyaste versionen av en subgraf. När kuratorandelar automatiskt migreras till en ny version kommer kuratorer också att betala en halv kuratoravgift, dvs. 0,5%, eftersom uppdatering av subgrafer är en on-chain-åtgärd som kostar gas. -### 4. How often can I update my subgraph? +### 4. Hur ofta kan jag uppdatera min subgraf? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Det föreslås att du inte uppdaterar dina subgrafer för ofta. Se frågan ovan för mer information. -### 5. Can I sell my curation shares? +### 5. Kan jag sälja mina kuratorandelar? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. +Kuratorandelar kan inte "köpas" eller "säljas" som andra ERC20-tokens som du kanske är bekant med. De kan endast präglas (skapade) eller brännas (förstöras) längs bindningskurvan för en särskild subgraf. Den mängd GRT som behövs för att prägla en ny signal och den mängd GRT du får när du bränner din befintliga signal bestäms av den bindningskurvan. Som kurator måste du veta att när du bränner dina kuratorandelar för att ta ut GRT kan du sluta med mer eller mindre GRT än du ursprungligen satt in. -Still confused? Check out our Curation video guide below: +Fortfarande förvirrad? Kolla in vår videohandledning om kurering nedan: diff --git a/website/pages/sv/network/delegating.mdx b/website/pages/sv/network/delegating.mdx index 4a6d6e00b73e..27bad461386c 100644 --- a/website/pages/sv/network/delegating.mdx +++ b/website/pages/sv/network/delegating.mdx @@ -1,98 +1,98 @@ --- -title: Delegating +title: Delegera --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +Delegater är nätverksdeltagare som delegerar (dvs. "satsar") GRT till en eller flera Indexers. Delegater bidrar till att säkra nätverket utan att själva köra en Graf Node. -By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +Genom att delegera till en indexerare tjänar delegatorer en del av indexerarens frågeavgifter och belöningar. Mängden frågor en indexerare kan behandla beror på indexerarens egen (och delegerade) insats och priset indexeraren tar ut för varje fråga, så ju mer insats som tilldelas en indexerare, desto fler potentiella frågor kan de bearbeta. -## Delegator Guide +## Delegateringsguide -This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. 
Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: +Denna guide förklarar hur man blir en effektiv Delegater i Graf-nätverket. Delegater delar intäkterna från protokollet tillsammans med alla Indexers baserat på deras delegerade insats. En Delegater måste använda sitt bästa omdöme för att välja Indexers baserat på flera faktorer. Observera att denna guide inte kommer att gå igenom steg som att ställa in Metamask korrekt, eftersom den informationen är allmänt tillgänglig på internet. Det finns tre avsnitt i denna guide: -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator -- A video guide showing the steps to delegate in the Graph Network UI +- Riskerna med att delegera tokens i The Graph-nätverket +- Hur man beräknar förväntad avkastning som Delegater +- En videohandledning som visar stegen för att delegera i Graf Nätverk UI -## Delegation Risks +## Delegationsrisker -Listed below are the main risks of being a Delegator in the protocol. +Här nedan listas huvudriskerna med att vara en Delegater i protokollet. -### The delegation tax +### Delegationsavgiften -Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. +Delegater kan inte "slashas" för dåligt beteende, men det finns en avgift för Delegater för att avskräcka dåligt beslutsfattande som kan skada nätverkets integritet. -It is important to understand that every time you delegate, you will be charged 0.5%. This means if you are delegating 1000 GRT, you will automatically burn 5 GRT. +Det är viktigt att förstå att varje gång du delegerar kommer du att debiteras 0,5%. Detta innebär att om du delegerar 1000 GRT bränner du automatiskt 5 GRT. -This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation. +Det innebär att en Delegater säkert bör beräkna vad deras avkastning kommer att vara genom att delegera till en Indexer. Till exempel kan en Delegater beräkna hur många dagar det kommer att ta innan de har tjänat tillbaka 0,5% avgiften för deras delegation. -### The delegation unbonding period +### Perioden för upphävande av delegering -Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. This means they cannot transfer their tokens, or earn any rewards for 28 days. +När som helst en Delegater vill upphäva sin delegering är deras tokens föremål för en upphävningsperiod på 28 dagar. Det innebär att de inte kan överföra sina tokens eller tjäna några belöningar under 28 dagar. -One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT. +En sak att tänka på är också att välja en Indexer klokt. Om du väljer en Indexer som inte var pålitlig eller inte gjorde ett bra jobb, kommer du att vilja upphäva, vilket innebär att du kommer att förlora många möjligheter att tjäna belöningar, vilket kan vara lika dåligt som att bränna GRT.
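As a sketch of the break-even calculation mentioned above (how many days it takes to earn back the 0.5% delegation tax), consider the TypeScript snippet below. The daily return rate is a made-up placeholder; real returns depend entirely on the chosen Indexer's reward cuts, their delegation pool, and network activity.

```typescript
// Rough break-even estimate for the 0.5% delegation tax.
// estimatedDailyReturnRate is hypothetical; it is not a protocol constant.
const DELEGATION_TAX = 0.005;

function daysToRecoverTax(delegatedGrt: number, estimatedDailyReturnRate: number): number {
  const burned = delegatedGrt * DELEGATION_TAX; // e.g. 5 GRT burned when delegating 1,000 GRT
  const workingStake = delegatedGrt - burned; // the stake that actually earns rewards
  const dailyRewards = workingStake * estimatedDailyReturnRate;
  return burned / dailyRewards;
}

// Example: delegating 1,000 GRT burns 5 GRT up front; at a hypothetical
// 0.05% per day in rewards it takes roughly 10 days to earn that back.
console.log(daysToRecoverTax(1000, 0.0005));
```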
    - ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Delegation upphävning](/img/Delegation-Unbonding.png) _Observera avgiften på 0,5% i Delegation UI, samt den 28 + dagar långa upphävningsperioden._
    -### Choosing a trustworthy Indexer with a fair reward payout for Delegators +### Att välja en pålitlig Indexer med en rättvis belöningsutbetalning till Delegater -This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. +Detta är en viktig del att förstå. Först låt oss diskutera tre mycket viktiga värden, vilka är Delegationsparametrar. -Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards. +Indexing Reward Cut - Indexing reward cut är den del av belöningarna som Indexer kommer att behålla för sig själva. Det betyder om den är inställd på 100%, som en Delegater kommer du att få 0 indexing rewards. Om du ser 80% i UI, innebär det att du som Delegater kommer att få 20%. En viktig notering - i början av nätverket kommer Indexing Rewards att utgöra majoriteten av belöningarna.
    - ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *Den översta Indexern ger Delegater 90% av belöningarna. Den + mellersta ger Delegater 20%. Den nedersta ger Delegater ~83%.*
    -- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Query Fee Cut - Detta fungerar precis som Indexing Reward Cut. Detta gäller dock för avkastningen på frågebetalningar som Indexer samlar in. Det bör noteras att i början av nätverket kommer avkastningen från frågebetalningar att vara mycket små jämfört med indexeringsbelöningen. Det rekommenderas att uppmärksamma nätverket för att avgöra när frågebetalningarna i nätverket kommer att börja vara mer betydande. -As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. +Som du kan se krävs det mycket tanke som måste gå in i att välja rätt Indexer. Detta är varför vi starkt rekommenderar att du utforskar The Graf Discord för att avgöra vilka Indexers som har bästa sociala rykte och tekniska rykte för att belöna Delegater konsekvent. Många av Indexers är mycket aktiva på Discord och kommer att vara glada att besvara dina frågor. Många av dem har indexerat i månader på testnätet och gör sitt bästa för att hjälpa Delegater att tjäna en god avkastning, eftersom det förbättrar nätverkets hälsa och framgång. -### Calculating Delegators expected return +### Beräkning av Delegaters förväntade avkastning -A Delegator has to consider a lot of factors when determining the return. These include: +En Delegater måste överväga många faktorer när de bestämmer avkastningen. Dessa inkluderar: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- En teknisk Delegater kan också titta på Indexer's förmåga att använda de Delegerade tokens som är tillgängliga för dem. Om en Indexer inte allokerar alla tillgängliga tokens tjänar de inte maximal vinst de kunde för sig själva eller sina Delegater. +- Just nu i nätverket kan en Indexer välja att stänga en allokering och samla in belöningar när som helst mellan 1 och 28 dagar. Så det är möjligt att en Indexer har många belöningar som de ännu inte har samlat in, och därmed är deras totala belöningar låga. Detta bör beaktas i de tidiga dagarna. 
-### Considering the query fee cut and indexing fee cut +### Att överväga frågebetalningsavgiften och indexeringsavgiften -As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: +Som beskrivs i de ovanstående avsnitten bör du välja en Indexer som är öppen och ärlig om att sätta sina frågebetalningsavgifter och indexeringsavgifter. En Delegater bör också titta på Parametrarnas Kylningstid för att se hur mycket tidsskydd de har. Efter att detta är gjort är det ganska enkelt att beräkna den mängd belöningar som Delegaterna får. Formeln är: -![Delegation Image 3](/img/Delegation-Reward-Formula.png) +![Delegering Bild 3](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### Att överväga Indexer's delegeringspool -Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: +En annan sak som en Delegater måste överväga är vilken proportion av Delegationspoolen de äger. Alla delegationsbelöningar delas jämnt, med en enkel omviktning av poolen som avgörs av det belopp som Delegaterna har deponerat i poolen. Det ger Delegaterna en andel av poolen: -![Share formula](/img/Share-Forumla.png) +![Dela formel](/img/Share-Forumla.png) -Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. +Genom att använda denna formel kan vi se att det faktiskt är möjligt för en Indexer som erbjuder endast 20% till Delegater att faktiskt ge Delegater en ännu bättre belöning än en Indexer som ger 90% till Delegater. -A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. +En Delegater kan därför göra matematiska beräkningar för att fastställa att Indexer som erbjuder 20% till Delegater erbjuder en bättre avkastning. -### Considering the delegation capacity +### Att överväga delegeringskapaciteten -Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. +En annan sak att överväga är delegeringskapaciteten. För närvarande är Delegationsförhållandet inställt på 16. Det innebär att om en Indexer har satsat 1 000 000 GRT är deras Delegationskapacitet 16 000 000 GRT av Delegerade tokens som de kan använda i protokollet. Alla delegerade tokens över denna mängd kommer att utspäda alla Delegaternas belöningar. -Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. 
+Tänk dig att en Indexer har 100 000 000 GRT delegerade till dem och deras kapacitet är endast 16 000 000 GRT. Detta innebär effektivt att 84 000 000 GRT-tokens inte används för att tjäna tokens. Och alla Delegater, och Indexer, tjänar mycket mindre belöningar än de kunde. -Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. +Därför bör en Delegater alltid överväga Indexer's Delegationskapacitet och faktor detta in i deras beslutsfattande. -## Delegator FAQs and Bugs +## Delegaters FAQ och buggar -### MetaMask "Pending Transaction" Bug +### Metamask "Väntande transaktion" bugg -**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. What should I do?** +**När jag försöker delegera visas min transaktion i MetaMask som "Väntande" eller "I kö" längre än förväntat. Vad ska jag göra?** -At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transactions attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browsesr (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users that have encountered this issue and have reported successful transactions after restarting their browser and attempting to delegate. +Ibland kan försök att delegera till indexers via MetaMask misslyckas och resultera i långvariga perioder av "Väntande" eller "I kö" -transaktionsförsök. Till exempel kan en användare försöka delegera med en otillräcklig gasavgift i förhållande till de nuvarande priserna, vilket resulterar i att transaktionsförsöket visas som "Väntande" i deras MetaMask-plånbok i 15+ minuter. När detta inträffar kan efterföljande transaktioner försökas av en användare, men dessa kommer inte att bearbetas förrän den ursprungliga transaktionen bryts, eftersom transaktioner för en adress måste bearbetas i ordning. I sådana fall kan dessa transaktioner avbrytas i MetaMask, men transaktionsförsöken kommer att kosta gasavgifter utan någon garanti att efterföljande försök kommer att lyckas. En enklare lösning på denna bugg är att starta om webbläsaren (t.ex. använda "abort:restart" i adressfältet), vilket kommer att avbryta alla tidigare försök utan att gas subtraheras från plånboken. Flera användare som har stött på detta problem och har rapporterat framgångsrika transaktioner efter att ha startat om sin webbläsare och försökt att delegera. -## Video guide for the network UI +## Videoguide för nätverks-UI -This guide provides a full review of this document, and how to consider everything in this document while interacting with the UI. +Denna guide ger en fullständig översikt över detta dokument och hur man överväger allt i detta dokument vid interaktion med UI. 
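Pulling together the reward cut, pool share, and delegation capacity points from the delegating guide above, the TypeScript sketch below compares two hypothetical Indexers. All numbers are invented for illustration, and the model is deliberately crude: it assumes both Indexers earn the same total indexing rewards over the period and ignores query fee cuts, cooldowns, and under-allocation, so it only shows why the advertised reward cut alone is not enough to compare Indexers.

```typescript
// Hypothetical comparison of a Delegator's slice of indexing rewards:
// delegatorSlice = rewards * (1 - indexerRewardCut) * (myDelegation / delegationPool)
function myRewards(
  totalIndexingRewards: number, // rewards the Indexer earns over the period (assumed equal here)
  indexerRewardCut: number, // portion the Indexer keeps; 0.8 means Delegators share 20%
  myDelegation: number,
  delegationPool: number, // total GRT delegated to that Indexer
): number {
  const delegatorsPortion = totalIndexingRewards * (1 - indexerRewardCut);
  return delegatorsPortion * (myDelegation / delegationPool);
}

// Indexer A passes on only 20% but has a small delegation pool; Indexer B passes
// on 90% but its pool is twenty times larger. Delegating 10,000 GRT either way:
console.log(myRewards(10_000, 0.8, 10_000, 500_000)); // Indexer A: 40 GRT to this Delegator
console.log(myRewards(10_000, 0.1, 10_000, 10_000_000)); // Indexer B: 9 GRT to this Delegator
```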
diff --git a/website/pages/sv/network/developing.mdx b/website/pages/sv/network/developing.mdx index 9c543348259d..75a9740f2ad3 100644 --- a/website/pages/sv/network/developing.mdx +++ b/website/pages/sv/network/developing.mdx @@ -1,53 +1,53 @@ --- -title: Developing +title: Utveckling --- -Developers are the demand side of The Graph ecosystem. Developers build subgraphs and publish them to The Graph Network. Then, they query live subgraphs with GraphQL in order to power their applications. +Utvecklare utgör efterfrågesidan av The Graph-ekosystemet. Utvecklare bygger undergrafer och publicerar dem på The Graph Nätverk. Därefter frågar de levande undergrafer med GraphQL för att driva sina applikationer. -## Subgraph Lifecycle +## Subgrafens Livscykel -Subgraphs deployed to the network have a defined lifecycle. +Undergrafer som distribueras till nätverket har en definierad livscykel. -### Build locally +### Bygg lokalt -As with all subgraph development, it starts with local development and testing. Developers can use the same local setup whether they are building for The Graph Network, the hosted service or a local Graph Node, leveraging `graph-cli` and `graph-ts` to build their subgraph. Developers are encouraged to use tools such as [Matchstick](https://github.com/LimeChain/matchstick) for unit testing to improve the robustness of their subgraphs. +Precis som med all subgrafutveckling börjar det med lokal utveckling och testning. Utvecklare kan använda samma lokala uppsättning oavsett om de bygger för The Graph Nätverk, den värdade tjänsten eller en lokal Graph Node, genom att använda `graph-cli` och `graph-ts` för att bygga sin subgraf. Utvecklare uppmuntras att använda verktyg som [Matchstick](https://github.com/LimeChain/matchstick) för enhetstestning för att förbättra robustheten hos sina subgrafer. -> There are certain constraints on The Graph Network, in terms of feature and network support. Only subgraphs on [supported networks](/developing/supported-networks) will earn indexing rewards, and subgraphs which fetch data from IPFS are also not eligible. +> Det finns vissa begränsningar på The Graf Nätverk, i termer av funktioner och nätverksstöd. Endast subgrafer på [stödda nätverk](/developing/supported-networks) kommer att tjäna indexbelöningar, och subgrafer som hämtar data från IPFS är heller inte kvalificerade. -### Deploy to the Subgraph Studio +### Distribuera till Subgraf Studio -Once defined, the subgraph can be built and deployed to the [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). The Subgraph Studio is a sandbox environment which will index the deployed subgraph and make it available for rate-limited development and testing. This gives developers an opportunity to verify that their subgraph does not encounter any indexing errors, and works as expected. +När subgrafen är definierad kan den byggas och distribueras till [Subgraf Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). Subgraph Studio är en sandlådemiljö som kommer att indexera den distribuerade subgrafen och göra den tillgänglig för utveckling och testning med begränsad hastighet. Detta ger utvecklare möjlighet att verifiera att deras subgraf inte stöter på några indexeringsfel och fungerar som förväntat. -### Publish to the Network +### Publicera till Nätverket -When the developer is happy with their subgraph, they can publish it to The Graph Network. This is an on-chain action, which registers the subgraph so that it is discoverable by Indexers. 
Published subgraphs have a corresponding NFT, which is then easily transferable. The published subgraph has associated metadata, which provides other network participants with useful context and information. +När utvecklaren är nöjd med sin subgraf kan de publicera den på The Graf Nätverk. Detta är en on-chain-åtgärd, som registrerar subgrafen så att den kan upptäckas av Indexers. Publicerade subgrafer har en motsvarande NFT, som sedan kan överföras enkelt. Den publicerade subgrafen har associerad metadata, som ger andra nätverksdeltagare användbar sammanhang och information. -### Signal to Encourage Indexing +### Signal för Att Främja Indexering -Published subgraphs are unlikely to be picked up by Indexers without the addition of signal. Signal is locked GRT associated with a given subgraph, which indicates to Indexers that a given subgraph will receive query volume, and also contributes to the indexing rewards available for processing it. Subgraph developers will generally add signal to their subgraph, in order to encourage indexing. Third party Curators may also signal on a given subgraph, if they deem the subgraph likely to drive query volume. +Publicerade subgrafer kommer troligen inte att plockas upp av Indexers utan tillsats av signal. Signal är låst GRT som är associerat med en given subgraf, vilket indikerar för Indexers att en given subgraf kommer att få frågevolym och bidrar också till de indexbelöningar som är tillgängliga för att bearbeta den. Subgrafutvecklare lägger vanligtvis till signal i sin subgraf för att främja indexering. Tredje part Curators kan också signalera på en given subgraf om de anser att subgrafen sannolikt kommer att generera frågevolym. -### Querying & Application Development +### Frågor & Applikationsutveckling -Once a subgraph has been processed by Indexers and is available for querying, developers can start to use the subgraph in their applications. Developers query subgraphs via a gateway, which forwards their queries to an Indexer who has processed the subgraph, paying query fees in GRT. +När en subgraf har bearbetats av Indexers och är tillgänglig för frågor kan utvecklare börja använda subgrafen i sina applikationer. Utvecklare frågar subgrafer via en gateway, som vidarebefordrar deras frågor till en Indexer som har bearbetat subgrafen och betalar frågeavgifter i GRT. -In order to make queries, developers must generate an API key, which can be done in the Subgraph Studio. This API key must be funded with GRT, in order to pay query fees. Developers can set a maximum query fee, in order to control their costs, and limit their API key to a given subgraph or origin domain. The Subgraph Studio provides developers with data on their API key usage over time. +För att kunna göra frågor måste utvecklare generera en API-nyckel, vilket kan göras i Subgraf Studio. Denna API-nyckel måste finansieras med GRT för att betala frågeavgifter. Utvecklare kan ange en maximal frågeavgift för att kontrollera sina kostnader och begränsa sin API-nyckel till en given subgraf eller ursprungsdomän. Subgraf Studio ger utvecklare data om deras API-nyckelanvändning över tiden. -Developers are also able to express an Indexer preference to the gateway, for example preferring Indexers whose query response is faster, or whose data is most up to date. These controls are set in the Subgraph Studio. +Utvecklare kan också uttrycka ett Indexer-förslag till gatewayen, till exempel att föredra Indexers vars frågesvar är snabbare eller vars data är mest aktuell. 
Dessa kontroller är inställda i Subgraf Studio. -### Updating Subgraphs +### Uppdatering av Subgrafer -After a time a subgraph developer may want to update their subgraph, perhaps fixing a bug or adding new functionality. The subgraph developer may deploy new version(s) of their subgraph to the Subgraph Studio for rate-limited development and testing. +Efter en tid kan en subgrafutvecklare vilja uppdatera sin subgraf, kanske fixa en bugg eller lägga till ny funktionalitet. Subgrafutvecklaren kan deploya nya versioner av sin subgraf till Subgraf Studio för utveckling och testning med begränsad hastighet. -Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +När Subgrafutvecklaren är redo att uppdatera kan de initiera en transaktion för att peka sin subgraf till den nya versionen. Att uppdatera subgrafen migrerar all signal till den nya versionen (förutsatt att användaren som tillämpade signalen valde "auto-migrera"), vilket också medför en migrationsavgift. Denna signalmigration bör få Indexers att börja indexera den nya versionen av subgrafen, så den borde snart bli tillgänglig för frågor. -### Deprecating Subgraphs +### Avveckling av Subgrafer -At some point a developer may decide that they no longer need a published subgraph. At that point they may deprecate the subgraph, which returns any signalled GRT to the Curators. +Vid någon punkt kan en utvecklare besluta att de inte längre behöver en publicerad subgraf. Vid den tidpunkten kan de avveckla subgrafen, vilket returnerar all signalerad GRT till Curators. -### Diverse Developer Roles +### Olika Utvecklarroller -Some developers will engage with the full subgraph lifecycle on the network, publishing, querying and iterating on their own subgraphs. Some may be focused on subgraph development, building open APIs which others can build on. Some may be application focused, querying subgraphs deployed by others. +Vissa utvecklare kommer att engagera sig i hela subgrafens livscykel på nätverket, publicera, fråga och iterera på sina egna subgrafer. Vissa kanske fokuserar på subgrafutveckling, bygger öppna API: er som andra kan bygga på. Vissa kan vara applikationsinriktade och fråga subgrafer som har distribuerats av andra. -### Developers and Network Economics +### Utvecklare och Nätverksekonomi -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. +Utvecklare är en nyckelaktör i nätverket ekonomiskt sett, låser upp GRT för att främja indexering och viktigast av allt, frågar subgrafer, vilket är nätverkets primära värdeutbyte. Subgrafutvecklare bränner också GRT varje gång en subgraf uppdateras. 
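As a rough illustration of the querying flow described above (an API key created in the Subgraph Studio, queries sent to the gateway, which routes them to an Indexer and settles query fees in GRT), here is a minimal TypeScript sketch. The gateway URL shape and the placeholders are assumptions for illustration; the exact query endpoint for a given subgraph is shown in the Subgraph Studio.

```typescript
// Minimal sketch of querying a published subgraph through the gateway.
// API key and subgraph ID are placeholders; the URL shape is illustrative —
// copy the exact query URL for your subgraph from the Subgraph Studio.
const API_KEY = "<your-api-key>";
const SUBGRAPH_ID = "<subgraph-id>";
const GATEWAY_URL = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

async function querySubgraph<T>(query: string, variables: Record<string, unknown> = {}): Promise<T> {
  const res = await fetch(GATEWAY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query, variables }),
  });
  if (!res.ok) throw new Error(`Gateway returned ${res.status}`);
  const { data, errors } = await res.json();
  if (errors) throw new Error(JSON.stringify(errors));
  return data as T;
}

// `_meta` is exposed by subgraphs regardless of their schema, so it makes a safe smoke test.
querySubgraph("{ _meta { block { number } } }").then((data) => console.log(data));
```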
diff --git a/website/pages/sv/network/explorer.mdx b/website/pages/sv/network/explorer.mdx index b3a549900b83..b65008196f49 100644 --- a/website/pages/sv/network/explorer.mdx +++ b/website/pages/sv/network/explorer.mdx @@ -1,203 +1,203 @@ --- -title: Graph Explorer +title: Graf Utforskaren --- -Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below): +Välkommen till Graf Utforskaren, eller som vi gillar att kalla det, din decentraliserade portal till subgraffar och nätverksdata. 👩🏽‍🚀 Graf Utforskaren består av flera delar där du kan interagera med andra subgraffutvecklare, dapputvecklare, kuratorer, indexerare och delegater. För en generell översikt av Graf Utforskaren, kolla in videon nedan (eller fortsätt läsa nedan): -## Subgraphs +## Subgraffar -First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name. +Först och främst, om du precis har avslutat att distribuera och publicera din subgraff i Subgraffstudio, är fliken Subgraffar längst upp på navigationsfältet platsen för att se dina egna färdiga subgraffar (och andras subgraffar) på det decentraliserade nätverket. Här kan du hitta den exakta subgraffen du letar efter baserat på skapelsedatum, signalbelopp eller namn. -![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) +![Utforskaren Bild 1](/img/Subgraphs-Explorer-Landing.png) -When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries. +När du klickar in på en subgraff kan du testa frågor i lekplatsen och använda nätverksinformation för att fatta informerade beslut. Du kommer också att kunna signalera GRT på din egen subgraff eller andra subgraffar för att göra indexerare medvetna om dess vikt och kvalitet. Detta är avgörande eftersom signalering på en subgraff uppmuntrar den att indexeras, vilket innebär att den kommer att synas på nätverket för att så småningom utföra frågor. -![Explorer Image 2](/img/Subgraph-Details.png) +![Utforskaren Bild 2](/img/Subgraph-Details.png) -On each subgraph’s dedicated page, several details are surfaced. 
These include: +På varje dedikerad sida för subgraff visas flera detaljer, inklusive: -- Signal/Un-signal on subgraphs -- View more details such as charts, current deployment ID, and other metadata -- Switch versions to explore past iterations of the subgraph -- Query subgraphs via GraphQL -- Test subgraphs in the playground -- View the Indexers that are indexing on a certain subgraph -- Subgraph stats (allocations, Curators, etc) -- View the entity who published the subgraph +- Signalera/Sluta signalera på subgraffar +- Visa mer detaljer som diagram, aktuell distributions-ID och annan metadata +- Växla versioner för att utforska tidigare iterationer av subgraffen +- Fråga subgraffar via GraphQL +- Testa subgraffar i lekplatsen +- Visa indexerare som indexerar på en viss subgraff +- Subgraffstatistik (tilldelningar, kuratorer, etc.) +- Visa enheten som publicerade subgraffen -![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) +![Utforskaren Bild 3](/img/Explorer-Signal-Unsignal.png) -## Participants +## Deltagare -Within this tab, you’ll get a bird’s eye view of all the people that are participating in the network activities, such as Indexers, Delegators, and Curators. Below, we’ll go into an in-depth review of what each tab means for you. +Inom den här fliken får du en översikt över alla personer som deltar i nätverksaktiviteter, såsom indexerare, delegater och kuratorer. Nedan går vi igenom vad varje flik innebär för dig. -### 1. Indexers +### 1. Indexerare -![Explorer Image 4](/img/Indexer-Pane.png) +![Utforskaren Bild 4](/img/Indexer-Pane.png) -Let’s start with the Indexers. Indexers are the backbone of the protocol, being the ones that stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. In the Indexers table, you’ll be able to see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made off of query fees and indexing rewards. Deep dives below: +Låt oss börja med indexerare. Indexerare är ryggraden i protokollet och de satsar på subgraffar, indexerar dem och serverar frågor till alla som konsumerar subgraffar. I indexerarens tabell kan du se indexerarens delegeringsparametrar, deras insats, hur mycket de har satsat på varje subgraff och hur mycket intäkter de har tjänat på frågeavgifter och indexeringsbelöningar. Här är några detaljer: -- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators -- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. If it’s positive, it means that the Indexer is keeping some of their rewards -- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters. Cooldown periods are set up by Indexers when they update their delegation parameters -- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior -- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed -- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing -- Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated -- Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. An excess delegated stake cannot be used for allocations or rewards calculations. 
-- Query Fees - this is the total fees that end users have paid for queries from an Indexer over all time -- Indexer Rewards - this is the total indexer rewards earned by the Indexer and their Delegators over all time. Indexer rewards are paid through GRT issuance. +- Andel av frågeavgift - den % av frågeavgifterna som indexeraren behåller när de delar med delegater +- Effektiv belöningsandel - belöningsandelen för indexeringsbelöning som tillämpas på delegeringspoolen. Om den är negativ innebär det att indexeraren ger bort en del av sina belöningar. Om den är positiv innebär det att indexeraren behåller en del av sina belöningar +- Nedkylningsåterstående - den tid som återstår tills indexeraren kan ändra ovanstående delegeringsparametrar. Nedkylningsperioder ställs upp av indexerare när de uppdaterar sina delegeringsparametrar +- Ägd - Detta är indexerarens deponerade insats, som kan straffas för skadligt eller felaktigt beteende +- Delegerad - Insats från delegater som kan tilldelas av indexeraren, men som inte kan straffas +- Tilldelad - Insats som indexerare aktivt tilldelar till de subgraffar de indexerar +- Tillgänglig delegeringskapacitet - mängden delegerad insats som indexerare fortfarande kan ta emot innan de blir överdelegerade +- Maximal delegeringskapacitet - den maximala mängden delegerad insats som indexeraren produktivt kan acceptera. Överskjuten delegerad insats kan inte användas för tilldelningar eller beräkningar av belöningar. +- Frågeavgifter - detta är de totala avgifter som slutanvändare har betalat för frågor från en indexerare över tid +- Indexeringsbelöningar - detta är de totala indexeringsbelöningarna som indexeraren och deras delegater har tjänat över tid. Indexeringsbelöningar betalas genom GRT-utgivning. -Indexers can earn both query fees and indexing rewards. Functionally, this happens when network participants delegate GRT to an Indexer. This enables Indexers to receive query fees and rewards depending on their Indexer parameters. Indexing parameters are set by clicking on the right-hand side of the table, or by going into an Indexer’s profile and clicking the “Delegate” button. +Indexerare kan tjäna både frågeavgifter och indexeringsbelöningar. Funktionellt sker detta när nätverksdeltagare delegerar GRT till en indexerare. Detta gör att indexerare kan få frågeavgifter och belöningar beroende på deras indexeringsparametrar. Indexeringsparametrar ställs in genom att klicka på höger sida av tabellen eller genom att gå in på indexerarens profil och klicka på "Delegera"-knappen. -To learn more about how to become an Indexer, you can take a look at the [official documentation](/network/indexing) or [The Graph Academy Indexer guides.](https://thegraph.academy/delegators/choosing-indexers/) +För att lära dig mer om hur du blir indexerare kan du titta på [officiell dokumentation](/Nätverk/indexing) eller [The Graf Academy Indexer-guiden.](https://thegraph.academy/delegators/choosing-indexers/) -![Indexing details pane](/img/Indexing-Details-Pane.png) +![Indexeringsdetaljerpanel](/img/Indexing-Details-Pane.png) -### 2. Curators +### 2. Kuratorer -Curators analyze subgraphs to identify which subgraphs are of the highest quality. Once a Curator has found a potentially attractive subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. +Kuratorer analyserar subgraffar för att identifiera vilka subgraffar som har högst kvalitet. 
När en kurator har hittat en potentiellt attraktiv subgraff kan de kurera den genom att signalera på dess bindningskurva. På så sätt låter kuratorer indexerare veta vilka subgraffar som är av hög kvalitet och bör indexerad. -Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. By depositing GRT, Curators mint curation shares of a subgraph. As a result, Curators are eligible to earn a portion of the query fees that the subgraph they have signaled on generates. The bonding curve incentivizes Curators to curate the highest quality data sources. The Curator table in this section will allow you to see: +Kuratorer kan vara samhällsmedlemmar, datakonsumenter eller till och med subgraffutvecklare som signalerar på sina egna subgraffar genom att deponera GRT-token i en bindningskurva. Genom att deponera GRT skapar kuratorer kuratorandelar av en subgraff. Som ett resultat är kuratorer berättigade att tjäna en del av frågeavgifterna som subgraffen de har signalerat på genererar. Bindningskurvan uppmuntrar kuratorer att kurera de högsta kvalitetsdatakällorna. Kuratortabellen i detta avsnitt låter dig se: -- The date the Curator started curating -- The number of GRT that was deposited -- The number of shares a Curator owns +- Datumet då kuratorn började kurera +- Antalet GRT som deponerades +- Antalet andelar en kurator äger -![Explorer Image 6](/img/Curation-Overview.png) +![Utforskaren Bild 6](/img/Curation-Overview.png) -If you want to learn more about the Curator role, you can do so by visiting the following links of [The Graph Academy](https://thegraph.academy/curators/) or [official documentation.](/network/curating) +Om du vill lära dig mer om rollen som kurator kan du göra det genom att besöka följande länkar från [The Graph Academy](https://thegraph.academy/curators/) eller [officiell dokumentation.](/Nätverk/curating) -### 3. Delegators +### 3. Delegater -Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers seek to attract Delegators by offering them a portion of the indexing rewards and query fees that they earn. +Delegater spelar en nyckelroll för att upprätthålla säkerheten och decentraliseringen av The Graph Nätverk. De deltar i nätverket genom att delegera (det vill säga "satsa") GRT-tokens till en eller flera indexerare. Utan delegater är det mindre sannolikt att indexerare tjänar betydande belöningar och avgifter. Därför försöker indexerare locka delegater genom att erbjuda dem en del av indexeringsbelöningarna och frågeavgifterna de tjänar. -Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! +Delegater väljer i sin tur Indexers baserat på ett antal olika variabler, såsom tidigare prestanda, belöningsräntor för indexering och andel av frågeavgifter. Rekommendation inom gemenskapen kan också spela en roll i detta! 
Det rekommenderas att ansluta med de indexers som valts via [The Graph's Discord](https://discord.gg/graphprotocol) eller [The Graph Forum](https://forum.thegraph.com/)! -![Explorer Image 7](/img/Delegation-Overview.png) +![Utforskaren Bild 7](/img/Delegation-Overview.png) -The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: +Delegattabellen kommer att låta dig se aktiva delegater i samhället, samt metriker som: -- The number of Indexers a Delegator is delegating towards -- A Delegator’s original delegation -- The rewards they have accumulated but have not withdrawn from the protocol -- The realized rewards they withdrew from the protocol -- Total amount of GRT they have currently in the protocol -- The date they last delegated at +- Antal indexerare en delegat delegerar till +- En delegats ursprungliga delegation +- Belöningar de har ackumulerat men inte har dragit tillbaka från protokollet +- De realiserade belöningarna de drog tillbaka från protokollet +- Totalt belopp av GRT som de för närvarande har i protokollet +- Datumet då de senast delegerade -If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). +Om du vill lära dig mer om hur du blir delegat, behöver du inte leta längre! Allt du behöver göra är att besöka [officiell dokumentation](/Nätverk/delegating) eller [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). -## Network +## Nätverk -In the Network section, you will see global KPIs as well as the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. +I avsnittet Nätverk kommer du att se globala KPI:er samt möjligheten att växla till en per-epok-basis och analysera nätverksmetriker mer detaljerat. Dessa detaljer ger dig en uppfattning om hur nätverket presterar över tiden. -### Activity +### Aktivitet -The activity section has all the current network metrics as well as some cumulative metrics over time. Here you can see things like: +Aktivitetsavsnittet har alla aktuella nätverksmetriker samt vissa kumulativa metriker över tid. Här kan du se saker som: -- The current total network stake -- The stake split between the Indexers and their Delegators -- Total supply, minted, and burned GRT since the network inception -- Total Indexing rewards since the inception of the protocol -- Protocol parameters such as curation reward, inflation rate, and more -- Current epoch rewards and fees +- Nuvarande totala nätverksinsats +- Insatsen fördelad mellan indexerare och deras delegater +- Totalt utbud, myntade och brända GRT sedan nätverkets start +- Totala indexeringsbelöningar sedan protokollets början +- Protokollparametrar såsom kuratorbelöning, inflationstakt och mer +- Nuvarande epokbelöningar och avgifter -A few key details that are worth mentioning: +Några viktiga detaljer som är värda att nämna: -- **Query fees represent the fees generated by the consumers**, and they can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. 
-- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once the Indexers close their allocations towards the subgraphs they’ve been indexing. Thus the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). +- **Frågeavgifter representerar avgifterna som genereras av användarna**, och de kan krävas (eller inte) av indexerare efter en period på minst 7 epoker (se nedan) efter att deras tilldelningar till subgraffar har avslutats och den data de serverat har validerats av användarna. +- **Indexeringsbelöningar representerar mängden belöningar som indexerare har krävt från nätverksutgivningen under epoken.** Även om protokollutgivningen är fast, genereras belöningarna endast när indexerare stänger sina tilldelningar till subgraffar som de har indexerat. Därför varierar antalet belöningar per epok (det vill säga under vissa epoker kan indexerare sammanlagt stänga tilldelningar som har varit öppna i många dagar). -![Explorer Image 8](/img/Network-Stats.png) +![Utforskaren Bild 8](/img/Network-Stats.png) -### Epochs +### Epoker -In the Epochs section, you can analyze on a per-epoch basis, metrics such as: +I avsnittet Epoker kan du analysera på en per-epok-basis, metriker som: -- Epoch start or end block -- Query fees generated and indexing rewards collected during a specific epoch -- Epoch status, which refers to the query fee collection and distribution and can have different states: - - The active epoch is the one in which Indexers are currently allocating stake and collecting query fees - - The settling epochs are the ones in which the state channels are being settled. This means that the Indexers are subject to slashing if the consumers open disputes against them. - - The distributing epochs are the epochs in which the state channels for the epochs are being settled and Indexers can claim their query fee rebates. - - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers, thus being finalized. +- Epokens start- eller slutblock +- Frågeavgifter som genererats och indexeringsbelöningar som samlats in under en specifik epok +- Epokstatus, som hänvisar till frågeavgiftsinsamling och distribution och kan ha olika tillstånd: + - Den aktiva epoken är den där indexerare för närvarande allokerar insats och samlar frågeavgifter + - De avvecklande epokerna är de där statliga kanaler avvecklas. Detta innebär att indexerare är föremål för straff om användarna öppnar tvister mot dem. + - De distribuerande epokerna är de epoker där statliga kanaler för epokerna avvecklas och indexerare kan kräva sina frågeavgiftsrabatter. + - De avslutade epokerna är de epoker som inte har några frågeavgiftsrabatter kvar att kräva av indexerare, och är därmed avslutade. -![Explorer Image 9](/img/Epoch-Stats.png) +![Utforskaren Bild 9](/img/Epoch-Stats.png) -## Your User Profile +## Din användarprofil -Now that we’ve talked about the network stats, let’s move on to your personal profile. Your personal profile is the place for you to see your network activity, no matter how you’re participating on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see: +Nu när vi har pratat om nätverksstatistik, låt oss gå vidare till din personliga profil. 
Din personliga profil är platsen där du kan se din nätverksaktivitet, oavsett hur du deltar i nätverket. Din kryptoplånbok kommer att fungera som din användarprofil, och med Användardashboarden kan du se: -### Profile Overview +### Profilöversikt -This is where you can see any current actions you took. This is also where you can find your profile information, description, and website (if you added one). +Här kan du se de senaste åtgärder du har vidtagit. Detta är också där du hittar din profilinformation, beskrivning och webbplats (om du har lagt till en). -![Explorer Image 10](/img/Profile-Overview.png) +![Utforskaren Bild 10](/img/Profile-Overview.png) -### Subgraphs Tab +### Subgraffar-fliken -If you click into the Subgraphs tab, you’ll see your published subgraphs. This will not include any subgraphs deployed with the CLI for testing purposes – subgraphs will only show up when they are published to the decentralized network. +Om du klickar på Subgraffar-fliken ser du dina publicerade subgraffar. Detta inkluderar inte några subgraffar som distribuerats med CLI för teständamål - subgraffar kommer bara att visas när de publiceras på det decentraliserade nätverket. -![Explorer Image 11](/img/Subgraphs-Overview.png) +![Utforskaren Bild 11](/img/Subgraphs-Overview.png) -### Indexing Tab +### Indexeringstabell -If you click into the Indexing tab, you’ll find a table with all the active and historical allocations towards the subgraphs, as well as charts that you can analyze and see your past performance as an Indexer. +Om du klickar på Indexeringsfliken hittar du en tabell med alla aktiva och historiska tilldelningar till subgraffar, samt diagram som du kan analysera och se din tidigare prestanda som indexerare. -This section will also include details about your net Indexer rewards and net query fees. You’ll see the following metrics: +I det här avsnittet hittar du också information om dina nettobelöningar som indexerare och nettovärdaravgifter. Du kommer att se följande metriker: -- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed -- Total Query Fees - the total fees that users have paid for queries served by you over time -- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT -- Fee Cut - the % of query fee rebates that you will keep when you split with Delegators -- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators -- Owned - your deposited stake, which could be slashed for malicious or incorrect behavior +- Delegerad insats - insatsen från delegater som kan tilldelas av dig men inte kan straffas +- Totala frågeavgifter - de totala avgifter som användare har betalat för frågor som du har serverat över tid +- Indexeringsbelöningar - den totala mängd indexeringsbelöningar du har fått, i GRT +- Avgiftsskärning - den procentandel av frågeavgiftsrabatter som du kommer att behålla när du delar med delegater +- Belöningsrabatt - den procentandel av indexeringsbelöningar som du kommer att behålla när du delar med delegater +- Ägd - din deponerade insats, som kan straffas för skadligt eller felaktigt beteende -![Explorer Image 12](/img/Indexer-Stats.png) +![Utforskaren Bild 12](/img/Indexer-Stats.png) -### Delegating Tab +### Delegattabell -Delegators are important to the Graph Network. A Delegator must use their knowledge to choose an Indexer that will provide a healthy return on rewards. 
Here you can find details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. +Delegater är viktiga för The Graph Nätverk. En delegat måste använda sin kunskap för att välja en indexerare som kommer att ge en hälsosam avkastning på belöningar. Här hittar du detaljer om dina aktiva och historiska delegationer, samt metriker för indexerare som du har delegerat till. -In the first half of the page, you can see your delegation chart, as well as the rewards-only chart. To the left, you can see the KPIs that reflect your current delegation metrics. +I den första halvan av sidan kan du se din delegatdiagram, liksom diagrammet för endast belöningar. Till vänster kan du se KPI:er som återspeglar dina aktuella delegationsmetriker. -The Delegator metrics you’ll see here in this tab include: +De delegatmetriker du kommer att se här i den här fliken inkluderar: -- Total delegation rewards -- Total unrealized rewards -- Total realized rewards +- Totala delegationsbelöningar +- Totala orealiserade belöningar +- Totala realiserade belöningar -In the second half of the page, you have the delegations table. Here you can see the Indexers that you delegated towards, as well as their details (such as rewards cuts, cooldown, etc). +I den andra halvan av sidan har du delegattabellen. Här kan du se indexerarna som du har delegerat till, samt deras detaljer (som belönningsskärningar, nedkylning, etc). -With the buttons on the right side of the table, you can manage your delegation - delegate more, undelegate, or withdraw your delegation after the thawing period. +Med knapparna på höger sida av tabellen kan du hantera din delegation - delegera mer, avdelegatera eller ta tillbaka din delegation efter upptiningstiden. -Keep in mind that this chart is horizontally scrollable, so if you scroll all the way to the right, you can also see the status of your delegation (delegating, undelegating, withdrawable). +Kom ihåg att denna tabell kan rullas horisontellt, så om du rullar hela vägen till höger kan du också se status för din delegation (delegering, avdelegering, återkallelig). -![Explorer Image 13](/img/Delegation-Stats.png) +![Utforskaren Bild 13](/img/Delegation-Stats.png) -### Curating Tab +### Kureringstabell -In the Curation tab, you’ll find all the subgraphs you’re signaling on (thus enabling you to receive query fees). Signaling allows Curators to highlight to Indexers which subgraphs are valuable and trustworthy, thus signaling that they need to be indexed on. +I Kureringstabellen hittar du alla subgraffar du signalerar på (vilket gör det möjligt för dig att ta emot frågeavgifter). Signalering gör att kuratorer kan informera indexerare om vilka subgraffar som är värdefulla och pålitliga, vilket signalerar att de bör indexerats. -Within this tab, you’ll find an overview of: +Inom den här fliken hittar du en översikt över: -- All the subgraphs you're curating on with signal details -- Share totals per subgraph -- Query rewards per subgraph -- Updated at date details +- Alla subgraffar du signalerar på med signaldetaljer +- Andelar totalt per subgraff +- Frågebelöningar per subgraff +- Uppdaterade datumdetaljer -![Explorer Image 14](/img/Curation-Stats.png) +![Utforskaren Bild 14](/img/Curation-Stats.png) -## Your Profile Settings +## Dina profilinställningar -Within your user profile, you’ll be able to manage your personal profile details (like setting up an ENS name). If you’re an Indexer, you have even more access to settings at your fingertips. 
In your user profile, you’ll be able to set up your delegation parameters and operators. +Inom din användarprofil kommer du att kunna hantera dina personliga profiluppgifter (som att ställa in ett ENS-namn). Om du är indexerare har du ännu mer åtkomst till inställningar inom räckhåll. I din användarprofil kan du ställa in dina delegationsparametrar och operatörer. -- Operators take limited actions in the protocol on the Indexer's behalf, such as opening and closing allocations. Operators are typically other Ethereum addresses, separate from their staking wallet, with gated access to the network that Indexers can personally set -- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. +- Operatörer tar begränsade åtgärder i protokollet på indexerarens vägnar, såsom att öppna och stänga tilldelningar. Operatörer är vanligtvis andra Ethereum-adresser, separata från deras stakningsplånbok, med gated access till nätverket som indexerare personligen kan ställa in +- Delegationsparametrar låter dig kontrollera fördelningen av GRT mellan dig och dina delegater. -![Explorer Image 15](/img/Profile-Settings.png) +![Utforskaren Bild 15](/img/Profile-Settings.png) -As your official portal into the world of decentralized data, The Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. +Som din officiella portal till världen av decentraliserade data, låter Graf Utforskaren dig ta en mängd olika åtgärder, oavsett din roll i nätverket. Du kan komma åt dina profilinställningar genom att öppna rullgardinsmenyn bredvid din adress och sedan klicka på Inställningar-knappen.
    ![Wallet details](/img/Wallet-Details.png)
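For intuition on the Fee Cut and Rewards Cut metrics listed above — where the percentage is the share the Indexer keeps and the remainder goes to the delegation pool — here is a small, purely illustrative TypeScript helper (not the on-chain accounting):

```typescript
// Illustrative only: split an amount of query fee rebates or indexing rewards
// between an Indexer and its Delegators, given the cut the Indexer keeps.
function splitByCut(amount: number, cutPercent: number): { indexer: number; delegators: number } {
  const indexer = (amount * cutPercent) / 100;
  return { indexer, delegators: amount - indexer };
}

console.log(splitByCut(1_000, 95)); // 95% cut: { indexer: 950, delegators: 50 }
console.log(splitByCut(2_000, 75)); // 75% cut: { indexer: 1500, delegators: 500 }
```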
    diff --git a/website/pages/sv/network/indexing.mdx b/website/pages/sv/network/indexing.mdx index c40fd87a22fe..609035dec6fc 100644 --- a/website/pages/sv/network/indexing.mdx +++ b/website/pages/sv/network/indexing.mdx @@ -1,48 +1,48 @@ --- -title: Indexing +title: Indexering --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexerare är nodoperatörer i The Graph Network som satsar Graph Tokens (GRT) för att tillhandahålla indexering och frågebehandlingstjänster. Indexerare tjänar avgifter för frågor och indexering samt får frågebetalningar som återbetalas enligt en exponentiell återbetalningsfunktion. -GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. +GRT som satsas i protokollet är föremål för en tiningperiod och kan drabbas av strykning om indexerare är skadliga och tillhandahåller felaktiga data till applikationer eller om de indexerar felaktigt. Indexerare tjänar också belöningar för delegerat satsning från Delegater, för att bidra till nätverket. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexerare väljer subgrafer att indexera baserat på subgrafens kuratersignal, där Curators satsar GRT för att ange vilka subgrafer som är av hög kvalitet och bör prioriteras. Konsumenter (t.ex. applikationer) kan också ställa in parametrar för vilka indexerare som behandlar frågor för deras subgrafer och ange preferenser för pris på frågebetalning. ## FAQ -### What is the minimum stake required to be an Indexer on the network? +### Vad är det minsta satsade belopp som krävs för att vara en indexerare i nätverket? -The minimum stake for an Indexer is currently set to 100K GRT. +Det minsta beloppet för en indexerare är för närvarande inställt på 100 000 GRT. -### What are the revenue streams for an Indexer? +### Vad är intäktskällorna för en indexerare? -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. +**Frågebetalningsåterbetalningar** - Betalningar för att servera frågor i nätverket. Dessa betalningar medieras via tillståndskanaler mellan en indexerare och en gateway. Varje frågebegäran från en gateway innehåller en betalning och det motsvarande svaret är en bevis på giltigheten av frågeresultatet. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. +**Indexeringsbelöningar** - Genererade genom en årlig protokollsbredd på 3%, fördelas indexerare som indexerar subgrafdepåer för nätverket. -### How are indexing rewards distributed? 
+### Hur fördelas indexeringsbelöningar? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Indexeringsbelöningar kommer från protokollsinflation som är inställd på en årlig emission på 3%. De fördelas över subgrafer baserat på andelen av all kuratersignal på varje subgraf, och fördelas sedan proportionellt till indexerare baserat på deras tilldelade insats på den subgrafen. **En tilldelning måste avslutas med ett giltigt bevis på indexering (POI) som uppfyller de standarder som fastställts av skiljekommittéstadgan för att vara berättigad till belöningar.** -Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. +Många verktyg har skapats av gemenskapen för att beräkna belöningar; du hittar en samling av dem organiserade i [Gemenskapsrådssamlingen](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). Du kan också hitta en uppdaterad lista över verktyg i kanalerna #Delegater och #Indexers på [Discord-servern](https://discord.gg/graphprotocol). Här länkar vi en [rekommenderad allokeringsoptimerare](https://github.com/graphprotocol/AllocationOpt.jl) integrerad med indexer-programvarustapeln. -### What is a proof of indexing (POI)? +### Vad är ett bevis på indexering (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POI:er används i nätverket för att verifiera att en indexerare indexerar de subgrafer de har tilldelat sig. Ett POI för det första blocket i den nuvarande epoken måste lämnas in när en tilldelning stängs för att vara berättigad till indexeringsbelöningar. Ett POI för ett block är en digest för alla entity store-transaktioner för en specifik subgrafdepå fram till och med det blocket. -### When are indexing rewards distributed? +### När fördelas indexeringsbelöningar? -Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). +Tilldelningar ackumulerar kontinuerligt belöningar medan de är aktiva och tilldelade inom 28 epoker. 
Belöningarna samlas in av indexerarna och distribueras när deras tilldelningar stängs. Det sker antingen manuellt, när indexeraren vill tvinga dem att stängas, eller efter 28 epoker kan en Delegat stänga tilldelningen för indexeraren, men detta resulterar inte i några belöningar. 28 epoker är den maximala tilldelningens livslängd (för närvarande varar en epok i cirka ~24h). -### Can pending indexing rewards be monitored? +### Kan väntande indexeringsbelöningar övervakas? -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +Kontraktet RewardsManager har en skrivskyddad funktion [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) som kan användas för att kontrollera de väntande belöningarna för en specifik tilldelning. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +Många av gemenskapens egentillverkade instrument inkluderar värden för väntande belöningar och de kan enkelt kontrolleras manuellt genom att följa dessa steg: -1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: +1. Fråga [mainnet-subgrafen](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) för att få ID:n för alla aktiva tilldelningar: ```graphql query indexerAllocations { @@ -58,139 +58,139 @@ query indexerAllocations { } ``` -Use Etherscan to call `getRewards()`: +Använd Etherscan för att anropa `getRewards()`: -- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Navigera till [Etherscan-gränssnittet till belöningskontraktet](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) -* To call `getRewards()`: - - Expand the **10. getRewards** dropdown. - - Enter the **allocationID** in the input. - - Click the **Query** button. +* För att anropa `getRewards()`: + - Expandera rullgardinsmenyn **10. getRewards**. + - Ange **allocationID** i inmatningen. + - Klicka på **Fråga**-knappen. -### What are disputes and where can I view them? +### Vad är tvister och var kan jag se dem? -Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. +Indexerares frågor och tilldelningar kan båda bli föremål för tvister på The Graph under tvisteperioden. Tvisteperioden varierar beroende på typen av tvist. Frågor/erkännanden har en tvistefönster på 7 epocher, medan tilldelningar har 56 epocher. Efter att dessa perioder har passerat kan inga tvister öppnas mot vare sig tilldelningar eller frågor. 
När en tvist öppnas krävs en insättning av minst 10 000 GRT av Fishermen, som kommer att vara låsta tills tvisten är avslutad och en resolution har lämnats. Fishermen är nätverksdeltagare som öppnar tvister. -Disputes have **three** possible outcomes, so does the deposit of the Fishermen. +Tvister har **tre** möjliga utfall, liksom insättningen från Fishermen. -- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. -- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. -- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. +- Om tvisten avvisas kommer den GRT som satts in av Fishermen att brännas, och den ifrågasatta Indexern kommer inte att bli straffad. +- Om tvisten avgörs som oavgjord kommer Fishermens insättning att återbetalas, och den ifrågasatta Indexern kommer inte att bli straffad. +- Om tvisten godkänns kommer den GRT som satts in av Fishermen att återbetalas, den ifrågasatta Indexern kommer att bli straffad, och Fishermen kommer att tjäna 50% av den straffade GRT. -Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. +Tvister kan ses i gränssnittet på en Indexers profil under fliken `Tvister`. -### What are query fee rebates and when are they distributed? +### Vad är återbetalningar av frågeavgifter och när distribueras de? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Förfrågningsavgifter samlas in av gateway och fördelas till indexerare enligt den exponentiella rabattfunktionen (se GIP [här](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). Den exponentiella rabattfunktionen föreslås som ett sätt att säkerställa att indexerare uppnår det bästa resultatet genom att troget servera förfrågningar. Den fungerar genom att ge indexerare incitament att allokerar en stor mängd insats (som kan beskäras om de begår fel när de serverar en förfrågan) i förhållande till den mängd förfrågningsavgifter de kan samla in. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +När en tilldelning har avslutats är återbetalningarna tillgängliga för Indexern att hämta. Vid hämtning distribueras frågeavgiftsåterbetalningarna till Indexern och deras Delegatorer baserat på frågeavgiftsminskningen och den exponentiella rabattfunktionen. -### What is query fee cut and indexing reward cut? +### Vad är frågeavgiftsminskning och minskning av indexeringsbelöning? -The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. 
See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. +Värdena `queryFeeCut` och `indexingRewardCut` är delegationparametrar som Indexern kan sätta tillsammans med `cooldownBlocks` för att kontrollera distributionen av GRT mellan Indexern och deras Delegatorer. Se de sista stegen i [Staking i protokollet](/network/indexing#stake-in-the-protocol) för anvisningar om att ställa in delegationparametrarna. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - andelen frågeavgiftsåterbetalningar som kommer att distribueras till Indexern. Om detta är inställt på 95% kommer Indexern att få 95% av de frågeavgifter som tjänas när en tilldelning avslutas, medan de andra 5% fördelas till Delegatorerna. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - andelen indexeringsbelöningar som kommer att distribueras till Indexern. Om detta är inställt på 95% kommer Indexern att få 95% av indexeringsbelöningarna när en tilldelning avslutas, och Delegatorerna kommer att dela de återstående 5%. -### How do Indexers know which subgraphs to index? +### Hur vet Indexers vilka subgrafer de ska indexera? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexers kan skilja sig åt genom att tillämpa avancerade tekniker för att fatta beslut om indexering av subgrafer, men för att ge en allmän idé kommer vi att diskutera flera viktiga metoder som används för att utvärdera subgrafer i nätverket: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Kureringssignal** - Andelen nätverkskureringssignal som används för en specifik subgraf är en bra indikator på intresset för den subgrafen, särskilt under uppstartsfasen när frågevolymen ökar. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Inkassering av frågeavgifter** - Historisk data för volymen av frågeavgifter som samlats in för en specifik subgraf är en bra indikator på framtida efterfrågan. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Insatsbelopp** - Att övervaka beteendet hos andra Indexers eller att titta på andelar av total insats som allokerats till specifika subgrafer kan låta en Indexer övervaka tillgångssidan för subgrafsförfrågningar och identifiera subgrafer som nätverket visar förtroende för eller subgrafer som kan behöva mer tillgång. 
-- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgrafer utan indexeringsbelöningar** - Vissa subgrafer genererar inte indexeringsbelöningar huvudsakligen eftersom de använder otillåtna funktioner som IPFS eller eftersom de frågar ett annat nätverk utanför mainnet. Du kommer att se ett meddelande på en subgraf om den inte genererar indexeringsbelöningar. -### What are the hardware requirements? +### Vilka är de tekniska kraven? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Liten** - Tillräckligt för att komma igång med att indexera flera subgrafer, kommer sannolikt att behöva utökas. +- **Standard** - Standardinställning, detta är vad som används i exempelvis k8s/terraform-implementeringsmanifesten. +- **Medium** - Produktionsindexer som stöder 100 subgrafer och 200-500 förfrågningar per sekund. +- **Stor** - Förberedd för att indexera alla för närvarande använda subgrafer och att ta emot förfrågningar för relaterad trafik. -| Setup | Postgres
    (CPUs) | Postgres
    (memory in GBs) | Postgres
    (disk in TBs) | VMs
    (CPUs) | VMs
    (memory in GBs) | +| Konfiguration | Postgres
    (CPU:er) | Postgres
    (minne i GB) | Postgres
    (disk i TB) | VM:er
    (CPU:er) | VM:er
    (minne i GB) | | --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | +| Liten | 4 | 8 | 1 | 4 | 16 | | Standard | 8 | 30 | 1 | 12 | 48 | | Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Stor | 72 | 468 | 3,5 | 48 | 184 | -### What are some basic security precautions an Indexer should take? +### Vilka grundläggande säkerhetsåtgärder bör en Indexer vidta? -- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. +- **Operatörplånbok** - Att skapa en operatörplånbok är en viktig försiktighetsåtgärd eftersom den möjliggör att en Indexer kan upprätthålla separation mellan sina nycklar som styr insatsen och de som är ansvariga för dagliga operationer. Se [Insats i protokollet](/network/indexing#stake-in-the-protocol) för anvisningar. -- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. +- **Brandvägg** - Endast Indexertjänsten behöver vara offentligt exponerad och särskild uppmärksamhet bör ägnas åt att säkra administrativa portar och databasåtkomst: Graph Node JSON-RPC-gränssnittet (standardport: 8030), Indexerhanterings-API-gränssnittet (standardport: 18000) och PostgreSQL-databasgränssnittet (standardport: 5432) bör inte vara exponerade. -## Infrastructure +## Infrastruktur -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +I centrum av en Indexers infrastruktur finns Graph Node, som övervakar de indexerade nätverken, extraherar och laddar data enligt en subgrafdefinition och serverar det som en [GraphQL API](/about/#how-the-graph-works). Graph Node måste vara ansluten till en endpoint som exponerar data från varje indexerat nätverk; en IPFS-nod för att hämta data; en PostgreSQL-databas för lagring; och Indexer-komponenter som underlättar dess interaktioner med nätverket. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL-databas** - Huvudlagret för Graph Node, detta är där subgrafdata lagras. Indexertjänsten och agenten använder också databasen för att lagra state channel-data, kostnadsmodeller, indexeringsregler och tilldelningsåtgärder. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. 
It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Dataendpoint** - För EVM-kompatibla nätverk måste Graph Node vara ansluten till en endpoint som exponerar en EVM-kompatibel JSON-RPC-API. Detta kan ta form av en enskild klient eller det kan vara en mer komplex konfiguration som balanserar belastningen över flera. Det är viktigt att vara medveten om att vissa subgrafer kan kräva specifika klientfunktioner som arkivläge och/eller parity-spårnings-API. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS-nod (version mindre än 5)** - Metadata för subgrafdistribution lagras på IPFS-nätverket. Graph Node har huvudsakligen åtkomst till IPFS-noden under subgrafdistributionen för att hämta subgrafmanifestet och alla länkade filer. Nätverksindexer behöver inte hosta sin egen IPFS-nod, en IPFS-nod för nätverket är värd på https://ipfs.network.thegraph.com. -- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. +- **Indexertjänst** - Hanterar alla nödvändiga externa kommunikationer med nätverket. Delar kostnadsmodeller och indexeringsstatus, skickar frågebegäranden från gateways vidare till en Graph Node och hanterar frågebetalningar via tillståndskanaler med gatewayen. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexeragent** - Underlättar Indexers interaktioner på kedjan, inklusive registrering i nätverket, hantering av subgrafdistributioner till sina Graph Node/noder och hantering av tilldelningar. -- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. +- **Prometheus-metrisk server** - Graph Node och Indexer-komponenter loggar sina metriska data till metrisk servern. -Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. +Observera: För att stödja smidig skalning rekommenderas det att fråge- och indexeringsbekymmer separeras mellan olika uppsättningar noder: frågenoder och indexnoder. -### Ports overview +### Översikt över portar -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. +> **Viktigt**: Var försiktig med att offentligt exponera portar - **administrativa portar** bör vara säkra. Detta inkluderar JSON-RPC för Graph Node och Indexer-hanteringsendpunkterna som beskrivs nedan. -#### Graph Node +#### Graf Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Syfte | Vägar | CLI-argument | Miljövariabel | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | GraphQL HTTP-server
    (för subgraf-förfrågningar) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (för subgraf-prenumerationer) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (för hantering av distributioner) | / | --admin-port | - | +| 8030 | Subgrafindexeringsstatus-API | /graphql | --index-node-port | - | +| 8040 | Prometheus-metrar | /metrics | --metrics-port | - | -#### Indexer Service +#### Indexertjänst -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Syfte | Vägar | CLI-argument | Miljövariabel | | --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
    (for paid subgraph queries) | /subgraphs/id/...
    /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| 7600 | GraphQL HTTP-server
    (för betalda subgraf-förfrågningar) | /subgraphs/id/...
    /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus-metrar | /metrics | --metrics-port | - | -#### Indexer Agent +#### Indexeragent -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Syfte | Vägar | CLI-argument | Miljövariabel | +| ---- | --------------------- | ----- | ------------------------- | --------------------------------------- | +| 8000 | Indexerhanterings-API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Setup server infrastructure using Terraform on Google Cloud +### Konfigurera serverinfrastruktur med Terraform på Google Cloud -> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. +> Obs: Indexers kan alternativt använda AWS, Microsoft Azure eller Alibaba. -#### Install prerequisites +#### Installera förutsättningar -- Google Cloud SDK -- Kubectl command line tool +- SDK för Google Cloud +- Kubectl kommandoradsverktyg - Terraform -#### Create a Google Cloud Project +#### Skapa ett Google Cloud-projekt -- Clone or navigate to the Indexer repository. +- Klonad eller navigera till Indexer-repositoriet. -- Navigate to the ./terraform directory, this is where all commands should be executed. +- Navigera till ./terraform-mappen, detta är där alla kommandon ska köras. ```sh cd terraform ``` -- Authenticate with Google Cloud and create a new project. +- Autentisera dig med Google Cloud och skapa ett nytt projekt. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use the Google Cloud Console's billing page to enable billing for the new project. +- Använd faktureringssidan i Google Cloud Console för att aktivera fakturering för det nya projektet. -- Create a Google Cloud configuration. +- Skapa en Google Cloud-konfiguration. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Enable required Google Cloud APIs. +- Aktivera nödvändiga API:er för Google Cloud. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Create a service account. +- Skapa ett servicekonto. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Enable peering between database and Kubernetes cluster that will be created in the next step. +- Aktivera peering mellan databasen och Kubernetes-klustret som kommer att skapas i nästa steg. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Create minimal terraform configuration file (update as needed). +- Skapa en minimal konfigurationsfil för terraformen (uppdatera vid behov). 
```sh indexer= @@ -262,24 +262,24 @@ database_password = "" EOF ``` -#### Use Terraform to create infrastructure +#### Använd Terraform för att skapa infrastruktur -Before running any commands, read through [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) and create a file `terraform.tfvars` in this directory (or modify the one we created in the last step). For each variable where you want to override the default, or where you need to set a value, enter a setting into `terraform.tfvars`. +Innan du kör några kommandon, läs igenom [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) och skapa en fil `terraform.tfvars` i denna katalog (eller modifiera den som vi skapade i det sista steget). För varje variabel där du vill åsidosätta standardinställningen, eller där du behöver ange ett värde, anger du en inställning i `terraform.tfvars`. -- Run the following commands to create the infrastructure. +- Kör följande kommandon för att skapa infrastrukturen. ```sh -# Install required plugins +# Installera nödvändiga plugins terraform init -# View plan for resources to be created +# Se plan för resurser som ska skapas terraform plan -# Create the resources (expect it to take up to 30 minutes) +# Skapa resurserna (räkna med att det tar upp till 30 minuter) terraform apply ``` -Download credentials for the new cluster into `~/.kube/config` and set it as your default context. +Ladda ner autentiseringsuppgifter för det nya klustret till `~/.kube/config` och ange det som din standardkontext. ```sh gcloud container clusters get-credentials $indexer @@ -287,21 +287,21 @@ kubectl config use-context $(kubectl config get-contexts --output='name' | grep $indexer) ``` -#### Creating the Kubernetes components for the Indexer +#### Skapa Kubernetes-komponenter för indexeraren -- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. +- Kopiera katalogen `k8s/overlays` till en ny katalog `$dir` och justera posten `bases` i `$dir/kustomization.yaml` så att den pekar på katalogen `k8s/base`. -- Read through all the files in `$dir` and adjust any values as indicated in the comments. +- Läs igenom alla filer i `$dir` och justera eventuella värden enligt anvisningarna i kommentarerna. -Deploy all resources with `kubectl apply -k $dir`. +Installera alla resurser med `kubectl apply -k $dir`. -### Graph Node +### Graf Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) är en öppen källkodsimplementering i Rust som eventkällor Ethereum-blockkedjan för att deterministiskt uppdatera en datalagring som kan förfrågas via GraphQL-endpunkten. Utvecklare använder subgrafer för att definiera sitt schema och en uppsättning avbildningar för att omvandla data som hämtas från blockkedjan, och Graph Node hanterar synkroniseringen av hela kedjan, övervakning av nya block och servering av den via en GraphQL-endpunkt. 
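Som en snabb kontroll av att en körande Graph Node faktiskt serverar data kan dess endpunkter frågas direkt. Skissen nedan antar en lokal nod med standardportarna från porttabellen ovan (8000 för subgraf-förfrågningar och 8030 för indexeringsstatus); `<DEPLOYMENT_ID>` är en platshållare och fältnamnen i status-API:et kan variera mellan graph-node-versioner.

```sh
# Fråga subgrafindexeringsstatus-API:et (standardport 8030, route /graphql).
curl -s -X POST "http://localhost:8030/graphql" \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ indexingStatuses { subgraph synced health } }"}'

# Fråga en specifik subgraf via GraphQL HTTP-servern (standardport 8000).
curl -s -X POST "http://localhost:8000/subgraphs/id/<DEPLOYMENT_ID>" \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ _meta { block { number } } }"}'
```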
-#### Getting started from source +#### Komma igång från källkoden -#### Install prerequisites +#### Installera förutsättningar - **Rust** @@ -309,15 +309,15 @@ Deploy all resources with `kubectl apply -k $dir`. - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Ytterligare krav för Ubuntu-användare** - För att köra en Graph Node på Ubuntu kan några ytterligare paket behövas. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Inställningar -1. Start a PostgreSQL database server +1. Starta en PostgreSQL-databasserver ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Klona [Graph Node](https://github.com/graphprotocol/graph-node) repo och bygg källkoden genom att köra `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Nu när alla beroenden är konfigurerade startar du Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -336,81 +336,81 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Komma igång med Docker -#### Prerequisites +#### Förutsättningar -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. +- **Ethereum nod** - Som standard kommer docker compose-konfigurationen att använda mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) för att ansluta till Ethereum-noden på din värdmaskin. Du kan ersätta detta nätverksnamn och Url genom att uppdatera `docker-compose.yaml`. -#### Setup +#### Inställning -1. Clone Graph Node and navigate to the Docker directory: +1. Klona Graph Node och navigera till Docker-katalogen: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml`using the included script: +2. Endast för Linux-användare - Använd värdens IP-adress istället för `host.docker.internal` i `docker-compose.yaml` med hjälp av det medföljande skriptet: ```sh ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Starta en lokal Graph Node som kommer att ansluta till din Ethereum-ändpunkt: ```sh docker-compose up ``` -### Indexer components +### Indexerkomponenter -To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: +För att framgångsrikt delta i nätverket krävs nästan konstant övervakning och interaktion, så vi har byggt en uppsättning Typescript-applikationer för att underlätta en Indexers nätverksdeltagande. Det finns tre Indexer-komponenter: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards on chain and how much is allocated towards each. 
+- **Indexeragent** - Agenten övervakar nätverket och Indexerens egen infrastruktur och hanterar vilka subgraph-distributioner som indexeras och tilldelas på kedjan och hur mycket som tilldelas till varje. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexertjänst** - Den enda komponenten som behöver exponeras externt, tjänsten vidarebefordrar subgraph-förfrågningar till grafnoden, hanterar tillståndskanaler för förfrågningsbetalningar, delar viktig beslutsinformation till klienter som gatewayer. -- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. +- **Indexer CLI** - Kommandoradsgränssnittet för att hantera Indexeragenten. Det gör det möjligt för Indexers att hantera kostnadsmodeller, manuella tilldelningar, kö för åtgärder och indexregler. -#### Getting started +#### Komma igång -The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! +Indexer-agent och Indexer-service bör placeras tillsammans med din Graph Node-infrastruktur. Det finns många sätt att konfigurera virtuella exekveringsmiljöer för dina Indexer-komponenter; här förklarar vi hur du kör dem på bare-metal med hjälp av NPM-paket eller källkod, eller via Kubernetes och Docker på Google Cloud Kubernetes Engine. Om dessa installations exempel inte passar bra för din infrastruktur kommer det sannolikt att finnas en gemenskapsråd att använda som referens, kom och säg hej på [Discord](https://discord.gg/graphprotocol)! Kom ihåg att [satsa på protokollet](/network/indexing#stake-in-the-protocol) innan du startar dina Indexer-komponenter! -#### From NPM packages +#### Från NPM-paket ```sh npm install -g @graphprotocol/indexer-service npm install -g @graphprotocol/indexer-agent -# Indexer CLI is a plugin for Graph CLI, so both need to be installed: +# Indexer CLI är ett plugin för Graph CLI, så båda måste installeras: npm install -g @graphprotocol/graph-cli npm install -g @graphprotocol/indexer-cli -# Indexer service +# Indexeringstjänst graph-indexer-service start ... -# Indexer agent +# Indexerare agent graph-indexer-agent start ... -# Indexer CLI -#Forward the port of your agent pod if using Kubernetes +# CLI för indexerare +#Vidarebefordra porten till din agent-pod om du använder Kubernetes kubectl port-forward pod/POD_ID 18000:8000 graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### From source +#### Från källa ```sh -# From Repo root directory +# Från Repos rotkatalog yarn -# Indexer Service +# Indexeringstjänst cd packages/indexer-service ./bin/graph-indexer-service start ... -# Indexer agent +# Indexerare agent cd packages/indexer-agent ./bin/graph-indexer-service start ... 
@@ -420,16 +420,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Using docker +#### Använda docker -- Pull images from the registry +- Hämta bilder från registret ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Or build images locally from source +Eller skapa bilder lokalt från källkod ```sh # Indexer service @@ -444,24 +444,24 @@ docker build \ -t indexer-agent:latest \ ``` -- Run the components +- Kör komponenterna ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). +**OBS**: Efter att ha startat containrarna bör Indexertjänsten vara åtkomlig på [http://localhost:7600](http://localhost:7600) och Indexeragenten bör exponera Indexerhanterings-API:et på [http://localhost:18000/](http://localhost:18000/). -#### Using K8s and Terraform +#### Användning av K8s och Terraform -See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section +Se avsnittet [Konfigurera serverinfrastruktur med Terraform på Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) -#### Usage +#### Användning -> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **OBS**: Alla körtidskonfigurationsvariabler kan antingen tillämpas som parametrar till kommandot vid start eller med miljövariabler i formatet `COMPONENT_NAME_VARIABLE_NAME` (ex. `INDEXER_AGENT_ETHEREUM`). -#### Indexer agent +#### Indexeragent ```sh graph-indexer-agent start \ @@ -490,7 +490,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer service +#### Indexeringstjänst ```sh SERVER_HOST=localhost \ @@ -518,56 +518,56 @@ graph-indexer-service start \ #### Indexer CLI -The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. +Indexer CLI är ett insticksprogram för [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) tillgängligt i terminalen på `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Indexer management using Indexer CLI +#### Indexerhantering med Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. 
Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. +Det föreslagna verktyget för att interagera med **Indexer Management API** är **Indexer CLI**, ett tillägg till **Graph CLI**. Indexeragenten behöver input från en Indexer för att autonomt interagera med nätverket på Indexers vägnar. Mekanismen för att definiera Indexeragentens beteende är **allokeringhantering** och **indexeringsregler**. I automatiskt läge kan en Indexer använda **indexeringsregler** för att tillämpa sin specifika strategi för att välja subgrafer att indexera och utföra frågor för. Regler hanteras via ett GraphQL API som serveras av agenten och kallas Indexer Management API. I manuellt läge kan en Indexer skapa allokationsåtgärder med **åtgärds kö** och godkänna dem explicit innan de utförs. I övervakningsläge används **indexeringsregler** för att fylla **åtgärds kö** och kräver också explicit godkännande för utförande. -#### Usage +#### Användning -The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. +**Indexer CLI** ansluter till Indexeragenten, vanligtvis via port-vidarebefordran, så CLI behöver inte köras på samma server eller kluster. För att hjälpa dig komma igång och ge lite kontext kommer CLI att beskrivas här kortfattat. -- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - Anslut till Indexerhanterings-API:et. Vanligtvis öppnas anslutningen till servern via port-vidarebefordran, så CLI kan enkelt användas fjärrstyras. (Exempel: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. +- `graph indexer rules get [options] [ ...]` - Hämta en eller flera indexeringsregler med `all` som `` för att hämta alla regler, eller `global` för att hämta de globala standardvärdena. Ett ytterligare argument `--merged` kan användas för att ange att regler specifika för distributionen slås samman med den globala regeln. Detta är hur de tillämpas i Indexeragenten. -- `graph indexer rules set [options] ...` - Set one or more indexing rules. +- `graph indexer rules set [options] ...` - Ange en eller flera indexeringsregler. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Starta indexering av en subgraph-distribution om den är tillgänglig och ange dess `decisionBasis` till `always`, så kommer Indexeragenten alltid att välja att indexera den. Om den globala regeln är inställd på always kommer alla tillgängliga subgrafer på nätverket att indexeras. 
-- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. +- `graph indexer rules stop [options] ` - Stoppa indexeringen av en distribution och ange dess `decisionBasis` till never, så kommer den att hoppa över den här distributionen när den beslutar om distributioner att indexera. -- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. +- `graph indexer rules maybe [options] ` — Ange `decisionBasis` för en distribution till `rules`, så kommer Indexeragenten att använda indexeringsregler för att avgöra om den ska indexera den här distributionen. -- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additonal argument `--status` can be used to print out all actions of a certain status. +- `graph indexer actions get [options] ` - Hämta en eller flera åtgärder med `all` eller lämna `action-id` tomt för att hämta alla åtgärder. Ett ytterligare argument `--status` kan användas för att skriva ut alla åtgärder med en viss status. -- `graph indexer action queue allocate ` - Queue allocation action +- `graph indexer action queue allocate ` - Köa allokationsåtgärd -- `graph indexer action queue reallocate ` - Queue reallocate action +- `graph indexer action queue reallocate ` - Köa omallokeringsåtgärd -- `graph indexer action queue unallocate ` - Queue unallocate action +- `graph indexer action queue unallocate ` - Köa avallokeringsåtgärd -- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator +- `graph indexer actions cancel [ ...]` - Avbryt alla åtgärder i kön om id inte anges, annars avbryt arrayen med id med mellanslag som separator -- `graph indexer actions approve [ ...]` - Approve multiple actions for execution +- `graph indexer actions approve [ ...]` - Godkänn flera åtgärder för utförande -- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately +- `graph indexer actions execute approve` - Tvinga arbetaren att omedelbart utföra godkända åtgärder -All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. +Alla kommandon som visar regler i utdata kan välja mellan de stödda utdataformaten (`table`, `yaml` och `json`) med hjälp av argumentet `-output`. -#### Indexing rules +#### Indexeringsregler -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Indexeringsregler kan antingen tillämpas som globala standardvärden eller för specifika subgraph-distributioner med deras ID. Fälten `deployment` och `decisionBasis` är obligatoriska, medan alla andra fält är valfria. 
När en indexeringsregel har `rules` som `decisionBasis`, jämför Indexeragenten tröskelvärden som inte är null på den regeln med värden som hämtas från nätverket för den motsvarande distributionen. Om subgraph-distributionen har värden över (eller under) någon av tröskelvärdena kommer den att väljas för indexering. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +Till exempel, om den globala regeln har en `minStake` på **5** (GRT), kommer vilken subgraph-distribution som helst som har mer än 5 (GRT) satsat på den att indexeras. Tröskelregler inkluderar `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake` och `minAverageQueryFees`. -Data model: +Datamodell: ```graphql type IndexingRule { @@ -601,7 +601,7 @@ IndexingDecisionBasis { } ``` -Example usage of indexing rule: +Exempel på användning av indexeringsregel: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,20 +613,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Actions queue CLI +#### CLI för åtgärdskön -The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. +Indexer-cli tillhandahåller en `actions`-modul för manuellt arbete med åtgärdskön. Den använder **Graphql API** som hostas av indexeringshanteringsservern för att interagera med åtgärdskön. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +Åtgärdsutförande-arbetaren kommer endast att hämta objekt från kön för att utföra om de har `ActionStatus = approved`. På den rekommenderade vägen läggs åtgärder till i kön med ActionStatus = queued, så de måste sedan godkännas för att utföras på kedjan. Det generella flödet kommer att se ut som följer: -- Action added to the queue by the 3rd party optimizer tool or indexer-cli user -- Indexer can use the `indexer-cli` to view all queued actions -- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. -- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. -- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. -- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken.
+- Åtgärd läggs till i kön av tredjeparts-optimeringsverktyget eller indexer-cli-användaren +- Indexer kan använda `indexer-cli` för att visa alla köade åtgärder +- Indexer (eller annan programvara) kan godkänna eller avbryta åtgärder i kön med hjälp av `indexer-cli`. Godkänn och avbryt kommandon tar en matris av åtgärds-id som inmatning. +- Utförande-arbetaren kollar regelbundet kön för godkända åtgärder. Den hämtar de `approved` åtgärderna från kön, försöker utföra dem och uppdaterar värdena i databasen beroende på utförandestatus till `success` eller `failed`. +- Om en åtgärd är framgångsrik kommer arbetaren att se till att det finns en indexeringsregel som berättar för agenten hur allokeringen ska hanteras framöver, användbart när man tar manuella åtgärder medan agenten är i `auto` eller `oversight` -läge. +- Indexer kan övervaka åtgärds kön för att se en historia över åtgärdsutförande och om det behövs godkänna om och uppdatera åtgärdsobjekt om de misslyckades med utförande. Åtgärds kön ger en historia över alla köade och tagna åtgärder. -Data model: +Datamodell: ```graphql Type ActionInput { @@ -659,147 +659,143 @@ ActionType { } ``` -Example usage from source: +Exempel på användning från källa: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` -Note that supported action types for allocation management have different input requirements: +Observera att åtgärdstyper som stöds för allokeringshantering har olika krav på indata: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Tilldela` - allokera insats till en specifik subgraph-deploering - - required action params: + - obligatoriska åtgärdsparametrar: - deploymentID - - amount + - belopp -- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere +- `Avslå` - stäng allokeringen och frigör insatsen för omallokering någon annanstans - - required action params: + - obligatoriska åtgärdsparametrar: - allocationID - deploymentID - - optional action params: + - valfria åtgärdsparametrar: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (tvingar användning av den angivna POI även om den inte matchar det som grafnoden tillhandahåller) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Omallokera` - atomiskt stäng allokeringen och öppna en ny allokering för samma subgraph-deploering - - required action params: + - obligatoriska åtgärdsparametrar: - 
allocationID - deploymentID - - amount - - optional action params: + - belopp + - valfria åtgärdsparametrar: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (tvingar användning av den angivna POI även om den inte matchar det som grafnoden tillhandahåller) -#### Cost models +#### Kostnadsmodeller -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Kostnadsmodeller tillhandahåller dynamisk prissättning för frågor baserat på marknaden och frågans egenskaper. Indexer Service delar en kostnadsmodell med gatewayerna för varje subgraph för vilka de avser att svara på frågor. Gatewayerna använder i sin tur kostnadsmodellen för att fatta beslut om indexeringsval per fråga och för att förhandla om betalning med valda indexers. #### Agora -The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. +Agora-språket ger ett flexibelt format för deklaration av kostnadsmodeller för frågor. En Agora-prismodell är en sekvens av uttalanden som utförs i ordning för varje toppnivåfråga i en GraphQL-fråga. För varje toppnivåfråga avgör det första uttalandet som matchar den priset för den frågan. -A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. +Ett uttalande består av en predikat, som används för att matcha GraphQL-frågor, och ett kostnadsuttryck som när det utvärderas ger en kostnad i decimal GRT. Värden i den namngivna argumentpositionen i en fråga kan fångas i predikatet och användas i uttrycket. Globala variabler kan också sättas och ersättas för platshållare i ett uttryck. -Example cost model: +Exempel kostnadsmodell: ``` -# This statement captures the skip value, -# uses a boolean expression in the predicate to match specific queries that use `skip` -# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +# Detta uttalande fångar skip-värdet, +# använder ett booleskt uttryck i predikatet för att matcha specifika frågor som använder `skip` +# och ett kostnadsuttryck för att beräkna kostnaden baserat på `skip`-värdet och den globala SYSTEM_LOAD query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# This default will match any GraphQL expression. -# It uses a Global substituted into the expression to calculate cost +# Denna standard matchar alla GraphQL-uttryck. 
+# Den använder en Global som ersatts i uttrycket för att beräkna kostnaden default => 0.1 * $SYSTEM_LOAD; ``` -Example query costing using the above model: +Exempel på kostnadskalkyl enligt ovanstående modell: -| Query | Price | +| Fråga | Pris | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### Applying the cost model +#### Tillämpning av kostnadsmodellen -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. +Kostnadsmodeller tillämpas via Indexer CLI, som skickar dem till Indexer Management API för Indexer agent för lagring i databasen. Indexer Service kommer sedan att hämta dem och servera kostnadsmodellerna till gatewayerna när de begär dem. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interacting with the network +## Interagera med nätverket -### Stake in the protocol +### Satsa i protokollet -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ +De första stegen för att delta i nätverket som en Indexer är att godkänna protokollet, satsa medel och (valfritt) sätta upp en operatörsadress för dagliga protokollinteraktioner. _ **Observera**: För dessa instruktioner används Remix för kontraktsinteraktion, men använd gärna din valfria verktyg ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/) och [MyCrypto](https://www.mycrypto.com/account) är några andra kända verktyg)._ -Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. +När en Indexer har satsat GRT i protokollet kan [Indexer-komponenterna](/network/indexing#indexer-components) startas och börja interagera med nätverket. -#### Approve tokens +#### Godkänn tokens -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Öppna [Remix-appen](https://remix.ethereum.org/) i en webbläsare -2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. I `Filutforskaren` skapa en fil med namnet **GraphToken.abi** med [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. +3. Med `GraphToken.abi` markerad och öppen i redigeraren, växla till avsnittet Implementera och `Kör transaktioner` i Remix-gränssnittet. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Under miljö väljer du `Injected Web3` och under `Konto` väljer du din Indexer-adress. -5. 
Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. +5. Ange kontraktadressen för GraphToken - Klistra in kontraktadressen för GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) bredvid `Vid adress` och klicka på knappen `Vid adress` för att tillämpa. -6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). +6. Anropa funktionen `approve(spender, amount)` för att godkänna Staking-kontraktet. Fyll i `spender` med Staking-kontraktadressen (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) och `amount` med de tokens som ska satsas (i wei). -#### Stake tokens +#### Satsa tokens -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Öppna [Remix-appen](https://remix.ethereum.org/) i en webbläsare -2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. +2. I `Filutforskaren` skapa en fil med namnet **Staking.abi** med stakings ABI. -3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. +3. Med `Staking.abi` markerad och öppen i redigeraren, växla till avsnittet `Implementera` och `Kör transaktioner` i Remix-gränssnittet. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Under miljö väljer du `Injected Web3` och under `Konto` väljer du din Indexer-adress. -5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. +5. Ange kontraktadressen för Staking - Klistra in kontraktadressen för Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) bredvid `Vid adress` och klicka på knappen `Vid adress` för att tillämpa. -6. Call `stake()` to stake GRT in the protocol. +6. Anropa `stake()` för att satsa GRT i protokollet. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Valfritt) Indexers kan godkänna en annan adress att vara operatör för sin Indexer-infrastruktur för att separera de nycklar som kontrollerar medlen från de som utför dagliga åtgärder som att tilldela på subgrafer och servera (betalda) frågor. För att ställa in operatören anropas `setOperator()` med operatörsadressen. -8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. +8. 
(Valfritt) För att kontrollera fördelningen av belöningar och strategiskt attrahera Delegators kan Indexers uppdatera sina delegationsparametrar genom att uppdatera sina `indexingRewardCut` (delar per miljon), `queryFeeCut` (delar per miljon) och `cooldownBlocks` (antal block). För att göra detta, anropa `setDelegationParameters()`. Följande exempel anger `queryFeeCut` för att fördela 95% av frågebidragen till Indexer och 5% till Delegators, ställ `indexingRewardCut` för att fördela 60% av indexbelöningarna till Indexer och 40% till Delegators, och ställ in perioden för `thecooldownBlocks` till 500 block. ``` setDelegationParameters(950000, 600000, 500) ``` -### The life of an allocation - -After being created by an Indexer a healthy allocation goes through four states. - -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +### En allokations livscykel -- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). +Efter att ha skapats av en Indexer går en sund allokering igenom fyra tillstånd. -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. +- **Aktiv** - När en allokering skapas på kedjan ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) betraktas den som **aktiv**. En del av Indexerens egen och/eller delegerad insats allokeras till en subgraph-deployering, vilket möjliggör att de kan hävda indexbelöningar och tjäna frågor för den subgraph-deployeringen. Indexer-agenten hanterar skapandet av allokeringar baserat på Indexer-reglerna. -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. +- **Stängd** - En Indexer är fri att stänga en allokering när 1 epoch har passerat ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) eller deras Indexer-agent kommer automatiskt att stänga allokeringen efter den **maxAllocationEpochs** (för närvarande 28 dagar). När en allokering stängs med ett giltigt bevis för indexering (POI) fördelas deras indexbelöningar till Indexer och dess Delegators (se "hur fördelas belöningar?" nedan för att lära dig mer). 
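En minimal räkneskiss, utifrån antagandet att delegationsparametrarna i steg 8 ovan anges i miljondelar (ppm):

```sh
# Delegationsparametrarna anges i miljondelar (ppm); 1 000 000 ppm = 100 %.
# 950000 => 95 % av frågeavgifterna till Indexern, 5 % till Delegatorerna.
# 600000 => 60 % av indexeringsbelöningarna till Indexern, 40 % till Delegatorerna.
echo "queryFeeCut:       $((950000 / 10000)) %"
echo "indexingRewardCut: $((600000 / 10000)) %"
```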
-Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Det rekommenderas att Indexers använder funktionen för offchain-synkronisering för att synkronisera subgraph-distributioner till kedjehuvudet innan de skapar allokeringen på kedjan. Den här funktionen är särskilt användbar för subgrafer som kan ta längre tid än 28 epoker att synkronisera eller har vissa chanser att misslyckas på ett icke-deterministiskt sätt. diff --git a/website/pages/sv/network/overview.mdx b/website/pages/sv/network/overview.mdx index bee546908372..f1f46b58accd 100644 --- a/website/pages/sv/network/overview.mdx +++ b/website/pages/sv/network/overview.mdx @@ -1,15 +1,15 @@ --- -title: Network Overview +title: Nätverksöversikt --- -The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. +The Graph Nätverk är ett decentraliserat indexeringsprotokoll för att organisera blockkedjedata. Applikationer använder GraphQL för att fråga öppna API:er som kallas subgrafer och hämta data som är indexerad på nätverket. Med The Graph kan utvecklare bygga serverlösa applikationer som körs helt på offentlig infrastruktur. -## Overview +## Översikt -The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. +The Graph Nätverk består av Indexers, Curators och Delegatorer som tillhandahåller tjänster till nätverket och levererar data till Web3-applikationer. Konsumenter använder applikationerna och konsumerar datan. -![Token Economics](/img/Network-roles@2x.png) +![Tokenekonomi](/img/Network-roles@2x.png) -To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20 used to allocate resources in the network. +För att säkerställa den ekonomiska säkerheten för The Graph Nätverk och integriteten hos den data som frågas, satsar deltagare och använder Graph Tokens ([GRT](/tokenomics)). GRT är en arbetsnyttighetstoken som är en ERC-20-token som används för att allokera resurser i nätverket. -Active Indexers, Curators and Delegators can provide services and earn income from the network, proportional to the amount of work they perform and their GRT stake. +Aktiva Indexers, Curators och Delegatorer kan tillhandahålla tjänster och tjäna inkomst från nätverket, proportionellt mot mängden arbete de utför och deras GRT-satsning.
diff --git a/website/pages/sv/new-chain-integration.mdx b/website/pages/sv/new-chain-integration.mdx index c5934efa6f87..9888c83b03b4 100644 --- a/website/pages/sv/new-chain-integration.mdx +++ b/website/pages/sv/new-chain-integration.mdx @@ -1,75 +1,75 @@ --- -title: Integrating New Networks +title: Integrering av Nya Nätverk --- -Graph Node can currently index data from the following chain types: +Graf Node kan för närvarande indexera data från följande typer av blockkedjor: -- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) -- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) -- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) -- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) +- Ethereum, via EVM JSON-RPC och [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) +- NEAR, via en [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) +- Cosmos, via en [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) +- Arweave, via en [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) -If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. +Om du är intresserad av någon av dessa blockkedjor är integrering en fråga om konfiguration och testning av Graf Node. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +Om du är intresserad av en annan typ av blockkedja måste en ny integration med Graf Node byggas. Vårt rekommenderade tillvägagångssätt är att utveckla en ny Firehose för den aktuella blockkedjan och sedan integrera den Firehosen med Graph Node. Mer information finns nedan. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). +Om blockkedjan är EVM-ekvivalent och klienten/noden exponerar den standardiserade EVM JSON-RPC API:n, bör Graf Node kunna indexera den nya blockkedjan. För mer information, se [Testa en EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development.
-## Difference between EVM JSON-RPC & Firehose +## Skillnad mellan EVM JSON-RPC och Firehose -While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. +Medan båda alternativen är lämpliga för subgrafer krävs en Firehose alltid för utvecklare som vill bygga med [Substreams](substreams/), som att bygga [Substreams-drivna subgrafer](cookbook/substreams-powered-subgraphs/). Dessutom möjliggör Firehose förbättrade indexeringstider jämfört med JSON-RPC. -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +Nya EVM-blockkedjeintegratörer kan också överväga den Firehose-baserade metoden med tanke på fördelarna med substreams och dess massivt parallella indexeringsegenskaper. Att stödja båda alternativen ger utvecklare möjlighet att välja mellan att bygga substreams eller subgrafer för den nya blockkedjan. -> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **OBS**: En Firehose-baserad integration för EVM-blockkedjor kommer fortfarande att kräva att Indexers kör blockkedjans arkiv-RPC-nod för att korrekt indexera subgrafer. Detta beror på att Firehosen inte kan tillhandahålla den smarta kontraktsstatus som normalt är åtkomlig via `eth_call` RPC-metoden. (Det är värt att påminna om att eth_calls inte är [en bra praxis för utvecklare](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) --- -## Testing an EVM JSON-RPC +## Testa en EVM JSON-RPC -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: +För att Graf Node ska kunna ta emot data från en EVM-blockkedja måste RPC-noden exponera följande EVM JSON-RPC-metoder: - `eth_getLogs` -- `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): +- `eth_call` \_(för historiska block, med EIP-1898 - kräver arkivnod): - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, in a JSON-RPC batch request -- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ +- `eth_getTransactionReceipt`, i en JSON-RPC batch-begäran +- _`trace_filter`_ _(valfritt krav för att Graf Node ska stödja anropshanterare)_ -### Graph Node Configuration +### Graf Node-konfiguration -**Start by preparing your local environment** +**Börja med att förbereda din lokala miljö** -1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL - > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. -3. 
Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ +1. [Klona Graf Node](https://github.com/graphprotocol/graph-node) +2. Ändra [den här raden](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) för att inkludera det nya nätverksnamnet och den EVM JSON-RPC-kompatibla URL:n + > Byt inte namnet på env-var självt. Det måste förbli `ethereum` även om nätverksnamnet är annorlunda. +3. Kör en IPFS-nod eller använd den som används av The Graf: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Testa integrationen genom att lokalt distribuera en subgraf** -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` +1. Installera [graph-cli](https://github.com/graphprotocol/graph-cli) +2. Skapa en enkel exempelsubgraf. Några alternativ är nedan: + 1. Den förpackade [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323)-smartkontrakt och subgraf är en bra startpunkt + 2. Starta en lokal subgraf från ett befintligt smart kontrakt eller en Solidity-utvecklingsmiljö [med hjälp av Hardhat med ett Graf-plugin](https://github.com/graphprotocol/hardhat-graph) +3. Anpassa den resulterande `subgraph.yaml` genom att ändra [`dataSources.network`](http://dataSources.network) till samma namn som tidigare angavs för Graf Node. +4. Skapa din subgraf i Graf Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Publicera din subgraf till Graf Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. +Graf Node bör synkronisera den distribuerade subgrafen om det inte finns några fel. Ge det tid att synkronisera, och skicka sedan några GraphQL-begäranden till API-slutpunkten som skrivs ut i loggarna. --- -## Integrating a new Firehose-enabled chain +## Integrera en ny Firehose-aktiverad blockkedja -Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: +Det är också möjligt att integrera en ny blockkedja med Firehose-metoden. Detta är för närvarande det bästa alternativet för icke-EVM-blockkedjor och ett krav för stöd för delströmmar. Ytterligare dokumentation fokuserar på hur Firehose fungerar, hur du lägger till Firehose-stöd för en ny blockkedja och integrerar den med Graf Node. 
Rekommenderade dokument för integratörer: -1. [General docs on Firehose](firehose/) +1. [Allmänna dokument om Firehose](firehose/) 2. [Adding Firehose support for a new chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) -3. [Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) +3. [Integrera Graf Node med en ny blockkedja via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/sv/operating-graph-node.mdx b/website/pages/sv/operating-graph-node.mdx index 832b6cccf347..20fe3667765e 100644 --- a/website/pages/sv/operating-graph-node.mdx +++ b/website/pages/sv/operating-graph-node.mdx @@ -1,40 +1,40 @@ --- -title: Operating Graph Node +title: Drift av Graf Node --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graf Node är komponenten som indexerar subgraffar och gör den resulterande datan tillgänglig för förfrågan via en GraphQL API. Som sådan är den central för indexeringsstacken, och korrekt drift av Graph Node är avgörande för att driva en framgångsrik indexerare. -This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). +Detta ger en kontextuell översikt över Graph Node och några av de mer avancerade alternativ som är tillgängliga för indexerare. Detaljerad dokumentation och instruktioner finns i [Graph Node-repositoriet](https://github.com/graphprotocol/graph-node). -## Graph Node +## Graf Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graf Node](https://github.com/graphprotocol/graph-node) är referensimplementationen för indexeringsavsnitt på The Graph Nätverk, som ansluter till blockchain-klienter, indexerar subgraffar och gör indexerad data tillgänglig för förfrågan. -Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graf Node (och hela indexeringsstacken) kan köras på rå metall eller i en molnmiljö. Flexibiliteten hos den centrala indexeringskomponenten är avgörande för robustheten i The Graph Protocol. På samma sätt kan Graph Node [byggas från källan](https://github.com/graphprotocol/graph-node) eller indexerare kan använda en av de [medföljande Docker-bilderna](https://hub.docker.com/r/graphprotocol/graph-node). -### PostgreSQL database +### PostgreSQL-databas -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. 
+Huvudlagret för Graph Node, här lagras subgrafdata, liksom metadata om subgraffar och nätverksdata som är oberoende av subgraffar, som blockcache och eth_call-cache. -### Network clients +### Nätverkskunder -In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. +För att indexera ett nätverk behöver Graf Node åtkomst till en nätverksklient via ett EVM-kompatibelt JSON-RPC API. Denna RPC kan ansluta till en enda klient eller så kan det vara en mer komplex konfiguration som lastbalanserar över flera. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +Medan vissa subgrafer kan kräva en fullständig nod, kan vissa ha indexeringsfunktioner som kräver ytterligare RPC-funktionalitet. Specifikt subgrafer som gör `eth_calls` som en del av indexering kommer att kräva en arkivnod som stöder [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), och subgrafer med `callHandlers` eller `blockHandlers` med en `call`-filtrering kräver `trace_filter`-stöd ([se trace-modulens dokumentation här](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). -### IPFS Nodes +### IPFS-noder -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Metadata för distribution av subgraffar lagras på IPFS-nätverket. Graf Node har främst åtkomst till IPFS-noden under distributionen av subgraffar för att hämta subgrafens manifest och alla länkade filer. Nätverksindexerare behöver inte värd sin egen IPFS-nod. En IPFS-nod för nätverket är värd på https://ipfs.network.thegraph.com. -### Prometheus metrics server +### Prometheus server för mätvärden -To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. +För att möjliggöra övervakning och rapportering kan Graf Node valfritt logga metrik till en Prometheus-metrisk server. 
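To make the network client requirements above easier to check, here is a hedged sketch of how a candidate provider could be probed with plain JSON-RPC before pointing Graph Node at it. `$RPC_URL` is a placeholder, and the zero address with empty calldata is only there to exercise the method, not a meaningful call.

```sh
# Sketch: probe a candidate provider for the RPC features Graph Node may need.
# $RPC_URL is a placeholder for the endpoint under test.

# eth_call against a historical block using an EIP-1898 block object (archive node required)
curl -s "$RPC_URL" -H 'content-type: application/json' -d '{
  "jsonrpc": "2.0", "id": 1, "method": "eth_call",
  "params": [{"to": "0x0000000000000000000000000000000000000000", "data": "0x"}, {"blockNumber": "0x1"}]
}'

# trace_filter, needed for subgraphs with call handlers or call-filtered block handlers
curl -s "$RPC_URL" -H 'content-type: application/json' -d '{
  "jsonrpc": "2.0", "id": 2, "method": "trace_filter",
  "params": [{"fromBlock": "0x1", "toBlock": "0x2"}]
}'
```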
-### Getting started from source +### Komma igång från källan -#### Install prerequisites +#### Installera förutsättningar - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Ytterligare krav för Ubuntu-användare** - För att köra en Graf Node på Ubuntu kan några ytterligare paket behövas. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Inställning -1. Start a PostgreSQL database server +1. Starta en PostgreSQL-databasserver ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Klona [Graf Node](https://github.com/graphprotocol/graph-node)-repon och bygg källkoden genom att köra `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Nu när alla beroenden är konfigurerade startar du Graf Node: ```sh cargo run -p graph-node --release -- \ @@ -69,37 +69,37 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -### Getting started with Kubernetes +### Komma igång med Kubernetes -A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +En komplett exempelkonfiguration för Kubernetes finns i [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). -### Ports +### Portar -When it is running Graph Node exposes the following ports: +När Graph Node är igång exponerar den följande portar: -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Syfte | Rutter | Argument för CLI | Miljö Variabel | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | GraphQL HTTP-server
    (för frågor om undergrafer) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (för prenumerationer på undergrafer) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (för hantering av distributioner) | / | --admin-port | - | +| 8030 | Status för indexering av undergrafer API | /graphql | --index-node-port | - | +| 8040 | Prometheus mätvärden | /metrics | --metrics-port | - | -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. +> **Viktigt**: Var försiktig med att exponera portar offentligt - **administrationsportar** bör hållas säkra. Detta inkluderar JSON-RPC-slutpunkten för Graph Node. -## Advanced Graph Node configuration +## Avancerad konfiguration av Graf Node -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +På sitt enklaste sätt kan Graph Node användas med en enda instans av Graph Node, en enda PostgreSQL-databas, en IPFS-nod och nätverksklienter som krävs av de subgrafer som ska indexeras. -This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. +Denna konfiguration kan skalas horisontellt genom att lägga till flera Graph Nodes och flera databaser för att stödja dessa Graph Nodes. Avancerade användare kan vilja dra nytta av vissa av de horisontella skalningsfunktionerna i Graph Node, liksom några av de mer avancerade konfigurationsalternativen via filen `config.toml` och Graph Nodes miljövariabler. ### `config.toml` -A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. +En [TOML](https://toml.io/en/) konfigurationsfil kan användas för att ställa in mer komplexa konfigurationer än de som exponeras i CLI. Platsen för filen överförs med kommandoradsomkopplaren --config. -> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. +> När du använder en konfigurationsfil är det inte möjligt att använda alternativen --postgres-url, --postgres-secondary-hosts och --postgres-host-weights. -A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: +En minimal `config.toml`-fil kan tillhandahållas; följande fil är ekvivalent med att använda kommandoradsalternativet --postgres-url: ```toml [store] @@ -110,19 +110,19 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +Fullständig dokumentation av `config.toml` hittar du i [Graph Node-dokumentationen](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). -#### Multiple Graph Nodes +#### Flera Grafnoder -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. 
in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestor), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Indexering med Graph Node kan skalas horisontellt genom att köra flera instanser av Graph Node för att dela upp indexering och frågning över olika noder. Detta kan göras enkelt genom att köra Graph Nodes konfigurerade med olika `node_id` vid start (t.ex. i Docker Compose-filen), som sedan kan användas i `config.toml`-filen för att specificera [dedikerade frågenoder](#dedikerade-frågenoder), [block ingestors](#dedikerade-block-ingestor), och dela upp subgrafer över noder med [deployeringsregler](#deployeringsregler). -> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. +> Observera att flera Graph Nodes alla kan konfigureras att använda samma databas, som i sig kan skalas horisontellt via sharding. -#### Deployment rules +#### Regler för utplacering -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +Med flera Graph Nodes är det nödvändigt att hantera deployering av nya subgrafer så att samma subgraf inte indexeras av två olika noder, vilket skulle leda till kollisioner. Detta kan göras genom att använda deployeringsregler, som också kan specificera vilken `shard` subgrafens data ska lagras i om databasens sharding används. Deployeringsregler kan matcha subgrafens namn och nätverket som deployeringen indexerar för att fatta ett beslut. -Example deployment rule configuration: +Exempel på konfiguration av deployeringsregler: ```toml [deployment] @@ -150,51 +150,51 @@ indexers = [ ] ``` -Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Läs mer om implementeringsregler [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). -#### Dedicated query nodes +#### Dedikerade frågenoder -Nodes can be configured to explicitly be query nodes by including the following in the configuration file: +Noder kan konfigureras för att uttryckligen vara frågenoder genom att inkludera följande i konfigurationsfilen: ```toml [general] query = "" ``` -Any node whose --node-id matches the regular expression will be set up to only respond to queries. +Alla noder vars --node-id matchar reguljärt uttryck kommer att konfigureras för att endast svara på förfrågningar. -#### Database scaling via sharding +#### Skalning av databas via sharding -For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. +För de flesta användningsfall är en enda Postgres-databas tillräcklig för att stödja en graph-node-instans. 
När en graph-node-instans växer utöver en enda Postgres-databas är det möjligt att dela upp lagringen av graph-node-data över flera Postgres-databaser. Alla databaser tillsammans bildar lagringsutrymmet för graph-node-instansen. Varje individuell databas kallas en shard. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shards kan användas för att dela upp subgraffsdeployeringar över flera databaser och kan också användas för att använda kopior för att sprida frågebelastningen över databaser. Detta inkluderar konfigurering av antalet tillgängliga databasanslutningar som varje `graph-node` bör behålla i sin anslutningspool för varje databas, vilket blir allt viktigare när fler subgrafer blir indexerade. -Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. +Sharding blir användbart när din befintliga databas inte kan hålla jämna steg med belastningen som Graph Node sätter på den och när det inte längre är möjligt att öka databasens storlek. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> Det är generellt sett bättre att göra en enda databas så stor som möjligt innan man börjar med shards. Ett undantag är när frågetrafiken är mycket ojämnt fördelad mellan subgrafer; i dessa situationer kan det hjälpa dramatiskt om högvolymsubgraferna hålls i en shard och allt annat i en annan, eftersom den konfigurationen gör det mer troligt att data för högvolymsubgraferna stannar i databasens interna cache och inte ersätts av data som inte behövs lika mycket från lågvolymsubgrafer. -In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. +När det gäller att konfigurera anslutningar, börja med max_connections i postgresql.conf som är inställt på 400 (eller kanske till och med 200) och titta på Prometheus-metrarna store_connection_wait_time_ms och store_connection_checkout_count. Märkbara väntetider (något över 5 ms) är en indikation på att det finns för få anslutningar tillgängliga; höga väntetider beror också på att databasen är mycket upptagen (som hög CPU-belastning). 
Om databasen verkar annars stabil, indikerar höga väntetider att antalet anslutningar behöver ökas. I konfigurationen är det en övre gräns för hur många anslutningar varje graph-node-instans kan använda, och Graph Node kommer inte att hålla anslutningar öppna om det inte behöver dem. -Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Läs mer om konfiguration av lagring [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). -#### Dedicated block ingestion +#### Intag av dedikerade block -If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: +Om det finns flera konfigurerade noder är det nödvändigt att specificera en nod som är ansvarig för inhämtning av nya block, så att alla konfigurerade indexnoder inte frågar huvudet av kedjan. Detta görs som en del av namnrymden `chains`, där du anger `node_id` som ska användas för blockinhämtning: ```toml [chains] ingestor = "block_ingestor_node" ``` -#### Supporting multiple networks +#### Stöd för flera nätverk -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +Graf Protocol ökar antalet nätverk som stöds för indexering av belöningar, och det finns många undergrafer som indexerar icke-stödda nätverk som en indexerare skulle vilja bearbeta. Filen `config.toml` möjliggör uttrycksfull och flexibel konfiguration av: -- Multiple networks -- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). -- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) +- Flera nätverk +- Flera leverantörer per nätverk (detta kan göra det möjligt att dela upp belastningen mellan leverantörer, och kan också möjliggöra konfiguration av fullständiga noder samt arkivnoder, där Graph Node föredrar billigare leverantörer om en viss arbetsbelastning tillåter det). +- Ytterligare information om leverantören, t. ex. funktioner, autentisering och typ av leverantör (för stöd för experimentell Firehose) -The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. +Avsnittet `[chains]` styr de ethereum-providers som graph-node ansluter till, och var block och andra metadata för varje kedja lagras. Följande exempel konfigurerar två kedjor, mainnet och kovan, där block för mainnet lagras i vip-sharden och block för kovan lagras i den primära sharden. Mainnet-kedjan kan använda två olika leverantörer, medan kovan bara har en leverantör. 
```toml [chains] @@ -210,136 +210,136 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Läs mer om leverantörsconfiguration [här](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). -### Environment variables +### Miljö variabler -Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Node stöder ett utbud av miljövariabler som kan aktivera funktioner eller ändra Graph Node-beteendet. Dessa är dokumenterade [här](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). -### Continuous deployment +### Kontinuerlig driftsättning -Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. +Användare som driver en skalad indexering med avancerad konfiguration kan dra nytta av att hantera sina Graph Nodes med Kubernetes. -- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. +- Indexeringsförrådet har en [exempel på Kubernetes-referens](https://github.com/graphprotocol/indexer/tree/main/k8s) +- [Launchpad](https://docs.graphops.xyz/launchpad/intro) är en verktygslåda för att köra en Graph Protocol Indexet på Kubernetes som underhålls av GraphOps. Den tillhandahåller en uppsättning Hjelm-diagram och en CLI för att hantera en grafnod-distribution. -### Managing Graph Node +### Hantera Graf Noder -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Med en körande Graph Node (eller Graph Nodes!) är utmaningen sedan att hantera distribuerade subgrafer över dessa noder. Graph Node erbjuder en rad verktyg för att hjälpa till med hanteringen av subgrafer. -#### Logging +#### Loggning -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Nodes loggar kan ge användbar information för felsökning och optimering av Graph Node och specifika subgrafer. Graph Node stöder olika loggnivåer via miljövariabeln `GRAPH_LOG`, med följande nivåer: error, warn, info, debug eller trace. -In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). +Dessutom ger inställningen `GRAPH_LOG_QUERY_TIMING` till `gql` mer information om hur GraphQL-frågor körs (dock kommer detta att generera en stor mängd loggar). -#### Monitoring & alerting +#### Övervakning & varning -Graph Node provides the metrics via Prometheus endpoint on 8040 port by default. Grafana can then be used to visualise these metrics. 
+Graph Node tillhandahåller metrikerna via en Prometheus-slutpunkt på port 8040 som standard. Grafana kan sedan användas för att visualisera dessa metriker. -The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +Indexer-repositoriet tillhandahåller en [exempelkonfiguration för Grafana](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). #### Graphman -`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. +`graphman` är ett underhållsverktyg för Graph Node som hjälper till med diagnos och lösning av olika dagliga och exceptionella uppgifter. -The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. +Kommandot graphman ingår i de officiella containrarna, och du kan använda docker exec för att gå in i din graph-node-container och köra det. Det kräver en `config.toml`-fil. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +Fullständig dokumentation om `graphman`-kommandon finns i Graph Node-repositoriet. Se [/docs/graphman.md](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) i Graph Node `/docs`. -### Working with subgraphs +### Arbeta med undergrafer -#### Indexing status API +#### API för indexeringsstatus -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Indexeringsstatus-API:et är som standard tillgängligt på port 8030/graphql och exponerar en rad metoder för att kontrollera indexeringsstatus för olika subgrafer, kontrollera bevis för indexering, inspektera subgrafegenskaper och mer. -The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +Hela schemat är tillgängligt [här](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). -#### Indexing performance +#### Prestanda för indexering -There are three separate parts of the indexing process: +Det finns tre separata delar av indexeringsprocessen: -- Fetching events of interest from the provider -- Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) -- Writing the resulting data to the store +- Hämta intressanta händelser från leverantören +- Bearbeta händelser i rätt ordning med lämpliga hanterare (detta kan innebära att kedjan anropas för status och att data hämtas från lagret) +- Skriva den resulterande datan till lagret -These stages are pipelined (i.e. they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +Dessa stadier är pipelinerade (det vill säga de kan utföras parallellt), men de är beroende av varandra. När subgrafer är långsamma att indexera beror den underliggande orsaken på den specifika subgrafen.
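As a concrete illustration of the indexing status API described above, a request along these lines can be sent to port 8030. This is a sketch; the exact field selection should be checked against the linked schema for the Graph Node version in use.

```sh
# Sketch: ask the indexing status API about sync progress and fatal errors.
curl -s http://localhost:8030/graphql \
  -H 'content-type: application/json' \
  -d '{"query": "{ indexingStatuses { subgraph synced health fatalError { message } chains { network chainHeadBlock { number } latestBlock { number } } } }"}'
```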
-Common causes of indexing slowness: +Vanliga orsaker till indexeringslångsamhet: -- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) -- Making large numbers of `eth_calls` as part of handlers -- A large amount of store interaction during execution -- A large amount of data to save to the store -- A large number of events to process -- Slow database connection time, for crowded nodes -- The provider itself falling behind the chain head -- Slowness in fetching new receipts at the chain head from the provider +- Tidsåtgång för att hitta relevanta händelser från kedjan (särskilt anropshanterare kan vara långsamma, eftersom de förlitar sig på `trace_filter`) +- Göra ett stort antal `eth_calls` som en del av handläggare +- En stor mängd butiksinteraktion under exekvering +- En stor mängd data att spara i butiken +- Ett stort antal evenemang att bearbeta +- Långsam databasanslutningstid, för överbelastade noder +- Leverantören själv faller bakom kedjehuvudet +- Långsamhet vid hämtning av nya kvitton från leverantören vid kedjehuvudet -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgrafindexeringsmetriker kan hjälpa till att diagnostisera grunden till indexeringens långsamhet. I vissa fall ligger problemet med subgrafgenen själv, men i andra fall kan förbättrade nätverksleverantörer, minskad databaskonflikt och andra konfigurationsförbättringar markant förbättra indexeringens prestanda. -#### Failed subgraphs +#### Undergrafer som misslyckats -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +Under indexering kan subgrafer misslyckas om de stöter på data som är oväntad, om någon komponent inte fungerar som förväntat eller om det finns något fel i händelsehanterare eller konfiguration. Det finns två allmänna typer av misslyckande: -- Deterministic failures: these are failures which will not be resolved with retries -- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. +- Deterministiska fel: detta är fel som inte kommer att lösas med retries +- Icke-deterministiska fel: dessa kan bero på problem med leverantören eller något oväntat Graph Node-fel. När ett icke-deterministiskt fel inträffar kommer Graph Node att försöka igen med de felande hanterarna och backa över tid. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). However in others, a change in the subgraph code is required. +I vissa fall kan ett misslyckande vara lösbart av indexören (till exempel om felet beror på att det inte finns rätt typ av leverantör, kommer att tillåta indexering att fortsätta om den nödvändiga leverantören läggs till). Men i andra fall krävs en ändring i subgrafkoden. 
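When diagnosing slow or failing subgraphs, turning up Graph Node's logging (see the logging section above) is usually the first step. The snippet below is a sketch with illustrative values only, using the environment variables named in this document.

```sh
# Sketch: more verbose logging while debugging a slow or failing subgraph.
# Values are illustrative; GRAPH_LOG_QUERY_TIMING=gql in particular is very verbose.
export GRAPH_LOG=debug
export GRAPH_LOG_QUERY_TIMING=gql
```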
> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Deterministiska misslyckanden betraktas som "slutliga", med en Proof of Indexing genererad för det misslyckade blocket, medan icke-deterministiska misslyckanden inte är det, eftersom subgrafen kanske lyckas "avmisslyckas" och fortsätta indexeringen. I vissa fall är den icke-deterministiska etiketten felaktig, och subgrafen kommer aldrig att övervinna felet; sådana misslyckanden bör rapporteras som problem i Graf Node-repositoriet. -#### Block and call cache +#### Block- och anropscache -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graf Node cachar viss data i lagret för att slippa hämta om den från leverantören. Block cachas, liksom resultaten av `eth_calls` (de senare cachas från och med ett specifikt block). Denna cachning kan dramatiskt öka indexeringshastigheten under "omsynkronisering" av en något ändrad subgraf. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +Men i vissa fall, om en Ethereum-nod har tillhandahållit felaktig data under en period, kan det ta sig in i cachen, vilket leder till felaktig data eller misslyckade subgrafer. I det här fallet kan indexerare använda `graphman` för att rensa den förgiftade cachen och sedan spola tillbaka de påverkade subgraferna, som sedan hämtar färsk data från den (förhoppningsvis) friska leverantören. -If a block cache inconsistency is suspected, such as a tx receipt missing event: +Om en blockcache-inkonsekvens misstänks, till exempel att ett transaktionskvitto saknar en händelse: -1. `graphman chain list` to find the chain name. -2. `graphman chain check-blocks <chain-name> by-number <block-number>` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. - 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate <chain-name>`. - 2. If the block matches the provider, then the issue can be debugged directly against the provider. +1. `graphman chain list` för att hitta kedjans namn. +2. `graphman chain check-blocks <chain-name> by-number <block-number>` kontrollerar om det cachade blocket matchar leverantören, och tar bort blocket från cacheminnet om det inte gör det. + 1. Om det finns en skillnad kan det vara säkrare att trunkera hela cacheminnet med `graphman chain truncate <chain-name>`. + 2. Om blocket matchar leverantören kan problemet felsökas direkt mot leverantören. -#### Querying issues and errors +#### Frågeproblem och fel -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint.
If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +När en subgraf har indexeras kan indexörer förvänta sig att servera frågor via subgrafens dedikerade frågendpunkt. Om indexören hoppas på att betjäna en betydande mängd frågor rekommenderas en dedikerad frågenod, och vid mycket höga frågevolymer kan indexörer vilja konfigurera replikskivor så att frågor inte påverkar indexeringsprocessen. -However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. +Men även med en dedikerad frågenod och repliker kan vissa frågor ta lång tid att utföra, och i vissa fall öka minnesanvändningen och negativt påverka frågetiden för andra användare. -There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. +Det finns inte en "silverkula", men en rad verktyg för att förebygga, diagnostisera och hantera långsamma frågor. -##### Query caching +##### Fråge cachning -Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graf Node cachar GraphQL-frågor som standard, vilket kan minska belastningen på databasen avsevärt. Detta kan konfigureras ytterligare med inställningarna `GRAPH_QUERY_CACHE_BLOCKS` och `GRAPH_QUERY_CACHE_MAX_MEM` - läs mer [här](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). -##### Analysing queries +##### Analyserar frågor -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Problematiska frågor dyker oftast upp på ett av två sätt. I vissa fall rapporterar användare själva att en viss fråga är långsam. I det fallet är utmaningen att diagnostisera orsaken till långsamheten - om det är ett generellt problem eller specifikt för den subgraf eller fråga. Och naturligtvis att lösa det om det är möjligt. -In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. +I andra fall kan utlösaren vara hög minnesanvändning på en frågenod, i vilket fall utmaningen först är att identifiera frågan som orsakar problemet. -Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. +Indexörer kan använda [qlog](https://github.com/graphprotocol/qlog/) för att bearbeta och sammanfatta Graph Nodes frågeloggar. `GRAPH_LOG_QUERY_TIMING` kan också aktiveras för att hjälpa till att identifiera och felsöka långsamma frågor. -Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. 
However this often doesn't resolve the root cause of the issue. +Med en långsam fråga har indexörer några alternativ. Självklart kan de ändra sin kostnadsmodell för att kraftigt öka kostnaden för att skicka den problematiska frågan. Detta kan resultera i att frekvensen av den frågan minskar. Men det löser ofta inte grunden till problemet. -##### Account-like optimisation +##### Kontoliknande optimering -Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) +Databastabeller som lagrar enheter verkar generellt komma i två varianter: 'transaktionsliknande', där enheter, när de väl är skapade, aldrig uppdateras, dvs. de lagrar något liknande en lista över finansiella transaktioner, och 'konto-liknande', där enheter uppdateras mycket ofta, dvs. de lagrar något som finansiella konton som ändras varje gång en transaktion registreras. Konto-liknande tabeller karakteriseras av att de innehåller ett stort antal enhetsversioner, men relativt få distinkta enheter. Ofta är antalet distinkta enheter i sådana tabeller 1% av det totala antalet rader (enhetsversioner). -For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. +För konto-liknande tabeller kan `graph-node` generera frågor som utnyttjar detaljer om hur Postgres slutligen lagrar data med en så hög förändringsfrekvens, nämligen att alla versioner för de senaste blocken finns i en liten del av den totala lagringen för en sådan tabell. -The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. +Kommandot `graphman stats show` visar, för varje enhetstyp/tabell i en deployment, hur många distinkta enheter och hur många enhetsversioner varje tabell innehåller. Den datan är baserad på Postgres-interna uppskattningar och är därför nödvändigtvis oprecis och kan vara fel med en storleksordning. Ett `-1` i kolumnen `entities` innebär att Postgres tror att alla rader innehåller en distinkt enhet. -In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show <table>` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. +I allmänhet är tabeller där antalet distinkta enheter är mindre än 1% av det totala antalet rader/enhetsversioner bra kandidater för konto-liknande optimering. När utdata från `graphman stats show` indikerar att en tabell kan dra nytta av denna optimering, kommer körning av `graphman stats show <table>` att utföra en full räkning av tabellen - det kan vara långsamt, men ger en precis mätning av förhållandet mellan distinkta enheter och totala enhetsversioner. -Once a table has been determined to be account-like, running `graphman stats account-like <nsp>.<table>` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear <nsp>.<table>` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity` in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. +När en tabell har fastställts som konto-liknande, kommer körning av `graphman stats account-like <nsp>.<table>` att aktivera konto-liknande optimeringen för frågor mot den tabellen. Optimeringen kan stängas av igen med `graphman stats account-like --clear <nsp>.<table>
    ` Det tar upp till 5 minuter för frågenoder att märka att optimeringen har aktiverats eller stängts av. Efter att ha aktiverat optimeringen är det nödvändigt att verifiera att ändringen faktiskt inte gör att frågor blir långsammare för den tabellen. Om du har konfigurerat Grafana för att övervaka Postgres, skulle långsamma frågor dyka upp i `pg_stat_activity` i stora mängder, ta flera sekunder. I det fallet måste optimeringen stängas av igen. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +För subgrafer som liknar Uniswap är `pair` och `token` tabeller primära kandidater för denna optimering och kan ha en dramatisk effekt på databasbelastningen. -#### Removing subgraphs +#### Ta bort undergrafer -> This is new functionality, which will be available in Graph Node 0.29.x +> Detta är ny funktionalitet, som kommer att vara tillgänglig i Graf Node 0.29.x -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +Vid någon tidpunkt kan en indexer vilja ta bort en given subgraf. Detta kan enkelt göras via `graphman drop`, som raderar en distribution och all dess indexerade data. Distributionen kan specificeras antingen som ett subgrafnamn, en IPFS-hash `Qm..`, Eller databasens namnrymd `sgdNNN`. Ytterligare dokumentation finns tillgänglig [här](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). diff --git a/website/pages/sv/publishing/publishing-a-subgraph.mdx b/website/pages/sv/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..82beda3c26b3 100644 --- a/website/pages/sv/publishing/publishing-a-subgraph.mdx +++ b/website/pages/sv/publishing/publishing-a-subgraph.mdx @@ -1,33 +1,33 @@ --- -title: Publishing a Subgraph to the Decentralized Network +title: Publicera en Subgraph på Det Decentraliserade Nätverket --- -Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. +När din subgraph har [implementerats i Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), du har testat den och är redo att sätta den i produktion, kan du sedan publicera den på det decentraliserade nätverket. -Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. +Genom att publicera en subgraph på det decentraliserade nätverket blir den tillgänglig för [Kurators](/network/curating) att börja kurera och [Indexeres](/network/indexing) att börja indexera den. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + -You can find the list of the supported networks [Here](/developing/supported-networks). +Du hittar listan över de nätverk som stöds [Här](/developing/supported-networks). -## Publishing a subgraph +## Publicera en subgraph -Subgraphs can be published to the decentralized network directly from the Subgraph Studio dashboard by clicking on the **Publish** button. 
Once a subgraph is published, it will be available to view in the [Graph Explorer](https://thegraph.com/explorer/). +Subgrafer kan publiceras på det decentraliserade nätverket direkt från Subgraph Studio-panelen genom att klicka på **Publicera**-knappen. När en subgraph är publicerad blir den tillgänglig att visa i [Graph Explorer](https://thegraph.com/explorer/). -- Subgraphs can be published to Goerli, Arbitrum goerli, Arbitrum One, or Ethereum mainnet. +- Subgrafer kan publiceras på Goerli, Arbitrum goerli, Arbitrum One eller Ethereum-mainnet. -- Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). +- Oavsett vilket nätverk subgraphen har publicerats på kan den indexera data på något av de [stödda nätverken](/developing/supported-networks). -- When publishing a new version for an existing subgraph the same rules apply as above. +- När du publicerar en ny version för en befintlig subgraph gäller samma regler som ovan. -## Curating your subgraph +## Kurera din subgraph -> It is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. +> Det rekommenderas att du kurerar din egen subgraph med 10 000 GRT för att säkerställa att den indexeras och blir tillgänglig för frågor så snart som möjligt. -Subgraph Studio enables you to be the first to curate your subgraph by adding GRT to your subgraph's curation pool in the same transaction. When publishing your subgraph, make sure to check the button that says, "Be the first to signal on this subgraph." +Subgraph Studio möjliggör för dig att vara den första att kurera din subgraph genom att lägga till GRT i subgraphens kurationspool i samma transaktion. När du publicerar din subgraph, se till att markera knappen som säger "Var den första att signalera på denna subgraph." -![Curation Pool](/img/curate-own-subgraph-tx.png) +![Kurationspool](/img/curate-own-subgraph-tx.png) -## Updating metadata for a published subgraph +## Uppdatera metadata för en publicerad subgraph -Once your subgraph has been published to the decentralized network, you can modify the metadata at any time by making the update in the Subgraph Studio dashboard of the subgraph. After saving the changes and publishing your updates to the network, they will be reflected in The Graph Explorer. This won’t create a new version, as your deployment hasn’t changed. +När din subgraph har publicerats på det decentraliserade nätverket kan du när som helst ändra metadata genom att göra uppdateringen i Subgraph Studio-panelen för subgraphen. Efter att du har sparat ändringarna och publicerat dina uppdateringar på nätverket kommer de att återspeglas i The Graph Explorer. Detta kommer inte att skapa en ny version, eftersom din distribution inte har ändrats. diff --git a/website/pages/sv/querying/distributed-systems.mdx b/website/pages/sv/querying/distributed-systems.mdx index 85337206bfd3..365340f65a1b 100644 --- a/website/pages/sv/querying/distributed-systems.mdx +++ b/website/pages/sv/querying/distributed-systems.mdx @@ -1,50 +1,50 @@ --- -title: Distributed Systems +title: Distribuerade System --- -The Graph is a protocol implemented as a distributed system. +The Graph är ett protokoll implementerat som ett distribuerat system. -Connections fail. Requests arrive out of order. Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests.
These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. +Anslutningar misslyckas. Förfrågningar anländer i fel ordning. Olika datorer med osynkroniserade klockor och tillstånd bearbetar relaterade förfrågningar. Servrar startar om. Omorganiseringar inträffar mellan förfrågningar. Dessa problem är inneboende för alla distribuerade system men förvärras i system som opererar globalt. -Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. +Betrakta detta exempel på vad som kan inträffa om en klient pollar en Indexer efter den senaste datan under en omorganisering. -1. Indexer ingests block 8 -2. Request served to the client for block 8 -3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +1. Indexer bearbetar block 8 +2. Förfrågan serveras till klienten för block 8 +3. Indexer bearbetar block 9 +4. Indexer bearbetar block 10A +5. Förfrågan serveras till klienten för block 10A +6. Indexer upptäcker en omorganisering till 10B och rullar tillbaka 10A +7. Förfrågan serveras till klienten för block 9 +8. Indexer bearbetar block 10B +9. Indexer bearbetar block 11 +10. Förfrågan serveras till klienten för block 11 -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. Along the way, the Indexer serves requests using the latest state it knows about at that time. +Ur Indexerns synvinkel fortskrider saker logiskt framåt. Tiden går framåt, även om vi var tvungna att rulla tillbaka ett uncle-block och spela fram blocket under konsensus ovanpå det. Under vägen serverar Indexern förfrågningar med den senaste informationen den känner till vid den tiden. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +Ur klientens synvinkel verkar dock saker kaotiska. Klienten observerar att svaren var för block 8, 10, 9 och 11 i den ordningen. Vi kallar detta "block wobble"-problemet. När en klient upplever block wobble kan data verka motsäga sig över tiden. Situationen förvärras när vi tänker på att Indexers inte alla bearbetar de senaste blocken samtidigt, och dina förfrågningar kan routas till flera Indexers. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +Det är klientens och serverns ansvar att samarbeta för att ge användaren konsekvent data. Olika metoder måste användas beroende på önskad konsekvens, eftersom det inte finns ett rätt program för varje problem. -Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases.
The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Att resonera kring följderna av distribuerade system är svårt, men lösningen behöver inte vara det! Vi har etablerat API:er och mönster för att hjälpa dig hantera några vanliga användningsfall. Följande exempel illustrerar dessa mönster, men utesluter fortfarande detaljer som krävs av produktionskod (som felhantering och avbrytande) för att inte försvåra huvudidéerna. -## Polling for updated data +## Polla efter uppdaterad data -The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. +The Graph tillhandahåller API:et `block: { number_gte: $minBlock }`, vilket säkerställer att svaret gäller ett enskilt block som är lika med eller högre än `$minBlock`. Om förfrågan görs till en instans av `graph-node` och det minsta blocket ännu inte har synkroniserats, kommer `graph-node` att returnera ett fel. Om `graph-node` har synkroniserat det minsta blocket kommer det att köra svaret för det senaste blocket. Om förfrågan görs till en Edge & Node Gateway kommer Gatewayen att filtrera bort Indexers som ännu inte har synkroniserat det minsta blocket och göra förfrågan för det senaste blocket som Indexern har synkroniserat. -We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: +Vi kan använda `number_gte` för att säkerställa att tiden aldrig går bakåt när vi pollar efter data i en loop. Här är ett exempel: ```javascript -/// Updates the protocol.paused variable to the latest -/// known value in a loop by fetching it using The Graph. +/// Uppdaterar variabeln protocol.paused till det senaste +/// kända värdet i en loop genom att hämta det med The Graph. async function updateProtocolPaused() { - // It's ok to start with minBlock at 0. The query will be served - // using the latest block available. Setting minBlock to 0 is the - // same as leaving out that argument. + // Det är okej att börja med minBlock på 0. Förfrågan kommer att serveras + // med det senaste tillgängliga blocket. Att sätta minBlock till 0 är detsamma som + // att utelämna det argumentet. let minBlock = 0 for (;;) { - // Schedule a promise that will be ready once - // the next Ethereum block will likely be available. + // Schemalägg ett löfte som kommer vara redo så snart + // nästa Ethereum-block sannolikt kommer att vara tillgängligt. const nextBlock = new Promise((f) => { setTimeout(f, 14000) }) @@ -65,20 +65,20 @@ async function updateProtocolPaused() { const response = await graphql(query, variables) minBlock = response._meta.block.number - // TODO: Do something with the response data here instead of logging it. + // TODO: Gör något med svarsdatan här istället för att logga den.
console.log(response.protocol.paused) - // Sleep to wait for the next block + // Vänta på nästa block await nextBlock } } ``` -## Fetching a set of related items +## Hämta en uppsättning relaterade objekt -Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. +Ett annat användningsfall är att hämta en stor uppsättning eller mer generellt att hämta relaterade objekt över flera förfrågningar. Till skillnad från fallet med att hämta uppdaterade data (där önskad konsekvens var att gå framåt i tiden), är önskad konsekvens här att få data från en enda tidpunkt. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Här kommer vi att använda argumentet `block: { hash: $blockHash }` för att binda alla våra resultat till samma block. ```javascript /// Gets a list of domain names from a single block using pagination @@ -131,4 +131,4 @@ async function getDomainNames() { } ``` -Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. +Observera att i händelse av en omorganisation måste klienten försöka igen från den första begäran för att uppdatera blockhashen till ett block som inte är ett uncle-block. diff --git a/website/pages/sv/querying/graphql-api.mdx b/website/pages/sv/querying/graphql-api.mdx index 89cda460d58f..3823820de6eb 100644 --- a/website/pages/sv/querying/graphql-api.mdx +++ b/website/pages/sv/querying/graphql-api.mdx @@ -2,15 +2,15 @@ title: GraphQL API --- -This guide explains the GraphQL Query API that is used for the Graph Protocol. +Denna guide förklarar GraphQL Query API som används för Graph-protokollet. -## Queries +## Förfrågningar -In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph. +I ditt subgraf-schema definierar du typer som kallas `Entiteter`. För varje typ av `Entitet` kommer ett `entitet`- och `entiteter`-fält att genereras på toppnivån av `Query`-typen. Observera att `query` inte behöver inkluderas högst upp i `graphql`-förfrågan när du använder The Graph. -### Examples +### Exempel -Query for a single `Token` entity defined in your schema: +Förfrågan efter en enda `Token`-entitet som är definierad i ditt schema: ```graphql { @@ -21,9 +21,9 @@ Query for a single `Token` entity defined in your schema: } ``` -> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. +> **Note:** Vid sökning efter en enskild enhet krävs fältet `id`, och det måste vara en sträng. -Query all `Token` entities: +Fråga alla `Token`-enheter: ```graphql { @@ -34,11 +34,11 @@ Query all `Token` entities: } ``` -### Sorting +### Sortering -When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. +När du frågar efter en samling kan parametern `orderBy` användas för att sortera efter ett specifikt attribut. Dessutom kan `orderDirection` användas för att ange sorteringsriktningen, `asc` för stigande eller `desc` för fallande.
-#### Example +#### Exempel ```graphql { @@ -49,11 +49,11 @@ When querying a collection, the `orderBy` parameter may be used to sort by a spe } ``` -#### Example for nested entity sorting +#### Exempel på sortering av nästlade entiteter -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. +Från och med Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) kan entiteter sorteras på basis av nästlade entiteter. -In the following example, we sort the tokens by the name of their owner: +I följande exempel sorterar vi tokens efter namnet på deras ägare: ```graphql { @@ -66,19 +66,19 @@ In the following example, we sort the tokens by the name of their owner: } ``` -> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. +> För närvarande kan du sortera efter `String`- eller `ID`-typer på en djup nivå i fälten `@entity` och `@derivedFrom`. Tyvärr stöds ännu inte [ sortering efter gränssnitt på en nivå djupa enheter](https://github.com/graphprotocol/graph-node/pull/4058) sortering efter fält som är matriser och kapslade enheter. -### Pagination +### Paginering -When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. +När du frågar efter en samling kan parametern `first` användas för att paginera från början av samlingen. Det är värt att notera att standardsorteringsordningen är efter ID i stigande alfanumerisk ordning, inte efter skapelsetid. -Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +Vidare kan parametern `skip` användas för att hoppa över enheter och paginera. t.ex. `first:100` visar de första 100 enheterna och `first:100, skip:100` visar de nästa 100 enheterna. -Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. +Frågor bör undvika att använda mycket stora `skip`-värden eftersom de i allmänhet fungerar dåligt. För att hämta ett stort antal objekt är det mycket bättre att bläddra igenom entiteter baserat på ett attribut som visas i det sista exemplet. -#### Example using `first` +#### Exempel med `first` -Query the first 10 tokens: +Fråga efter de första 10 tokens: ```graphql { @@ -89,11 +89,11 @@ Query the first 10 tokens: } ``` -To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. +För att söka efter grupper av enheter i mitten av en samling kan parametern `skip` användas tillsammans med parametern `first` för att hoppa över ett angivet antal enheter med start i början av samlingen. 
-#### Example using `first` and `skip` +#### Exempel med `first` och `skip` -Query 10 `Token` entities, offset by 10 places from the beginning of the collection: +Fråga 10 `Token`-enheter, förskjutna med 10 platser från början av samlingen: ```graphql { @@ -104,9 +104,9 @@ Query 10 `Token` entities, offset by 10 places from the beginning of the collect } ``` -#### Example using `first` and `id_ge` +#### Exempel med `first` och `id_ge` -If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. For example, a client would retrieve a large number of tokens using this query: +Om en klient behöver hämta ett stort antal entiteter är det mycket mer effektivt att basera frågor på ett attribut och filtrera efter det attributet. En klient kan till exempel hämta ett stort antal tokens med hjälp av den här frågan: ```graphql query manyTokens($lastID: String) { @@ -117,15 +117,15 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +Första gången skickas frågan med `lastID = ""`, och för efterföljande frågor sätts `lastID` till `id`-attributet för den sista entiteten i den föregående frågan. Detta tillvägagångssätt kommer att fungera betydligt bättre än att använda ökande `skip`-värden. -### Filtering +### Filtrering -You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. +Du kan använda parametern `where` i dina frågor för att filtrera efter olika egenskaper. Du kan filtrera på flera värden inom parametern `where`. -#### Example using `where` +#### Exempel med `where` -Query challenges with `failed` outcome: +Fråga utmaningar med `failed` resultat: ```graphql { @@ -139,9 +139,9 @@ Query challenges with `failed` outcome: } ``` -You can use suffixes like `_gt`, `_lte` for value comparison: +Du kan använda suffix som `_gt`, `_lte` för värdejämförelse: -#### Example for range filtering +#### Exempel på filtrering av intervall ```graphql { @@ -153,11 +153,11 @@ You can use suffixes like `_gt`, `_lte` for value comparison: } ``` -#### Example for block filtering +#### Exempel på blockfiltrering -You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. +Du kan också filtrera entiteter efter `_change_block(number_gte: Int)` - detta filtrerar entiteter som uppdaterades i eller efter det angivna blocket. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +Detta kan vara användbart om du bara vill hämta enheter som har ändrats, till exempel sedan den senaste gången du pollade. Eller alternativt kan det vara användbart för att undersöka eller felsöka hur enheter förändras i din undergraf (om det kombineras med ett blockfilter kan du isolera endast enheter som ändrades i ett visst block). 
```graphql { @@ -169,11 +169,11 @@ This can be useful if you are looking to fetch only entities which have changed, } ``` -#### Example for nested entity filtering +#### Exempel på filtrering av inbäddade entiteter -Filtering on the basis of nested entities is possible in the fields with the `_` suffix. +Filtrering baserat på inbäddade entiteter är möjlig i fälten med suffixet `_`. -This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. +Detta kan vara användbart om du endast vill hämta entiteter vars entiteter på barnnivå uppfyller de angivna villkoren. ```graphql { @@ -187,13 +187,13 @@ This can be useful if you are looking to fetch only entities whose child-level e } ``` -#### Logical operators +#### Logiska operatorer -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. +Från och med Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) kan du gruppera flera parametrar i samma `where`-argument med hjälp av operatorerna `and` eller `or` för att filtrera resultat baserat på mer än ett kriterium. -##### `AND` Operator +##### `AND` Operator -In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. +I följande exempel filtrerar vi efter utmaningar med `outcome` `succeeded` och `number` större än eller lika med `100`. ```graphql { @@ -207,7 +207,7 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> **Syntactic sugar:** Du kan förenkla ovanstående fråga genom att ta bort `and`-operatorn och istället skicka ett underuttryck separerat med kommatecken. > > ```graphql > { @@ -221,9 +221,9 @@ In the following example, we are filtering for challenges with `outcome` `succee > } > ``` -##### `OR` Operator +##### `OR` Operator -In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +I följande exempel filtrerar vi efter utmaningar med `outcome` `succeeded` eller `number` större än eller lika med `100`. ```graphql { @@ -237,11 +237,11 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. +> **Note**: När man konstruerar sökfrågor är det viktigt att ta hänsyn till hur användningen av operatorn `or` påverkar prestandan. Även om `or` kan vara ett användbart verktyg för att bredda sökresultaten, kan det också ha betydande kostnader. Ett av de största problemen med `or` är att det kan göra sökningar långsammare.
Detta beror på att `or` kräver att databasen söker igenom flera index, vilket kan vara en tidskrävande process. För att undvika dessa problem rekommenderas att utvecklare använder `and`-operatorer istället för `or` när det är möjligt. Detta möjliggör mer exakt filtrering och kan leda till snabbare och mer exakta frågor. -#### All Filters +#### Alla filter -Full list of parameter suffixes: +Fullständig lista över parametersuffix: ``` _ @@ -266,23 +266,23 @@ _not_ends_with _not_ends_with_nocase ``` -> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. +> Observera att vissa suffix endast stöds för specifika typer. Till exempel stöder `Boolean` endast `_not`, `_in` och `_not_in`, men `_` är endast tillgängligt för objekt- och gränssnittstyper. -In addition, the following global filters are available as part of `where` argument: +Dessutom är följande globala filter tillgängliga som en del av argumentet `where`: ```gr _change_block(number_gte: Int) ``` -### Time-travel queries +### Tidsreseförfrågningar -You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. +Du kan förfråga tillståndet för dina enheter inte bara för det senaste blocket, som är standard, utan också för ett godtyckligt block i det förflutna. Blocket vid vilket en förfrågan ska ske kan specificeras antingen med dess blocknummer eller dess blockhash genom att inkludera ett `block`-argument i toppnivåfälten för förfrågningar. -The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +Resultatet av en sådan förfrågan kommer inte att ändras över tid, det vill säga, att förfråga vid ett visst tidigare block kommer att returnera samma resultat oavsett när det utförs, med undantag för att om du förfrågar vid ett block mycket nära huvudet av kedjan, kan resultatet ändras om det visar sig att blocket inte är på huvudkedjan och kedjan omorganiseras. När ett block kan anses vara slutgiltigt kommer resultatet av förfrågan inte att ändras. -Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +Observera att den nuvarande implementationen fortfarande är föremål för vissa begränsningar som kan bryta mot dessa garantier.
Implementeringen kan inte alltid avgöra om en given blockhash inte alls är på huvudkedjan eller om resultatet av en förfrågan med blockhash för ett block som ännu inte kan anses vara slutgiltigt kan påverkas av en samtidig omorganisering av block. De påverkar inte resultaten av förfrågningar med blockhash när blocket är slutgiltigt och känt att vara på huvudkedjan. [Detta problem](https://github.com/graphprotocol/graph-node/issues/1405) förklarar dessa begränsningar i detalj. -#### Example +#### Exempel ```graphql { @@ -296,9 +296,9 @@ Note that the current implementation is still subject to certain limitations tha } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. +Denna fråga kommer att returnera `Challenge`-enheter och deras tillhörande `Application`-enheter, så som de existerade direkt efter bearbetning av block nummer 8 000 000. -#### Example +#### Exempel ```graphql { @@ -312,26 +312,26 @@ This query will return `Challenge` entities, and their associated `Application` } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. +Denna förfrågan kommer att returnera `Challenge`-entiteter och deras associerade `Application`-entiteter så som de fanns direkt efter bearbetning av blocket med den angivna hashen. -### Fulltext Search Queries +### Fulltextsökförfrågningar -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Fält för fulltextsökning ger ett uttrycksfullt textsöknings-API som kan läggas till i undergrafens schema och anpassas. Hänvisa till [Definiera fält för fulltextsökning](/developing/creating-a-subgraph#defining-fulltext-search-fields) för att lägga till fulltextsökning i din undergraf. -Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. +Fulltextsökförfrågningar har ett obligatoriskt fält, `text`, för att tillhandahålla söktermer. Flera specialiserade fulltextoperatorer finns tillgängliga att användas i detta `text`-sökfält. -Fulltext search operators: +Fulltextsökoperatorer: -| Symbol | Operator | Description | +| Symbol | Operatör | Beskrivning | | --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| `&` | `Och` | För att kombinera flera söktermer till ett filter för entiteter som inkluderar alla de angivna termerna | +| | | `Eller` | Förfrågningar med flera söktermer separerade med or-operatorn kommer att returnera alla entiteter med en matchning från någon av de angivna termerna | +| `<->` | `Följs av` | Ange avståndet mellan två ord. | +| `:*` | `Prefix` | Använd prefixsöktermen för att hitta ord vars prefix matchar (2 tecken krävs.)
| -#### Examples +#### Exempel -Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. +Med hjälp av operatorn `or` filtreras denna fråga till bloggenheter med variationer av antingen "anarchism" eller "crumpet" i sina fulltextfält. ```graphql { @@ -344,7 +344,7 @@ Using the `or` operator, this query will filter to blog entities with variations } ``` -The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" +Operatorn `follow by` anger ord som står på ett visst avstånd från varandra i fulltextdokumenten. Följande fråga kommer att returnera alla bloggar med variationer av "decentralisera" följt av "filosofi" ```graphql { @@ -357,7 +357,7 @@ The `follow by` operator specifies a words a specific distance apart in the full } ``` -Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". +Kombinera fulltextoperatorer för att skapa mer komplexa filter. Med en pretext-sökoperatör kombinerad med en follow by kommer detta exempel att matcha alla bloggenheter med ord som börjar med "lou" följt av "music". ```graphql { @@ -370,27 +370,27 @@ Combine fulltext operators to make more complex filters. With a pretext search o } ``` -### Validation +### Validering -Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. +Graph Node implementerar [specifikationsbaserad](https://spec.graphql.org/October2021/#sec-Validation) validering av de GraphQL-frågor den tar emot med hjälp av [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), som är baserad på [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Frågor som misslyckas med en valideringsregel får ett standardfel - besök [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) för mer information. ## Schema -The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +Schemat för din datakälla - det vill säga de entitetstyper, värden och relationer som är tillgängliga för frågor - definieras genom [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. +GraphQL-scheman definierar i allmänhet rottyper för `queries`, `subscriptions` och `mutations`. Grafen stöder endast `queries`. 
Rottypen `Query` för din subgraf genereras automatiskt från GraphQL-schemat som ingår i subgrafmanifestet. -> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. +> **Note:** Vårt API exponerar inte mutationer eftersom utvecklare förväntas utfärda transaktioner direkt mot den underliggande blockkedjan från sina applikationer. -### Entities +### Entiteter -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. +Alla GraphQL-typer med `@entity`-direktiv i ditt schema kommer att behandlas som entiteter och måste ha ett `ID`-fält. -> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. +> **Note:** För närvarande måste alla typer i ditt schema ha ett `@entity`-direktiv. I framtiden kommer vi att behandla typer utan ett `@entity`-direktiv som värdeobjekt, men detta stöds ännu inte. -### Subgraph Metadata +### Metadata för undergrafer -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. This can be queried as follows: +Alla subgrafer har ett autogenererat `_Meta_`-objekt, som ger tillgång till subgrafens metadata. Detta kan efterfrågas på följande sätt: ```graphQL { @@ -406,14 +406,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +Om ett block anges är metadata från det blocket, om inte används det senast indexerade blocket. Om det anges måste blocket vara efter undergrafens startblock och mindre än eller lika med det senast indexerade blocket. -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` är ett unikt ID som motsvarar IPFS CID för filen `subgraph.yaml`. -`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` ger information om det senaste blocket (med hänsyn till eventuella blockbegränsningar som skickas till `_meta`): -- hash: the hash of the block -- number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- hash: blockets hash +- nummer: blockets nummer +- timestamp: blockets timestamp, om tillgänglig (detta är för närvarande endast tillgängligt för undergrafer som indexerar EVM-nätverk) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` är en boolean som identifierar om undergrafen stötte på indexeringsfel vid något tidigare block diff --git a/website/pages/sv/querying/managing-api-keys.mdx b/website/pages/sv/querying/managing-api-keys.mdx index ee7c274bca10..1bd28a2d83e2 100644 --- a/website/pages/sv/querying/managing-api-keys.mdx +++ b/website/pages/sv/querying/managing-api-keys.mdx @@ -1,26 +1,26 @@ --- -title: Managing your API keys +title: Hantera dina API-nycklar --- -Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. 
This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. +Oavsett om du är en dapp-utvecklare eller en subgraf-utvecklare kommer du att behöva hantera dina API-nycklar. Detta är viktigt för att kunna fråga subgrafer, eftersom API-nycklar säkerställer att anslutningar mellan applikationstjänster är giltiga och auktoriserade. Detta inkluderar autentisering av slutanvändare och enheten som använder applikationen. -The Studio will list out existing API keys, which will give you the ability to manage or delete them. +Studion listar befintliga API-nycklar, vilket ger dig möjlighet att hantera eller ta bort dem. -1. The **Overview** section will allow you to: - - Edit your key name - - Regenerate API keys - - View the current usage of the API key with stats: - - Number of queries - - Amount of GRT spent -2. Under **Security**, you’ll be able to opt into security settings depending on the level of control you’d like to have over your API keys. In this section, you can: - - View and manage the domain names authorized to use your API key - - Assign subgraphs that can be queried with your API key -3. Under **Indexer Preference**, you’ll be able to set different preferences for Indexers who are indexing subgraphs that your API key is used for. You can assign up to 5 points for each of these: - - **Fastest Speed**: Time between the query and the response from an indexer. If you mark this as important we will optimize for fast indexers. - - **Lowest Price**: The amount paid per query. If you mark this as important we will optimize for the less expensive indexers. - - **Data Freshness**: How recent the latest block an indexer has processed for the subgraph you are querying. If you mark this as important we will optimize to find the indexers with the freshest data. - - **Economic Security**: The amount of GRT an indexer can lose if they respond incorrectly to your query. If you mark this as important we will optimize for indexers with a large stake. -4. Under **Budget**, you’ll be able to update the maximum price per query. Note that we have a dynamic setting for that that's based on a volume discounting algorithm. **We strongly recommend using the default settings unless you are experiencing a specific problem.** Otherwise, you can update it under "Set a custom maximum budget". On this page you can also view different KPIs (in GRT and USD): - - Average cost per query - - Failed queries over max price - - Most expensive query +1. **Översikts**-sektionen kommer att ge dig möjlighet att: + - Redigera nyckelns namn + - Återgenerera API-nycklar + - Visa aktuell användning av API-nyckeln med statistik: + - Antal förfrågningar + - Mängd GRT spenderad +2. Under **Säkerhet** kan du välja säkerhetsinställningar beroende på vilken nivå av kontroll du vill ha över dina API-nycklar. I den här sektionen kan du: + - Visa och hantera domännamn som har auktoriserats att använda din API-nyckel + - Koppla subgrafer som kan frågas med din API-nyckel +3. Under **Indexer Preferens** kan du ställa in olika preferenser för indexers som indexerar subgrafer som din API-nyckel används för. Du kan tilldela upp till 5 poäng för var och en av följande: + - **Snabbaste Hastighet**: Tid mellan förfrågan och svar från en indexer. Om du markerar detta som viktigt kommer vi att optimera för snabba indexers. + - **Lägsta Pris**: Beloppet som betalas per förfrågan.
Om du markerar detta som viktigt kommer vi att optimera för billigare indexers. + - **Dataaktualitet**: Hur nyligen det senaste blocket en indexer har bearbetat för den subgraf du frågar efter. Om du markerar detta som viktigt kommer vi att optimera för att hitta indexers med färskaste data. + - **Ekonomisk Säkerhet**: Mängden GRT en indexer kan förlora om de svarar felaktigt på din förfrågan. Om du markerar detta som viktigt kommer vi att optimera för indexers med en stor insats. +4. Under **Budget** kan du uppdatera det maximala priset per förfrågan. Notera att vi har en dynamisk inställning baserad på en volymrabattalgoritm. **Vi rekommenderar starkt att du använder standardinställningarna om du inte upplever ett specifikt problem.** Alternativt kan du uppdatera det under "Ange en anpassad maximal budget". På den här sidan kan du också se olika KPI:er (i GRT och USD): + - Genomsnittlig kostnad per förfrågan + - Misslyckade förfrågningar över maximalt pris + - Dyraste förfrågan diff --git a/website/pages/sv/querying/querying-best-practices.mdx b/website/pages/sv/querying/querying-best-practices.mdx index 98c0ffb72c61..5d5201189515 100644 --- a/website/pages/sv/querying/querying-best-practices.mdx +++ b/website/pages/sv/querying/querying-best-practices.mdx @@ -1,22 +1,22 @@ --- -title: Querying Best Practices +title: Bästa praxis för förfrågningar --- -The Graph provides a decentralized way to query data from blockchains. +The Graph tillhandahåller ett decentraliserat sätt att hämta data från blockkedjor. -The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. +The Graph-nätverkets data exponeras genom ett GraphQL API, vilket gör det enklare att fråga data med GraphQL-språket. -This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. +Den här sidan kommer att guida dig genom de grundläggande reglerna för GraphQL-språket och bästa praxis för GraphQL-frågor. --- -## Querying a GraphQL API +## Att fråga ett GraphQL API -### The anatomy of a GraphQL query +### Anatomin av en GraphQL-fråga -Unlike REST API, a GraphQL API is built upon a Schema that defines which queries can be performed. +Till skillnad från REST API bygger ett GraphQL API på ett schema som definierar vilka frågor som kan utföras. -For example, a query to get a token using the `token` query will look as follows: +Till exempel kommer en fråga för att hämta en token med hjälp av frågan `token` att se ut som följer: ```graphql query GetToken($id: ID!) { @@ -27,7 +27,7 @@ query GetToken($id: ID!) { } ``` -which will return the following predictable JSON response (_when passing the proper `$id` variable value_): +som kommer att returnera följande förutsägbara JSON-svar (_när du skickar rätt `$id` variabelvärde_): ```json { @@ -38,47 +38,47 @@ which will return the following predictable JSON response (_when passing the pro } ``` -GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). +GraphQL-frågor använder GraphQL-språket, som definieras i [en specifikation](https://spec.graphql.org/). -The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): +Ovanstående `GetToken`-fråga består av flera språkdelar (ersätts nedan med `[...]` platshållare): ```graphql query [operationName]([variableName]: [variableType]) { [queryName]([argumentName]: [variableName]) { - # "{ ...
}" express a Selection-Set, we are querying fields from `queryName`. + # "{ ... }" uttrycker en Selection-Set, vi frågar efter fält från `queryName`. [field] [field] } } ``` -While the list of syntactic do's and don'ts is long, here are the essential rules to keep in mind when it comes to writing GraphQL queries: +Listan över syntaktiska do's and don'ts är lång, men här är de viktigaste reglerna att tänka på när det gäller att skriva GraphQL-förfrågningar: -- Each `queryName` must only be used once per operation. -- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) -- Some `field`s or queries (like `tokens`) return complex types that require a selection of sub-field. Not providing a selection when expected (or providing one when not expected - for example, on `id`) will raise an error. To know a field type, please refer to [The Graph Explorer](/network/explorer). -- Any variable assigned to an argument must match its type. -- In a given list of variables, each of them must be unique. -- All defined variables must be used. +- Varje `queryName` får endast användas en gång per operation. +- Varje `field` får bara användas en gång i ett urval (vi kan inte fråga `id` två gånger under `token`) +- Vissa `field`s eller queries (som `tokens`) returnerar komplexa typer som kräver ett urval av sub-field. Att inte tillhandahålla ett urval när det förväntas (eller tillhandahålla ett när det inte förväntas - till exempel på `id`) kommer att leda till ett fel. För att veta vad en fälttyp är, se [The Graph Explorer](/network/explorer). +- Varje variabel som tilldelas ett argument måste matcha dess typ. +- I en given lista med variabler måste var och en av dem vara unik. +- Alla definierade variabler måste användas. -Failing to follow the above rules will end with an error from the Graph API. +Om du inte följer ovanstående regler kommer du att få ett felmeddelande från Graph API. -For a complete list of rules with code examples, please look at our GraphQL Validations guide. +För en fullständig lista över regler med kodexempel, se vår guide om GraphQL-valideringar. -### Sending a query to a GraphQL API +### Att skicka en fråga till ett GraphQL API -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL är ett språk och en uppsättning konventioner som transporteras över HTTP. -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +Det innebär att du kan ställa en fråga till ett GraphQL API med hjälp av standard `fetch` (nativt eller via `@whatwg-node/fetch` eller `isomorphic-fetch`). 
-However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +Men, som det anges i ["Frågehantering från en applikation"](/querying/querying-from-an-application), rekommenderar vi att du använder vår `graph-client` som stöder unika funktioner som: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Hantering av subgrafer över olika blockkedjor: Frågehantering från flera subgrafer i en enda fråga +- [Automatisk blockspårning](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Automatisk paginering](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Fullt typad resultat -Here's how to query The Graph with `graph-client`: +Så här ställer du en fråga till The Graph med `graph-client`: ```tsx import { execute } from '../.graphclient' @@ -95,24 +95,24 @@ const variables = { id: '1' } async function main() { const result = await execute(query, variables) - // `result` is fully typed! + // `result` är fullständigt typad! console.log(result) } main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +Fler GraphQL-klientalternativ behandlas i ["Querying from an Application"](/querying/querying-from-an-application). -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +Nu när vi har gått igenom de grundläggande reglerna för syntax för GraphQL-förfrågningar ska vi titta på bästa praxis för att skriva GraphQL-förfrågningar. --- -## Writing GraphQL queries +## Skriva GraphQL-frågor -### Always write static queries +### Skriv alltid statiska frågor -A common (bad) practice is to dynamically build query strings as follows: +En vanlig (dålig) praxis är att dynamiskt bygga upp frågesträngar enligt följande: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... 
``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +Medan det tidigare avsnittet genererar en giltig GraphQL-fråga har den **många nackdelar**: -- it makes it **harder to understand** the query as a whole -- developers are **responsible for safely sanitizing the string interpolation** -- not sending the values of the variables as part of the request parameters **prevent possible caching on server-side** -- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) +- det gör det **svårare att förstå** frågan som helhet +- utvecklare är **ansvariga för att säkert sanera stränginterpolationen** +- att inte skicka värdena av variablerna som en del av förfrågningsparametrarna **förhindrar möjlig cache på servern** +- det **hindrar verktyg från statisk analys av frågan** (exempel: Linter eller typgenereringsverktyg) -For this reason, it is recommended to always write queries as static strings: +Av dessa skäl rekommenderas det alltid att skriva frågor som statiska strängar: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +Detta medför **många fördelar**: -- **Easy to read and maintain** queries -- The GraphQL **server handles variables sanitization** -- **Variables can be cached** at server-level -- **Queries can be statically analyzed by tools** (more on this in the following sections) +- **Lättlästa och underhållna** frågor +- GraphQL **server hanterar sanitet av variabler** +- **Variabler kan cachas** på serversidan +- **Frågor kan statiskt analyseras av verktyg** (mer om detta i följande avsnitt) -**Note: How to include fields conditionally in static queries** +**Observera: Hur man inkluderar fält villkorligt i statiska frågor** -We might want to include the `owner` field only on a particular condition. +Ibland vill vi inkludera fältet `owner` endast under vissa villkor. -For this, we can leverage the `@include(if:...)` directive as follows: +För detta kan vi utnyttja direktivet `@include(if:...)` på följande sätt: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,42 +191,42 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +Observera: Det motsatta direktivet är `@skip(if: ...)`. -### Performance tips +### Prestandatips -**"Ask for what you want"** +**"Be om det du vill ha"** -GraphQL became famous for its "Ask for what you want" tagline. +GraphQL blev känd för sitt motto "Be om det du vill ha". -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +Av den anledningen finns det ingen möjlighet i GraphQL att få alla tillgängliga fält utan att behöva lista dem individuellt. -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +När du frågar GraphQL API:er, tänk alltid på att endast fråga efter de fält som faktiskt kommer att användas. -A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. 
+En vanlig orsak till överhämtning är samlingar av enheter. Som standard kommer frågor att hämta 100 enheter i en samling, vilket vanligtvis är mycket mer än vad som faktiskt kommer att användas, t.ex., för att visas för användaren. Därför bör frågor nästan alltid ange first explicit och se till att de bara hämtar så många enheter som de faktiskt behöver. Detta gäller inte bara för toppnivåsamlingar i en fråga, utan ännu mer för inbäddade samlingar av enheter. -For example, in the following query: +Till exempel, i följande fråga: ```graphql query listTokens { tokens { - # will fetch up to 100 tokens + # kommer att ge upp till 100 tokens id transactions { - # will fetch up to 100 transactions + # kommer att ge upp till 100 transaktioner id } } } ``` -The response could contain 100 transactions for each of the 100 tokens. +Svaret kan innehålla 100 transaktioner för varje av de 100 tokens. -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +Om applikationen bara behöver 10 transaktioner bör frågan explicit ange `first: 10` på transaktionsfältet. -**Combining multiple queries** +**Kombinera flera frågor** -Your application might require querying multiple types of data as follows: +Din applikation kan kräva att du ställer flera typer av datafrågor enligt följande: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +Medan denna implementation är helt giltig kräver den två rundturer med GraphQL API:n. -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +Lyckligtvis är det också giltigt att skicka flera frågor i samma GraphQL-begäran enligt följande: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +Detta tillvägagångssätt kommer att **förbättra den övergripande prestandan** genom att minska tiden som spenderas på nätverket (sparar en omväg till API:n) och kommer att ge en **mer koncis implementation**. -### Leverage GraphQL Fragments +### Dra nytta av GraphQL-fragment -A helpful feature to write GraphQL queries is GraphQL Fragment. +En användbar funktion för att skriva GraphQL-frågor är GraphQL-fragment. -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +Om vi tittar på följande fråga kommer du att märka att vissa fält upprepas över flera urvalssatser (`{ ... }`): ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +Sådana upprepade fält (`id`, `active`, `status`) medför många problem: -- harder to read for more extensive queries -- when using tools that generate TypeScript types based on queries (_more on that in the last section_), `newDelegate` and `oldDelegate` will result in two distinct inline interfaces. +- svårare att läsa för mer omfattande frågor +- när du använder verktyg som genererar TypeScript-typer baserat på frågor (_mer om det i den sista avsnittet_), kommer `newDelegate` och `oldDelegate` att resultera i två olika inline-gränssnitt. 
-A refactored version of the query would be the following: +En omstrukturerad version av frågan skulle vara följande: ```graphql query { @@ -325,8 +325,8 @@ query { } } -# we define a fragment (subtype) on Transcoder -# to factorize repeated fields in the query +# vi definierar ett fragment (subtyp) på Transcoder +# att faktorisera upprepade fält i frågan fragment DelegateItem on Transcoder { id active @@ -334,15 +334,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. +Att använda GraphQL `fragment` kommer att förbättra läsbarheten (särskilt i större skala) och leda till bättre generering av TypeScript-typer. -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +När du använder verktyget för typsgenerering kommer den ovanstående frågan att generera en korrekt typ av `DelegateItemFragment` (_se sista avsnittet_). -### GraphQL Fragment do's and don'ts +### Dos and Don'ts för GraphQL Fragment -**Fragment base must be a type** +**Fragmentbas måste vara en typ** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +Ett fragment kan inte baseras på en oanvändbar typ, kort sagt, **på en typ som inte har fält**: ```graphql fragment MyFragment on BigInt { @@ -350,20 +350,20 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt` är en **skalär** (inbyggd "vanlig" typ) som inte kan användas som grund för ett fragment. -**How to spread a Fragment** +**Hur man sprider ett fragment** -Fragments are defined on specific types and should be used accordingly in queries. +Fragment är definierade på specifika typer och bör användas i enlighet med det i frågor. -Example: +Exempel: ```graphql query { bondEvents { id newDelegate { - ...VoteItem # Error! `VoteItem` cannot be spread on `Transcoder` type + ...VoteItem # Fel! `VoteItem` kan inte spridas på `Transcoder` typ } oldDelegate { ...VoteItem @@ -377,20 +377,20 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` och `oldDelegate` är av typen `Transcoder`. -It is not possible to spread a fragment of type `Vote` here. +Det är inte möjligt att sprida ett fragment av typ `Vote` här. -**Define Fragment as an atomic business unit of data** +**Definiera fragment som en atomisk affärsenhet för data** -GraphQL Fragment must be defined based on their usage. +GraphQL Fragment måste definieras baserat på deras användning. -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +För de flesta användningsfall är det tillräckligt att definiera ett fragment per typ (i fallet med upprepade fält eller typgenerering). -Here is a rule of thumb for using Fragment: +Här är en tumregel för användning av fragment: -- when fields of the same type are repeated in a query, group them in a Fragment -- when similar but not the same fields are repeated, create multiple fragments, ex: +- när fält av samma typ upprepas i en fråga, gruppera dem i ett fragment +- när liknande men inte samma fält upprepas, skapa flera fragment, t.ex. 
```graphql # base fragment (mostly used in listing) @@ -399,7 +399,7 @@ fragment Voter on Vote { voter } -# extended fragment (when querying a detailed view of a vote) +# utökat fragment (vid förfrågan om en detaljerad vy av en omröstning) fragment VoteWithPoll on Vote { id voter @@ -413,51 +413,51 @@ fragment VoteWithPoll on Vote { --- -## The essential tools +## De väsentliga verktygen -### GraphQL web-based explorers +### Webbaserade GraphQL-upptäckare -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +Att iterera över frågor genom att köra dem i din applikation kan vara besvärligt. Av denna anledning bör du inte tveka att använda [The Graph Explorer](https://thegraph.com/explorer) för att testa dina frågor innan du lägger till dem i din applikation. The Graph Explorer kommer att ge dig en förkonfigurerad GraphQL-lekplats för att testa dina frågor. -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +Om du letar efter ett mer flexibelt sätt att felsöka/testa dina frågor finns andra liknande webbaserade verktyg tillgängliga, såsom [Altair](https://altair.sirmuel.design/) och [GraphiQL](https://graphiql-online.com/graphiql). ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +För att hålla dig uppdaterad med de tidigare nämnda bästa praxis och syntaktiska regler rekommenderas det starkt att använda följande arbetsflöde och IDE-verktyg. **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) hjälper dig att följa GraphQL bästa praxis utan ansträngning. -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +[Installera "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs)-konfigurationen kommer att tvinga fram väsentliga regler som: -- `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type? -- `@graphql-eslint/no-unused variables`: should a given variable stay unused? -- and more! +- `@graphql-eslint/fields-on-correct-type`: används ett fält på en korrekt typ? +- `@graphql-eslint/no-unused variables`: bör en given variabel förbli oanvänd? +- och mer! -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +Detta kommer att tillåta dig att **upptäcka fel utan ens att testa frågor** på lekplatsen eller köra dem i produktion! 
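To make the linting step above concrete, here is a rough sketch of how a project might wire the `operations-recommended` config into ESLint. The file name, schema path, and operations glob are placeholder assumptions, and exact option names can differ between `@graphql-eslint/eslint-plugin` versions, so treat this as a starting point rather than the canonical setup:

```js
// .eslintrc.js (sketch, assuming GraphQL operations live in .graphql files)
module.exports = {
  overrides: [
    {
      files: ['*.graphql'],
      // Parse .graphql documents with the graphql-eslint parser
      parser: '@graphql-eslint/eslint-plugin',
      plugins: ['@graphql-eslint'],
      // Enforce the rules mentioned above (fields-on-correct-type, no-unused variables, ...)
      extends: ['plugin:@graphql-eslint/operations-recommended'],
      parserOptions: {
        // Placeholder paths: point these at your subgraph schema and your query documents
        schema: './schema.graphql',
        operations: './src/**/*.graphql',
      },
    },
  ],
}
```

Running `eslint .` (or the editor integrations described in the next section) then flags violations of these rules before a query ever reaches the playground.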
-### IDE plugins +### IDE-tillägg -**VSCode and GraphQL** +**VSCode och GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +[GraphQL VSCode-tillägget](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) är ett utmärkt komplement till din utvecklingsarbetsflöde för att få: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets -- go to definition for fragments and input types +- syntaxmarkering +- autokompletteringsförslag +- validering mot schema +- snuttar +- gå till definition för fragment och inmatningstyper -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +Om du använder `graphql-eslint` är [ESLint VSCode-tillägget](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) ett måste för att visualisera fel och varningar korrekt infogade i din kod. -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij och GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQL-tillägget](https://plugins.jetbrains.com/plugin/8097-graphql/) kommer att förbättra din upplevelse av att arbeta med GraphQL genom att tillhandahålla: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets +- syntaxmarkering +- autokompletteringsförslag +- validering mot schema +- snuttar -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +Mer information om denna [WebStorm-artikel](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) som visar upp alla tilläggets huvudfunktioner. diff --git a/website/pages/sv/querying/querying-from-an-application.mdx b/website/pages/sv/querying/querying-from-an-application.mdx index 30b6c2264d64..b25e2b3868c0 100644 --- a/website/pages/sv/querying/querying-from-an-application.mdx +++ b/website/pages/sv/querying/querying-from-an-application.mdx @@ -1,43 +1,43 @@ --- -title: Querying from an Application +title: Att göra förfrågningar från en Applikation --- -Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: +När en undergraf har distribuerats till Subgraph Studio eller till The Graph Explorer, kommer du att få slutpunkten för din GraphQL API som bör se ut något liknande detta: -**Subgraph Studio (testing endpoint)** +**Subgraph Studio (testslutpunkt)** ```sh Queries (HTTP) https://api.studio.thegraph.com/query/// ``` -**Graph Explorer** +**Graf Utforskaren** ```sh Queries (HTTP) https://gateway.thegraph.com/api//subgraphs/id/ ``` -Using the GraphQL endpoint, you can use various GraphQL Client libraries to query the subgraph and populate your app with the data indexed by the subgraph. +Genom att använda GraphQL-slutpunkten kan du använda olika GraphQL-klientbibliotek för att göra förfrågningar till undergrafen och fylla din app med de data som indexeras av undergrafen. 
-Here are a couple of the more popular GraphQL clients in the ecosystem and how to use them: +Här är ett par av de populärare GraphQL-klienterna i ekosystemet och hur du använder dem: -## GraphQL clients +## GraphQL-klienter ### Graph client -The Graph is providing it own GraphQL client, `graph-client` that supports unique features such as: +The Graph tillhandahåller sin egen GraphQL-klient, `graph-client`, som stödjer unika funktioner som: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Hantering av undergrafer över blockkedjor: Förfrågan från flera undergrafer i en enda förfrågan +- [Automatisk Blockspårning](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Automatisk Paginering](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Fullständigt typat resultat -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +Även integrerad med populära GraphQL-klienter som Apollo och URQL och kompatibel med alla miljöer (React, Angular, Node.js, React Native), kommer användning av `graph-client` att ge dig den bästa upplevelsen för interaktion med The Graph. -Let's look at how to fetch data from a subgraph with `graphql-client`. +Låt oss titta på hur du hämtar data från en undergraf med `graphql-client`.
-To get started, make sure to install The Graph Client CLI in your project: +För att komma igång, se till att installera The Graph Client CLI i ditt projekt: ```sh yarn add -D @graphprotocol/client-cli @@ -45,17 +45,17 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +Definiera din förfrågan i en `.graphql`-fil (eller inline i din `.js` eller `.ts`-fil): ```graphql query ExampleQuery { - # this one is coming from compound-v2 + # den här kommer från compound-v2 markets(first: 7) { borrowRate cash collateralFactor } - # this one is coming from uniswap-v2 + # den här kommer från uniswap-v2 pair(id: "0x00004ee988665cdda9a1080d5792cecd16dc1220") { id token0 { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +Därefter, skapa en konfigurationsfil (kallad `.graphclientrc.yml`) och peka till dina GraphQL-slutpunkter som tillhandahålls av The Graph, till exempel: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +Kör följande The Graph Client CLI-kommando för att generera typade och klara att använda JavaScript-kod: ```sh graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +Slutligen, uppdatera din `.ts`-fil för att använda de genererade typade GraphQL-dokumenten: ```tsx import React, { useEffect } from 'react' @@ -134,33 +134,33 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ Viktig uppmärksamhet** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client` är perfekt integrerad med andra GraphQL-klienter som Apollo-klient, URQL eller React Query; du kommer att [finna exempel i det officiella arkivet](https://github.com/graphprotocol/graph-client/tree/main/examples). -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +Men om du väljer att gå med en annan klient, kom ihåg att **du inte kommer att kunna använda Cross-chain Subgraph Handling eller Automatic Pagination, som är kärnfunktionerna för att fråga The Graph**. ### Apollo client -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[Apollo client](https://www.apollographql.com/docs/) är den vanliga GraphQL-klienten i front-end-ekosystemet. -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +Tillgänglig för React, Angular, Vue, Ember, iOS och Android, bringar Apollo Client, även om den tyngsta klienten, många funktioner för att bygga avancerad UI ovanpå GraphQL: -- advanced error handling -- pagination -- data prefetching -- optimistic UI -- local state management +- avancerad felhantering +- paginering +- datavisning i förväg +- optimistiskt UI +- lokal statshantering -Let's look at how to fetch data from a subgraph with Apollo client in a web project. 
+Låt oss titta på hur du hämtar data från en undergraf med Apollo-klienten i ett webbprojekt. -First, install `@apollo/client` and `graphql`: +Först installerar du `@apollo/client` och `graphql`: ```sh npm install @apollo/client graphql ``` -Then you can query the API with the following code: +Sedan kan du göra en förfrågan till API:et med följande kod: ```javascript import { ApolloClient, InMemoryCache, gql } from '@apollo/client' @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +För att använda variabler kan du skicka in ett `variables` argument till förfrågan: ```javascript const tokensQuery = ` @@ -226,22 +226,22 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +Ett annat alternativ är [URQL](https://formidable.com/open-source/urql/) som är tillgänglig inom Node.js, React/Preact, Vue och Svelte-miljöer, med fler avancerade funktioner: -- Flexible cache system -- Extensible design (easing adding new capabilities on top of it) -- Lightweight bundle (~5x lighter than Apollo Client) -- Support for file uploads and offline mode +- Flexibelt cachelagersystem +- Utbyggbar design (förenklar tillägg av nya funktioner på toppen) +- Lättviktsbundle (~5 gånger lättare än Apollo-klienten) +- Stöd för filöverföringar och offline-läge -Let's look at how to fetch data from a subgraph with URQL in a web project. +Låt oss titta på hur du hämtar data från en undergraf med URQL i ett webbprojekt. -First, install `urql` and `graphql`: +Först installerar du `urql` och `graphql`: ```sh npm install urql graphql ``` -Then you can query the API with the following code: +Sedan kan du göra en förfrågan till API:et med följande kod: ```javascript import { createClient } from 'urql' diff --git a/website/pages/sv/querying/querying-the-graph.mdx b/website/pages/sv/querying/querying-the-graph.mdx index af9dcaaf2477..1ce0df3011e4 100644 --- a/website/pages/sv/querying/querying-the-graph.mdx +++ b/website/pages/sv/querying/querying-the-graph.mdx @@ -1,14 +1,14 @@ --- -title: Querying The Graph +title: Fråga The Graph --- -With the subgraph deployed, visit the [Graph Explorer](https://thegraph.com/explorer) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +När subgrafiken har distribuerats besök [Graf Explorer](https://thegraph.com/explorer) för att öppna en [GraphiQL](https://github.com/graphql/graphiql)-gränssnitt där du kan utforska den distribuerade GraphQL API:n för subgrafiken genom att skicka frågor och visa schemat. -An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. +Ett exempel ges nedan, men se [Fråge-API](/querying/graphql-api) för en komplett referens om hur du frågar subgrafens enheter. -## Example +## Exempel -This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: +Denna fråga listar alla räknare som vår mappning har skapat. Eftersom vi endast skapar en, kommer resultatet endast att innehålla vår enda `default-counter`: ```graphql { @@ -19,14 +19,14 @@ This query lists all the counters our mapping has created. 
Since we only create } ``` -## Using The Graph Explorer +## Använda The Graf Explorer -Each subgraph published to the decentralized Graph Explorer has a unique query URL that you can find by navigating to the subgraph details page and clicking on the "Query" button on the top right corner. This will open a side pane that will give you the unique query URL of the subgraph as well as some instructions about how to query it. +Varje subgraf som publiceras till den decentraliserade Graph Explorer har en unik fråge-URL som du kan hitta genom att navigera till subgrafens detaljsida och klicka på knappen "Fråga" längst upp till höger. Detta kommer att öppna en sidopanel som ger dig den unika fråge-URL:en för subgrafiken samt några instruktioner om hur du frågar den. -![Query Subgraph Pane](/img/query-subgraph-pane.png) +![Panel för att fråga subgraf](/img/query-subgraph-pane.png) -As you can notice, this query URL must use a unique API key. You can create and manage your API keys in the [Subgraph Studio](https://thegraph.com/studio) in the "API Keys" section. Learn more about how to use Subgraph Studio [here](/deploying/subgraph-studio). +Som du kan märka måste denna fråge-URL använda en unik API-nyckel. Du kan skapa och hantera dina API-nycklar i [Subgraf Studio](https://thegraph.com/studio) under avsnittet "API-nycklar". Läs mer om hur du använder Subgraf Studio [här](/deploying/subgraph-studio). -Querying subgraphs using your API keys will generate query fees that will be paid in GRT. You can learn more about billing [here](/billing). +Att fråga subgrafer med dina API-nycklar kommer att generera frågeavgifter som betalas i GRT. Du kan lära dig mer om fakturering [här](/billing). -You can also use the GraphQL playground in the "Playground" tab to query a subgraph within The Graph Explorer. +Du kan även använda GraphQL-lekplatsen i fliken "Lekplats" för att fråga en subgraf inom The Graph Explorer. diff --git a/website/pages/sv/querying/querying-the-hosted-service.mdx b/website/pages/sv/querying/querying-the-hosted-service.mdx index 14777da41247..474b3c1b2a05 100644 --- a/website/pages/sv/querying/querying-the-hosted-service.mdx +++ b/website/pages/sv/querying/querying-the-hosted-service.mdx @@ -1,14 +1,14 @@ --- -title: Querying the Hosted Service +title: Förfrågan om hostad tjänst --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. -An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. +Ett exempel ges nedan, men se [Fråge-API](/querying/graphql-api) för en komplett referens om hur du frågar subgrafens enheter. -## Example +## Exempel -This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: +Denna fråga listar alla räknare som vår mappning har skapat. 
Eftersom vi endast skapar en, kommer resultatet endast att innehålla vår enda `default-counter`: ```graphql { @@ -19,10 +19,10 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. -Some of the main features are detailed below: +Några av de viktigaste funktionerna beskrivs nedan: -![Explorer Playground](/img/explorer-playground.png) +![Lekplats för utforskare](/img/explorer-playground.png) diff --git a/website/pages/sv/querying/querying-with-python.mdx b/website/pages/sv/querying/querying-with-python.mdx new file mode 100644 index 000000000000..4588292c59a8 --- /dev/null +++ b/website/pages/sv/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Komma igång + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. 
+- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/sv/quick-start.mdx b/website/pages/sv/quick-start.mdx new file mode 100644 index 000000000000..7e997dda0d2d --- /dev/null +++ b/website/pages/sv/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Snabbstart +--- + +Den här guiden kommer snabbt att ta dig igenom hur du initierar, skapar och distribuerar din subgraf till Subgraf Studio eller [värdtjänst](#hosted-service). + +Se till att din subgraf kommer att indexera data från ett [nätverk som stöds] \(/developing/supported-networks). + +Den här guiden är skriven förutsatt att du har: + +- En smart kontraktsadress på det nätverk du väljer +- GRT för att kurera din subgraf +- En kryptoplånbok + +## 1. Skapa en subgraf på Subgraph Studio + +Gå till [Subgraph Studio](https://thegraph.com/studio/) och anslut din plånbok. + +När du är ansluten kan du börja med att klicka på "skapa en subgraf". Välj det nätverk du vill ha och klicka på fortsätt. + +## 2. Installera Graph CLI + +Graph CLI är skrivet i JavaScript och du måste ha antingen `npm` eller `yarn` installerat för att kunna använda det. + +Kör ett av följande kommandon på din lokala dator: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initiera din subgraf + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +När du initierar din subgraf kommer CLI verktyget att be dig om följande information: + +- Protokoll: välj det protokoll som din subgraf ska indexera data från +- Subgragh slug: skapa ett namn för din subgraf. Din subgraf snigel är en identifierare för din subgraf. +- Katalog att skapa subgrafen i: välj din lokala katalog +- Ethereum nätverk (valfritt): du kan behöva ange vilket EVM kompatibelt nätverk din subgraf kommer att indexera data från +- Kontraktsadress: Leta upp den smarta kontraktsadress som du vill fråga data från +- ABI: Om ABI inte fylls i automatiskt måste du mata in det manuellt som en JSON fil +- Startblock: det föreslås att du matar in startblocket för att spara tid medan din subgraf indexerar blockkedjedata. Du kan hitta startblocket genom att hitta blocket där ditt kontrakt distribuerades. +- Kontraktsnamn: ange namnet på ditt kontrakt +- Indexera kontraktshändelser som entiteter: det föreslås att du ställer in detta till sant eftersom det automatiskt lägger till mappningar till din subgraf för varje emitterad händelse +- Lägg till ett annat kontrakt (valfritt): du kan lägga till ett annat kontrakt + +Initiera din subgraf från ett befintligt kontrakt genom att köra följande kommando: + +```sh +graph init --studio +``` + +Se följande skärmdump för ett exempel för vad du kan förvänta dig när du initierar din subgraf: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Skriv din subgraf + +De tidigare kommandona skapar en ställnings undergraf som du kan använda som utgångspunkt för att bygga din undergraf. När du gör ändringar i subgrafen kommer du huvudsakligen att arbeta med tre filer: + +- Manifest (subgraph.yaml) - Manifestet definierar vilka datakällor dina subgrafer kommer att indexera. +- Schema (schema.graphql) - GraphQL schemat definierar vilken data du vill hämta från subgrafen. 
+- AssemblyScript mappningar (mapping.ts) - Detta är koden som översätter data från dina datakällor till de enheter som definieras i schemat. + +För mer information om hur du skriver din subgraf, se [Skapa en subgraf](/developing/creating-a-subgraph). + +## 5. Distribuera till Subgraph Studio + +När din subgraf är skriven, kör följande kommandon: + +```sh +$ graph codegen +$ graph build +``` + +- Autentisera och distribuera din subgraf. Implementeringsnyckeln finns på Subgraph sidan i Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Testa din subgraf + +Du kan testa din subgraf genom att göra en exempelfråga i lekplatssektionen. + +Loggarna kommer att berätta om det finns några fel med din subgraf. Loggarna för en operativ subgraf kommer att se ut så här: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publicera din subgraf till The Graph's decentraliserade nätverk + +När din subgraf har distribuerats till Subgraph Studio, du har testat den och är redo att sätta den i produktion, du kan sedan publicera den till det decentraliserade nätverket. + +I Subgraph Studio klickar du på din undergraf. På subgrafens sida kan du klicka på publiceringsknappen längst upp till höger. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Innan du kan fråga efter din subgraf måste indexerare börja skicka frågor på den. För att effektivisera denna process kan du kurera din egen subgraf med GRT. + +I skrivande stund rekommenderas det att du kurerar din egen subgraf med 10 000 GRT för att säkerställa att den är indexerad och tillgänglig för sökning så snart som möjligt. + +För att spara på gaskostnaderna kan du kurera din subgraf i samma transaktion som du publicerade den genom att välja den här knappen när du publicerar din subgraf till The Graphs decentraliserade nätverk: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Fråga din subgraf + +Nu kan du fråga din subgraf genom att skicka GraphQL frågor till din subgrafs fråge URL, som du kan hitta genom att klicka på frågeknappen. + +Du kan fråga från din dapp om du inte har din API nyckel via den kostnadsfria, hastighetsbegränsade tillfälliga sökadressen som kan användas för utveckling och iscensättning. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). 
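As a rough companion to step 8, the snippet below shows one way to send a query to that query URL with nothing but `fetch`. The URL, API key, and subgraph ID are placeholders, the `_meta` field is used only because it exists on every subgraph (swap in queries for your own entities), and a runtime with a global `fetch` such as Node 18+ or a browser is assumed:

```js
// Placeholder query URL: copy the real one from the "Query" button for your subgraph
const QUERY_URL = 'https://gateway.thegraph.com/api/<API_KEY>/subgraphs/id/<SUBGRAPH_ID>'

// _meta is available on every subgraph; replace it with queries for your own entities
const query = /* GraphQL */ `
  {
    _meta {
      block {
        number
      }
    }
  }
`

async function querySubgraph() {
  const response = await fetch(QUERY_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query }),
  })
  const { data, errors } = await response.json()
  if (errors) {
    console.error('GraphQL errors:', errors)
  } else {
    console.log('Latest indexed block:', data._meta.block.number)
  }
}

querySubgraph()
```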
diff --git a/website/pages/sv/release-notes/assemblyscript-migration-guide.mdx b/website/pages/sv/release-notes/assemblyscript-migration-guide.mdx index 85f6903a6c69..97c6bb95635a 100644 --- a/website/pages/sv/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/sv/release-notes/assemblyscript-migration-guide.mdx @@ -1,50 +1,50 @@ --- -title: AssemblyScript Migration Guide +title: AssemblyScript Migrationsguide --- -Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Hittills har undergrafar använt en av de [första versionerna av AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Äntligen har vi lagt till stöd för den [nyaste tillgängliga versionen](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 -That will enable subgraph developers to use newer features of the AS language and standard library. +Det kommer att möjliggöra för undergrafutvecklare att använda nyare funktioner i AS-språket och standardbiblioteket. -This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 +Denna guide är tillämplig för alla som använder `graph-cli`/`graph-ts` version `0.22.0` eller lägre. Om du redan är på en högre version än (eller lika med) det, har du redan använt version `0.19.10` av AssemblyScript 🙂 -> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. +> Observera: Från och med `0.24.0` kan `graph-node` stödja båda versionerna, beroende på `apiVersion` som anges i undergrafens manifest. 
-## Features +## Funktioner -### New functionality +### Ny funktionalitet -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`s kan nu skapas från `ArrayBuffer`s med hjälp av [det nya `wrap`-statiska metoden](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- Nya standardbiblioteksfunktioner: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare` och `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Lagt till stöd för x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Lagt till `StaticArray`, en mer effektiv varian av en array ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Lagt till `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Implementerat `radix`-argumentet på `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Lagt till stöd för avskiljare i flyttal ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Lagt till stöd för funktioner av första klass ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Lägg till inbyggda funktioner: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- Implementera `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Lagt till stöd för mallliteralsträngar ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Lägg till `encodeURI(Component)` och `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Lägg till `toString`, `toDateString` och `toTimeString` för `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Lägg till `toUTCString` för `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Lägg till inbyggd typ `nonnull/NonNullable` ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### Optimizations +### Optimeringar -- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `Math`-funktioner som `exp`, `exp2`, `log`, `log2` och `pow` har ersatts med snabbare varianter ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Lätt optimering av `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Cachea fler fältåtkomster i std Map och Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Optimering för potenser av två i `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### Other +### Annat -- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Typen för en arrayliteral kan nu härledas från dess innehåll ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Uppdaterad stdlib till Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## How to upgrade? +## Hur uppgraderar du? -1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: +1. Ändra dina mappningar `apiVersion` i `subgraph.yaml` till `0.0.6`: ```yaml ... @@ -56,30 +56,30 @@ dataSources: ... ``` -2. Update the `graph-cli` you're using to the `latest` version by running: +2. Uppdatera `graph-cli` som du använder till den `nyaste` versionen genom att köra: ```bash -# if you have it globally installed +# om du har den globalt installerad npm install --global @graphprotocol/graph-cli@latest -# or in your subgraph if you have it as a dev dependency +# eller i din subgraf om du har det som ett utvecklingsberoende npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: +3. 
Gör samma sak för `graph-ts`, men istället för att installera globalt, spara den i dina huvudberoenden: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. Follow the rest of the guide to fix the language breaking changes. -5. Run `codegen` and `deploy` again. +4. Följ resten av guiden för att åtgärda språkbrytande ändringar. +5. Kör `codegen` och `deploy` igen. -## Breaking changes +## Språkbrytande ändringar -### Nullability +### Nullbarhet -On the older version of AssemblyScript, you could create code like this: +I den äldre versionen av AssemblyScript kunde du skapa kod som detta: ```typescript function load(): Value | null { ... } @@ -88,7 +88,7 @@ let maybeValue = load(); maybeValue.aMethod(); ``` -However on the newer version, because the value is nullable, it requires you to check, like this: +Men i den nyare versionen, eftersom värdet är nullable, måste du kontrollera, så här: ```typescript let maybeValue = load() @@ -98,19 +98,19 @@ if (maybeValue) { } ``` -Or force it like this: +Eller gör så här: ```typescript -let maybeValue = load()! // breaks in runtime if value is null +let maybeValue = load()! // bryts i runtime om värdet är null maybeValue.aMethod() ``` -If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. +Om du är osäker på vilken du ska välja, rekommenderar vi alltid att använda den säkra versionen. Om värdet inte finns kanske du bara vill göra ett tidigt villkorligt uttalande med en retur i din undergrafshanterare. -### Variable Shadowing +### Variabelskuggning -Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: +Tidigare kunde du använda [variabelskuggning](https://en.wikipedia.org/wiki/Variable_shadowing) och kod som detta skulle fungera: ```typescript let a = 10 @@ -118,7 +118,7 @@ let b = 20 let a = a + b ``` -However now this isn't possible anymore, and the compiler returns this error: +Men nu är detta inte längre möjligt, och kompilatorn returnerar detta fel: ```typescript ERROR TS2451: Cannot redeclare block-scoped variable 'a' @@ -128,11 +128,11 @@ ERROR TS2451: Cannot redeclare block-scoped variable 'a' in assembly/index.ts(4,3) ``` -You'll need to rename your duplicate variables if you had variable shadowing. +Du måste döpa om dina duplicerade variabler om du hade variabelskuggning. -### Null Comparisons +### Jämförelser med nollvärden -By doing the upgrade on your subgraph, sometimes you might get errors like these: +När du gör uppgraderingen av din subgraf kan du ibland få fel som dessa: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -To solve you can simply change the `if` statement to something like this: +För att lösa problemet kan du helt enkelt ändra `if`-satsen till något i den här stilen: ```typescript if (!decimals) { @@ -151,23 +151,23 @@ To solve you can simply change the `if` statement to something like this: if (decimals === null) { ``` -The same applies if you're doing != instead of ==. +Samma gäller om du använder != istället för ==. 
-### Casting +### Kasting -The common way to do casting before was to just use the `as` keyword, like this: +Det vanliga sättet att göra kasting tidigare var att bara använda nyckelordet `as`, som så här: ```typescript let byteArray = new ByteArray(10) -let uint8Array = byteArray as Uint8Array // equivalent to: byteArray +let uint8Array = byteArray as Uint8Array // motsvarande: byteArray ``` -However this only works in two scenarios: +Detta fungerar dock endast i två scenarier: -- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting on class inheritance (subclass → superclass) +- Primitiv kasting (mellan typer som `u8`, `i32`, `bool`; t.ex. `let b: isize = 10; b as usize`); +- Uppkasting vid klassarv (underklass → överklass) -Examples: +Exempel: ```typescript // primitive casting @@ -184,13 +184,13 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: +Det finns två scenarier där du kan vilja casta, men att använda `as`/`var` **är inte säkert**: -- Downcasting on class inheritance (superclass → subclass) -- Between two types that share a superclass +- Downcasting vid arv av klasser (superklass → subklass) +- Mellan två typer som delar en superklass ```typescript -// downcasting on class inheritance +// downcasting om klassarv class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -198,7 +198,7 @@ let uint8Array = new Uint8Array(2) ``` ```typescript -// between two types that share a superclass +// mellan två typer som delar en superklass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -206,10 +206,10 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -For those cases, you can use the `changetype` function: +I dessa fall kan du använda funktionen `changetype`: ```typescript -// downcasting on class inheritance +// downcasting om klassarv class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -217,7 +217,7 @@ changetype(uint8Array) // works :) ``` ```typescript -// between two types that share a superclass +// mellan två typer som delar en superklass class Bytes extends Uint8Array {} class ByteArray extends Uint8Array {} @@ -225,10 +225,10 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. +Om du bara vill ta bort nullability kan du fortsätta använda `as`-operatorn (eller `variable`), men se till att du vet att värdet inte kan vara null, annars kommer det att bryta. 
```typescript -// remove nullability +// ta bort ogiltighet let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null if (previousBalance != null) { @@ -238,18 +238,18 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 +För nullbarhetsfallet rekommenderar vi att du tittar på [nullbarhetskontrollfunktionen](https://www.assemblyscript.org/basics.html#nullability-checks), den kommer att göra din kod renare 🙂 -Also we've added a few more static methods in some types to ease casting, they are: +Vi har också lagt till några fler statiska metoder i vissa typer för att underlätta kastning, de är: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### Nullability check with property access +### Kontroll av nollställbarhet med tillgång till egendom -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +För att använda [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) kan du använda antingen `if`-satser eller den ternära operatorn (`?` och `:`) så här: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: +Men det fungerar bara när du gör `if` / ternary på en variabel, inte på en egenskapstillgång, som den här: ```typescript class Container { @@ -277,10 +277,10 @@ class Container { let container = new Container() container.data = 'data' -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +let somethingOrElse: string = container.data ? container.data : 'else' // Kompilerar inte ``` -Which outputs this error: +Vilket ger detta fel: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. @@ -289,7 +289,7 @@ ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/s ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``` -To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: +För att åtgärda problemet kan du skapa en variabel för den egenskapen så att kompilatorn kan utföra den magiska nollbarhetskontrollen: ```typescript class Container { @@ -301,12 +301,12 @@ container.data = 'data' let data = container.data -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +let somethingOrElse: string = data ? data : 'else' // kompilerar helt okej :) ``` -### Operator overloading with property access +### Operatörsöverladdning med egenskapsaccess -If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. +Om du försöker summera (till exempel) en nullable typ (från en property access) med en non nullable, kommer AssemblyScript-kompilatorn istället för att ge en kompileringsfelsvarning om att ett av värdena är nullable, bara att kompilera tyst, vilket gör att koden kan gå sönder vid körning. 
```typescript class BigInt extends Uint8Array { @@ -323,14 +323,14 @@ class Wrapper { let x = BigInt.fromI32(2) let y: BigInt | null = null -x + y // give compile time error about nullability +x + y // ge kompileringsfel om ogiltighet let wrapper = new Wrapper(y) -wrapper.n = wrapper.n + x // doesn't give compile time errors as it should +wrapper.n = wrapper.n + x // ger inte kompileringsfel som det borde ``` -We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. +Vi har öppnat en fråga om AssemblyScript-kompilatorn för detta, men om du gör den här typen av operationer i dina subgraf-mappningar bör du ändra dem så att de gör en null-kontroll innan den. ```typescript let wrapper = new Wrapper(y) @@ -339,12 +339,12 @@ if (!wrapper.n) { wrapper.n = BigInt.fromI32(0) } -wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt +wrapper.n = wrapper.n + x // nu är `n` garanterat ett BigInt ``` -### Value initialization +### Initialisering av värde -If you have any code like this: +Om du har någon kod som denna: ```typescript var value: Type // null @@ -352,7 +352,7 @@ value.x = 10 value.y = 'content' ``` -It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: +Det kommer att kompilera men brytas vid körning, det händer eftersom värdet inte har initialiserats, så se till att din subgraf har initialiserat sina värden, så här: ```typescript var value = new Type() // initialized @@ -360,7 +360,7 @@ value.x = 10 value.y = 'content' ``` -Also if you have nullable properties in a GraphQL entity, like this: +Även om du har nullable properties i en GraphQL-entitet, som denna: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -And you have code similar to this: +Och du har en kod som liknar den här: ```typescript let total = Total.load('latest') @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: +Du måste se till att initialisera värdet `total.amount`, för om du försöker komma åt som i den sista raden för summan, kommer det att krascha. 
Så antingen initialiserar du det först: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 +Eller så kan du bara ändra ditt GraphQL-schema för att inte använda en nullable-typ för den här egenskapen, då initierar vi den som noll i `codegen` -steget 😉 ```graphql type Total @entity { @@ -407,15 +407,15 @@ type Total @entity { let total = Total.load('latest') if (total === null) { - total = new Total('latest') // already initializes non-nullable properties + total = new Total('latest') // initierar redan icke-nullställbara egenskaper } total.amount = total.amount + BigInt.fromI32(1) ``` -### Class property initialization +### Initialisering av klassegenskaper -If you export any classes with properties that are other classes (declared by you or by the standard library) like this: +Om du exporterar några klasser med egenskaper som är andra klasser (deklarerade av dig eller av standardbiblioteket) på det här sättet: ```typescript class Thing {} @@ -425,7 +425,7 @@ export class Something { } ``` -The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: +Kompilatorn kommer att göra fel eftersom du antingen måste lägga till en initialiserare för de egenskaper som är klasser, eller lägga till operatorn `!`: ```typescript export class Something { @@ -449,9 +449,9 @@ export class Something { } ``` -### Array initialization +### Initialisering av Array -The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: +Klassen `Array` accepterar fortfarande ett tal för att initiera längden på listan, men du bör vara försiktig eftersom operationer som `.push` faktiskt ökar storleken istället för att lägga till i början, till exempel: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -459,13 +459,13 @@ let arr = new Array(5) // ["", "", "", "", ""] arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ``` -Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: +Beroende på vilka typer du använder, t.ex. 
nullable-typer, och hur du kommer åt dem, kan du stöta på ett runtime-fel som det här: ``` ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -To actually push at the beginning you should either, initialize the `Array` with size zero, like this: +För att faktiskt trycka i början bör du antingen initiera `Array` med storlek noll, så här: ```typescript let arr = new Array(0) // [] @@ -473,7 +473,7 @@ let arr = new Array(0) // [] arr.push('something') // ["something"] ``` -Or you should mutate it via index: +Eller så bör du mutera den via index: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -481,11 +481,11 @@ let arr = new Array(5) // ["", "", "", "", ""] arr[0] = 'something' // ["something", "", "", "", ""] ``` -### GraphQL schema +### GraphQL-schema -This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. +Detta är inte en direkt AssemblyScript-ändring, men du kan behöva uppdatera din `schema.graphql`-fil. -Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: +Nu kan du inte längre definiera fält i dina typer som är Non-Nullable Lists. Om du har ett schema som detta: ```graphql type Something @entity { @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -You'll have to add an `!` to the member of the List type, like this: +Du måste lägga till en `!` till medlemmen i List-typen, så här: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). +Detta ändrades på grund av skillnader i nullbarhet mellan AssemblyScript-versioner, och det är relaterat till filen `src/generated/schema.ts` (standardväg, du kanske har ändrat detta). -### Other +### Annat -- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. 
Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Anpassade `Map#set` och `Set#add` efter specifikationen, så att de returnerar `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Arrayer ärver inte längre från ArrayBufferView, utan är nu distinkta ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Klasser som initialiseras från objektlitteraler kan inte längre definiera en konstruktor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Resultatet av en binär `**`-operation är nu ett heltal av den gemensamma nämnartypen om båda operanderna är heltal. Tidigare var resultatet ett flyttal som om man anropade `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Tvinga `NaN` till `false` vid typomvandling till `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- När du skiftar ett litet heltalsvärde av typ `i8`/`u8` eller `i16`/`u16`, påverkar endast de 3 respektive 4 minst signifikanta bitarna i RHS-värdet resultatet, analogt med resultatet av en `i32.shl` som endast påverkas av de 5 minst signifikanta bitarna i RHS-värdet. Exempel: `someI8 << 8` producerade tidigare värdet `0`, men producerar nu `someI8` på grund av maskeringen av RHS som `8 & 7 = 0` (3 bitar) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Buggfix för relationella strängjämförelser när storlekarna skiljer sig ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/pages/sv/release-notes/graphql-validations-migration-guide.mdx b/website/pages/sv/release-notes/graphql-validations-migration-guide.mdx index f8cf8a3c2ed3..b95f0916c0bb 100644 --- a/website/pages/sv/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/sv/release-notes/graphql-validations-migration-guide.mdx @@ -1,91 +1,91 @@ --- -title: GraphQL Validations migration guide +title: Migrationsguide för GraphQL-validering --- -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). +Snart kommer `graph-node` att stödja 100 % täckning av [GraphQL Valideringsspecifikationen](https://spec.graphql.org/June2018/#sec-Validation). -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. +Tidigare versioner av `graph-node` stödde inte alla valideringar och gav mer förlåtande svar - så, i fall av oklarheter, ignorerade `graph-node` ogiltiga komponenter för GraphQL-operationer. -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. +Stöd för GraphQL Validering är grundläggande för de kommande nya funktionerna och prestanda vid skala för The Graph Network. -It will also ensure determinism of query responses, a key requirement on The Graph Network. +Det kommer också att säkerställa determinism för frågesvar, ett nyckelkrav för The Graph Network.
-**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. +**Att aktivera GraphQL Validering kommer att bryta några befintliga frågor** som skickas till The Graph API. -To be compliant with those validations, please follow the migration guide. +För att vara i linje med dessa valideringar, följ migrationsguiden. -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. +> ⚠️ Om du inte migrerar dina frågor innan valideringarna tas i bruk kommer de att returnera fel och eventuellt bryta dina frontends/klienter. -## Migration guide +## Migrationsguide -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. +Du kan använda CLI-migrationsverktyget för att hitta eventuella problem i dina GraphQL-operationer och åtgärda dem. Alternativt kan du uppdatera ändpunkten för din GraphQL-klient att använda ändpunkten `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME`. Att testa dina frågor mot denna ändpunkt kommer att hjälpa dig att hitta problemen i dina frågor. -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. +> Inte alla subgrafer behöver migreras, om du använder [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) eller [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), ser de redan till att dina frågor är giltiga. -## Migration CLI tool +## Migrations-CLI-verktyg -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** +**De flesta felen i GraphQL-operationer kan hittas i din kodbas i förväg.** -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. +Av den anledningen erbjuder vi en smidig upplevelse för validering av dina GraphQL-operationer under utveckling eller i CI. -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) är ett enkelt CLI-verktyg som hjälper till att validera GraphQL-operationer mot ett givet schema. -### **Getting started** +### **Komma igång** -You can run the tool as follows: +Du kan köra verktyget enligt följande: ```bash npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql ``` -**Notes:** +**Noteringar:** -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. 
**Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). +- Ange eller ersätt $GITHUB_USER, $SUBGRAPH_NAME med lämpliga värden. Som: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) +- Förhandsgranskningsschema-URL:en (https://api-next.thegraph.com/) som tillhandahålls är hårt hastighetsbegränsad och kommer att fasas ut när alla användare har migrerat till den nya versionen. **Använd den inte i produktion.** +- Operationer identifieras i filer med följande filändelser [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (alternativet `-o`). -### CLI output +### CLI-utdata -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: +CLI-verktyget `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` kommer att ge utdata för eventuella fel i GraphQL-operationer enligt följande: -![Error output from CLI](https://i.imgur.com/x1cBdhq.png) +![Felutdata från CLI](https://i.imgur.com/x1cBdhq.png) -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). +För varje fel hittar du en beskrivning, filväg och position, samt en länk till ett exempel på lösning (se följande avsnitt). -## Run your local queries against the preview schema +## Kör dina lokala frågor mot förhandsgranskningsschemat -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. +Vi tillhandahåller en ändpunkt `https://api-next.thegraph.com/` som kör en `graph-node`-version med aktiverad validering. -You can try out queries by sending them to: +Du kan prova att skicka frågor till: - `https://api-next.thegraph.com/subgraphs/id/` -or +eller - `https://api-next.thegraph.com/subgraphs/name//` -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. +För att arbeta med frågor som har markerats med valideringsfel kan du använda ditt favoritverktyg för GraphQL-frågor, som Altair eller [GraphiQL](https://cloud.hasura.io/public/graphiql), och testa din fråga. Dessa verktyg kommer även att markera dessa fel i sitt användargränssnitt, även innan du kör det. -## How to solve issues +## Hur man löser problem -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. +Nedan hittar du alla GraphQL-valideringsfel som kan uppstå i dina befintliga GraphQL-operationer. -### GraphQL variables, operations, fragments, or arguments must be unique +### GraphQL-variabler, operationer, fragment eller argument måste vara unika -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. +Vi har tillämpat regler för att säkerställa att en operation inkluderar en unik uppsättning GraphQL-variabler, operationer, fragment och argument.
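Som ett komplement till avsnittet "Kör dina lokala frågor mot förhandsgranskningsschemat" ovan följer här en minimal, hypotetisk TypeScript-skiss som skickar en fråga till förhandsgranskningsändpunkten. Subgrafnamnet och fältet `tokens` är bara platshållare (de beror på ditt eget schema), och `fetch` förutsätter Node 18+ eller en webbläsare:

```typescript
// Hypotetiskt exempel: byt ut subgrafnamnet och frågan mot dina egna
const endpoint =
  "https://api-next.thegraph.com/subgraphs/name/MY_GITHUB_USER/MY_SUBGRAPH";

const query = /* GraphQL */ `
  query FirstTokens {
    tokens(first: 5) {
      id
    }
  }
`;

async function main() {
  const res = await fetch(endpoint, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ query }),
  });
  const { data, errors } = await res.json();
  // Eventuella valideringsfel rapporteras i `errors`-fältet i svaret
  console.log(errors ?? data);
}

main().catch(console.error);
```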
-A GraphQL operation is only valid if it does not contain any ambiguity. +En GraphQL-operation är endast giltig om den inte innehåller någon oklarhet. -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. +För att uppnå detta måste vi säkerställa att vissa komponenter i din GraphQL-operation är unika. -Here's an example of a few invalid operations that violates these rules: +Här är ett exempel på några ogiltiga operationer som bryter mot dessa regler: -**Duplicate Query name (#UniqueOperationNamesRule)** +**Duplicerat frågenamn (#UniqueOperationNamesRule)** ```graphql -# The following operation violated the UniqueOperationName -# rule, since we have a single operation with 2 queries -# with the same name +# Följande operation bröt mot UniqueOperationName- # regeln, eftersom vi har en enda operation med 2 frågor # med samma namn query myData { id } @@ -103,16 +103,16 @@ query myData { } query myData2 { - # rename the second query - name + # Byt namn på den andra sökningen + name } ``` -**Duplicate Fragment name (#UniqueFragmentNamesRule)** +**Duplicerat fragmentnamn (#UniqueFragmentNamesRule)** ```graphql -# The following operation violated the UniqueFragmentName -# rule. +# Följande operation bröt mot regeln # UniqueFragmentName. query myData { id ...MyFields } @@ -136,19 +136,19 @@ query myData { ...MyFieldsMetadata } -fragment MyFieldsMetadata { # assign a unique name to fragment +fragment MyFieldsMetadata { # tilldela ett unikt namn till fragmentet metadata } -fragment MyFieldsName { # assign a unique name to fragment - name +fragment MyFieldsName { # tilldela ett unikt namn till fragmentet + name } ``` -**Duplicate variable name (#UniqueVariableNamesRule)** +**Dubbla variabelnamn (#UniqueVariableNamesRule)** ```graphql -# The following operation violates the UniqueVariables +# Följande operation strider mot UniqueVariables query myData($id: String, $id: Int) { id ...MyFields } @@ -159,16 +159,16 @@ _Solution:_ ```graphql query myData($id: String) { - # keep the relevant variable (here: `$id: String`) + # behålla den relevanta variabeln (här: `$id: String`) id ...MyFields } ``` -**Duplicate argument name (#UniqueArgument)** +**Duplicerat argumentnamn (#UniqueArgument)** ```graphql -# The following operation violated the UniqueArguments +# Följande operation bröt mot UniqueArguments query myData($id: ID!) { userById(id: $id, id: "1") { id } @@ -186,13 +186,13 @@ query myData($id: ID!) { } } ``` -**Duplicate anonymous query (#LoneAnonymousOperationRule)** +**Duplicerad anonym fråga (#LoneAnonymousOperationRule)** -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: +Att använda två anonyma operationer bryter också mot regeln `LoneAnonymousOperation` på grund av konflikt i svarsstrukturen: ```graphql -# This will fail if executed together in -# a single operation with the following two queries: +# Detta kommer att misslyckas om det utförs tillsammans i +# en enda operation med följande två frågor: query { someField } @@ -211,7 +211,7 @@ query { } ``` -Or name the two queries: +Eller namnge de två frågorna: ```graphql query FirstQuery { @@ -223,20 +223,20 @@ query SecondQuery { } ``` -### Overlapping Fields +### Överlappande fält -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. +En GraphQL-urvalsuppsättning anses endast vara giltig om den korrekt löser den slutliga resultatuppsättningen.
-If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. +Om en specifik urvalsuppsättning, eller ett fält, skapar tvetydighet antingen genom det valda fältet eller genom de argument som används, kommer GraphQL-tjänsten att misslyckas med att validera operationen. -Here are a few examples of invalid operations that violate this rule: +Här är några exempel på ogiltiga operationer som bryter mot denna regel: -**Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** +**Motstridiga fältalias (#OverlappingFieldsCanBeMergedRule)** ```graphql -# Aliasing fields might cause conflicts, either with -# other aliases or other fields that exist on the -# GraphQL schema. +# Aliasfält kan orsaka konflikter, antingen med # andra alias eller andra fält som finns i # GraphQL-schemat. query { dogs { name: nickname } @@ -256,11 +256,11 @@ query { } ``` -**Conflicting fields with arguments (#OverlappingFieldsCanBeMergedRule)** +**Motstridiga fält med argument (#OverlappingFieldsCanBeMergedRule)** ```graphql -# Different arguments might lead to different data, -# so we can't assume the fields will be the same. +# Olika argument kan leda till olika data, # så vi kan inte anta att fälten kommer att vara desamma. query { dogs { doesKnowCommand(dogCommand: SIT) } @@ -280,12 +280,12 @@ query { } ``` -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: +I mer komplexa användningsfall kan du också bryta mot denna regel genom att använda två fragment som kan orsaka en konflikt i den slutligen förväntade uppsättningen: ```graphql query { - # Eventually, we have two "x" definitions, pointing - # to different fields! + # Till slut har vi två "x"-definitioner, som pekar # till olika fält! ...A ...B } @@ -299,7 +299,7 @@ fragment B on Type { } ``` -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: +Dessutom kan GraphQL-direktiv på klientsidan som `@skip` och `@include` leda till tvetydigheter, till exempel: ```graphql fragment mergeSameFieldsWithSameDirectives on Dog { @@ -308,18 +308,18 @@ fragment mergeSameFieldsWithSameDirectives on Dog { } ``` -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) +[Du kan läsa mer om algoritmen här.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) -### Unused Variables or Fragments +### Oanvända variabler eller fragment -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. +En GraphQL-operation anses också vara giltig endast om alla operationsdefinierade komponenter (variabler, fragment) används. -Here are a few examples for GraphQL operations that violates these rules: +Här är några exempel på GraphQL-operationer som bryter mot dessa regler: -**Unused variable** (#NoUnusedVariablesRule) +**Oanvänd variabel** (#NoUnusedVariablesRule) ```graphql -# Invalid, because $someVar is never used. +# Ogiltig, eftersom $someVar aldrig används. query something($someVar: String) { someData } @@ -333,10 +333,10 @@ query something { } ``` -**Unused Fragment** (#NoUnusedFragmentsRule) +**Oanvänt fragment** (#NoUnusedFragmentsRule) ```graphql -# Invalid, because fragment AllFields is never used. +# Ogiltig, eftersom fragmentet AllFields aldrig används.
query something { someData } @@ -350,22 +350,22 @@ fragment AllFields { # unused :( _Solution:_ ```graphql -# Invalid, because fragment AllFields is never used. +# Ogiltig, eftersom fragmentet AllFields aldrig används. query something { someData } -# remove the `AllFields` fragment +# ta bort `AllFields`-fragmentet ``` -### Invalid or missing Selection-Set (#ScalarLeafsRule) +### Ogiltig eller saknad urvalsuppsättning (#ScalarLeafsRule) -Also, a GraphQL field selection is only valid if the following is validated: +Dessutom är ett GraphQL-fältval endast giltigt om följande är validerat: -- An object field must-have selection set specified. -- An edge field (scalar, enum) must not have a selection set specified. +- Ett objektfält måste ha en urvalsuppsättning angiven. +- Ett edge-fält (scalar, enum) får inte ha en specificerad urvalsuppsättning. -Here are a few examples of violations of these rules with the following Schema: +Här är några exempel på brott mot dessa regler med följande Schema: ```graphql schema { @@ -384,12 +384,12 @@ schema { } ``` -**Invalid Selection-Set** +**Ogiltig urvalsuppsättning** ```graphql query { user { - id { # Invalid, because "id" is of type ID and does not have sub-fields + id { # Ogiltig, eftersom "id" är av typen ID och inte har underfält } } @@ -406,13 +406,13 @@ query { } ``` -**Missing Selection-Set** +**Saknad urvalsuppsättning** ```graphql query { user { id - image # `image` requires a Selection-Set for sub-fields! + image # `image` kräver en urvalsuppsättning för underfält! } } ``` @@ -430,49 +430,49 @@ query { } ``` -### Incorrect Arguments values (#VariablesInAllowedPositionRule) +### Felaktiga argumentvärden (#VariablesInAllowedPositionRule) -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. +GraphQL-operationer som skickar hårdkodade värden till argument måste vara giltiga, baserat på det värde som definieras i schemat. -Here are a few examples of invalid operations that violate these rules: +Här följer några exempel på ogiltiga operationer som bryter mot dessa regler: ```graphql query purposes { - # If "name" is defined as "String" in the schema, - # this query will fail during validation. + # Om "name" är definierat som "String" i schemat, + # kommer denna fråga att misslyckas under valideringen. purpose(name: 1) { id } } -# This might also happen when an incorrect variable is defined: +# Detta kan också hända när en felaktig variabel definieras: query purposes($name: Int!) { - # If "name" is defined as `String` in the schema, - # this query will fail during validation, because the - # variable used is of type `Int` + # Om "name" är definierat som `String` i schemat, + # kommer denna fråga att misslyckas under valideringen, eftersom + # variabeln som används är av typen `Int` purpose(name: $name) { id } } ``` -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) +### Okänd typ, variabel, fragment eller direktiv (#UnknownX) -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. +GraphQL API kommer att ge ett felmeddelande om okända typer, variabler, fragment eller direktiv används.
-Those unknown references must be fixed: +Dessa okända referenser måste åtgärdas: -- rename if it was a typo -- otherwise, remove +- Byt namn om det var ett stavfel +- annars, ta bort -### Fragment: invalid spread or definition +### Fragment: ogiltig spridning eller definition -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** +**Ogiltig spridning av fragment (#PossibleFragmentSpreadsRule)** -A Fragment cannot be spread on a non-applicable type. +Ett Fragment kan inte spridas på en icke tillämplig typ. -Example, we cannot apply a `Cat` fragment to the `Dog` type: +Exempel: Vi kan inte tillämpa ett `Cat`-fragment på `Dog`-typen: ```graphql query { @@ -486,33 +486,33 @@ fragment CatSimple on Cat { } ``` -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** +**Ogiltig fragmentdefinition (#FragmentsOnCompositeTypesRule)** -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. +Alla Fragment måste definieras på (med `on ...`) en sammansatt typ, kort sagt: objekt, gränssnitt eller union. -The following examples are invalid, since defining fragments on scalars is invalid. +Följande exempel är ogiltiga, eftersom det är ogiltigt att definiera fragment på skalärer. ```graphql fragment fragOnScalar on Int { - # we cannot define a fragment upon a scalar (`Int`) - something +# vi kan inte definiera ett fragment på en skalär (`Int`) + something } fragment inlineFragOnScalar on Dog { ... on Boolean { - # `Boolean` is not a subtype of `Dog` + # `Boolean` är inte en subtyp av `Dog` somethingElse } } ``` -### Directives usage +### Användning av direktiv -**Directive cannot be used at this location (#KnownDirectivesRule)** +**Direktiv kan inte användas på denna plats (#KnownDirectivesRule)** -Only GraphQL directives (`@...`) supported by The Graph API can be used. +Endast GraphQL-direktiv (`@...`) som stöds av The Graph API kan användas. -Here is an example with The GraphQL supported directives: +Här är ett exempel med de GraphQL-direktiv som stöds: ```graphql query { @@ -523,13 +523,13 @@ query { } ``` -_Note: `@stream`, `@live`, `@defer` are not supported._ +_Obs: `@stream`, `@live`, `@defer` stöds inte._ -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** +**Direktivet kan endast användas en gång på denna plats (#UniqueDirectivesPerLocationRule)** -The directives supported by The Graph can only be used once per location. +De direktiv som stöds av The Graph kan endast användas en gång per plats. -The following is invalid (and redundant): +Följande är ogiltigt (och överflödigt): ```graphql query { diff --git a/website/pages/sv/substreams.mdx b/website/pages/sv/substreams.mdx index d0354f06bab1..40d22e3c415d 100644 --- a/website/pages/sv/substreams.mdx +++ b/website/pages/sv/substreams.mdx @@ -1,9 +1,44 @@ --- -title: Substreams +title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network.
Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result<MyBlock, substreams::errors::Error> { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Komma igång + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/sv/sunrise.mdx b/website/pages/sv/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/sv/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network.
The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. 
This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. 
+ +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/sv/tokenomics.mdx b/website/pages/sv/tokenomics.mdx index 949796a99983..a0c4bf28643e 100644 --- a/website/pages/sv/tokenomics.mdx +++ b/website/pages/sv/tokenomics.mdx @@ -1,110 +1,110 @@ --- -title: Tokenomics of The Graph Network -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token works. +title: Tokenomics för The Graph Network +description: The Graph Network drivs av kraftfull tokenomics. Här är hur GRT, The Graphs nativa arbetsnyttighets-token, fungerar. --- -- GRT Token Address: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) +- GRT Tokenadress: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- GRT Tokenadress på Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -The Graph is a decentralized protocol that enables easy access to blockchain data. +The Graph är ett decentraliserat protokoll som möjliggör enkel åtkomst till blockkedjedata. -It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. +Det liknar en B2B2C-modell, förutom att den drivs av ett decentraliserat nätverk av deltagare. Nätverksdeltagare samarbetar för att tillhandahålla data till slutanvändare i utbyte mot GRT-belöningar. GRT är arbetsnyttighetstoken som samordnar dataleverantörer och konsumenter.
GRT fungerar som en nytta för att samordna dataleverantörer och konsumenter inom nätverket och stimulerar protokolldeltagare att organisera data effektivt. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. -The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! +The Graph indexerar blockkedjedata på ett liknande sätt som Google indexerar webben. Faktum är att du kanske redan använder The Graph utan att inse det. Om du har sett framsidan av en dapp som får sina data från en subgraf, har du hämtat data från en subgraf! -The Graph plays a crucial role in making blockchain data more accessible and enabling a marketplace for its exchange. +The Graph spelar en avgörande roll för att göra blockkedjedata mer tillgängligt och möjliggöra en marknad för dess utbyte. -## The Roles of Network Participants +## Nätverksdeltagarnas Roller -There are four primary network participants: +Det finns fyra primära nätverksdeltagare: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegater - Delegera GRT till Indexers & säkra nätverket -2. Curators - Find the best subgraphs for Indexers +2. Kuratorer - Hitta de bästa subgraferna för Indexers -3. Developers - Build & query subgraphs +3. Utvecklare - Bygg & fråga subgrafer -4. Indexers - Backbone of blockchain data +4. Indexers - Grundvalen för blockkedjedata -Fishermen and Arbitrators are also integral to the network’s success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +Fiskare och Skiljedomare är också integrerade för nätverkets framgång genom andra bidrag, och de stöder arbetet för de andra primära deltagarrollerna. För mer information om nätverksroller, [läs den här artikeln](https://thegraph.com/blog/the-graph-grt-token-economics/). ![Tokenomics diagram](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## Delegater (Passivt tjäna GRT) -Indexers are delegated GRT by Delegators increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +Indexers delegeras GRT av Delegater, vilket ökar Indexerns insats i subgrafer på nätverket. I gengäld tjänar Delegater en procentandel av alla frågeavgifter och indexbelöningar från Indexern. Varje Indexer ställer in den andel som kommer att belönas till Delegater oberoende, vilket skapar konkurrens mellan Indexers för att locka Delegater. De flesta Indexers erbjuder mellan 9-12% årligen. -For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1500 GRT in rewards annually.
+Till exempel, om en Delegat skulle delegera 15k GRT till en Indexer som erbjuder 10%, skulle Delegaten få ~1500 GRT i belöningar årligen. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +Det finns en delegatskatt på 0,5% som bränns när en Delegat delegerar GRT på nätverket. Om en Delegat väljer att dra tillbaka sitt delegerade GRT måste Delegaten vänta på en 28-epochers obundenhetsperiod. Varje epoch är 6,646 block, vilket innebär att 28 epocher motsvarar ungefär 26 dagar. -If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. +Om du läser detta kan du bli en Delegat just nu genom att gå till [nätverksdeltagar-sidan](https://thegraph.com/explorer/participants/indexers) och delegera GRT till en Indexer av ditt val. -## Curators (Earn GRT) +## Kuratorer (Tjäna GRT) -Curators identify high-quality subgraphs, and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Kuratorer identifierar högkvalitativa subgrafer och "kuraterar" dem (dvs. signalerar GRT på dem) för att tjäna andelar av kurering, vilket garanterar en procentandel av alla framtida frågeavgifter som genereras av subgrafen. Även om vilken oberoende nätverksdeltagare som helst kan vara en Kurator är det vanligtvis subgrafsutvecklare som först blir Kuratorer för sina egna subgrafer eftersom de vill se till att deras subgraf indexeras. -As of December 2022, subgraph developers are encouraged to curate their subgraph with at least 10,000 GRT. However, this number may be impacted by network activity and community participation. +Från och med december 2022 uppmuntras subgrafsutvecklare att kurera sin subgraf med minst 10 000 GRT. Men detta nummer kan påverkas av nätverksaktivitet och samhällsdeltagande. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Kuratorer betalar en kuratorskatt på 1% när de kurerar en ny subgraf. Denna kuratorskatt bränns och minskar tillgången på GRT. -## Developers +## Utvecklare -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Utvecklare bygger och frågar subgrafer för att hämta blockkedjedata. Eftersom subgrafer är öppen källkod kan utvecklare fråga befintliga subgrafer för att ladda blockkedjedata i sina dappar. Utvecklare betalar för de frågor de gör med GRT, som distribueras till nätverksdeltagare. -### Creating a subgraph +### Skapa en subgraf -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers.
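En liten räkneskiss i TypeScript av delegeringsexemplet och obundenhetsperioden som beskrivs ovan. Blocktiden på ~12 sekunder är ett antagande, och siffrorna är endast illustrativa:

```typescript
// Illustrativ räkneskiss -- inga nätverksanrop, bara aritmetik
const delegated = 15_000;    // GRT som delegeras
const annualRate = 0.1;      // 10 % årlig avkastning enligt exemplet ovan
const delegationTax = 0.005; // 0,5 % bränns vid delegering

const burned = delegated * delegationTax;     // 75 GRT bränns
const stakedAfterTax = delegated - burned;    // 14 925 GRT delegeras faktiskt
const yearlyRewards = delegated * annualRate; // ~1 500 GRT per år enligt exemplet

// Obundenhetsperiod: 28 epocher à 6 646 block, ~12 s per block (antagande)
const unbondingBlocks = 28 * 6_646;                     // 186 088 block
const unbondingDays = (unbondingBlocks * 12) / 86_400;  // ≈ 25,8 dagar

console.log({ burned, stakedAfterTax, yearlyRewards, unbondingBlocks, unbondingDays });
```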
+Utvecklare kan [skapa en subgraf](/developing/creating-a-subgraph/) för att indexera data på blockkedjan. Subgrafer är instruktioner för Indexers om vilka data som ska serveras till konsumenter. -Once developers have built and tested their subgraph, they can [publish their subgraph](/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +När utvecklare har byggt och testat sin subgraf kan de [publicera sin subgraf](/publishing/publishing-a-subgraph/) på The Graphs decentraliserade nätverk. -### Querying an existing subgraph +### Fråga en befintlig subgraf -Once a subgraph is [published](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +När en subgraf är [publicerad](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) på The Graphs decentraliserade nätverk kan vem som helst skapa en API-nyckel, lägga till GRT i sin faktureringsbalans och fråga subgrafen. -Subgraphs are [queried using GraphQL](/querying/querying-the-graph/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. +Subgrafer [frågas med GraphQL](/querying/querying-the-graph/) och frågeavgifterna betalas med GRT i [Subgraph Studio](https://thegraph.com/studio/). Frågeavgifterna distribueras till nätverksdeltagare baserat på deras bidrag till protokollet. -1% of the query fees paid to the network are burned. +1% av frågeavgifterna som betalas till nätverket bränns. -## Indexers (Earn GRT) +## Indexers (Tjäna GRT) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +Indexers är grundvalen för The Graph. De driver oberoende maskinvara och programvara som driver The Graphs decentraliserade nätverk. Indexers serverar data till konsumenter baserat på instruktioner från subgrafer. -Indexers can earn GRT rewards in two ways: +Indexers kan tjäna GRT-belöningar på två sätt: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Frågeavgifter: GRT som betalas av utvecklare eller användare för frågor om subgrafsdata. Frågeavgifter distribueras direkt till Indexers enligt den exponentiella rabattfunktionen (se GIP [här](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). -2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. +2. Indexeringsbelöningar: det årliga utfärdandet på 3% distribueras till Indexers baserat på antalet subgrafer de indexerar. Dessa belöningar stimulerar Indexers att indexera subgrafer, ibland innan frågeavgifterna börjar, för att ackumulera och skicka in indexbevis (POIs) som verifierar att de har indexerat data korrekt. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph.
+Varje subgraf tilldelas en del av nätverkets totala tokenutfärdande, baserat på subgrafens kureringssignal. Den mängden delas sedan ut till Indexers baserat på deras allokerade insats på subgrafen. -In order to run an indexing node, Indexers must stake 100,000 GRT or more with the network. Indexers are incentivized to stake GRT in proportion to the amount of queries they serve. +För att köra en indexeringsnod måste Indexers satsa 100 000 GRT eller mer med nätverket. Indexers stimuleras att satsa GRT i proportion till antalet frågor de utför. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial stake), they will not be able to use the additional GRT from Delegators until they increase their stake in the network. +Indexers kan öka sina GRT-tilldelningar på subgrafer genom att acceptera GRT-delegering från Delegater, och de kan acceptera upp till 16 gånger sin ursprungliga insats. Om en Indexer blir "överdelegerad" (dvs. mer än 16 gånger sin ursprungliga insats) kommer de inte att kunna använda det extra GRT från Delegater tills de ökar sin insats i nätverket. -The amount of rewards an Indexer receives can vary based on the initial stake, accepted delegation, quality of service, and many more factors. The following chart is publicly available data from an active Indexer on The Graph's decentralized network. +Mängden belöningar som en Indexer får kan variera beroende på ursprunglig insats, accepterad delegering, kvalitet på tjänst och många andra faktorer. Följande diagram är offentlig tillgänglig data från en aktiv Indexer på The Graphs decentraliserade nätverk. -### The Indexer stake & reward of allnodes-com.eth +### Indexerns insats & belöning för allnodes-com.eth -![Indexing stake and rewards](/img/indexing-stake-and-income.png) +![Indexeringsinsats och belöning](/img/indexing-stake-and-income.png) -This data is from February 2021 to September 2022. +Denna data är från februari 2021 till september 2022. -> Please note, this will improve when the [Arbitrum migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) is complete, making gas costs a significantly lower burden for participating on the network. +> Observera att detta kommer att förbättras när [Arbitrum-migrationen](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) är klar och gör gasavgifter till en betydligt lägre börda för deltagande i nätverket. -## Token Supply: Burning & Issuance +## Tokensupply: Bränning och Utfärdande -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +Den ursprungliga tokensupplyn är 10 miljarder GRT, med en målsättning om 3% nytt utfärdande årligen för att belöna Indexers för att allokera insats på subgrafer. Detta innebär att den totala tokensupplyn för GRT kommer att öka med 3% varje år när nya token utfärdas till Indexers för deras bidrag till nätverket. -The Graph is designed with multiple burning mechanisms to offset new token issuance.
Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +The Graph är utformat med flera brännmekanismer för att motverka nytt tokenutfärdande. Ungefär 1% av GRT-tillgången bränns årligen genom olika aktiviteter på nätverket, och detta nummer har ökat när nätverksaktiviteten fortsätter att växa. Dessa brännaktiviteter inkluderar en 0,5% delegatsskatt när en Delegat delegerar GRT till en Indexer, en 1% kuratorskatt när Kuratorer signalerar på en subgraf, och 1% av frågeavgifterna för blockkedjedata. -![Total burned GRT](/img/total-burned-grt.jpeg) +![Totalt brända GRT](/img/total-burned-grt.jpeg) -In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. +Utöver dessa regelbundet förekommande brännaktiviteter har GRT-tokens också en mekanism för att straffa skadligt eller ansvarslöst beteende av Indexers. Om en Indexer straffas bränns 50% av deras indexeringsbelöningar för epoken (medan den andra halvan går till fiskaren), och deras självinsats straffas med 2,5%, varav hälften bränns. Detta hjälper till att säkerställa att Indexers har ett starkt incitament att agera i nätverkets bästa intresse och bidra till dess säkerhet och stabilitet. -## Improving the Protocol +## Förbättring av Protokollet -The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). +The Graph Network utvecklas ständigt och förbättringar av protokollets ekonomiska design görs kontinuerligt för att ge den bästa upplevelsen för alla nätverksdeltagare. The Graph-rådet övervakar protokolländringar och samhällsmedlemmar uppmanas att delta. Delta i förbättringar av protokollet i [The Graph Forum](https://forum.thegraph.com/). diff --git a/website/pages/tr/about.mdx b/website/pages/tr/about.mdx index c1f7c886900f..d57cfcd72ca8 100644 --- a/website/pages/tr/about.mdx +++ b/website/pages/tr/about.mdx @@ -1,47 +1,47 @@ --- -title: About The Graph +title: Graph Hakkında --- -This page will explain what The Graph is and how you can get started. +Bu sayfa Graph'in ne olduğunu ve nasıl başlayabileceğinizi açıklayacaktır. -## What is The Graph? +## Graph Nedir? -The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. +Graph, blockchain verilerini indekslemek ve sorgulamak için merkeziyetsiz bir protokoldür. Graph, doğrudan sorgulanması zor olan verileri sorgulamayı mümkün kılar.
-Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. +[Uniswap](https://uniswap.org/) gibi karmaşık akıllı sözleşmelere sahip projeler ve [Bored Ape Yacht Club](https://boredapeyachtclub.com/) gibi NFT girişimleri, verileri Ethereum blok zincirinde depolayarak, doğrudan blok zincirinden temel veriler dışındaki herhangi bir şeyi okumayı gerçekten zorlaştırır. -In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. +Bored Ape Yacht Club örneğinde olduğu gibi, belirli bir Ape'nin sahibini bulmak, kimliğine veya toplam arza dayalı olarak bir Ape'in içerik URI'sini almak gibi temel okuma işlemlerini [kontrat adresi](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) üzerinde gerçekleştirebiliriz, ancak toplama, arama, ilişkiler ve önemli filtreleme gibi daha gelişmiş gerçek dünya sorguları ve işlemleri mümkün değildir. Örneğin, belirli bir adrese ait Ape'leri sorgulamak ve bunları özelliklerinden birine göre filtrelemek istersek, bu bilgiyi doğrudan sözleşmenin kendisiyle etkileşime girerek elde edemeyiz. -To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. +Bu verileri elde etmek için şimdiye kadar yayınlanan her bir [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) etkinliği işlemeniz, Token ID ve IPFS hash kullanarak IPFS'den gelen meta verileri okumanız ve ardından bunları aggregate etmeniz gerekir. Bu tür nispeten basit sorular için bile, bir tarayıcıda çalışan merkeziyetsiz bir uygulamanın (dApp) yanıt alması **saatler hatta günler** sürerdi. -You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. +Ayrıca, verileri sorgulamak için kendi sunucunuzu oluşturabilir, işlemleri orada işleyebilir, bunları bir veritabanına kaydedebilir ve tüm bunların üzerine bir API uç noktası oluşturabilirsiniz. Ancak bu seçenek [yoğun kaynak gerektirir](/network/benefits/) ve bakım gerektirir, tek bir hata noktası sunar ve merkeziyetsizlik için gerekli olan önemli güvenlik özelliklerini bozar.
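Yukarıda anlatılan "her `transfer` olayını tek tek işleyip toplama" yaklaşımının neden bu kadar yavaş olduğunu gösteren, varsayımsal bir TypeScript taslağı. RPC URL'si örnek amaçlıdır ve ethers v6 varsayılmıştır; çoğu RPC sağlayıcısı zaten bu kadar geniş bir blok aralığını tek sorguda kabul etmez:

```typescript
import { ethers } from "ethers";

// Varsayımsal RPC uç noktası (örnek amaçlı) ve BAYC sözleşme adresi
const provider = new ethers.JsonRpcProvider("https://eth.example-rpc.com");
const BAYC = "0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D";
const abi = [
  "event Transfer(address indexed from, address indexed to, uint256 indexed tokenId)",
];

async function tokensOwnedBy(address: string): Promise<bigint[]> {
  const contract = new ethers.Contract(BAYC, abi, provider);

  // Tüm Transfer olaylarını baştan sona taramak gerekir;
  // bu, milyonlarca bloğu kapsar ve saatler sürebilir.
  const logs = await contract.queryFilter(contract.filters.Transfer(), 0, "latest");

  const ownerByToken = new Map<bigint, string>();
  for (const log of logs) {
    const { to, tokenId } = (log as ethers.EventLog).args;
    ownerByToken.set(tokenId, to); // son transfer, güncel sahibi belirler
  }

  return [...ownerByToken.entries()]
    .filter(([, owner]) => owner.toLowerCase() === address.toLowerCase())
    .map(([tokenId]) => tokenId);
}
```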
-**Indexing blockchain data is really, really hard.** +**Blockchain verilerini indekslemek gerçekten çok zordur.** -Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. +Kesinlik, zincir yeniden düzenlemeleri veya temizlenmemiş bloklar gibi blok zinciri özellikleri, bu süreci daha da karmaşık hale getirir. Ayrıca bu sadece zaman alan bir şey değil, aynı zamanda blok zinciri verilerinden doğru sorgu sonuçlarının alınmasını kavramsal olarak zorlaştırır. -The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). +Graph, bu sorunu blok zinciri verilerini indeksleyen ve bu verilerin performanslı ve verimli bir şekilde sorgulanmasını sağlayan merkeziyetsiz bir protokolle çözer. Bu API'ler (indekslenmiş "subgraph'ler") daha sonra standart bir GraphQL API ile sorgulanabilir. Bugün, barındırılan bir hizmetin yanı sıra aynı yeteneklere sahip merkeziyetsiz bir protokol var. Her ikisi de [Graph Node](https://github.com/graphprotocol/graph-node)'un açık kaynak uygulaması tarafından desteklenmektedir. -## How The Graph Works +## Graph Nasıl Çalışır -The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. +Graph, subgraph bildirimi olarak bilinen subgraph açıklamalarına dayalı olarak Ethereum verilerinde neyin ve nasıl indeksleneceğini öğrenir. Subgraph açıklaması, bir subgraph için ilgili akıllı sözleşmeleri, bu sözleşmelerde dikkat edilmesi gereken olayları ve olay verilerinin Graph'in veritabanında depolayacağı verilerle nasıl eşleneceğini tanımlar. -Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. +Bir `subgraph bildirimi` yazdıktan sonra, tanımı IPFS'de depolamak için Graph CLI'yi kullanırsınız ve indeksleyiciye bu subgraph için verileri indekslemeye başlamasını söylersiniz. -This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: +Bu şema, Ethereum işlemleriyle ilgili bir subgraph bildirimi dağıtıldıktan sonra veri akışı hakkında daha fazla ayrıntı verir: -![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) +![Graph'in veri tüketicilerine sorgular sunmak için Graph Node'unu nasıl kullandığını açıklayan bir grafik](/img/graph-dataflow.png) -The flow follows these steps: +İşleyiş şu şekildedir: -1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided.
The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. -5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. +1. Merkeziyetsiz uygulama, akıllı sözleşmedeki bir işlem aracılığıyla Ethereum'a veri ekler. +2. Akıllı sözleşme, işlemi işlerken bir veya daha fazla olay yayar. +3. Graph Node, Ethereum'u yeni bloklar ve veriler için subgraph'iniz adına sürekli olarak tarar. +4. Graph Node, bu bloklardaki subgraph'iniz için Ethereum etkinliklerini bulur ve sağladığınız eşleme işleyicilerini çalıştırır. Eşleme, Graph Node'un Ethereum etkinliklerine yanıt olarak depoladığı veri varlıklarını oluşturan veya güncelleyen bir WASM modülüdür. +5. Merkeziyetsiz uygulama, node'un [GraphQL uç noktası](https://graphql.org/learn/)'nı kullanarak, blok zincirinden indekslenen veriler için Graph Node'unu sorgular. Graph Node'u da bu verileri getirmek için, deposunun indeksleme yeteneklerinden yararlanarak, GraphQL sorgularını temel veri deposuna yönelik sorgulara çevirir. Merkeziyetsiz uygulama, bu verileri son kullanıcılar için Ethereum'da yeni işlemler yapmak adına kullandıkları zengin bir kullanıcı arayüzünde görüntüler. Döngü tekrar eder. -## Next Steps +## Sonraki Adımlar -In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. +Aşağıdaki bölümlerde, subgraph'lerin nasıl tanımlanacağı, bunların nasıl deploy edileceği ve Graph Node'un oluşturduğu dizinlerden verilerin nasıl sorgulanacağı hakkında daha fazla ayrıntıya giriş yapacağız. -Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. +Kendi subgraph'inizi yazmaya başlamadan önce, Graph Gezgini'ne bir göz atmak ve hâlihazırda inşa edilmiş olan bazı subgraph'leri keşfetmek isteyebilirsiniz. Her subgraph'in sayfası, o subgraph'in verilerini GraphQL ile sorgulamanıza izin veren bir oyun alanı içerir. diff --git a/website/pages/tr/arbitrum/arbitrum-faq.mdx b/website/pages/tr/arbitrum/arbitrum-faq.mdx index b57ff11459a4..c6eedf606901 100644 --- a/website/pages/tr/arbitrum/arbitrum-faq.mdx +++ b/website/pages/tr/arbitrum/arbitrum-faq.mdx @@ -2,60 +2,60 @@ title: Arbitrum SSS --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Arbitrum Faturalama SSS bölümüne geçmek istiyorsanız [buraya](#billing-on-arbitrum-faqs) tıklayın. -## The Graph neden bir L2 Çözümü uyguluyor? +## Graph neden bir Katman2 Çözümü uyguluyor?
-L2 üzerinde The Graph'ı ölçeklendirdiğinde, ağ katılımcıları şunları bekleyebilir: +Katman2'de Graph'ı ölçeklendirerek, ağ katılımcıları şunları bekleyebilir: -- Upwards of 26x savings on gas fees +- Gas ücretlerinde 26 kata kadar tasarruf - Daha hızlı işlemler -- Security inherited from Ethereum +- Ethereum'dan aktarılmış güvenlik -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Protokol akıllı sözleşmelerinin Katman2'ye ölçeklendirilmesi, ağ katılımcılarının gas ücretlerinde daha düşük bir maliyetle daha sık etkileşime girmesine olanak tanır. Örneğin, İndeksleyiciler daha fazla sayıda subgraph'ı daha sık indekslemek için tahsisleri açıp kapatabilir, geliştiriciler subgraphları daha kolay bir şekilde dağıtabilir ve güncelleyebilir, Delegatörler GRT'yi daha sık bir şekilde delege edebilir ve Küratörler daha önce gas nedeniyle sık sık gerçekleştirilemeyecek kadar maliyetli olduğu düşünülen, daha fazla sayıda subgraph'a sinyal ekleyebilir veya çıkarabilir. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +Graph topluluğu, geçen yıl [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) tartışmasının sonucuna göre Arbitrum ile çalışmaya karar verdi. -## The Graph'ı L2'de kullanmak için ne yapmam gerekiyor? +## Graph'ı Katman2'de kullanmak için ne yapmam gerekiyor? Kullanıcılar, aşağıdaki yöntemlerden birini kullanarak GRT ve ETH'lerini köprüler: -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Arbitrum'daki Graph Köprüsü](https://bridge.arbitrum.io/?l2ChainId=42161) - [TransferTo](https://transferto.xyz/swap) - [Connext Bridge](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -The Graph'ı L2'de kullanmanın avantajlarından yararlanmak için, zincirler arasında geçiş yapma noktasında bu açılır anahtarı kullanın. +Graph'ı Katman2'de kullanmanın avantajlarından yararlanmak için, zincirler arasında geçiş yapma noktasında bu açılır anahtarı kullanın. ![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) ## Bir subgraph geliştiricisi, veri tüketicisi, indeksleyici, küratör veya delegatör olarak şimdi ne yapmam gerekiyor? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Hemen yapılması gereken bir eylem yok, ancak ağ katılımcılarına Katman2'nin faydalarından yararlanmaları için Arbitrum'a geçmeye başlamaları önerilir. -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Çekirdek geliştirici ekipleri, delegasyon, kürasyon ve subgraphları Arbitrum'a taşımayı önemli ölçüde kolaylaştıracak Katman2 transfer araçları oluşturmak için çalışıyor. 
Ağ katılımcıları, Katman2 aktarım araçlarının 2023 yazına kadar kullanıma sunulmasını bekleyebilirler. 10 Nisan 2023 itibarıyla, tüm endeksleme ödüllerinin %5'i Arbitrum'da üretilmektedir. Ağ katılımı arttıkça ve Konsey onayladıkça, endeksleme ödülleri Ethereum'dan Arbitrum'a doğru yavaşça kayacaktır ve nihayetinde tamamen Arbitrum'a geçecektir. -## L2'deki ağa katılmak istersem ne yapmalıyım? +## Katman2'deki ağa katılmak istersem ne yapmalıyım? -Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Lütfen Katman2'deki [ağı test etmeye](https://testnet.thegraph.com/explorer) yardımcı olun ve deneyiminizle ilgili [Discord](https://discord.gg/graphprotocol)'da geri bildirimde bulunun. -## Ağı L2'ye ölçeklendirmekle ilgili herhangi bir risk var mı? +## Ağı Katman2'ye ölçeklendirmekle ilgili herhangi bir risk var mı? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Tüm akıllı sözleşmeler kapsamlı bir şekilde [denetlendi](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Güvenli ve sorunsuz bir geçiş sağlamak için her şey kapsamlı bir şekilde test edilmiş ve bir acil durum planı hazırlanmıştır. Ayrıntıları [burada](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20) bulabilirsiniz. ## Ethereum'daki mevcut subgraph'lar çalışmaya devam edecek mi? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Evet, The Graph Ağı sözleşmeleri, daha sonraki bir tarihte tamamen Arbitrum'a taşınana kadar hem Ethereum hem de Arbitrum üzerinde paralel olarak çalışacaktır. ## GRT'nin Arbitrum'da dağıtılan yeni bir akıllı sözleşmesi olacak mı? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Evet, GRT'nin Arbitrum üzerinde ek bir [akıllı sözleşmesi](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) bulunmaktadır. Ancak, Ethereum ana ağında bulunan [GRT sözleşmesi](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) çalışmaya devam edecektir. ## Arbitrum'da Faturalandırma SSS'leri @@ -65,14 +65,14 @@ Hiçbir şey! GRT'niz güvenli bir şekilde Arbitrum'a taşındı ve siz bunu ok ## Varlıklarımın güvenli bir şekilde Arbitrum'a taşındığını nasıl bilebilirim? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Tüm GRT faturalandırma bakiyeleri Arbitrum'a başarıyla taşınmıştır. Arbitrum'daki faturalandırma sözleşmesini [buradan](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a) görüntüleyebilirsiniz. 
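For readers who would rather verify the migration described above programmatically instead of through Arbiscan, the following is a minimal, illustrative sketch of reading a wallet's GRT balance on Arbitrum One. It assumes ethers v6 and a placeholder RPC URL; the token address is the Arbitrum GRT contract linked in the answers above, and the wallet address is purely hypothetical.

```typescript
import { Contract, JsonRpcProvider, formatUnits } from "ethers";

// Placeholder Arbitrum One RPC endpoint -- substitute a provider you trust.
const provider = new JsonRpcProvider("https://arbitrum-one.example/rpc");

// GRT token contract on Arbitrum One, as linked in the answers above; minimal ERC-20 ABI.
const GRT_ON_ARBITRUM = "0x9623063377ad1b27544c965ccd7342f7ea7e88c7";
const erc20Abi = [
  "function balanceOf(address owner) view returns (uint256)",
  "function decimals() view returns (uint8)",
];
const grt = new Contract(GRT_ON_ARBITRUM, erc20Abi, provider);

// Print a wallet's GRT balance on Arbitrum One in whole-token units.
async function printGrtBalance(wallet: string): Promise<void> {
  const [raw, decimals] = await Promise.all([grt.balanceOf(wallet), grt.decimals()]);
  console.log(`${formatUnits(raw, decimals)} GRT`);
}

// Hypothetical wallet address, used only for illustration.
printGrtBalance("0x0000000000000000000000000000000000000001");
```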
## Arbitrum köprüsünün güvenli olduğunu nasıl bilebilirim? -The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Köprü, tüm kullanıcılar için emniyet ve güvenliği sağlamak amacıyla [kapsamlı bir şekilde denetlenmiştir](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest). ## Ethereum mainnet cüzdanımdan yeni GRT ekliyorsam ne yapmam gerekir? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +GRT'yi Arbitrum faturalandırma bakiyenize eklemek [Subgraph Stüdyo'da](https://thegraph.com/studio/) tek bir tık ile yapılabilir. GRT'nizi Arbitrum'a kolayca köprüleyebilecek ve API anahtarlarınızı tek bir işlemle doldurabileceksiniz. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +GRT ekleme, çekme veya alma hakkında daha ayrıntılı talimatlar için [Faturalandırma sayfasını](https://thegraph.com/docs/en/billing/) ziyaret edin. diff --git a/website/pages/tr/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/tr/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..b6975b53dc78 100644 --- a/website/pages/tr/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/tr/arbitrum/l2-transfer-tools-faq.mdx @@ -1,315 +1,411 @@ --- -title: L2 Transfer Tools FAQ +title: Katman2 Transfer Araçları SSS --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## Genel -## What are L2 Transfer Tools? +### Katman2 Transfer Araçları Nedir? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +Graph, protokolü Arbitrum One'a dağıtarak katkıda bulunanların ağa katılımını 26 kat daha ucuz hale getirdi. Katman2 Transfer Araçları, çekirdek geliştiriciler tarafından Katman2'ye geçişi kolaylaştırmak için oluşturuldu. -## Can I use the same wallet I use on Ethereum mainnet? +Her ağ katılımcısı için, Katman2'ye geçişteki deneyimi sorunsuz hale getirmek, serbest kalma sürelerinden kaçınmak veya GRT'yi manuel olarak geri çekmek ve köprülemek zorunda kalmamak için bir dizi Katman2 Transfer Aracı mevcuttur. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Bu araçlar, Graph içindeki rolünüzün ne olduğuna ve Katman2'ye ne transfer ettiğinize bağlı olarak belirli bir dizi adımı izlemenizi gerektirecektir. 
-## Subgraph Transfer +### Ethereum ana ağında kullandığım aynı cüzdanı kullanabilir miyim? -## How do I transfer my subgraph? +Eğer bir [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) (Harici Olarak Sahip Olunan Hesap) cüzdanı kullanıyorsanız aynı adresi kullanabilirsiniz. Ethereum ana ağ cüzdanınız bir sözleşme ise (örneğin bir çoklu imza), transferinizin gönderileceği bir [Arbitrum cüzdan adresi](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) belirtmelisiniz. Yanlış bir adrese yapılan transferler kalıcı kayıplara neden olabileceğinden lütfen adresi dikkatlice kontrol edin. Katman2'de bir çoklu imza cüzdanı kullanmak istiyorsanız, Arbitrum One'da bir çoklu imza sözleşmesi kurduğunuzdan emin olun. -To transfer your subgraph, you will need to complete the following steps: +Ethereum ve Arbitrum gibi EVM blok zincirlerindeki cüzdanlar, blok zinciri ile etkileşime girmenize gerek kalmadan oluşturduğunuz bir çift anahtardır (genel ve özel). Dolayısıyla, Ethereum için oluşturulan herhangi bir cüzdan, başka bir işlem yapmanıza gerek kalmadan Arbitrum üzerinde de çalışacaktır. -1. Initiate the transfer on Ethereum mainnet +Bunun istisnası çoklu imza gibi akıllı sözleşme cüzdanlarıdır. Bunlar her bir zincire ayrı ayrı dağıtılan ve dağıtıldıklarında adreslerini alan akıllı sözleşmelerdir. Bir çoklu imza Ethereum'a dağıtılmışsa, Arbitrum'da aynı adresle var olmayacaktır. Yeni bir çoklu imza ilk olarak Arbitrum'da oluşturulmalıdır ve farklı bir adres alabilir. -2. Wait 20 minutes for confirmation +### Transferimi 7 gün içinde tamamlayamazsam ne olur? -3. Confirm subgraph transfer on Arbitrum\* +L2 Transfer Araçları, Katman1'den Katman2'ye mesaj göndermek için Arbitrum'un yerel mekanizmasını kullanır. Bu mekanizma "yeniden denenebilir bilet" olarak adlandırılır ve Arbitrum GRT köprüsü de dahil olmak üzere tüm yerel token köprüleri tarafından kullanılır. Tekrar denenebilir biletler hakkında daha fazla bilgiyi [Arbitrum dökümantasyonunda](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) okuyabilirsiniz. -4. Finish publishing subgraph on Arbitrum +Varlıklarınızı (subgraph, stake, delegasyon veya kürasyon) Katman2'ye aktardığınızda, Katman2'de yeniden denenebilir bir bilet oluşturan Arbitrum GRT köprüsü aracılığıyla bir mesaj gönderilir. Transfer aracı, işlemde 1) bileti oluşturmak için ödeme yapmak ve 2) bileti Katman2'de yürütmek üzere gas için ödeme yapmak amacıyla kullanılan bir miktar ETH içerir. Ancak, bilet Katman2'de yürütülmeye hazır olana kadar geçen sürede gas fiyatları değişebileceğinden ötürü, bu otomatik yürütme girişiminin başarısız olma ihtimali vardır. Bu durumda, Arbitrum köprüsü yeniden denenebilir bileti 7 güne kadar kullanılabilir tutacaktır ve herkes bileti "kullanmayı" yeniden deneyebilir (bunun için Arbitrum'a köprülenmiş bir miktar ETH'ye sahip bir cüzdan gereklidir). -5. Update Query URL (recommended) +Bu, tüm transfer araçlarında "Onayla" adımı olarak adlandırdığımız adımdır - otomatik yürütme çoğu zaman başarılı olduğu için çoğu durumda otomatik olarak çalışacaktır, ancak başarılı bir şekilde gerçekleştiğinden emin olmak için tekrar kontrol etmeniz önemlidir. Başarılı olmazsa ve 7 gün içinde başarılı bir yeniden deneme gerçekleşmezse, Arbitrum köprüsü bileti iptal edecek ve varlıklarınız (subgraph, stake, delegasyon veya kürasyon) kaybolacak ve kurtarılamayacaktır.
Graph çekirdek geliştiricileri bu durumları tespit etmek ve çok geç olmadan biletleri kurtarmaya çalışmak için bir izleme sistemine sahiptir, ancak transferinizin zamanında tamamlanmasını sağlamak nihayetinde sizin sorumluluğunuzdadır. İşleminizi onaylamakta sorun yaşıyorsanız, lütfen [bu formu](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) kullanarak bize ulaşın; çekirdek geliştiriciler size yardımcı olacaktır. -\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Delegasyon/stake/kürasyon transferimi başlattım ve Katman2'ye ulaşıp ulaşmadığından emin değilim, doğru şekilde transfer edilip edilmediğini nasıl teyit edebilirim? -## Where should I initiate my transfer from? +Profilinizde transferi tamamlamanızı isteyen bir banner görmüyorsanız, muhtemelen işlem güvenli bir şekilde Katman2'ye ulaşmıştır ve başka bir işlem yapmanız gerekmiyordur. Herhangi bir şüpheniz varsa, Gezgin'in Arbitrum One'da delegasyonunuzu, stake'inizi veya kürasyonunuzu gösterip göstermediğini kontrol edebilirsiniz. -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +Katman1 işlem hash'ına sahipseniz (cüzdanınızdaki son işlemlere bakarak bulabilirsiniz), mesajı Katman2'ye taşıyan "yeniden denenebilir biletin" burada kullanılıp kullanılmadığını da doğrulayabilirsiniz: https://retryable-dashboard.arbitrum.io/.Otomatik kurtarma başarısız olduysa, cüzdanınızı oraya bağlayabilir ve kullanabilirsiniz. Çekirdek geliştiricilerin de takılan mesajları izlediğinden ve süresi dolmadan önce bunları kurtarmaya çalışacağından emin olabilirsiniz. -## How long do I need to wait until my subgraph is transferred +## Subgraph Transferi -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +### Subgraph'ımı nasıl transfer edebilirim? -## Will my subgraph still be discoverable after I transfer it to L2? + -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Subgraph'ınızı transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: -## Does my subgraph need to be published to transfer it? +1. Ethereum ana ağında transferi başlatın -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. 
If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +2. Onaylanması için 20 dakika bekleyin -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +3. Arbitrum\* üzerinde subgraph transferini onaylayın -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +4. Arbitrum üzerinde subgraph'ı yayınlamayı bitirin -## After I transfer, do I also need to re-publish on Arbitrum? +5. Sorgu URL'sini Güncelle (önerilir) -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +\*Transferi 7 gün içinde onaylamanız gerektiğini unutmayın, aksi takdirde subgraph'ınız kaybolabilir. Çoğunlukla, bu adım otomatik olarak çalışacaktır, ancak Arbitrum'da gas fiyatlarında bir artış varsa manuel bir onay gerekebilir. Bu süreç sırasında herhangi bir sorun yaşanırsa, yardımcı olacak kaynaklar olacaktır: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden destek ile iletişime geçin. -## Will there be a down-time to my endpoint while re-publishing? +### Transferimi nereden başlatmalıyım? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Transferinizi [Subgraph Stüdyo](https://thegraph.com/studio/), [Gezgin](https://thegraph.com/explorer) veya herhangi bir subgraph ayrıntıları sayfasından başlatabilirsiniz. Transferi başlatmak için subgraph ayrıntıları sayfasındaki "Subgraph Transfer" butonuna tıklayın. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Subgraph'ım transfer edilene kadar ne kadar beklemem gerekir? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Transfer süresi yaklaşık 20 dakika alır. Arbitrum köprüsü, köprü transferini otomatik olarak tamamlamak için arka planda çalışmaktadır. Bazı durumlarda gaz maliyetleri artabilir ve işlemi tekrar onaylamanız gerekebilir. -## Will my subgraph's curation move with my subgraph? +### Katman2'ye transfer ettikten sonra subgraph'ım hala keşfedilebilir olacak mı? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Subgraph'ınız yalnızca yayınlandığı ağda keşfedilebilir olacaktır. Örneğin, subgraph'ınız Arbitrum One üzerindeyse, onu yalnızca Arbitrum One üzerindeki Gezgin'de bulabilirsiniz, Ethereum'da aradığınızda bulamazsınız. 
Doğru ağda olduğunuzdan emin olmak için lütfen sayfanın üst kısmındaki ağ değiştiricisinde Arbitrum One'ın seçili olduğundan emin olun. Transferden sonra, Katman1 subgraph'ı kullanımdan kaldırılmış olarak görünecektir. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +### Transfer etmek için subgraph'ımın yayınlanmış olması gerekiyor mu? -## Can I move my subgraph back to Ethereum mainnet after I transfer? +Subgraph transfer aracından yararlanmak için, subgraph'ınızın Ethereum ana ağı'nda yayınlanmış olması ve subgraph'ın sahibi olan cüzdanın, belirli miktarda kürasyon sinyaline sahip olması gerekmektedir. Eğer subgraph'ınız yayınlanmamışsa, doğrudan Arbitrum One'da yayınlamanız önerilir; böylece ilgili gas ücretleri önemli ölçüde daha düşük olacaktır. Yayınlanmış bir subgraph'ı transfer etmek istiyorsanız, ancak sahip hesap üzerinde herhangi bir sinyal kürasyonu yapılmamışsa, bu hesaptan küçük bir miktar (örneğin 1 GRT) sinyal verebilirsiniz; "otomatik geçiş" sinyalini seçtiğinizden emin olun. -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +### Arbitrum'a transfer olduktan sonra subgraph'ımın Ethereum ana ağ versiyonuna ne olur? -## Why do I need bridged ETH to complete my transfer? +Subgraph'ınızı Arbitrum'a transfer ettikten sonra, Ethereum ana ağ versiyonu kullanımdan kaldırılacaktır. Sorgu URL'nizi 48 saat içinde güncellemenizi öneririz. Bununla birlikte, herhangi bir üçüncü taraf merkeziyetsiz uygulama desteğinin güncellenebilmesi için ana ağ URL'nizin çalışmasını sağlayan bir ödemesiz dönem vardır. -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +### Transferi tamamladıktan sonra Arbitrum'da da yeniden yayınlamam gerekiyor mu? -## Curation Signal +20 dakikalık transfer aralığından sonra, transferi tamamlamak için kullanıcı arayüzünde bir işlemle transferi onaylamanız gerekecektir ve transfer aracı size bu konuda rehberlik edecektir. Katman1 uç noktanız transfer aralığı ve sonrasındaki ödemesiz dönem boyunca desteklenmeye devam edecektir. Uç noktanızı sizin için uygun olduğunda güncellemeniz önerilir. -## How do I transfer my curation? +### Yeniden yayınlama sırasında uç noktam kesinti yaşar mı? -To transfer your curation, you will need to complete the following steps: +Olası değildir, fakat Katman1'de hangi İndeksleyicilerin subgraph'ı desteklediğine ve subgraph Katman2'de tam olarak desteklenene kadar indekslemeye devam edip etmediklerine bağlı olarak kısa bir kesinti yaşanması mümkündür. -1. Initiate signal transfer on Ethereum mainnet +### Yayınlama ve sürüm oluşturma Katman2'de Ethereum ana ağı ile aynı mı? -2. Specify an L2 Curator address\* +Evet. Subgraph Stüdyo'da yayınlarken, yayınlanan ağınız olarak Arbitrum One'ı seçin. Stüdyo'da, subgraph'ın en son güncellenmiş sürümüne yönlendiren en son uç nokta mevcut olacaktır. -3. Wait 20 minutes for confirmation +### Subgraph'ımın kürasyonu subgraph'ımla birlikte hareket edecek mi? -\*If necessary - i.e. you are using a contract address.
+Otomatik geçiş sinyalini seçtiyseniz, kendi kürasyonunuzun %100'ü subgraph'ınızla birlikte Arbitrum One'a taşınacaktır. Subgraph'ın tüm kürasyon sinyali, aktarım sırasında GRT'ye dönüştürülecek ve kürasyon sinyalinize karşılık gelen GRT, Katman2 subgraph'ında sinyal basmak için kullanılacaktır. -## How will I know if the subgraph I curated has moved to L2? +Diğer Küratörler kendilerne ait GRT miktarını geri çekmeyi ya da aynı subgraph üzerinde sinyal basmak için Katman2'ye transfer etmeyi seçebilirler. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Transferden sonra subgraph'ımı Ethereum ana ağı'na geri taşıyabilir miyim? -## What if I do not wish to move my curation to L2? +Transfer edildikten sonra, bu subgraph'ınızın Ethereum ana ağı sürümü kullanımdan kaldırılacaktır. Ana ağa geri dönmek isterseniz, ana ağa yeniden dağıtmanız ve geri yayınlamanız gerekecektir. Öte yandan, indeksleme ödülleri eninde sonunda tamamen Arbitrum One üzerinde dağıtılacağından, Ethereum ana ağına geri transfer kesinlikle önerilmez. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Transferimi tamamlamak için neden köprülenmiş ETH'ye ihtiyacım var? -## How do I know my curation successfully transferred? +Arbitrum One'daki gas ücretleri köprülenmiş ETH (yani Arbitrum One'a köprülenmiş ETH) kullanılarak ödenir. Bununla birlikte, gas ücretleri Ethereum ana ağına kıyasla önemli ölçüde daha düşüktür. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +## Delegasyon -## Can I transfer my curation on more than one subgraph at a time? +### Delegasyonumu nasıl transfer edebilirim? -There is no bulk transfer option at this time. + -## Indexer Stake +Delegasyonunuzu transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: -## How do I transfer my stake to Arbitrum? +1. Ethereum ana ağında delegasyon transferi başlatın +2. Onaylanması için 20 dakika bekleyin +3. Arbitrum\* üzerinde delegasyon transferini onaylayın -To transfer your stake, you will need to complete the following steps: +\*\*\*\*Arbitrum üzerinde delegasyon transferini tamamlamak için işlemi onaylamanız gerekir. Bu adım 7 gün içinde tamamlanmalıdır, aksi takdirde delegasyon kaybolabilir. Çoğunlukla, bu adım otomatik olarak çalışacaktır, ancak Arbitrum'da gas fiyatlarında bir artış varsa manuel bir onay gerekebilir. Bu süreç sırasında herhangi bir sorun yaşanırsa, yardımcı olacak kaynaklar olacaktır: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden destek ile iletişime geçin. -1. Initiate stake transfer on Ethereum mainnet +### Ethereum ana ağında açık bir tahsis mevcutken transfer başlatırsam ödüllerime ne olur? -2. Wait 20 minutes for confirmation +Delege ettiğiniz İndeksleyici hala Katman1'de çalışıyorsa, Arbitrum'a transfer ettiğinizde Ethereum ana ağındaki açık tahsislerden elde ettiğiniz tüm delegasyon ödüllerini kaybedersiniz. Bu, maksimum son 28 günlük döneme ait ödülleri kaybedeceğiniz anlamına gelir. Transferi, İndeksleyici tahsisleri kapattıktan hemen sonra yaparsanız, bunun mümkün olan en düşük miktar olduğundan emin olabilirsiniz. 
İndeksleyici(ler)inizle bir iletişim kanalınız varsa, transferinizi yapmak için en iyi zamanı bulmak için onlarla görüşmeyi düşünün. -3. Confirm stake transfer on Arbitrum +### Halihazırda delege ettiğim İndeksleyici Arbitrum One'da değilse ne olur? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +Katman2 transfer aracı yalnızca yetki verdiğiniz İndeksleyici kendi stake'ini Arbitrum'a transfer etmişse etkinleştirilecektir. -## Will all of my stake transfer? +### Delegatörlerin başka bir İndeksleyiciye delege etme seçeneği var mı? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Başka bir İndeksleyiciye delege etmek isterseniz, Arbitrum'da aynı İndeksleyiciye aktarabilir, ardından delege işlemini geri alabilir ve serbest kalma süresini bekleyebilirsiniz. Bundan sonra, delege etmek için başka bir aktif İndeksleyici seçebilirsiniz. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Delege ettiğim İndeksleyiciyi Katman2'de bulamazsam ne olur? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +Katman2 transfer aracı, daha önce delege ettiğiniz İndeksleyiciyi otomatik olarak tespit edecektir. -## How much time do I have to confirm my stake transfer to Arbitrum? +### Delegasyonumu önceki İndeksleyici yerine yeni veya birkaç İndeksleyici arasında karıştırabilecek, eşleştirebilecek veya 'yayabilecek' miyim? -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +Katman2 transfer aracı, delegasyonunuzu her zaman daha önce delege ettiğiniz İndeksleyici'ye taşıyacaktır. Katman2'ye taşındıktan sonra, delegasyonu geri alabilir, serbest kalma süresini bekleyebilir ve delegasyonunuzu bölmek isteyip istemediğinize karar verebilirsiniz. -## What if I have open allocations? +### Bekleme süresine tabi miyim yoksa Katman2 delegasyon transfer aracını kullandıktan hemen sonra geri çekebilir miyim? -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +Transfer aracı Katman2'ye hemen geçmenizi sağlar. Eğer delegasyonunuzu bozmak isterseniz, serbest kalma süresini beklemeniz gerekecektir. Ancak, bir İndeksleyici tüm stake'ini Katman2'ye transfer ettiyse, Ethereum ana ağında hemen geri çekebilirsiniz. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Delegasyonumu transfer etmezsem ödüllerim olumsuz etkilenebilir mi? 
-No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +Gelecekte tüm ağ katılımının Arbitrum One'a taşınması beklenmektedir. -## How long will it take to transfer my stake? +### Delegasyonumun Katman2'ye transferinin tamamlanması ne kadar sürer? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +Delegasyon transferi için 20 dakikalık bir onay gereklidir. Lütfen 20 dakikalık sürenin ardından 7 gün içinde geri gelip transfer sürecinin 3. adımını tamamlamanız gerektiğini unutmayın. Bunu yapmazsanız, delegasyonunuz kaybolabilir. Çoğu durumda transfer aracının bu adımı sizin için otomatik olarak tamamlayacağını unutmayın. Otomatik denemenin başarısız olması durumunda, bunu manuel olarak tamamlamanız gerekecektir. Bu işlem sırasında herhangi bir sorun ortaya çıkarsa endişelenmeyin, size yardımcı olmak için burada olacağız: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden bizimle iletişime geçin. -## Do I have to index on Arbitrum before I transfer my stake? +### GRT hak ediş sözleşmesi / token kilit cüzdanı kullanıyorsam delegasyonumu transfer edebilir miyim? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +Evet! Süreç biraz farklıdır çünkü hak ediş sözleşmeleri Katman2 gas'ı için ödeme yapmak adına gereken ETH'yi iletemez, bu nedenle önceden yatırmanız gerekir. Hak ediş sözleşmeniz tam olarak hak edilmemişse, önce Katman2'de bir muadil hak ediş sözleşmesi başlatmanız gerekecek ve delegasyonu yalnızca bu Katman2 hak ediş sözleşmesine aktarabileceksiniz. Gezgin'deki kullanıcı arayüzü, hak ediş kilidi cüzdanını kullanarak Gezgin'a bağlandığınızda bu işlem boyunca size rehberlik edebilir. -## Can Delegators move their delegation before I move my indexing stake? +### Arbitrum hak ediş sözleşmem ana ağdaki gibi GRT'nin serbest bırakılmasına izin verir mi? -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +Hayır, Arbitrum'da oluşturulan hak ediş sözleşmesi, hak ediş zaman çizelgesinin sonuna kadar, yani sözleşmeniz tamamen hak edilene kadar herhangi bir GRT'nin serbest bırakılmasına izin vermeyecektir. Bu, aksi takdirde her iki katmanda da aynı miktarları serbest bırakmayı mümkün hale getireceğinden ötürü çifte harcamayı önlemek içindir. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +GRT'yi hak ediş sözleşmesinden çıkarmak isterseniz, Gezgin'i kullanarak bunları Katman1 hak ediş sözleşmesine geri transfer edebilirsiniz: Arbitrum One profilinizde, GRT'yi ana ağ hak ediş sözleşmesine geri transfer edebileceğinizi söyleyen bir başlık göreceksiniz. Bu, GRT köprüsündeki aynı yerel köprüleme mekanizmasını kullandığından ötürü, Arbitrum One'da bir işlem, 7 gün bekleme ve ana ağda son bir işlem gerektirir. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. 
The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Herhangi bir delegasyon vergisi var mı? -## Delegation +Hayır. Katman2'de alınan tokenler, belirtilen Delegatör adına belirtilen İndeksleyiciye bir delegasyon vergisi alınmadan delege edilir. -## How do I transfer my delegation? +### Delegasyonumu transfer ettiğimde henüz gerçekleşmemiş ödüllerim de transfer edilecek mi? -To transfer your delegation, you will need to complete the following steps: +Evet! Transfer edilemeyen tek ödül açık tahsisler için olanlardır, çünkü bunlar İndeksleyici tahsisleri kapatana kadar (genellikle her 28 günde bir) var olmayacaktır. Bir süredir delege ediyorsanız, bu muhtemelen ödüllerin yalnızca küçük bir kısmına denk gelmektedir. -1. Initiate delegation transfer on Ethereum mainnet +Akıllı sözleşme düzeyinde, gerçekleşmemiş ödüller zaten delegasyon bakiyenizin bir parçasıdır, bu nedenle delegasyonunuzu Katman2'ye transfer ettiğinizde bunlar da transfer edilecektir. -2. Wait 20 minutes for confirmation +### Delegasyonların Katman2'ye taşınması zorunlu mu? Bunun için bir son tarih var mı? -3. Confirm delegation transfer on Arbitrum +Delegasyonun Katman2'ye taşınması zorunlu değildir, ancak indeksleme ödülleri [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193)'de açıklanan zaman çizelgesini izleyerek Katman2'de artmaktadır. Nihayetinde, Konsey artışları onaylamaya devam ederse, tüm ödüller Katman2'de dağıtılacak ve Katman1'de İndeksleyiciler ve Delegatörler için indeksleme ödülü kalmayacaktır. -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Stake'ini halihazırda Katman2'ye transfer etmiş olan bir İndeksleyiciye delege ediyorsam, Katman1'de ödül almayı bırakacak mıyım? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Birçok İndeksleyici stake'lerini kademeli olarak transfer etmektedir, bu nedenle Katman1'deki İndeksleyiciler Katman1'de ödüller ve ücretler kazanmaya devam edecek ve bunlar daha sonra Delegatörlerle paylaşılacaktır. Bir İndeksleyici tüm stake'lerini transfer ettiğinde, Katman1'de faaliyet göstermeyi bırakacak, böylece Delegatörler Katman2'ye transfer etmedikleri sürece hiçbir ödül alamayacaklar. -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +Nihayetinde, Konsey artışları onaylamaya devam ederse, tüm ödüller Katman2'de dağıtılacak ve Katman1'de İndeksleyiciler ve Delegatörler için indeksleme ödülü kalmayacaktır. -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? 
+### Delegasyonumu transfer etmek için bir düğme göremiyorum. Niçin böyle? -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +Muhtemelen İndeksleyiciniz henüz stake'lerini transfer etmek için Katman2 transfer araçlarını kullanmamıştır. -## Do Delegators have the option to delegate to another Indexer? +İndeksleyici ile iletişime geçebilir ve onları Katman2 Transfer Araçlarını kullanmaya teşvik edebilirsiniz. böylece Delegatörler delegasyonları Katman2 İndeksleyici adreslerine transfer edebilirler. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### İndeksleyicim de Arbitrum'da, ancak profilimde delegasyonu transfer etmek için bir düğme göremiyorum. Niçin böyle? -## What if I can't find the Indexer I'm delegating to on L2? +İndeksleyicinin Katman2 üzerinde işlemler başlatmış olması ancak stake transferi için Katman2 transfer araçlarını kullanmamış olması olası bir durumdur. Bu nedenle Katman1 akıllı sözleşmeleri İndeksleyicinin Katman2 adresi hakkında bilgi sahibi olmayacaktır. İndeksleyici ile iletişime geçebilir ve onları Katman2 Transfer Araçlarını kullanmaya teşvik edebilirsiniz. Böylece Delegatörler delegasyonları Katman2 İndeksleyici adreslerine transfer edebilirler. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Delegasyonumu geri alma sürecini başlattıysam ve henüz geri çekmediysem delegasyonumu Katman2'ye transfer edebilir miyim? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Hayır. Delegasyonunuz serbest kalma sürecindeyse, 28 gün beklemeniz ve sonra geri çekmeniz gerekir. -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +Delegeden çıkarılmış statüsünde olan tokenler "kilitli" durumdadır ve bu nedenle Katman2'ye transfer edilemezler. -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +## Kürasyon Sinyali -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Kürasyonumu nasıl transfer edebilirim? -## Can my rewards be negatively impacted if I do not transfer my delegation? +Kürasyonunuzu transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: -It is anticipated that all network participation will move to Arbitrum One in the future. +1. Ethereum ana ağında sinyal transferini başlatın -## How long does it take to complete the transfer of my delegation to L2? +2. Bir Katman2 Kürasyon adresi belirleyin\* -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. 
If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +3. Onaylanması için 20 dakika bekleyin -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +\*Gerekliyse - yani bir sözleşme adresi kullanıyorsanız. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Küratörlüğünü yaptığım subgraph'ın Katman2'ye taşınıp taşınmadığını nasıl bileceğim? -## Is there any delegation tax? +Subgraph ayrıntıları sayfasını görüntülerken, bir afiş size bu subgraph'ın transfer edildiğini bildirecektir. Kürasyonunuzu transfer etmek için komut istemini takip edebilirsiniz. Bu bilgiyi taşınan herhangi bir subgraph'ın subgraph ayrıntıları sayfasında da bulabilirsiniz. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Kürasyonumu Katman2'ye taşımak istemezsem ne olur? -## Vesting Contract Transfer +Bir subgraph kullanımdan kaldırıldığında sinyalinizi geri çekme opsiyonu bulunmaktadır. Benzer şekilde, bir subgraph Katman2'ye taşındıysa, sinyalinizi Ethereum ana ağı'nda geri çekmeyi veya sinyali Katman2'ye göndermeyi seçebilirsiniz. -## How do I transfer my vesting contract? +### Kürasyonumun başarıyla transfer edildiğini nasıl bilebilirim? -To transfer your vesting, you will need to complete the following steps: +Sinyal ayrıntıları, Katman2 transfer aracı başlatıldıktan yaklaşık 20 dakika sonra Gezgin üzerinden erişilebilir olacaktır. -1. Initiate the vesting transfer on Ethereum mainnet +### Kürasyonumu aynı anda birden fazla subgraph'a transfer edebilir miyim? -2. Wait 20 minutes for confirmation +Şu anda toplu transfer seçeneği bulunmamaktadır. -3. Confirm vesting transfer on Arbitrum +## İndeksleyici Stake'i -## How do I transfer my vesting contract if I am only partially vested? +### Payımı Arbitrum'a nasıl transfer edebilirim? -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +> Uyarı: Stake ettiğiniz GRT'nizin herhangi bir bölümünü İndeksleyicinizden kaldırıyorsanız Katman2 Transfer Araçlarını kullanamazsınız. -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. + -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +Stake'inizi transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: -4. Withdraw any remaining ETH from the transfer tool contract +1. Ethereum ana ağında stake transferi başlatın -## How do I transfer my vesting contract if I am fully vested? +2. Onaylanması için 20 dakika bekleyin -For those that are fully vested, the process is similar: +3. Arbitrum\* üzerinde stake transferini onaylayın -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +\*Transferi 7 gün içinde onaylamanız gerektiğini unutmayın, aksi takdirde stake'iniz kaybolabilir. 
Çoğunlukla, bu adım otomatik olarak çalışacaktır, ancak Arbitrum'da gas fiyatlarında bir artış varsa manuel bir onay gerekebilir. Bu süreç sırasında herhangi bir sorun yaşanırsa, yardımcı olacak kaynaklar olacaktır: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden destek ile iletişime geçin. -2. Set your L2 address with a call to the transfer tool contract +### Stake'imin tamamı transfer edilecek mi? -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +Stake'inizin ne kadarını transfer edeceğinizi seçebilirsiniz. Tamamını tek seferde transfer etmeyi seçerseniz, önce açık tahsisleri kapatmanız gerekecektir. -4. Withdraw any remaining ETH from the transfer tool contract +Stake'inizin bir kısmını birden fazla işlem üzerinden transfer etmeyi planlıyorsanız, her zaman aynı faydalanıcı adresi belirtmelisiniz. -## Can I transfer my vesting contract to Arbitrum? +Not: Transfer aracını ilk kez kullandığınızda Katman2'deki minimum stake gereksinimlerini karşılamanız gerekir. İndeksleyiciler minimum 100 bin GRT göndermelidir (bu fonksiyonu ilk kez çağırırken). Katman1'de stake'in bir kısmını bırakıyorsanız, bu miktarda minimum 100 bin GRT'nin üzerinde olmalı ve açık tahsislerinizi karşılamak için (delegasyonlarınızla birlikte) yeterli olmalıdır. -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +### Arbitrum'a gerçekleştirdiğim stake transferimi onaylamak için ne kadar zamanım var? -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +\*\*\* Arbitrum'da stake transferini tamamlamak için işleminizi onaylamanız gerekir. Bu adım 7 gün içinde tamamlanmalıdır, aksi takdirde stake kaybolabilir. -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +### Açık tahsisatlarım varsa ne olur? -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +Stake'inizin tamamını göndermiyorsanız, Katman2 transfer aracı Ethereum ana ağında en az 100 bin GRT kaldığını ve kalan stake ve delegasyonunuzun açık tahsisleri karşılamak için yeterli olduğunu doğrulayacaktır. GRT bakiyeniz minimumları + açık tahsisleri karşılamıyorsa açık tahsisleri kapatmanız gerekebilir. -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +### Transfer araçlarını kullanarak, transfer etmeden önce Ethereum ana ağındaki stake'imin kaldırılması için 28 gün beklemek gerekli mi? -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? 
+Hayır, stake'inizi hemen Katman2'ye transfer edebilirsiniz, transfer aracını kullanmadan önce stake'i kaldırmanıza ve beklemenize gerek yoktur. 28 günlük bekleme süresi yalnızca stake'i Ethereum ana ağındaki veya Katman2'deki cüzdanınıza geri çekmek istediğinizde geçerlidir. -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Stake'imi transfer etmek ne kadar sürer? -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +Katman2 transfer aracının stake'inizi transfer etmeyi tamamlaması yaklaşık 20 dakika sürecektir. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +### Stake'imi transfer etmeden önce Arbitrum'da indekslemem gerekiyor mu? -## Can I specify a different beneficiary for my vesting contract on L2? +İndekslemeyi oluşturmadan önce hissenizi etkin bir şekilde aktarabilirsiniz, ancak Katman2'deki subgraph'lara tahsis edene, bunları indeksleyene ve POI'leri sunana kadar Katman2'de herhangi bir ödül talep edemezsiniz. -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. +### Ben indeksleme stake'imi taşımadan önce Delegatörler delegasyonlarını taşıyabilir mi? -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +Hayır, Delegatörlerin delege ettikleri GRT'yi Arbitrum'a transfer edebilmeleri için delege ettikleri İndeksleyicinin Katman2'de aktif olması gerekir. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### GRT hak ediş sözleşmesi / token kilit cüzdanı kullanıyorsam stake'imi transfer edebilir miyim? -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +Evet! Süreç biraz farklıdır, çünkü hak ediş sözleşmeleri Katman2 gas'ı için ödeme yapmak adına gereken ETH'yi iletemez, bu nedenle önceden yatırmanız gerekir. Hak ediş sözleşmeniz tam olarak hak edilmemişse, önce Katman2'de bir muadil hak ediş sözleşmesi başlatmanız gerekecek ve stake'i yalnızca bu Katman2 hak ediş sözleşmesine aktarabileceksiniz. Gezgin'deki kullanıcı arayüzü, hak ediş kilidi cüzdanını kullanarak Gezgin'a bağlandığınızda bu işlem boyunca size rehberlik edebilir. 
-This allows you to transfer your stake or delegation to any L2 address. +### Halihazırda Katman2'de stake'im var. Transfer araçlarını ilk kez kullanıyorsam, yine de 100 bin GRT göndermem gerekiyor mu? -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +Evet. Katman1 akıllı sözleşmeleri Katman2 stake'lerinizden bihaber olacaktır, bu nedenle ilk kez transfer yaptığınızda bu en az 100 bin GRT transfer etmenizi gerektirecektir. -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### Stake ettiğim GRT'yi kaldırma sürecindeysem, stake'imi Katman2'ye transfer edebilir miyim? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +Hayır. Stake'inizin herhangi bir kısmı serbest kalma sürecindeyse, 28 gün beklemeniz ve stake'inizi transfer etmeden önce çekmeniz gerekir. Stake edilen tokenler "kilitlenmiş" durumdadır ve bu Katman2'ye herhangi bir transfer veya stake yapılmasını engeller. -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +## Hak ediş Sözleşmesi Transferi -2. Revoke protocol access to the vesting contract (needed for the next step) +### Hak ediş sözleşmemi nasıl transfer edebilirim? -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Hak edişinizi transfer etmek için aşağıdaki adımları tamamlamanız gerekecektir: -4. Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +1. Ethereum ana ağında hak ediş transferi başlatın -5. Wait 20 minutes for confirmation +2. Onaylanması için 20 dakika bekleyin -6. Confirm the balance transfer on L2 +3. Arbitrum\* üzerinde hak ediş transferini onaylayın -\*If necessary - i.e. you are using a contract address. +### Sadece kısmen hak kazanmışsam hak ediş sözleşmemi nasıl transfer edebilirim? -\*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + -## Can I move my vesting contract back to L1? +1. Transfer aracı sözleşmesine bir miktar ETH yatırın (kullanıcı arayüzü makul bir miktar tahmin etmenize yardımcı olabilir) -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +2. Katman2 hak ediş kilidini başlatmak için transfer aracı sözleşmesi aracılığıyla bir miktar kilitli GRT'yi Katman2'ye gönderin. Bu aynı zamanda Katman2 faydalanıcı adresini de belirleyecektir. -## Why do I need to move my vesting contract to begin with? +3. Katman1 Stake etme sözleşmesindeki "kilitli" transfer aracı fonksiyonları aracılığıyla onların stake/delegasyonlarını Katman2'ye gönderin. -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +4. 
Transfer aracı sözleşmesinde kalan ETH'yi geri çekin -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Tamamen hak kazanmışsam hak ediş sözleşmemi nasıl transfer edebilirim? -This is not a possibility. You can move funds back to L1 and withdraw them there. + -## What if I don't want to move my vesting contract to L2? +Tamamen hak sahibi olanlar için süreç benzerdir: -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +1. Transfer aracı sözleşmesine bir miktar ETH yatırın (kullanıcı arayüzü makul bir miktar tahmin etmenize yardımcı olabilir) + +2. Transfer aracı sözleşmesine bir çağrı yaparak Katman2 adresinizi ayarlayın + +3. Katman1 Stake etme sözleşmesindeki "kilitli" transfer aracı fonksiyonları aracılığıyla stake/delegasyonlarınızı Katman2'ye gönderin. + +4. Transfer aracı sözleşmesinde kalan ETH'yi geri çekin + +### Hak ediş sözleşmemi Arbitrum'a transfer edebilir miyim? + +Hak ediş sözleşmenizin GRT bakiyesini Katman2'deki bir hak ediş sözleşmesine aktarabilirsiniz. Bu, hak ediş sözleşmenizden Katman2'ye stake veya delegasyon aktarmak için bir ön koşuldur. Hak ediş sözleşmesi GRT miktarınız sıfırdan farklı olmalıdır (gerekirse 1 GRT gibi küçük bir miktarı aktarabilirsiniz). + +GRT'yi Katman1 hak ediş sözleşmenizden Katman2'ye transfer ettiğinizde, gönderilecek miktarı seçebilir ve bunu istediğiniz kadar yapabilirsiniz. Katman2 hak ediş sözleşmesi, GRT'yi ilk kez transfer ettiğinizde başlatılacaktır. + +Transferler, hak ediş sözleşmesi hesabına bağlandığınızda Gezgin profilinizde görülebilecek bir Transfer Aracı kullanılarak yapılır. + +Sözleşmeniz tamamen hak edildiğinde hak ediş zaman çizelgenizin sonuna kadar Katman2 hak ediş sözleşmesinden GRT'yi serbest bırakamayacağınızı/çekemeyeceğinizi lütfen unutmayın. GRT'yi bundan önce serbest bırakmanız gerekiyorsa, bu amaçla kullanılabilen başka bir transfer aracını kullanarak GRT'yi Katman1 hak ediş sözleşmesine geri transfer edebilirsiniz. + +Katman2'ye herhangi bir hakediş sözleşmesi bakiyesi transfer etmediyseniz ve hakediş sözleşmeniz tamamen hakedilmişse, hakediş sözleşmenizi Katman2'ye transfer etmemelisiniz. Bunun yerine, bir Katman2 cüzdan adresi belirlemek için transfer araçlarını kullanabilir ve stake'inizi veya delegasyonunuzu doğrudan Katman2'deki bu normal cüzdana transfer edebilirsiniz. + +### Ana ağda stake etmek için hak ediş sözleşmemi kullanıyorum. Stake'imi Arbitrum'a transfer edebilir miyim? + +Evet, ancak sözleşmeniz hala hak ediş sürecindeyse, stake'i yalnızca Katman2 hak ediş sözleşmenize ait olacak şekilde transfer edebilirsiniz. Öncelikle Gezgin'deki hak ediş sözleşmesi transfer aracını kullanarak bir miktar GRT bakiyesi transfer ederek bu Katman2 sözleşmesini başlatmalısınız. Sözleşmeniz tamamen hak edilmişse, stake'inizi Katman2'deki herhangi bir adrese transfer edebilirsiniz, ancak bunu önceden ayarlamanız ve Katman2 transfer aracının Katman2 gas'ına ödeme yapılması için bir miktar ETH yatırmanız gerekir. + +### Ana ağda delege etmek için hak ediş sözleşmemi kullanıyorum. Delegasyonumu Arbitrum'a transfer edebilir miyim? 
+ +Evet, ancak sözleşmeniz hala hak ediş sürecindeyse, delegasyonu yalnızca Katman2 hak ediş sözleşmenize ait olacak şekilde transfer edebilirsiniz. Öncelikle Gezgin'deki hak ediş sözleşmesi transfer aracını kullanarak bir miktar GRT bakiyesi transfer ederek bu Katman2 sözleşmesini başlatmalısınız. Sözleşmeniz tamamen hak edilmişse, delegasyonunuzu Katman2'deki herhangi bir adrese transfer edebilirsiniz, ancak bunu önceden ayarlamanız ve Katman2 transfer aracının Katman2 gas'ına ödeme yapılması için bir miktar ETH yatırmanız gerekir. + +### Katman2'deki hak ediş sözleşmem için farklı bir faydalanıcı belirleyebilir miyim? + +Evet, bir bakiyeyi ilk kez transfer ettiğinizde ve Katman2 hak ediş sözleşmenizi kurduğunuzda, bir Katman2 faydalanıcısı belirleyebilirsiniz. Bu faydalanıcının Arbitrum One'da işlem yapabilen bir cüzdan olduğundan emin olun, yani bir Harici Olarak Sahip Olunan Hesap(EOA) veya Arbitrum One'a dağıtılmış bir çoklu imza cüzdanı olmalıdır. + +Sözleşmeniz tamamen hak edilmişse, Katman2 üzerinde bir hak ediş sözleşmesi oluşturmayacaksınız; bunun yerine, bir Katman2 cüzdan adresi belirleyeceksiniz ve bu, Arbitrum'daki stake'iniz veya delegasyonunuz için alıcı cüzdan olacak. + +### Sözleşmem tamamen hak edilmiş durumda. Stake'imi veya delegasyonumu Katman2 hak ediş sözleşmesi olmayan başka bir adrese transfer edebilir miyim? + +Evet. Katman2'ye herhangi bir hak ediş sözleşmesi bakiyesi transfer etmediyseniz ve hak ediş sözleşmeniz tamamen hak edilmişse, hak ediş sözleşmenizi Katman2'ye transfer etmemelisiniz. Bunun yerine, bir Katman2 cüzdan adresi belirlemek için transfer araçlarını kullanabilir ve stake'inizi veya delegasyonunuzu doğrudan Katman2'deki bu normal cüzdana transfer edebilirsiniz. + +Bu, stake'inizi veya delegasyonunuzu herhangi bir Katman2 adresine transfer etmenize olanak tanır. + +### Hak ediş sözleşmem hala devam ediyor. Hak ediş sözleşmesi bakiyemi Katman2'ye nasıl transfer edebilirim? + +Bu adımlar yalnızca sözleşmeniz hala devam ediyorsa veya bu süreci daha önce sözleşmeniz hala devam ederken kullandıysanız geçerlidir. + +Hak ediş sözleşmenizi Katman2'ye transfer etmek için, Katman2 hak ediş sözleşmenizi başlatacak olan transfer araçlarını kullanarak herhangi bir GRT bakiyesini Katman2'ye göndereceksiniz: + +1. Transfer aracı sözleşmesine bir miktar ETH yatırın (bu Katman2 gas'ını ödemek için kullanılacaktır) + +2. Hak ediş sözleşmesine protokol erişimini kaldırın (bir sonraki adım için gereklidir) + +3. Hak ediş sözleşmesine protokol erişimi verin (sözleşmenizin transfer aracıyla etkileşime girmesini sağlayacaktır) + +4. Bir Katman2 faydalanıcı adresi\* belirleyin ve Ethereum ana ağında bakiye transferini başlatın + +5. Onaylanması için 20 dakika bekleyin + +6. Katman2'de bakiye transferini onaylayın + +\*Gerekliyse - yani bir sözleşme adresi kullanıyorsanız. + +\*\*\*\*Arbitrum üzerinde bakiye transferinizi tamamlamak için işlemi onaylamanız gerekir. Bu adım 7 gün içinde tamamlanmalıdır, aksi takdirde bakiyeniz kaybolabilir.Çoğunlukla, bu adım otomatik olarak çalışacaktır, ancak Arbitrum'da gas fiyatlarında bir artış varsa manuel bir onay gerekebilir. Bu süreç sırasında herhangi bir sorun yaşanırsa, yardımcı olacak kaynaklar olacaktır: support@thegraph.com veya [Discord](https://discord.gg/graphprotocol) üzerinden destek ile iletişime geçin. + +### Hak ediş sözleşmem 0 GRT gösteriyor, bu yüzden transfer edemiyorum, bunun nedeni nedir ve nasıl düzeltebilirim? 
+ +Katman2 hak ediş sözleşmenizi başlatmak için sıfırdan farklı bir GRT miktarını Katman2'ye transfer etmeniz gerekir. Bu, Katman2 Transfer Araçları tarafından kullanılan Arbitrum GRT köprüsü nedeniyle gereklidir. GRT, hak ediş sözleşmesinin bakiyesinden gelmelidir, bu nedenle stake edilmiş veya delege edilmiş GRT'yi kapsamaz. + +Hak ediş sözleşmesinden tüm GRT'nizi stake veya delege ettiyseniz, başka bir yerden (örneğin başka bir cüzdandan veya borsadan) hak ediş sözleşmesi adresine manuel olarak 1 GRT gibi cüzi bir miktar gönderebilirsiniz. + +### Stake veya delegasyon için bir hak ediş sözleşmesi kullanıyorum, ancak stake'imi veya delegasyonumu Katman2'ye transfer etmek için bir düğme göremiyorum, ne yapmalıyım? + +Eğer hak ediş sözleşmenizin vade süresi dolmadıysa, öncelikle Katman2 stake veya delegasyonunuzu alacak olan bir hak ediş sözleşmesi oluşturmanız gerekecek. Bu hak ediş sözleşmesi, vade süresinin sonuna kadar Katman2'de tokenlerin serbest bırakılmasına izin vermeyecektir, ancak GRT'yi Katman1 hak ediş sözleşmesine geri transfer etmenize izin verecektir ve bu sayede tokenleriniz orada serbest bırakılabilir. + +Gezgin üzerindeki hak ediş sözleşmesine bağlandığınızda, Katman2 hak ediş sözleşmenizi başlatmak için bir düğme görmelisiniz. İlk olarak bu süreci takip edin, ardından profilinizde stake veya delegasyonunuzu transfer etmeye yarayan düğmeleri göreceksiniz. + +### Katman2 hak ediş sözleşmemi başlatırsam, bu aynı zamanda delegasyonumu otomatik olarak Katman2'ye transfer edecek mi? + +Hayır, hak ediş sözleşmesinden stake veya delegasyon transfer etmek için Katman2 hak ediş sözleşmenizi başlatmak bir ön koşuldur, ancak yine de bunları ayrı olarak transfer etmeniz gerekmektedir. + +Katman2 hak ediş sözleşmenizi başlattıktan sonra profilinizde stake'inizi veya delegasyonunuzu transfer etmenizi isteyen bir banner göreceksiniz. + +### Hak ediş sözleşmemi Katman1'e geri taşıyabilir miyim? + +Hak ediş sözleşmeniz hala Katman1'de olduğu için bunu yapmanıza gerek yoktur. Transfer araçlarını kullandığınızda, Katman2'de Katman1 hak ediş sözleşmenize bağlı yeni bir sözleşme oluşturursunuz ve ikisi arasında GRT gönderip alabilirsiniz. + +### Neden başlangıçta hak ediş sözleşmemi taşımam gerekiyor? + +Bu hesabın Katman2'deki stake veya delegasyonunuza sahip olabilmesi için bir Katman2 hak ediş sözleşmesi oluşturmanız gerekir. Aksi takdirde, hak ediş sözleşmesinden "kurtulmadan" stake'i/delegasyonu Katman2'ye transfer etmenizin bir yolu olmaz. + +### Sözleşmem sadece kısmen hak edilmişken nakde çevirmeye çalışırsam ne olur? Bu mümkün müdür? + +Bu mümkün değildir. Fonları Katman1'e geri taşıyabilir ve oradan çekebilirsiniz. + +### Hak ediş sözleşmemi Katman2'ye taşımak istemezsem ne olur? + +Katman1'de stake etme/delegasyon yapmaya devam edebilirsiniz. Zamanla, protokol Arbitrum'da ölçeklendikçe ödülleri orada etkinleştirmek için Katman2'ye geçmeyi düşünebilirsiniz. Bu transfer araçlarının protokolde stake etme ve delege etmeye izin verilen hak ediş sözleşmeleri için olduğunu unutmayın. Sözleşmeniz stake etmeye veya delege etmeye izin vermiyorsa ya da iptal edilebilirse, transfer aracı kullanılamaz. Kullanılabilir olduğunda GRT'nizi Katman1'den çekmeye devam edebileceksiniz.
diff --git a/website/pages/tr/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/tr/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..045263636ff7 100644 --- a/website/pages/tr/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/tr/arbitrum/l2-transfer-tools-guide.mdx @@ -1,165 +1,165 @@ --- -title: L2 Transfer Tools Guide +title: Katman2 Transfer Araçları Rehberi --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +Graph, Arbitrum One üzerinde Katman2'ye geçişi kolaylaştırmıştır. Her protokol katılımcısı için, tüm ağ katılımcıları adına Katman2'ye transferi sorunsuz hale getirmek için bir dizi Katman2 Transfer Aracı vardır. Bu araçlar, ne transfer ettiğinize bağlı olarak belirli bir dizi adımı izlemenizi gerektirecektir. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Bu araçlarla ilgili sıkça sorulan bazı sorular [Katman2 Transfer Araçları SSS](/arbitrum/l2-transfer-tools-faq) bölümünde yanıtlanmaktadır. SSS, araçların nasıl kullanılacağı, nasıl çalıştıkları ve kullanırken akılda tutulması gerekenler hakkında derinlemesine açıklamalar içermektedir. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Subgraph'ınızı Arbitrum'a nasıl transfer edebilirsiniz (Katman2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Subgraphlar'ınızı transfer etmenin faydaları -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +Graph topluluğu ve çekirdek geliştiricileri geçtiğimiz yıl boyunca Arbitrum'a geçmek için [hazırlanıyordu] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305). Bir katman 2 veya "L2" blok zinciri olan Arbitrum, güvenliği Ethereum'dan devralmakla birlikte büyük ölçüde daha düşük gaz ücretleri sağlamaktadır. -When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. +Subgraph'ınızı Graph Ağı'nda yayınladığınızda veya yükselttiğinizde, protokol üzerindeki akıllı sözleşmelerle etkileşime girersiniz ve bu ETH kullanarak gas ödemesi yapmayı gerektirir. Subgraphlar'ınızı Arbitrum'a taşıdığınızda, gelecekte subgraphlar'ınızda yapılacak tüm güncellemeler çok daha düşük gas ücretleri gerektirecektir. 
Daha düşük ücretler ve Katman2'deki kürasyon bağlanma eğrilerinin sabit olması, diğer Küratörlerin subgraph'ınızda kürasyon yapmasını kolaylaştırır ve subgraph'ınızdaki İndeksleyiciler için ödülleri artırır. Bu düşük maliyetli ortam, İndeksleyicilerin subgraph'ınızı indekslemesini ve hizmet vermesini de daha ucuz hale getirmektedir.. Önümüzdeki aylarda İndeksleme ödülleri Arbitrum'da artacak ve Ethereum ana ağında azalacaktır, bu nedenle gittikçe daha fazla İndeksleyici mevcut stake'lerini transfer edecek ve operasyonlarını Katman2'de başlatacaktır. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Bir subgraph'ı Arbitrum'a transfer etmek için Arbitrum GRT köprüsü kullanılmaktadır, bu köprüde subgraph'ı Katman2'ye göndermek için yerel Arbitrum köprüsünü kullanır. "transfer", ana ağdaki subgraph'ı kullanımdan kaldıracak ve köprüyü kullanarak Katman2'de subgraph'ı yeniden oluşturmak için bilgi gönderecektir. Aynı zamanda, köprünün transferi kabul etmesi için subgraph sahibinin sinyallenmiş GRT'sini de dahil edecektir ve bu değer sıfırdan büyük olmalıdır. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Subgraph transfer etmeyi seçtiğinizde, bu, subgraph'ın tüm kürasyon sinyalini GRT'ye dönüştürecektir. Bu, ana ağdaki subgraph'ı "kullanımdan kaldırmaya" eşdeğerdir. Kürasyonunuza karşılık gelen GRT, subgraphla birlikte Katman2'ye gönderilecek ve burada sizin adınıza sinyal basmak için kullanılacaktır. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Diğer Küratörler, GRT tokenlerinin bir bölümünü geri çekmeyi ya da aynı subgraph üzerinde sinyal basmak için Katman2'ye transfer etmeyi tercih edebilirler. Bir subgraph sahibi subgraph'ını Katman2'ye transfer edemezse ve bir sözleşme çağrısı yoluyla manuel olarak kullanımdan kaldırırsa, Küratörler bilgilendirilecek ve kürasyonlarını geri çekebileceklerdir. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Subgraph transfer edilir edilmez, tüm kürasyon GRT'ye dönüştürüldüğünden, İndeksleyiciler artık subgraph'ı indekslemek için ödül almayacaktır. 
Ancak, 1) aktarılan subgraphlar'ı 24 saat boyunca sunmaya devam edecek ve 2) hemen Katman2'de subgraph'ı indekslemeye başlayacak İndeksleyiciler olacaktır. Bu İndeksleyiciler subgraph'ı zaten indekslediğinden, subgraph'ın senkronize olmasını beklemeye gerek kalmayacak ve Katman2 subgraph'ını neredeyse anında sorgulamak mümkün olacaktır. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Katman2 subgraph'ına yönelik sorgular farklı bir URL üzerinden yapılmalıdır (arbitrum-gateway.thegraph.com). Ancak Katman1 URL'si en az 48 saat boyunca çalışmaya devam edecektir. Bu sürenin ardından, Katman1 ağ geçidi sorguları (bir süre için) Katman2 ağ geçidine iletecektir, fakat bu gecikmeye neden olacağından ötürü mümkün olan en kısa sürede tüm sorgularınızı yeni URL'ye geçirmeniz önerilir. -## Choosing your L2 wallet +## Katman2 cüzdanınızın seçimi -When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. +Subgraph'ınızı ana ağ üzerinde yayınladığınızda, subgraph'ı oluşturmak için bağlı bir cüzdan kullandınız ve bu cüzdan, bu subgraph'ı temsil eden ve güncellemeleri yayınlamanıza izin veren NFT'nin sahibidir. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +Subgraph'ı Arbitrum'a transfer ederken, Katman2 üzerinde bu subgraph NFT'ye sahip olacak farklı bir cüzdan seçebilirsiniz. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +MetaMask gibi "genel" bir cüzdan (Harici Olarak Sahip Olunan Hesap veya EOA, yani akıllı sözleşme olmayan bir cüzdan) kullanıyorsanız, bu opsiyoneldir ve Katman1'deki ile aynı sahip adresini kullanmanız önerilir. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +Çoklu imza (örneğin Safe) gibi bir akıllı sözleşme cüzdanı kullanıyorsanız, farklı bir Katman2 cüzdan adresi seçmek zorunludur, çünkü büyük olasılıkla bu hesap yalnızca ana ağ üzerinde kullanılabilir ve bu cüzdanı kullanarak Arbitrum'da işlem yapamazsınız. Bir akıllı sözleşme cüzdanı veya çoklu imza cüzdanı kullanmaya devam etmek istiyorsanız, Arbitrum'da yeni bir cüzdan oluşturun ve adresini subgraph'ınızın Katman2 sahibi olarak kullanın. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +**Sizin kontrolünüzde ve Arbitrum üzerinde işlem yapabilen bir cüzdan adresi kullanmak oldukça önemlidir. 
Aksi takdirde, subgraph kaybolacak ve kurtarılamayacaktır.** -## Preparing for the transfer: bridging some ETH +## Transfer için hazırlık: Bir miktar ETH köprüleme -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Subgraph'ın transfer edilmesi, köprü üzerinden bir işlemin gönderilmesini ve ardından Arbitrum'da başka bir işlemin yürütülmesini içermektedir. İlk işlem ana ağda ETH kullanır ve mesaj Katman2'de alındığında gas için ödeme yapmak üzere bir miktar ETH içerir. Ancak, bu gas yetersizse, işlemi yeniden denemeniz ve gas için doğrudan Katman2'de ödeme yapmanız gerekecektir (bu, aşağıdaki "Adım 3: Transferi onaylama" dır). Bu adım **transferin başlamasından sonraki 7 gün içinde gerçekleştirilmelidir**. Ayrıca, ikinci işlem ("Adım 4: Katman2'de transferin tamamlanması") doğrudan Arbitrum'da gerçekleştirilecektir. Bu nedenlerden dolayı, Arbitrum cüzdanında bir miktar ETH'ye ihtiyacınız olacak. Bir çoklu imzalı veya akıllı sözleşme hesabı kullanıyorsanız, ETH'nin çoklu imza değil, işlemleri gerçekleştirmek için kullandığınız normal harici hesap (EOA) cüzdanında olması gerekecektir. -You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. +Bazı borsalardan ETH satın alabilir ve doğrudan Arbitrum'a çekebilir veya bir ana ağ cüzdanından Katman2'ye ETH göndermek için Arbitrum köprüsünü kullanabilirsiniz: [bridge.arbitrum.io](http://bridge.arbitrum.io). Arbitrum'daki gas ücretleri daha düşük olduğundan, yalnızca küçük bir miktara ihtiyacınız olacaktır. İşleminizin onaylanması için düşük bir eşikten (ör. 0.01 ETH) başlamanız önerilir. -## Finding the subgraph Transfer Tool +## Subgraph Transfer Aracını bulma -You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: +Subgraph Stüdyo'da subgraph'ınızın sayfasına bakarak Katman2 Transfer Aracını bulabilirsiniz: ![transfer tool](/img/L2-transfer-tool1.png) -It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: +Ayrıca, bir subgraph'ın sahibi olan cüzdana bağlıysanız Gezgin'de ve Gezgin'deki subgraph'ın sayfasında da bulunmaktadır: ![Transferring to L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +Katman2'ye Transfer düğmesine tıkladığınızda transfer işlemini başlatabileceğiniz transfer aracı açılacaktır. 
-## Step 1: Starting the transfer +## Adım 1: Transferin başlatılması -Before starting the transfer, you must decide which address will own the subgraph on L2 (see "Choosing your L2 wallet" above), and it is strongly recommend having some ETH for gas already bridged on Arbitrum (see "Preparing for the transfer: bridging some ETH" above). +Transfere başlamadan önce, Katman2'de hangi adresin subgraph'a sahip olacağına karar vermelisiniz (yukarıdaki "Katman2 cüzdanınızın seçimi" bölümüne bakın) ve Arbitrum'da halihazırda köprülenmiş gas için kullanacağınız bir miktar ETH bulundurmanız şiddetle tavsiye edilir (yukarıdaki "Transfer için hazırlık: Bir miktar ETH köprüleme" bölümüne bakın). -Also please note transferring the subgraph requires having a nonzero amount of signal on the subgraph with the same account that owns the subgraph; if you haven't signaled on the subgraph you will have to add a bit of curation (adding a small amount like 1 GRT would suffice). +Ayrıca, subgraph'ın sahibi olan hesabın bir subgraph transferi gerçekleştirebilmesi için ilgili subgraph üzerinde belirli bir sinyale sahip olması gerektiğini göz önünde bulundurun; eğer subgraph üzerinde sinyal vermediyseniz, biraz kürasyon eklemeniz gerekecektir (1 GRT gibi küçük bir miktar eklemek yeterli olacaktır). -After opening the Transfer Tool, you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Subgraph will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer and deprecate your L1 subgraph (see "Understanding what happens with signal, your L1 subgraph and query URLs" above for more details on what goes on behind the scenes). +Transfer Aracını açtıktan sonra, Katman2 cüzdan adresini "Alıcı cüzdan adresi" alanına girebileceksiniz - **buraya doğru adresi girdiğinizden emin olun**. Subgraph'ı Transfer Et'e tıkladığınızda, cüzdanınızda işlemi gerçekleştirmeniz istenecektir (Katman2 gas'ı için ödeme yapmak üzere bir miktar ETH'nin dahil edildiğini unutmayın); bu, transferi başlatacak ve Katman1 subgraph'ınızı kullanımdan kaldıracaktır (perde arkasında neler olup bittiğine ilişkin daha fazla ayrıntı için yukarıdaki "Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama" bölümüne bakın). -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Bu adımı uygularsanız, **3. adımı tamamlamak için yedi günden daha kısa bir sürede ilerlediğinizden mutlaka emin olmalısınız; aksi halde subgraph ve sinyal GRT'nizi kaybedeceksiniz.** Bunun nedeni Arbitrum'da Katman1-Katman2 mesajlaşmasının çalışma şeklidir: köprü üzerinden gönderilen mesajlar 7 gün içinde yürütülmesi gereken "yeniden denenebilir biletler"dir ve Arbitrum'da gas fiyatında ani artışlar olması durumunda ilk yürütmenin yeniden denenmesi gerekebilir. 
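The Explorer UI normally retries this for you (see Step 3 below), but the same pending retryable ticket can also be redeemed manually. The sketch below is only an illustration under assumptions: it presumes the `@arbitrum/sdk` package with ethers v5, and the API names and RPC endpoints are recalled from memory rather than taken from this guide, so verify them against the Arbitrum SDK documentation before relying on them:

```typescript
// Sketch: manually redeeming a pending L1->L2 retryable ticket (assumed @arbitrum/sdk v3, ethers v5).
import { providers, Wallet } from "ethers";
import { L1TransactionReceipt, L1ToL2MessageStatus } from "@arbitrum/sdk";

async function redeemIfNeeded(l1TxHash: string): Promise<void> {
  // Assumed public RPC endpoints; substitute providers you trust.
  const l1Provider = new providers.JsonRpcProvider("https://ethereum.publicnode.com");
  const l2Provider = new providers.JsonRpcProvider("https://arb1.arbitrum.io/rpc");
  // A wallet holding a little ETH on Arbitrum to pay for the redeem transaction.
  const l2Wallet = new Wallet(process.env.PRIVATE_KEY!, l2Provider);

  const receipt = await l1Provider.getTransactionReceipt(l1TxHash);
  const l1Receipt = new L1TransactionReceipt(receipt);

  // The transfer transaction produces one cross-chain message (the "ticket").
  const [message] = await l1Receipt.getL1ToL2Messages(l2Wallet);
  const { status } = await message.waitForStatus();

  if (status === L1ToL2MessageStatus.FUNDS_DEPOSITED_ON_L2) {
    // Auto-redeem failed (for example after an L2 gas spike); retry within the 7-day window.
    const redeemTx = await message.redeem();
    await redeemTx.wait();
  }
}
```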
![Start the trnasfer to L2](/img/startTransferL2.png) -## Step 2: Waiting for the subgraph to get to L2 +## Adım 2: Subgraph'ın Katman2'ye ulaşmasını bekleme -After you start the transfer, the message that sends your L1 subgraph to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Transferi başlattıktan sonra, Katman1 subgraph'ınızı Katman2'ye gönderen mesajın Arbitrum köprüsü üzerinden yayılması gerekir. Bu işlem yaklaşık 20 dakika sürer (köprü, işlemi içeren ana ağ bloğunun olası zincir yeniden düzenlemelerine karşı "güvenli" olmasını bekler). -Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Bu bekleme süresi sona erdiğinde Arbitrum, Katman2 sözleşmelerinde transferi otomatik olarak yürütmeye çalışacaktır. ![Wait screen](/img/screenshotOfWaitScreenL2.png) -## Step 3: Confirming the transfer +## Adım 3: Transferi onaylama -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the subgraph on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your subgraph to L2 will be pending and require a retry within 7 days. +Çoğu durumda, bu adım otomatik olarak yürütülecektir çünkü 1. adımda yer alan Katman2 gas'ı Arbitrum sözleşmelerinde subgraph'ı içeren işlemi yürütmek için yeterli olacaktır. Ancak bazı durumlarda, Arbitrum'daki gas fiyatlarındaki bir artış bu otomatik yürütmenin başarısız olmasına neden olabilir. Bu durumda, subgraph'ınızı Katman2'ye gönderen "bilet" beklemede olacak ve 7 gün içinde yeniden denenmesi gerekecektir. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Durum buysa, Arbitrum'da bir miktar ETH bulunan bir Katman2 cüzdanı bağlanmanız, cüzdan ağınızı Arbitrum'a geçirmeniz ve işlemi yeniden denemek için "Transferi Onayla" seçeneğine tıklamanız gerekecektir. ![Confirm the transfer to L2](/img/confirmTransferToL2.png) -## Step 4: Finishing the transfer on L2 +## Adım 4: Katman2'de transferin tamamlanması -At this point, your subgraph and GRT have been received on Arbitrum, but the subgraph is not published yet. You will need to connect using the L2 wallet that you chose as the receiving wallet, switch your wallet network to Arbitrum, and click "Publish Subgraph." +Bu noktada, subgraph'ınız ve GRT'niz Arbitrum'a ulaşmıştır, ancak subgraph henüz yayınlanmamıştır. Alıcı cüzdan olarak seçtiğiniz Katman2 cüzdanını bağlanmanız, cüzdan ağınızı Arbitrum'a geçirmeniz ve "Subgraph'ı Yayınla" seçeneğine tıklamanız gerekecektir. ![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) ![Wait for the subgraph to be published](/img/waitForSubgraphToPublishL2TransferTools.png) -This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. +Bu, Arbitrum üzerinde çalışan İndeksleyicilerin hizmet vermeye başlayabilmesi için subgraph'ı yayınlayacaktır. Ayrıca Katman1'den aktarılan GRT'yi kullanarak kürasyon sinyalini de basacaktır. 
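Once the subgraph is published on L2, clients query it at the Arbitrum gateway URL described in Step 5 below, so switching a client over is usually just a URL change. A minimal sketch in TypeScript; the API key and subgraph ID are placeholders you would copy from Subgraph Studio or Explorer after the transfer:

```typescript
// Query the transferred subgraph through the Arbitrum gateway (placeholders in brackets).
const L2_QUERY_URL =
  "https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]";

async function latestIndexedBlock(): Promise<number> {
  const res = await fetch(L2_QUERY_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // `_meta` is exposed by every subgraph and is a quick way to check sync progress on L2.
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  const { data } = await res.json();
  return data._meta.block.number;
}

latestIndexedBlock().then((block) => console.log(`L2 subgraph is synced to block ${block}`));
```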
-## Step 5: Updating the query URL +## Adım 5: Sorgu URL'sini güncelleme -Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be : +Subgraph'ınız Arbitrum'a başarıyla transfer edildi! Subgraph'ı sorgulamak için yeni URL şu şekilde olacaktır: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -Note that the subgraph ID on Arbitrum will be a different than the one you had on mainnet, but you can always find it on Explorer or Studio. As mentioned above (see "Understanding what happens with signal, your L1 subgraph and query URLs") the old L1 URL will be supported for a short while, but you should switch your queries to the new address as soon as the subgraph has been synced on L2. +Arbitrum'daki subgraph kimliğinin ana ağda sahip olduğunuzdan farklı olacağını unutmayın, ancak bunu her zaman Gezgin veya Stüdyo aracılığıyla bulabilirsiniz. Yukarıda belirtildiği gibi ("Sinyal, Katman1 subgraph'ınız ve sorgu URL'leri ile neler gerçekleştiğini anlama" bölümüne bakın) eski Katman1 URL'si kısa bir süre için desteklenecektir, ancak subgraph Katman2'de senkronize edilir edilmez sorgularınızı yeni adrese geçirmelisiniz. -## How to transfer your curation to Arbitrum (L2) +## Kürasyonunuzu Arbitrum'a nasıl transfer edebilirsiniz (Katman2) -## Understanding what happens to curation on subgraph transfers to L2 +## Katman2'ye subgraph transferlerinde kürasyona ne olduğunu anlama -When the owner of a subgraph transfers a subgraph to Arbitrum, all of the subgraph's signal is converted to GRT at the same time. This applies to "auto-migrated" signal, i.e. signal that is not specific to a subgraph version or deployment but that follows the latest version of a subgraph. +Bir subgraph'ın sahibi subgraph'ı Arbitrum'a transfer ettiğinde, subgrpah'ın tüm sinyali aynı anda GRT'ye dönüştürülür. Bu, "otomatik olarak taşınan" sinyal, yani bir subgraph sürümüne veya dağıtımına özgü olmayan ancak bir subgraph'ın en son sürümünü takip eden sinyal için geçerlidir. -This conversion from signal to GRT is the same as what would happen if the subgraph owner deprecated the subgraph in L1. When the subgraph is deprecated or transferred, all curation signal is "burned" simultaneously (using the curation bonding curve) and the resulting GRT is held by the GNS smart contract (that is the contract that handles subgraph upgrades and auto-migrated signal). Each Curator on that subgraph therefore has a claim to that GRT proportional to the amount of shares they had for the subgraph. +Sinyalden GRT'ye bu dönüşüm, subgraph sahibinin subgraph'ı Katman1'de kullanımdan kaldırması durumunda gerçekleşecek olanla aynıdır. Subgraph kullanımdan kaldırıldığında veya transfer edildiğinde, tüm kürasyon sinyali aynı anda "yakılır" (kürasyon bağlanma eğrisi kullanılarak) ve ortaya çıkan GRT, GNS akıllı sözleşmesi (yani subgraph yükseltmelerini ve otomatik olarak taşınan sinyali işleyen sözleşme) tarafından tutulur. Bu nedenle, bu subgraph'daki her Küratör, subgraph için sahip oldukları stake miktarıyla orantılı olarak GRT üzerinde hak iddia eder. -A fraction of these GRT corresponding to the subgraph owner is sent to L2 together with the subgraph. +Bu GRT tokenlerin subgraph sahibine ilişkin bir bölümü, subgraph ile birlikte Katman2'ye iletilir. -At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. 
There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. +Bu noktada, küratörlüğü yapılan GRT daha fazla sorgu ücreti biriktirmeyecektir, bu nedenle Küratörler GRT'lerini geri çekmeyi veya yeni kürasyon sinyali basmak için kullanılabilecekleri Katman2'deki aynı subgraph'a transfer etmeyi seçebilirler. GRT süresiz bir şekilde kullanılabileceğinden ve ne zaman yaptıklarına bakılmaksızın herkes paylarıyla orantılı bir miktar alacağından bunu yapmak için acele etmeye gerek yoktur. -## Choosing your L2 wallet +## Katman2 cüzdanınızın seçimi -If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. +Küratörlüğünü yaptığınız GRT'yi Katman2'ye transfer etmeye karar verirseniz, Katman2'deki küratörlük sinyaline sahip olacak farklı bir cüzdan seçebilirsiniz. -If you're using a "regular" wallet like Metamask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. +Metamask gibi "genel" bir cüzdan (Harici Olarak Sahip Olunan Hesap veya EOA, yani akıllı sözleşme olmayan bir cüzdan) kullanıyorsanız, bu opsiyoneldir ve Katman1'deki ile aynı Küratör adresini kullanmanız önerilir. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 receiving wallet address. +Çoklu imza (örneğin Safe) gibi bir akıllı sözleşme cüzdanı kullanıyorsanız, farklı bir Katman2 cüzdan adresi seçmek zorunludur, çünkü büyük olasılıkla bu hesap yalnızca ana ağ üzerinde kullanılabilir ve bu cüzdanı kullanarak Arbitrum'da işlem yapamazsınız. Bir akıllı sözleşme cüzdanı veya çoklu imza cüzdanı kullanmaya devam etmek istiyorsanız, Arbitrum'da yeni bir cüzdan oluşturun ve adresini Katman2 alıcı adresiniz olarak kullanın. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** +**Sizin kontrolünüzde ve Arbitrum üzerinde işlem yapabilen bir cüzdan adresi kullanmak oldukça önemlidir. Aksi takdirde, kürasyon kaybolacak ve kurtarılamayacaktır.** -## Sending curation to L2: Step 1 +## Katman2'ye kürasyon gönderme: Adım 1 -Before starting the transfer, you must decide which address will own the curation on L2 (see "Choosing your L2 wallet" above), and it is recommended having some ETH for gas already bridged on Arbitrum in case you need to retry the execution of the message on L2. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io) - since gas fees on Arbitrum are so low, you should only need a small amount, e.g. 0.01 ETH will probably be more than enough. +Transfere başlamadan önce, Katman2'deki kürasyonun hangi adrese ait olacağına karar vermelisiniz (yukarıdaki "Katman2 cüzdanınızın seçinmi" bölümüne bakın) ve mesajın Katman2'de yürütülmesini yeniden denemeniz gerektiğinde Arbitrum'da zaten köprülenmiş gas için kullanabileceğiniz bir miktar ETH bulundurmanız önerilir. 
Bazı borsalardan ETH satın alabilir ve doğrudan Arbitrum'a çekebilir veya bir ana ağ cüzdanından Katman2'ye ETH göndermek için Arbitrum köprüsünü kullanabilirsiniz: [bridge.arbitrum.io](http://bridge.arbitrum.io) - Arbitrum'daki gas ücretleri çok düşük olduğundan, yalnızca küçük bir miktara ihtiyacınız olacak, örneğin 0.01 ETH muhtemelen fazlasıyla yeterli olacaktır. -If a subgraph that you curate to has been transferred to L2, you will see a message on Explorer telling you that you're curating to a transferred subgraph. +Küratörlüğünü yaptığınız bir subgraph Katman2'ye transfer edilmişse, Gezgin'de transfer edilmiş bir subgraph'a küratörlük yaptığınızı belirten bir mesaj göreceksiniz. -When looking at the subgraph page, you can choose to withdraw or transfer the curation. Clicking on "Transfer Signal to Arbitrum" will open the transfer tool. +Subgraph sayfasına bakarken, kürasyonu geri çekmeyi veya transfer etmeyi seçebilirsiniz. "Sinyali Arbitrum'a Transfer Et" seçeneğine tıkladığınızda transfer aracı açılacaktır. ![Transfer signal](/img/transferSignalL2TransferTools.png) -After opening the Transfer Tool, you may be prompted to add some ETH to your wallet if you don't have any. Then you will be able to input the L2 wallet address into the "Receiving wallet address" field - **make sure you've entered the correct address here**. Clicking on Transfer Signal will prompt you to execute the transaction on your wallet (note some ETH value is included to pay for L2 gas); this will initiate the transfer. +Transfer Aracını açtıktan sonra, eğer hiç ETH'niz yoksa cüzdanınıza bir miktar ETH eklemeniz istenebilir. Daha sonra, katman2 cüzdan adresini "Alıcı cüzdan adresi" alanına girebileceksiniz - **buraya doğru adresi girdiğinizden emin olun**. Sinyal'i Transfer Et'e tıkladığınızda, cüzdanınızda işlemi gerçekleştirmeniz istenecektir (Katman2 gas'ı için ödeme yapmak üzere bir miktar ETH'nin dahil edildiğini unutmayın); bu, transferi başlatacaktır. -If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retryable tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. +Bu adımı uygularsanız, **3. adımı tamamlamak için yedi günden daha kısa bir sürede ilerlediğinizden mutlaka emin olmalısınız; aksi halde sinyal GRT'nizi kaybedeceksiniz.** Bunun nedeni Arbitrum'da Katman1-Katman2 mesajlaşmasının çalışma şeklidir: köprü üzerinden gönderilen mesajlar 7 gün içinde yürütülmesi gereken "yeniden denenebilir biletler"dir ve Arbitrum'da gas fiyatında ani artışlar olması durumunda ilk yürütmenin yeniden denenmesi gerekebilir. -## Sending curation to L2: step 2 +## Katman2'ye kürasyon gönderme: Adım 2 -Starting the transfer: +Transferin başlatılması: ![Send signal to L2](/img/sendingCurationToL2Step2First.png) -After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). +Transferi başlattıktan sonra, Katman1 kürasyon'unuzu Katman2'ye gönderen mesajın Arbitrum köprüsü üzerinden yayılması gerekir. Bu işlem yaklaşık 20 dakika sürer (köprü, işlemi içeren ana ağ bloğunun olası zincir yeniden düzenlemelerine karşı "güvenli" olmasını bekler). 
-Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. +Bu bekleme süresi sona erdiğinde Arbitrum, Katman2 sözleşmelerinde transferi otomatik olarak yürütmeye çalışacaktır. ![Sending curation signal to L2](/img/sendingCurationToL2Step2Second.png) -## Sending curation to L2: step 3 +## Katman2'ye kürasyon gönderme: Adım 3 -In most cases, this step will auto-execute as the L2 gas included in step 1 should be sufficient to execute the transaction that receives the curation on the Arbitrum contracts. In some cases, however, it is possible that a spike in gas prices on Arbitrum causes this auto-execution to fail. In this case, the "ticket" that sends your curation to L2 will be pending and require a retry within 7 days. +Çoğu durumda, bu adım otomatik olarak yürütülecektir çünkü 1. adımda yer alan Katman2 gas'ı Arbitrum sözleşmelerinde kürasyon'u içeren işlemi yürütmek için yeterli olacaktır. Ancak bazı durumlarda, Arbitrum'daki gas fiyatlarındaki bir artış bu otomatik yürütmenin başarısız olmasına neden olabilir. Bu durumda, kürasyon'unuzu Katman2'ye gönderen "bilet" beklemede olacak ve 7 gün içinde yeniden denenmesi gerekecektir. -If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. +Durum buysa, Arbitrum'da bir miktar ETH bulunan bir Katman2 cüzdanı bağlanmanız, cüzdan ağınızı Arbitrum'a geçirmeniz ve işlemi yeniden denemek için "Transferi Onayla" seçeneğine tıklamanız gerekecektir. ![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) -## Withdrawing your curation on L1 +## Katman1'deki kürasyonunuzu çekme -If you prefer not to send your GRT to L2, or you'd rather bridge the GRT manually, you can withdraw your curated GRT on L1. On the banner on the subgraph page, choose "Withdraw Signal" and confirm the transaction; the GRT will be sent to your Curator address. +GRT'nizi Katman2'ye göndermek istemiyorsanız veya manuel olarak köprülemeyi tercih ediyorsanız, Katman1'de kürasyonu gerçekleşmiş GRT'lerinizi çekebilirsiniz. Subgraph sayfasındaki afişte "Sinyali Çek" seçeneğini seçin ve işlemi onaylayın; GRT, Küratör adresinize gönderilecektir. diff --git a/website/pages/tr/billing.mdx b/website/pages/tr/billing.mdx index 3c21e5de1cdc..8e8723f7d8de 100644 --- a/website/pages/tr/billing.mdx +++ b/website/pages/tr/billing.mdx @@ -1,158 +1,208 @@ --- -title: Billing +title: Faturalandırma --- -> Invoices are generated on a weekly basis. +> Faturalar haftalık olarak oluşturulur. -There are two options for paying for your query fees: +Sorgu ücretlerinizi ödemek için iki seçeneğiniz vardır: -- [Paying with fiat currency with Banxa](#billing-with-banxa) -- [Paying with crypto wallet](#billing-on-arbitrum) +- [Banxa üzerinden fiat para birimi ile ödeme](#billing-with-banxa) +- [Kripto cüzdanı ile ödeme](#billing-on-arbitrum) -## Billing with Banxa +## Banxa ile Faturalandırma -Banxa enables you to bypass the need for an exchange and pay for your query fees using the fiat currency of your choice. The fiat currency will be converted to GRT, added to your account balance on the billing contract, and used to pay for queries associated with your API keys. +Banxa, takas ihtiyacını ortadan kaldırmanıza ve seçtiğiniz fiat para birimini kullanarak sorgu ücretlerinizi ödemenize olanak tanır. 
Fiat para birimi GRT'ye dönüştürülecek, faturalandırma sözleşmesindeki hesap bakiyenize eklenecek ve API anahtarlarınızla ilişkili sorgular için ödeme yapmak için kullanılacaktır. -There may be KYC requirements depending on the regulations in your country. For more information about KYC, please visit [Banxa's FAQ page](https://docs.banxa.com/docs/faqs). +Ülkenizdeki düzenlemelere bağlı olarak KYC gereklilikleri olabilir. KYC hakkında daha fazla bilgi için lütfen [Banxa'nın SSS sayfasını](https://docs.banxa.com/docs/faqs) ziyaret edin. -You can learn more about Banxa by reading their [documentation](https://docs.banxa.com/docs). +[Burada yer alan dökümanları](https://docs.banxa.com/docs) okuyarak Banxa hakkında daha fazla bilgi edinebilirsiniz. -### Paying for query fees with Banxa +### Banxa ile sorgu ücretlerinin ödenmesi -1. Select “Pay with Card” option in [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit). -2. Enter the amount of GRT to be added to your account balance. -3. Click the 'Continue with Banxa' button. -4. Enter necessary banking information on Banxa including payment method & fiat currency of choice. -5. Finish the transaction. +1. [Subgraph Studio](https://thegraph.com/studio/billing/?show=Deposit)'da “Kartla Öde” seçeneğini seçin. +2. Hesap bakiyenize eklenecek GRT tutarını giriniz. +3. "Banxa ile Devam Et" düğmesine tıklayın. +4. Ödeme yöntemi & tercih edilen fiat para birimi dahil olmak üzere Banxa'da gerekli bankacılık bilgilerini girin. +5. İşlemi tamamlayın. -It may take up to 10 minutes to complete the transaction. Once the transaction is confirmed, the purchased GRT will automatically be added to your account balance on Arbitrum. +İşlemin tamamlanması 10 dakika kadar sürebilir. İşlem onaylandıktan sonra satın alınan GRT, Arbitrum'daki hesap bakiyenize otomatik olarak eklenecektir. -## Billing on Arbitrum +## Arbitrum'da Faturalandırma -While The Graph protocol operates on Ethereum Mainnet, [the billing contract](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a) lives on the [Arbitrum](https://arbitrum.io/) network to reduce transaction times and cost. You'll be required to pay the query fees generated from your API keys. Using the billing contract, you'll be able to: +Graph protokolü Ethereum Mainnet üzerinde çalışırken [faturalandırma sözleşmesi](https://arbiscan.io/address/0x1b07d3344188908fb6deceac381f3ee63c48477a), işlem sürelerini ve maliyeti azaltmak için [Arbitrum](https://arbitrum.io/) ağında yaşar. API anahtarlarınızdan oluşturulan sorgu ücretlerini ödemeniz istenecektir. Faturalandırma sözleşmesini kullanarak şunları yapabileceksiniz: -- Add and withdraw GRT from your account balance. -- Keep track of your balances based on how much GRT you have added to your account balance, how much you have removed, and your invoices. -- Automatically pay invoices based on query fees generated, as long as there is enough GRT in your account balance. +- Hesap bakiyenizden GRT ekleyin ve çekin. +- Hesap bakiyenize ne kadar GRT eklediğiniz, ne kadar çektiğiniz ve faturalarınıza göre bakiyelerinizi takip edin. +- Hesap bakiyenizde yeterli GRT olduğu sürece, oluşturulan sorgu ücretlerine göre faturaları otomatik olarak ödeyin. -### Adding GRT using a crypto wallet +### Kripto cüzdanı kullanarak GRT ekleme -> This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). + -1. 
Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +> Bu bölüm, kripto cüzdanınızda hali hazırda GRT olduğu ve Ethereum ana ağında olduğunuz varsayılarak yazılmıştır. GRT'niz yoksa, GRT'yi nasıl alacağınızı [buradan](#getting-grt) öğrenebilirsiniz. -2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". +Bir kripto cüzdanı kullanarak fatura bakiyenize GRT eklemeyi gösteren bir içerik arıyorsanız bu [video](https://youtu.be/4Bw2sh0FxCg)yu izleyin. -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. +1. [Subgraph Stüdyo Faturalandırma Sayfasına](https://thegraph.com/studio/billing/) gidin. -4. Enter the amount of GRT you want to add to your account balance. You can also select the maximum amount of GRT you want to add to your account balance by clicking on the "Max" button. +2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdan seçim sayfasına yönlendirileceksiniz. Cüzdanınızı seçin ve "Bağlan" a tıklayın. -5. Click 'Allow GRT Access' to allow the Subgraph Studio to access your GRT. Sign the associated transaction in your wallet. This will not cost any gas. +3. Sayfanın ortasındaki "GRT Ekle" düğmesini tıklayın. Bir yan panel görünecektir. -6. Click 'Add GRT to account balance' to add the GRT to your account balance. Sign the associated transaction in your wallet. This will cost gas. +4. Hesap bakiyenize eklemek istediğiniz GRT miktarını girin. Ayrıca "Maksimum" butonuna tıklayarak hesap bakiyenize eklemek istediğiniz maksimum GRT miktarını da seçebilirsiniz. -7. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +5. Subgraph Stüdyo'nun GRT'nize erişmesine izin vermek için "GRT Erişimine İzin Ver" seçeneğini tıklayın. İlişkili işlemi cüzdanınızda imzalayın. Bu herhangi bir gaz ücretine mal olmayacaktır. -### Withdrawing GRT using a crypto wallet +6. GRT'yi hesap bakiyenize eklemek için 'Hesap bakiyesine GRT Ekle'yi tıklayın. İlişkili işlemi cüzdanınızda imzalayın. Bu işlem gaz ücretine mal olacaktır. -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on the Arbitrum network. +7. İşlem onaylandıktan sonra, GRT'nin bir saat içinde hesap bakiyenize eklendiğini göreceksiniz. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +### Bir kripto cüzdanı kullanarak GRT'yi çekme -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +> Bu bölüm, [Subgraph Stüdyo](https://thegraph.com/studio/billing/)'da hesap bakiyenize GRT yatırdığınız ve Arbitrum ağında olduğunuz varsayılarak yazılmıştır. -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +1. [Subgraph Stüdyo Faturalandırma Sayfası](https://thegraph.com/studio/billing/)'na gidin. -4. Enter the amount of GRT you would like to withdraw. +2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdanınızı seçin ve "Bağlan"a tıklayın. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. The GRT will be sent to your Arbitrum wallet. +3. Sayfanın ortasındaki "GRT Ekle" düğmesinin yanındaki açılır menüyü tıklayın. GRT'yi çekmeyi seçin. 
Bir yan panel görünecektir. -6. Once the transaction is confirmed, you'll see the GRT withdrawn from your account balance in your Arbitrum wallet. +4. Çekmek istediğiniz GRT miktarını girin. -### Adding GRT using a multisig wallet +5. GRT'yi hesap bakiyenizden çekmek için 'GRT Çek'i tıklayın. İlişkili işlemi cüzdanınızda imzalayın. Bu gaza mal olacaktır. GRT, Arbitrum cüzdanınıza gönderilecektir. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +6. İşlem onaylandıktan sonra, Arbitrum cüzdanınızdaki hesap bakiyenizden GRT'nin çekildiğini göreceksiniz. -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. +### Multisig cüzdanı kullanarak GRT ekleme -3. Click the 'Add GRT' button at the center of the page. A side panel will appear. + -4. Once the transaction is confirmed, you'll see the GRT added to your account balance within an hour. +1. [Subgraph Stüdyo Faturalandırma Sayfası](https://thegraph.com/studio/billing/)'na gidin. -### Withdrawing GRT using a multisig wallet +2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdanınızı seçin ve "Bağlan"a tıklayın. [Gnosis-Safe](https://gnosis-safe.io/) kullanıyorsanız, imzalama cüzdanınızın yanı sıra multisig'inizi de bağlayabilirsiniz. Ardından ilgili mesajı imzalayın. Bu herhangi bir gaz ücretine mal olmayacak. -> This section is written assuming you have deposited GRT into your account balance on [Subgraph Studio](https://thegraph.com/studio/billing/) and that you're on Ethereum mainnet. +3. Sayfanın ortasındaki 'GRT Ekle' düğmesini tıklayın. Bir yan panel görünecektir. -1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). +4. İşlem onaylandıktan sonra, GRT'nin bir saat içinde hesap bakiyenize eklendiğini göreceksiniz. -2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". +### Multisig cüzdanı kullanarak GRT'yi çekme -3. Click the dropdown next to the 'Add GRT' button at the center of the page. Select withdraw GRT. A side panel will appear. +> Bu bölüm, [Subgraph Stüdyo](https://thegraph.com/studio/billing/) üzerindeki hesap bakiyenize GRT yatırdığınız ve Ethereum ana ağında olduğunuz varsayılarak yazılmıştır. -4. Enter the amount of GRT you would like to withdraw. Specify the receiving wallet which will receive the GRT from this transaction. The GRT will be sent to the receiving wallet on Arbitrum. +1. [Subgraph Stüdyo Faturalandırma Sayfası](https://thegraph.com/studio/billing/)'na gidin. -5. Click 'Withdraw GRT' to withdraw the GRT from your account balance. Sign the associated transaction in your wallet. This will cost gas. +2. Sayfanın sağ üst köşesindeki "Cüzdanı Bağla" düğmesine tıklayın. Cüzdanınızı seçin ve "Bağlan"a tıklayın. -6. Once the transaction is confirmed, you'll see the GRT added to your Arbitrum wallet within an hour. +3. Sayfanın ortasındaki "GRT Ekle" düğmesinin yanındaki açılır menüyü tıklayın. GRT'yi çekmeyi seçin. Bir yan panel görünecektir. -## Getting GRT +4. Çekmek istediğiniz GRT miktarını girin. Bu işlemden GRT alacak alıcı cüzdanı belirtin. GRT, Arbitrum'da alıcı cüzdana gönderilecektir. -This section will show you how to get GRT to pay for query fees. +5. 
GRT'yi hesap bakiyenizden çekmek için 'GRT Çek'i tıklayın. İlişkili işlemi cüzdanınızda imzalayın. Bu işlem gaz ücretine mal olacaktır. + +6. İşlem onaylandıktan sonra, bir saat içinde GRT'nin Arbitrum cüzdanınıza eklendiğini göreceksiniz. + +## GRT Almak + +Bu bölüm, sorgu ücretlerini ödemek için GRT'yi nasıl alacağınızı gösterecektir. ### Coinbase -This will be a step by step guide for purchasing GRT on Coinbase. +Bu, Coinbase'de GRT satın almak için adım adım bir kılavuz olacaktır. -1. Go to [Coinbase](https://www.coinbase.com/) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy/Sell" button on the top right of the page. -4. Select the currency you want to purchase. Select GRT. -5. Select the payment method. Select your preferred payment method. -6. Select the amount of GRT you want to purchase. -7. Review your purchase. Review your purchase and click "Buy GRT". -8. Confirm your purchase. Confirm your purchase and you will have successfully purchased GRT. -9. You can transfer the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - To transfer the GRT to your crypto wallet, click on the "Accounts" button on the top right of the page. - - Click on the "Send" button next to the GRT account. - - Enter the amount of GRT you want to send and the wallet address you want to send it to. - - Click "Continue" and confirm your transaction. -Please note that for larger purchase amounts, Coinbase may require you to wait 7-10 days before transferring the full amount to a crypto wallet. +1. [Coinbase](https://www.coinbase.com/)'e gidin ve bir tane hesap oluşturun. +2. Bir hesap oluşturduktan sonra kimliğinizi, KYC (Müşterinizi Tanıyın) olarak bilinen bir süreçle doğrulamanız gerekecektir. Bu, tüm merkezi veya gözetim altındaki kripto borsaları için standart bir prosedürdür. +3. Kimliğinizi doğruladıktan sonra GRT satın alabilirsiniz. Bunu sayfanın sağ üst köşesindeki "Al/Sat" butonuna tıklayarak yapabilirsiniz. +4. Satın almak istediğiniz para birimini ve ardından GRT'yi seçin. +5. Tercih ettiğiniz ödeme yöntemini seçin. +6. Satın almak istediğiniz GRT miktarını seçin. +7. Satın alma işleminizi gözden geçirin ve "GRT Satın Al"a tıklayın. +8. Satın alma işleminizi onaylayın ve GRT'yi başarıyla satın almış olacaksınız. +9. GRT'yi hesabınızdan [MetaMask](https://metamask.io/) gibi kripto cüzdanınıza aktarabilirsiniz. + - GRT'yi kripto cüzdanınıza aktarmak için sayfanın sağ üst köşesindeki "Hesaplar" düğmesine tıklayın. + - GRT hesabının yanındaki "Gönder" düğmesine tıklayın. + - Göndermek istediğiniz GRT miktarını ve göndermek istediğiniz cüzdan adresini girin. + - "Devam"a tıklayın ve işleminizi onaylayın. -Lütfen daha büyük satın alma tutarları için Coinbase'in tüm tutarı bir kripto cüzdanına aktarmadan önce 7-10 gün beklemenizi gerektirebileceğini unutmayın. -You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). +Coinbase'de GRT edinmekle alakalı daha fazla bilgiyi [buradan](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) öğrenebilirsiniz. 
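If you want to double-check that the transferred GRT has actually landed in your self-custody wallet, you can read the balance straight from the GRT ERC-20 contract. The snippet below is only a minimal sketch using ethers.js v5: the RPC URL and wallet address are placeholders you would replace with your own, and the contract address is the Ethereum mainnet GRT address also quoted in the Uniswap section further down.

```tsx
// Minimal sketch (ethers.js v5): read a wallet's GRT balance after the transfer.
// "YOUR_RPC_URL" and "YOUR_WALLET_ADDRESS" are placeholders, not real values.
import { ethers } from "ethers";

// Ethereum mainnet GRT token contract (same address quoted in the Uniswap section).
const GRT_ADDRESS = "0xc944E90C64B2c07662A292be6244BDf05Cda44a7";
const ERC20_ABI = ["function balanceOf(address owner) view returns (uint256)"];

async function checkGrtBalance(wallet: string): Promise<void> {
  const provider = new ethers.providers.JsonRpcProvider("YOUR_RPC_URL");
  const grt = new ethers.Contract(GRT_ADDRESS, ERC20_ABI, provider);
  const raw = await grt.balanceOf(wallet);
  // GRT uses 18 decimals, so formatEther produces a human-readable amount.
  console.log(`GRT balance: ${ethers.utils.formatEther(raw)}`);
}

checkGrtBalance("YOUR_WALLET_ADDRESS").catch(console.error);
```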
### Binance -This will be a step by step guide for purchasing GRT on Binance. +Bu, Binance'de GRT satın almak için adım adım bir rehberdir. -1. Go to [Binance](https://www.binance.com/en) and create an account. -2. Once you have created an account, you will need to verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. -3. Once you have verified your identity, you can purchase GRT. You can do this by clicking on the "Buy Now" button on the homepage banner. -4. You will be taken to a page where you can select the currency you want to purchase. Select GRT. -5. Select your preferred payment method. You'll be able to pay with different fiat currencies such as Euros, US Dollars, and more. -6. Select the amount of GRT you want to purchase. -7. Review your purchase and click "Buy GRT". -8. Confirm your purchase and you will be able to see your GRT in your Binance Spot Wallet. -9. You can withdraw the GRT from your account to your crypto wallet such as [MetaMask](https://metamask.io/). - - [To withdraw](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570) the GRT to your crypto wallet, add your crypto wallet's address to the withdrawel whitelist. - - Click on the "wallet" button, click withdraw, and select GRT. - - Enter the amount of GRT you want to send and the whitelisted wallet address you want to send it to. - - Click "Continue" and confirm your transaction. +1. [Binance](https://www.binance.com/en)'e gidin ve bir hesap oluşturun. +2. Bir hesap oluşturduktan sonra kimliğinizi, KYC (Müşterinizi Tanıyın) olarak bilinen bir süreçle doğrulamanız gerekecektir. Bu, tüm merkezi veya gözetim altındaki kripto borsaları için standart bir prosedürdür. +3. Kimliğinizi doğruladıktan sonra GRT satın alabilirsiniz. Bunu, ana sayfa başlığındaki "Şimdi Satın Al" düğmesine tıklayarak yapabilirsiniz. +4. Satın almak istediğiniz para birimini seçebileceğiniz bir sayfaya yönlendirileceksiniz. GRT'yi seçin. +5. Tercih ettiğiniz ödeme yöntemini seçin. Euro, Dolar ve daha fazlası gibi farklı itibari para birimleriyle ödeme yapabileceksiniz. +6. Satın almak istediğiniz GRT miktarını seçin. +7. Satın alma işleminizi gözden geçirin ve "GRT Satın Al"a tıklayın. +8. Satın alma işleminizi onaylayın ve GRT'nizi Binance Spot Cüzdanınızda görebileceksiniz. +9. GRT'yi hesabınızdan [MetaMask](https://metamask.io/) gibi kripto cüzdanınıza çekebilirsiniz. + - GRT'yi kripto cüzdanınıza [geri çekmek için](https://www.binance.com/en/blog/ecosystem/how-to-transfer-crypto-from-binance-to-trust-wallet-8305050796630181570), kripto cüzdanınızın adresini para çekme beyaz listenize ekleyin. + - "Cüzdan" düğmesine tıklayın, para çekme seçeneğine tıklayın ve GRT'yi seçin. + - Göndermek istediğiniz GRT miktarını ve göndermek istediğiniz beyaz listedeki cüzdan adresini girin. + - "Devam"a tıklayın ve işleminizi onaylayın. -You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). +Binance'de GRT edinmekle alakalı daha fazla bilgiyi [buradan](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) öğrenebilirsiniz. ### Uniswap -This is how you can purchase GRT on Uniswap. +Uniswap'te GRT'yi bu şekilde satın alabilirsiniz. + +1. [Uniswap](https://app.uniswap.org/#/swap)'e gidin ve cüzdanınızı bağlayın. +2. 
Takas etmek istediğiniz belirteci seçin. ETH'yi seçin. +3. Takas etmek istediğiniz belirteci seçin. GRT'yi seçin. + - Doğru token ile takas yaptığınızdan lütfen emin olun. GRT akıllı sözleşme adresi: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` +4. Takas etmek istediğiniz ETH miktarını girin. +5. "Swap"e tıklayın. +6. Cüzdanınızdaki işlemi onaylayın ve işlemin gerçekleşmesini bekleyin. + +Uniswap'te GRT edinmekle alakalı daha fazla bilgiyi [buradan](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) öğrenebilirsiniz. + +## Ethereum Edinme + +Bu bölüm size işlem ücretlerini veya gaz maliyetlerini ödemek için Ethereum'u (ETH) nasıl edineceğinizi gösterecektir. ETH, Ethereum ağında token transferi veya sözleşmelerle etkileşim gibi işlemleri gerçekleştirmek için gerekmektedir. + +### Coinbase + +Bu, Coinbase'de ETH satın almak için adım adım bir rehberdir. + +1. [Coinbase](https://www.coinbase.com/)'e gidin ve bir tane hesap oluşturun. +2. Bir hesap oluşturduktan sonra, KYC (veya Müşterini Tanı) olarak bilinen bir süreçle kimliğinizi doğrulayın. Bu, tüm merkezi veya emanete dayalı kripto borsaları için standart bir prosedürdür. +3. Kimliğinizi doğruladıktan sonra, sayfanın sağ üst köşesindeki "Al/Sat" düğmesine tıklayarak ETH satın alın. +4. Satın almak istediğiniz para birimini seçin. ETH'yi seçin. +5. Tercih ettiğiniz ödeme yöntemini seçin. +6. Satın almak istediğiniz ETH miktarını girin. +7. Satın alma işleminizi kontrol edin ve "ETH Satın Al"a tıklayın. +8. Satın alımınızı onaylayın ve başarılı bir şekilde ETH satın almış olacaksınız. +9. ETH'yi Coinbase hesabınızdan [MetaMask](https://metamask.io/) gibi kripto cüzdanınıza aktarabilirsiniz. + - ETH'yi kripto cüzdanınıza aktarmak için sayfanın sağ üst köşesindeki "Hesaplar" düğmesine tıklayın. + - ETH hesabının yanında bulunan "Gönder" düğmesine tıklayın. + - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz cüzdan adresini girin. + - "Devam"a tıklayın ve işleminizi onaylayın. + +Coinbase'de ETH edinmekle alakalı daha fazla bilgiyi [buradan](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) öğrenebilirsiniz. + +### Binance + +Bu, Binance'de ETH satın almak için adım adım bir rehberdir. -1. Go to [Uniswap](https://app.uniswap.org/#/swap) and connect your wallet. -2. Select the token you want to swap from. Select ETH. -3. Select the token you want to swap to. Select GRT. - - Make sure you're swapping for the correct token. The GRT smart contract address is: `0xc944E90C64B2c07662A292be6244BDf05Cda44a7` -4. Enter the amount of ETH you want to swap. -5. Click "Swap". -6. Confirm the transaction in your wallet and you wait for the transaction to process. +1. [Binance](https://www.binance.com/en)'e gidin ve bir hesap oluşturun. +2. Bir hesap oluşturduktan sonra, KYC (veya Müşterini Tanı) olarak bilinen bir süreçle kimliğinizi doğrulayın. Bu, tüm merkezi veya emanete dayalı kripto borsaları için standart bir prosedürdür. +3. Kimliğinizi doğruladıktan sonra, ana sayfa afişindeki "Şimdi Satın Al" düğmesine tıklayarak ETH satın alın. +4. Satın almak istediğiniz para birimini seçin. ETH'yi seçin. +5. Tercih ettiğiniz ödeme yöntemini seçin. +6. Satın almak istediğiniz ETH miktarını girin. +7. Satın alma işleminizi kontrol edin ve "ETH Satın Al"a tıklayın. +8. Satın alımınızı onaylayın ve ETH'nizi Binance Spot Cüzdanınızda göreceksiniz. +9. ETH'yi hesabınızdan [MetaMask](https://metamask.io/) gibi kripto cüzdanınıza çekebilirsiniz. 
+ - ETH'yi kripto cüzdanınıza çekmek için, kripto cüzdanınızın adresini güvenilir para çekme adresleri listesine ekleyin. + - "Cüzdan" düğmesine tıklayın, para çekme seçeneğine tıklayın ve ETH'yi seçin. + - Göndermek istediğiniz ETH miktarını ve göndermek istediğiniz güvenilir adresler listesindeki cüzdan adresini girin. + - "Devam"a tıklayın ve işleminizi onaylayın. -You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +Binance'de ETH edinmekle alakalı daha fazla bilgiyi [buradan](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) öğrenebilirsiniz. -## Arbitrum Bridge +## Arbitrum Köprüsü -The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). +Faturalandırma sözleşmesi yalnızca GRT'yi Ethereum ana ağından Arbitrum ağına bağlamak için tasarlanmıştır. GRT'nizi Arbitrum'dan tekrar Ethereum ana ağına aktarmak isterseniz, [Arbitrum Köprüsü](https://bridge.arbitrum.io/?l2ChainId=42161)'nü kullanmanız gerekir. diff --git a/website/pages/tr/chain-integration-overview.mdx b/website/pages/tr/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/tr/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! 
The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/tr/cookbook/arweave.mdx b/website/pages/tr/cookbook/arweave.mdx index 15aaf1a38831..3dc882d5d44c 100644 --- a/website/pages/tr/cookbook/arweave.mdx +++ b/website/pages/tr/cookbook/arweave.mdx @@ -1,111 +1,111 @@ --- -title: Building Subgraphs on Arweave +title: Arweave Üzerinde Subgraphlar Oluşturma --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! 
+> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! -In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. +Bu rehberde, Arweave blok zincirini indekslemek için nasıl Subgraphs oluşturacağınızı ve dağıtacağınızı öğreneceksiniz. -## What is Arweave? +## Arweave Nedir? -The Arweave protocol allows developers to store data permanently and that is the main difference between Arweave and IPFS, where IPFS lacks the feature; permanence, and files stored on Arweave can't be changed or deleted. +Arweave protokolü geliştiricilere verileri kalıcı olarak depolama imkanı sağlar ve bu, Arweave ile IPFS arasındaki temel farktır. IPFS'de böyle bir özellik bulunmaz; yani IPFS'te depolanan dosyalar kalıcı değildir ve Arweave'de depolanan dosyalar değiştirilemez veya silinemez. -Arweave already has built numerous libraries for integrating the protocol in a number of different programming languages. For more information you can check: +Arweave, protokolü farklı programlama dillerine entegre etmek için halihazırda çok sayıda kütüphane oluşturmuştur. Daha fazla bilgi için şurayı kontrol edebilirsiniz: - [Arwiki](https://arwiki.wiki/#/en/main) -- [Arweave Resources](https://www.arweave.org/build) +- [Arweave Kaynakları](https://www.arweave.org/build) -## What are Arweave Subgraphs? +## Arweave Subgraphları Nedir? -The Graph allows you to build custom open APIs called "Subgraphs". Subgraphs are used to tell indexers (server operators) which data to index on a blockchain and save on their servers in order for you to be able to query it at any time using [GraphQL](https://graphql.org/). +Graph, "Subgraphs" adı verilen size özel açık API'lar oluşturmanıza olanak tanır. Subgraphlar, indeksleyicilere (sunucu operatörleri) bir blok zincirinde hangi verileri indekslemeleri gerektiğini ve daha sonra istedikleri zaman [GraphQL](https://graphql.org/) kullanarak bu verileri sorgulayabilmeleri adına verileri sunucularında kaydetmeleri gerektiğini söylemek için kullanılır. -[Graph Node](https://github.com/graphprotocol/graph-node) is now able to index data on Arweave protocol. The current integration is only indexing Arweave as a blockchain (blocks and transactions), it is not indexing the stored files yet. +[Graph Düğümü](https://github.com/graphprotocol/graph-node) artık Arweave protokolündeki verileri indeksleyebiliyor. Mevcut entegrasyon Arweave'i yalnızca bir blok zinciri (bloklar ve işlemler) olarak indeksliyor, ancak henüz depolanan dosyaları indekslemiyor. -## Building an Arweave Subgraph +## Bir Arweave Subgraph'ı Oluşturma -To be able to build and deploy Arweave Subgraphs, you need two packages: +Arweave Subgraphları oluşturabilmek ve dağıtabilmek için iki pakete ihtiyacınız vardır: -1. `@graphprotocol/graph-cli` above version 0.30.2 - This is a command-line tool for building and deploying subgraphs. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-cli) to download using `npm`. -2. `@graphprotocol/graph-ts` above version 0.27.0 - This is library of subgraph-specific types. [Click here](https://www.npmjs.com/package/@graphprotocol/graph-ts) to download using `npm`. +1. `@graphprotocol/graph-cli` 'nin 0.30.2 sürümünün üstü - Bu, subgraphları oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. `npm` kullanarak indirmek için [buraya tıklayın](https://www.npmjs.com/package/@graphprotocol/graph-cli). +2. 
`@graphprotocol/graph-ts`'in 0.27.0 sürümünün üstü - Bu, subgraph'a özgü tiplerin bulunduğu bir kütüphanedir. `npm` kullanarak indirmek için [buraya tıklayın](https://www.npmjs.com/package/@graphprotocol/graph-ts). -## Subgraph's components +## Subgraph'ın bileşenleri -There are three components of a subgraph: +Bir subgraph'ın üç bileşeni vardır: ### 1. Manifest - `subgraph.yaml` -Defines the data sources of interest, and how they should be processed. Arweave is a new kind of data source. +İlgilenilen veri kaynaklarını ve bunların nasıl işlenmesi gerektiğini tanımlar. Arweave yeni bir veri kaynağı türüdür. -### 2. Schema - `schema.graphql` +### 2. Şema - `schema.graphql` -Here you define which data you want to be able to query after indexing your Subgraph using GraphQL. This is actually similar to a model for an API, where the model defines the structure of a request body. +Burada, GraphQL kullanarak Subgraph'ınızı indeksledikten sonra hangi verileri sorgulayabilmek istediğinizi tanımlarsınız. Bu aslında, modelin bir istek gövdesinin yapısını tanımladığı bir API modeline benzer. -The requirements for Arweave subgraphs are covered by the [existing documentation](/developing/creating-a-subgraph/#the-graphql-schema). +Arweave subgraphları için gereksinimler [mevcut dokümantasyonda](/developing/creating-a-subgraph/#the-graphql-schema) ele alınmıştır. -### 3. AssemblyScript Mappings - `mapping.ts` +### 3. AssemblyScript Eşleştirmeleri - `mapping.ts` -This is the logic that determines how data should be retrieved and stored when someone interacts with the data sources you are listening to. The data gets translated and is stored based off the schema you have listed. +Bu, birisi sizin etkinliklerini gözlemlediğiniz veri kaynaklarıyla etkileşimde bulunduğunda verinin nasıl alınması ve depolanması gerektiğini belirleyen mantıktır. Veri çevrilir ve belirttiğiniz şemaya göre depolanır. -During subgraph development there are two key commands: +Subgraph geliştirme sırasında iki anahtar komut vardır: ``` -$ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph codegen # manifest'de tanımlanan şema dosyasından tipleri üretir +$ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm subgraph dosyalarını bir /build klasöründe hazırlar ``` -## Subgraph Manifest Definition +## Subgraph Manifest Tanımı -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: +Subgraph manifesti `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. 
Bir Arweave subgraph'ı özelinde örnek bir subgraph manifesti için aşağıya bakınız: ```yaml specVersion: 0.0.5 description: Arweave Blocks Indexing schema: - file: ./schema.graphql # link to the schema file + file: ./schema.graphql # şema dosyasına bağlantı dataSources: - kind: arweave name: arweave-blocks - network: arweave-mainnet # The Graph only supports Arweave Mainnet + network: arweave-mainnet # Graph yalnızca Arweave Ana Ağı'nı destekler source: - owner: 'ID-OF-AN-OWNER' # The public key of an Arweave wallet - startBlock: 0 # set this to 0 to start indexing from chain genesis + owner: 'ID-OF-AN-OWNER' # Bir Arweave cüzdanının açık anahtarı + startBlock: 0 # indekslemeyi zincir oluşumundan başlatmak için bunu 0 olarak ayarlayın mapping: apiVersion: 0.0.5 language: wasm/assemblyscript - file: ./src/blocks.ts # link to the file with the Assemblyscript mappings + file: ./src/blocks.ts # Assemblyscript eşleştirmelerinin bulunduğu dosyaya bağlantı entities: - Block - Transaction blockHandlers: - - handler: handleBlock # the function name in the mapping file + - handler: handleBlock # eşleştirme dosyasındaki fonksiyon adı transactionHandlers: - - handler: handleTx # the function name in the mapping file + - handler: handleTx # eşleştirme dosyasındaki fonksiyon adı ``` -- Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` -- Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet +- Arweave subgraphları yeni bir veri kaynağı türünü tanıtır (`arweave`) +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` +- Arweave veri kaynakları, bir Arweave cüzdanının genel anahtarı olan opsiyonel bir source.owner alanı sunar -Arweave data sources support two types of handlers: +Arweave veri kaynakları iki tür işleyiciyi destekler: -- `blockHandlers` - Run on every new Arweave block. No source.owner is required. -- `transactionHandlers` - Run on every transaction where the data source's `source.owner` is the owner. Currently an owner is required for `transactionHandlers`, if users want to process all transactions they should provide "" as the `source.owner` +- `blockHandlers` - Her yeni Arweave bloğunda çalıştırılır. source.owner gerekli değildir. +- `transactionHandlers` - Veri kaynağının `source.owner`'ının sahibi olduğu her işlemde çalıştırılır. Şu anda `transactionHandlers` için bir sahip gereklidir. Kullanıcılar tüm işlemleri işlemek istiyorlarsa `source.owner` olarak "" sağlamalıdırlar -> The source.owner can be the owner's address, or their Public Key. +> source.owner, sahibin adresi veya Genel Anahtarı olabilir. -> Transactions are the building blocks of the Arweave permaweb and they are objects created by end-users. +> İşlemler Arweave permaweb'in yapı taşlarıdır ve son kullanıcılar tarafından oluşturulan nesnelerdir. -> Note: [Bundlr](https://bundlr.network/) transactions are not supported yet. +> Not: [Bundlr](https://bundlr.network/) işlemleri henüz desteklenmemektedir. -## Schema Definition +## Şema Tanımı -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). 
+Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla ayrıntı [burada](/developing/creating-a-subgraph/#the-graphql-schema) bulunmaktadır. -## AssemblyScript Mappings +## AssemblyScript Eşlemeleri -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. -Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Arweave indeksleme, [AssemblyScript API](/developing/assemblyscript-api/)'sine Arweave'ye özgü veri tipleri ekler. ```tsx class Block { @@ -146,51 +146,51 @@ class Transaction { } ``` -Block handlers receive a `Block`, while transactions receive a `Transaction`. +Blok işleyicileri bir `Block` alırken, işlemler bir `Transaction` alır. -Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). +Bir Arweave Subgraph'ının eşleştirmelerini yazmak, bir Ethereum Subgraph'ının eşleştirmelerini yazmaya çok benzerdir. Daha fazla bilgi için [buraya](/developing/creating-a-subgraph/#writing-mappings) tıklayın. -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -## Querying an Arweave Subgraph +## Arweave Subgraph'ını Sorgulama -The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +Arweave subgraphları için GraphQL uç noktası, mevcut API arayüzü ile şema tanımı tarafından belirlenir. Daha fazla bilgi için lütfen [GraphQL API dökümantasyonunu](/querying/graphql-api/) ziyaret edin. -## Example Subgraphs +## Örnek Subgraph'ler -Here is an example subgraph for reference: +İşte referans olması için örnek bir subgraph: -- [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) +- [Arweave için örnek subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## FAQ +## SSS -### Can a subgraph index Arweave and other chains? +### Bir subgraph Arweave ve diğer zincirleri indeksleyebilir mi? -No, a subgraph can only support data sources from one chain/network. +Hayır, bir subgraph yalnızca bir zincirden/ağdan veri kaynaklarını destekleyebilir. -### Can I index the stored files on Arweave? +### Depolanmış dosyaları Arweave üzerinde indeksleyebilir miyim? -Currently, The Graph is only indexing Arweave as a blockchain (its blocks and transactions). +Şu anda Graph, Arweave'yi yalnızca bir blok zinciri (blokları ve işlemleri) olarak indekslemektedir. -### Can I identify Bundlr bundles in my subgraph? +### Subgraph'ımdaki Bundlr paketlerini tanımlayabilir miyim? -This is not currently supported. +Bu şu anda desteklenmemektedir. 
-### How can I filter transactions to a specific account? +### İşlemleri belirli bir hesaba özel olarak nasıl filtreleyebilirim? -The source.owner can be the user's public key or account address. +source.owner kullanıcının genel anahtarı veya hesap adresi olabilir. -### What is the current encryption format? +### Mevcut şifreleme formatı nedir? -Data is generally passed into the mappings as Bytes, which if stored directly is returned in the subgraph in a `hex` format (ex. block and transaction hashes). You may want to convert to a `base64` or `base64 URL`-safe format in your mappings, in order to match what is displayed in block explorers like [Arweave Explorer](https://viewblock.io/arweave/). +Veri genellikle eşleştirmelere Bayt olarak aktarılır ve doğrudan depolanırsa subgraph'ta `hex` formatında döndürülür (örn. blok ve işlem hashları). [Arweave Explorer](https://viewblock.io/arweave/) gibi blok gezginlerinde görüntülenenlerle eşleştirmek için eşleştirmelerinizde `base64` veya `base64 URL`-güvenli biçime dönüştürmek isteyebilirsiniz. -The following `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` helper function can be used, and will be added to `graph-ts`: +Aşağıdaki `bytesToBase64(bytes: Uint8Array, urlSafe: boolean): string` yardımcı fonksiyonu kullanılabilir ve `graph-ts`'ye eklenecektir: ``` const base64Alphabet = [ diff --git a/website/pages/tr/cookbook/cosmos.mdx b/website/pages/tr/cookbook/cosmos.mdx index ef21e4bc0855..cea7b2b7336e 100644 --- a/website/pages/tr/cookbook/cosmos.mdx +++ b/website/pages/tr/cookbook/cosmos.mdx @@ -1,51 +1,51 @@ --- -title: Building Subgraphs on Cosmos +title: Cosmos'ta Subgraph'ler Oluşturma --- -This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains. +Bu kılavuz, [Cosmos](https://docs.cosmos.network/) tabanlı blok zincirlerini indeksleyen subgraph'ler oluşturmaya yönelik bir giriş niteliğindedir. -## What are Cosmos subgraphs? +## Cosmos subgraph'leri nelerdir? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +Graph, geliştiricilerin blockchain etkinliklerini işlemesine ve ortaya çıkan verileri subgraph olarak bilinen açık bir GraphQL API aracılığıyla kolayca kullanılabilir hale getirmesine olanak tanır. [Graph Düğümü](https://github.com/graphprotocol/graph-node) artık Cosmos etkinliklerini işleyebilir, bu da Cosmos geliştiricilerinin artık zincir üstü olayları kolayca dizine eklemek için subgraph'ler oluşturabileceği anlamına gelir. -There are four types of handlers supported in Cosmos subgraphs: +Cosmos subgraph'lerinde desteklenen dört tür işleyici vardır: -- **Block handlers** run whenever a new block is appended to the chain. -- **Event handlers** run when a specific event is emitted. -- **Transaction handlers** run when a transaction occurs. -- **Message handlers** run when a specific message occurs. +- **Blok işleyicileri**, zincire her yeni blok eklendiğinde çalışır. +- **Olay işleyicileri**, belirli bir olay yayınlandığında çalışır. +- **İşlem işleyicileri**, bir işlem gerçekleştiğinde çalışır. +- **Mesaj işleyicileri**, belirli bir mesaj oluştuğunda çalışır. 
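To make these handler types a little more concrete, here is a minimal sketch of what a block-handler mapping could look like. The `BlockInfo` entity and its `height` field are hypothetical names that would need to be declared in your `schema.graphql`; the `cosmos.Block` type comes from the Cosmos-specific types introduced later on this page, and the handler itself would be wired up under `blockHandlers` in the manifest shown below.

```tsx
// A minimal block-handler sketch. `BlockInfo` (with `id: ID!` and `height: BigInt!`)
// is a hypothetical entity assumed to be declared in schema.graphql.
import { BigInt, cosmos } from "@graphprotocol/graph-ts";
import { BlockInfo } from "../generated/schema";

export function handleBlock(block: cosmos.Block): void {
  // Use the block hash as the entity ID and record the block height.
  let hash = block.header.hash.toHexString();
  let entity = new BlockInfo(hash);
  entity.height = BigInt.fromString(block.header.height.toString());
  entity.save();
}
```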
-Based on the [official Cosmos documentation](https://docs.cosmos.network/): +[resmi Cosmos belgelerine](https://docs.cosmos.network/) göre: -> [Events](https://docs.cosmos.network/main/core/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. +> [Olaylar](https://docs.cosmos.network/main/core/events), uygulamanın yürütülmesi hakkında bilgi içeren nesnelerdir. Genellikle çeşitli mesajların ve indeksleme işlemlerinin yürütülmesini izlemek için blok kaşifleri ve cüzdanlar gibi hizmet sağlayıcılar tarafından kullanılırlar. -> [Transactions](https://docs.cosmos.network/main/core/transactions) are objects created by end-users to trigger state changes in the application. +> [İşlemler](https://docs.cosmos.network/main/core/transactions), uygulamada durum değişikliklerini tetiklemek için son kullanıcılar tarafından oluşturulan nesnelerdir. -> [Messages](https://docs.cosmos.network/main/core/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. +> [Mesajlar](https://docs.cosmos.network/main/core/transactions#messages), ait oldukları modül kapsamında durum geçişlerini tetikleyen modüle özgü nesnelerdir. -Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. +Tüm verilere bir blok işleyici ile erişilebilmesine rağmen, diğer işleyiciler, subgraph geliştiricilerin verileri çok daha ayrıntılı bir şekilde işlemesine olanak tanır. -## Building a Cosmos subgraph +## Cosmos subgraph'i inşa etme -### Subgraph Dependencies +### Subgraph Gereksinimleri -[graph-cli](https://github.com/graphprotocol/graph-cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. +[graph-cli](https://github.com/graphprotocol/graph-cli) subgraph'ler oluşturmak ve deploy etmek için kullanılan bir CLI aracıdır, sürüm `>=0.30.0` Cosmos subgraph'leri ile çalışmak için gereklidir. -[graph-ts](https://github.com/graphprotocol/graph-ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. +[graph-ts](https://github.com/graphprotocol/graph-ts) subgraph'e özgü türlerden oluşan bir kitaplıktır, sürüm `>=0.27.0` Cosmos subgraph'leri ile çalışmak için gereklidir. -### Subgraph Main Components +### Subgraph Ana Bileşenleri -There are three key parts when it comes to defining a subgraph: +Bir subgraph'i tanımlama noktasında üç anahtar kısım vardır: -**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. +**subgraph.yaml**: hangi olayların izleneceğini ve bunların nasıl işleneceğini tanımlayan subgraph bildirimini içeren bir YAML dosyası. -**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. +**schema.graphql**: subgrpah'iniz için hangi verilerin depolandığını ve bunun GraphQL aracılığıyla nasıl sorgulanacağını tanımlayan bir GraphQL şeması. -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. 
+**AssemblyScript Eşlemeleri**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) blok zinciri verilerinden şemanızda tanımlanan varlıklara çeviren kod. -### Subgraph Manifest Definition +### Subgraph Manifest Tanımı -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: +Subgraph bildirimi (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgilenilen tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken işlevleri (`işleyiciler`) tanımlar. Bir Cosmos subgrpah'i için örnek bir subgraph bildirimi için alt kısma göz atın: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). -- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. +- Cosmos subgraph'leri, yeni bir `tür` veri kaynağı sunar (`cosmos`). +- `Ağ`, Cosmos ekosistemindeki bir zincire karşılık gelmelidir. Örnekte, Cosmos Hub mainnet'i kullanılmıştır. -### Schema Definition +### Şema Tanımı -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graph-ql-schema). +Şema tanımı, ortaya çıkan subgraph veri tabanının yapısını ve varlıklar arasındaki ilişkileri açıklar. Bu, orijinal veri kaynağının agnostiğidir. [burada](/developing/creating-a-subgraph/#the-graph-ql-schema) subgraph şema tanımı hakkında daha fazla ayrıntı bulabilirsiniz. -### AssemblyScript Mappings +### AssemblyScript Eşlemeleri -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Cosmos indeksleme, Cosmos'a özgü veri türlerini [AssemblyScript API](/developing/assemblyscript-api/) ile tanıştırır. ```tsx class Block { @@ -163,42 +163,42 @@ class Any { } ``` -Each handler type comes with its own data structure that is passed as an argument to a mapping function. +Her işleyici türü, bir eşleme işlevine bağımsız değişken olarak iletilen kendi veri yapısıyla birlikte gelir. -- Block handlers receive the `Block` type. -- Event handlers receive the `EventData` type. -- Transaction handlers receive the `TransactionData` type. -- Message handlers receive the `MessageData` type. +- Blok işleyicileri `Block` tipini alır. +- Etkinlik işleyicileri, `EventData` türünü alır. +- İşlem işleyicileri, `TransactionData` türünü alır. +- Mesaj işleyicileri, `MessageData` tipini alır. -As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). 
+`MessageData`'ın bir parçası olarak, mesaj işleyici, bir mesajı kapsayan bir işlemle ilgili en önemli bilgileri içeren bir işlem bağlamı alır. İşlem bağlamı, `EventData` türünde de mevcuttur, ancak yalnızca karşılık gelen olay bir işlemle ilişkilendirildiğinde. Ek olarak, tüm işleyiciler bir bloğa başvuru alır (`HeaderOnlyBlock`). -You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Cosmos entegrasyonu türlerinin tam listesini [burada](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts) bulabilirsiniz. -### Message decoding +### Mesaj çözme -It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://developers.google.com/protocol-buffers/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. +Cosmos mesajlarının zincire özgü olduğunu ve seri hale getirilmiş bir [Protocol Buffers](https://developers.google.com/protocol-buffers/) yükü biçiminde bir subgraph'e aktarıldığını unutmamak önemlidir. Sonuç olarak, mesaj verilerinin işlenmeden önce bir eşleme işlevinde kodunun çözülmesi gerekir. -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Bir subgraph'taki mesaj verisinin nasıl çözüleceğine dair bir örnek [burada](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts) bulunabilir. -## Creating and building a Cosmos subgraph +## Bir Cosmos subgraph'i inşa etme ve oluşturma -The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: +Alt çizge eşlemelerini yazmaya başlamadan önceki ilk adım, alt çizge şema dosyasında (`schema.graphql`) tanımlanan varlıklara dayalı tip bağlarını oluşturmaktır. Bu, eşleme işlevlerinin bu türlerde yeni nesneler oluşturmasına ve bunları depoya kaydetmesine izin verecektir. Bu, `codegen` CLI komutu kullanılarak yapılır: ```bash $ graph codegen ``` -Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: +Eşlemeler hazır olduğunda, subgraph'in oluşturulması gerekir. Bu adım, bildirimde veya eşlemelerde olabilecek hataları vurgulayacaktır. Graph Node'una deploy edilmek için subgraph'in başarılı bir şekilde oluşturulması gerekir. 
`build` CLI komutu kullanılarak yapılabilir: ```bash $ graph build ``` -## Deploying a Cosmos subgraph +## Bir Cosmos subgraph'ini deploy etme -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: +Subgraph'inizi oluşturup, `graph create` CLI komutunu çalıştırdıktan sonra `graph deploy` CLI komutunu kullanarak subgraph'inizi dağıtabilirsiniz: -**Hosted Service** +**Barındırılan Hizmet** ```bash graph create account/subgraph-name --product hosted-service @@ -208,7 +208,7 @@ graph create account/subgraph-name --product hosted-service graph deploy account/subgraph-name --product hosted-service ``` -**Local Graph Node (based on default configuration):** +**Yerel Graph Node'u (varsayılan yapılandırmaya göre):** ```bash graph create subgraph-name --node http://localhost:8020 @@ -218,42 +218,42 @@ graph create subgraph-name --node http://localhost:8020 graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -## Querying a Cosmos subgraph +## Bir Cosmos subgraph'ini sorgulama -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +Cosmos subgraph'leri için GraphQL uç noktası, mevcut API arayüzü ile şema tanımı tarafından belirlenir. Daha fazla bilgi için lütfen [GraphQL API belgelerini](/querying/graphql-api/) ziyaret edin. -## Supported Cosmos Blockchains +## Desteklenen Cosmos Blok Zincirleri ### Cosmos Hub -#### What is Cosmos Hub? +#### Cosmos Hub Nedir? -The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. +[Cosmos Hub blok zinciri](https://hub.cosmos.network/), [Cosmos](https://cosmos.network/) ekosistemindeki ilk blok zinciridir. Daha fazla bilgi için [resmi belgeleri](https://docs.cosmos.network/) ziyaret edebilirsiniz. -#### Networks +#### Ağlar -Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
    Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. +Cosmos Hub mainnet `cosmoshub-4`'tür. Cosmos Hub'ın geçerli test ağı `theta-testnet-001`'dir.
    Diğer Cosmos Hub ağları, yani `cosmoshub-3` durduruldu, bu nedenle onlar için herhangi bir veri sağlanmadı. ### Osmosis -> Osmosis support in Graph Node and on the Hosted Service is in beta: please contact the graph team with any questions about building Osmosis subgraphs! +> Graph Node'unda ve Barındırılan Hizmette Osmosis desteği beta sürümündedir: Osmosis subgraph'lerinin oluşturulmasıyla ilgili tüm sorularınız için lütfen Graph ekibiyle iletişime geçin! -#### What is Osmosis? +#### Osmosis Nedir? -[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. +[Osmosis](https://osmosis.zone/), Cosmos SDK üzerine inşa edilmiş merkeziyetsiz, zincirler arası otomatikleştirilmiş bir piyasa yapıcı (AMM) protokolüdür. Kullanıcıların özel likidite havuzları oluşturmasına ve IBC özellikli token ticareti yapmasına olanak tanır. Daha fazla bilgi için [resmi belgeleri](https://docs.osmosis.zone/) ziyaret edebilirsiniz. -#### Networks +#### Ağlar -Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. +Osmosis mainnet'i `osmosis-1`'dir. Osmosis geçerli test ağı `osmo-test-4` şeklindedir. -## Example Subgraphs +## Örnek Subgraph'ler -Here are some example subgraphs for reference: +İşte referans için bazı örnek subgraph'ler: -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Blok Filtreleme Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Validatör Ödül Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Validatör Delegasyon Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Osmosis Token Swap Örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/tr/cookbook/grafting.mdx b/website/pages/tr/cookbook/grafting.mdx index 54ad7a0eaff8..4ce145999b69 100644 --- a/website/pages/tr/cookbook/grafting.mdx +++ b/website/pages/tr/cookbook/grafting.mdx @@ -1,40 +1,56 @@ --- -title: Replace a Contract and Keep its History With Grafting +title: Bir Sözleşmeyi Değiştirin ve Graftlama ile Geçmişini Koruyun --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +Bu rehberde, mevcut subgraphları graftlayarak yeni subgraphları nasıl oluşturacağınızı ve dağıtacağınızı öğreneceksiniz. -## What is Grafting? +## Graftlama Nedir? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. 
+Graftlama, mevcut bir subgraph'daki verileri yeniden kullanır ve daha sonraki bir blokta indekslemeye başlar. Bu, geliştirme sırasında eşleştirmelerdeki basit hataları hızlı bir şekilde geçmek veya mevcut bir subgraph'ın başarısız olduktan sonra geçici olarak tekrar çalışmasını sağlamak için kullanışlıdır. Ayrıca, sıfırdan indekslenmesi uzun süren bir subgraph'a bir özellik eklerken de kullanılabilir. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu olan bir GraphQL şeması kullanabilir. Kendi başına geçerli bir subgraph şeması olmalıdır, ancak şu şekillerde temel subgraph şemasından sapabilir: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Varlık türlerini ekler veya kaldırır +- Varlık türlerinden öznitelikleri kaldırır +- Varlık türlerine null yapılabilir öznitelikler ekler +- Null yapılamayan öznitelikleri null yapılabilir özniteliklere dönüştürür +- Numaralandırmalara değerler ekler +- Arayüzleri ekler veya kaldırır +- Arayüzün hangi varlık türleri için uygulandığını değiştirir -For more information, you can check: +Daha fazla bilgi için kontrol edebilirsiniz: -- [Grafting](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) +- [Graftlama](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +Bu eğitici içerikte, temel bir kullanım örneğini ele alacağız. Mevcut bir sözleşmeyi özdeş bir sözleşme ile değiştireceğiz (yeni bir adresle, ancak aynı kodla). Ardından, mevcut subgraph'ı yeni sözleşmeyi izleyen "base" subgraph'a graftlayacağız. -## Building an Existing Subgraph +## Ağa Yükseltme Durumunda Graftlamaya İlişkin Önemli Not -Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +> **Uyarı**: Eğer subgraph'ınızı Subgraph Stüdyo veya barındırılan hizmetten merkeziyetsiz ağa yükseltiyorsanız, yükseltme sürecinde graftlama kullanmaktan kaçınmanız şiddetle tavsiye edilir. -- [Subgraph example repo](https://github.com/t-proctor/grafting-tutorial) +### Bu Neden Önemli? -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +Graftlama, bir subgraph'ı diğerine "graftlamanıza" ve geçmiş verileri mevcut subgraph'tan yeni bir sürüme etkili bir şekilde transfer etmenize olanak tanıyan güçlü bir özelliktir. Bu, verileri korumak ve indekslemede zaman kazanmak için etkili bir yol olsa da, graftlama, barındırılan bir ortamdan merkeziyersiz ağa taşınırken karmaşıklıklar ve potansiyel sorunlar ortaya çıkarabilir. 
Bir subgraph'ı Graph Ağı'ndan barındırılan hizmete veya Subgraph Stüdyo'ya geri graftlamak mümkün değildir. -## Subgraph Manifest Definition +### En İyi Uygulamalar -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +**İlk Taşıma**: Subgraph'ınızı merkeziyetsiz ağa ilk kez dağıttığınızda, bunu graftlama yapmaksızın gerçekleştirin. Subgraph'ın kararlı olduğundan ve beklendiği gibi çalıştığından emin olun. + +**Sonraki Güncellemeler**: Subgraph'ınız merkeziyetsiz ağda yayında ve kararlı olduğunda, geçişi daha sorunsuz hale getirmek ve geçmiş verileri korumak adına gelecek sürümler için graftlamayı kullanabilirsiniz. + +Bu yönergelere uyarak riskleri en aza indirebilir ve daha sorunsuz bir taşıma süreci geçirebilirsiniz. + +## Mevcut Bir Subgraph'ı Oluşturma + +Subgraphlar oluşturmak Graph'ın önemli bir parçasıdır ve [burada](http://localhost:3000/en/cookbook/quick-start/) daha ayrıntılı olarak açıklanmıştır. Bu eğitici içerikte kullanılan mevcut subgraph'ı oluşturabilmek ve dağıtabilmek için aşağıdaki Github deposu sağlanmıştır: + +- [Subgraph örnek deposu](https://github.com/t-proctor/grafting-tutorial) + +> Not: Subgraph'ta kullanılan sözleşme aşağıdaki [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit)'den alınmıştır. + +## Subgraph Manifest Tanımı + +Subgraph manifesti `subgraph.yaml`, subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Kullanacağınız örnek bir subgraph bildirimi için aşağıya bakın: ```yaml specVersion: 0.0.4 @@ -63,13 +79,13 @@ dataSources: file: ./src/lock.ts ``` -- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract -- The network should correspond to a indexed network being queried. Since we're running on Goerli testnet, the network is `goerli` -- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. +- `Lock` veri kaynağı, sözleşmeyi derleyip dağıttığımızda alacağımız abi ve sözleşme adresidir +- Ağ, sorgulanan indekslenmiş bir ağa karşılık gelmelidir. Göerli testnet üzerinde çalıştığımız için, ağ `goerli` +- `mapping` bölümü, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bu durumda, `Withdrawal` olayının etkinliklerini gözlemliyoruz ve yayıldığında `handleWithdrawal` fonksiyonunu çağırıyoruz. -## Grafting Manifest Definition +## Graftlama Manifest Tanımı -Grafting requires adding two new items to the original subgraph manifest: +Graftlama, orijinal subgraph bildirimine iki yeni öğe eklemeyi gerektirir: ```yaml --- @@ -80,16 +96,16 @@ graft: block: 1502122 # block number ``` -- `features:` is a list of all used [feature names](developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `features:` kullanılan tüm [özellik adlarının](developing/creating-a-subgraph/#experimental-features) bir listesidir. 
+- `graft:` `base` subgraph'ın ve üzerine graftlanacak bloğun bir eşleştirmesidir. `block`, indekslemeye başlanacak blok numarasıdır. Graph, temel subgraph'ın verilerini verilen bloğa kadar ve bu blok dahil olmak üzere kopyalayacak ve ardından yeni subgraph'ı bu bloktan itibaren indekslemeye devam edecektir. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +`base` ve `block` değerler iki subgraph kullanılarak bulunabilir: biri temel indeksleme için ve diğeri graftlamalı -## Deploying the Base Subgraph +## Temel Subgraph'ı Dağıtma -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. [Graph Stüdyo kullanıcı arayüzüne](https://thegraph.com/studio/) gidin ve Göerli testnet üzerinde `graft-example` adında bir subgraph oluşturun +2. Depoda bulunan `graft-example` klasöründeki subgraph sayfanızda bulunan `AUTH & DEPLOY` bölümündeki talimatları izleyin +3. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız ```graphql { @@ -101,7 +117,7 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -It returns something like this: +Şuna benzer bir şey döndürür: ``` { @@ -122,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Subgraph'ın düzgün bir şekilde indekslendiğini doğruladıktan sonra, subgraph'ı graftlama ile hızlı bir şekilde güncelleyebilirsiniz. -## Deploying the Grafting Subgraph +## Graftlama Subgraph'ını Dağıtma -The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. +Graft yerine geçen subgraph.yaml yeni bir sözleşme adresine sahip olacaktır. Bu, merkeziyetsiz uygulamanızı güncellediğinizde, bir sözleşmeyi yeniden dağıttığınızda vb. gerçekleşebilir. -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in The Graph Studio UI. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. [Graph Stüdyo kullanıcı arayüzüne](https://thegraph.com/studio/) gidin ve Göerli testnet üzerinde `graft-replacement` adında bir subgraph oluşturun +2. Yeni bir manifest oluşturun. `graph-replacement` için `subgraph.yaml` dosyası, farklı bir sözleşme adresi içerir ve nasıl graft edilmesi gerektiği hakkında yeni bilgiler içerir. 
Bunlar eski sözleşme tarafından gözlemlediğiniz [yayılan son olay bloğu](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) ve eski subgraph `base`'ini içeren `block`lardır. `base` subgraph ID'si, orijinal `graph-example` subgraph'ınızın `Deployment ID`'sidir. Bu bilgiyi Graph Stüdyo kullanıcı arayüzünde bulabilirsiniz. +3. Github deposunda bulunan `graft-replacement` klasöründeki subgraph sayfanızda bulunan `AUTH & DEPLOY` bölümündeki talimatları izleyin +4. Tamamlandığında, subgraph'ın doğru bir şekilde indekslendiğinden emin olun. Eğer aşağıdaki komutu Graph Test Alanında(Playground) çalıştırırsanız ```graphql { @@ -143,7 +159,7 @@ The graft replacement subgraph.yaml will have a new contract address. This could } ``` -It should return the following: +Aşağıdakileri döndürmelidir: ``` { @@ -169,18 +185,18 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) and [Event 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). The new contract emitted one `Withdrawal` after, [Event 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +`graft-replacement` subgraph'ının eski `graph-example` verilerinden ve yeni sözleşme adresinden gelen daha yeni verilerden indeksleme yaptığını görebilirsiniz. Orijinal sözleşme, [Olay 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) ve [Olay 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493) olmak üzere iki `Withdrawal` olayı yayınlamıştır. Yeni sözleşme, [Olay 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3)'ten sonra bir `Withdrawal` olayı yayınlamıştır. Önceden indekslenmiş iki işlem (Olay 1 ve 2) ve yeni işlem (Olay 3) `graft-replacement` subgraph'ında bir araya getirilmiştir. -Congrats! You have succesfully grafted a subgraph onto another subgraph. +Tebrikler! Bir subgraph'ı başka bir subgraph'a graftlamayı başardınız. -## Additional Resources +## Ek Kaynaklar -If you want more experience with grafting, here's a few examples for popular contracts: +Graftlama konusunda daha fazla tecrübe edinmek istiyorsanız, işte popüler sözleşmeler için birkaç örnek: - [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3/protocols/uniswap-v3/config/templates/uniswap.v3.template.yaml), -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. 
Alternatives like [Data Source Templates](developing/creating-a-subgraph/#data-source-templates) can achieve similar results +Graph uzmanlığında biraz daha ileri gitmek için, temel veri kaynaklarındaki değişiklikleri ele almanın diğer yollarını öğrenmeyi düşünün.[Veri Kaynağı Şablonları](developing/creating-a-subgraph/#data-source-templates) gibi alternatifler benzer sonuçlar elde etmenizi sağlayabilir -> Note: A lot of material from this article was taken from the previously published [Arweave article](/cookbook/arweave/) +> Not: Bu makaledeki birçok materyal daha önce yayınlanan [Arweave makalesinden](/cookbook/arweave/) alınmıştır diff --git a/website/pages/tr/cookbook/near.mdx b/website/pages/tr/cookbook/near.mdx index 879e8e5c15aa..d9e86609befa 100644 --- a/website/pages/tr/cookbook/near.mdx +++ b/website/pages/tr/cookbook/near.mdx @@ -1,81 +1,81 @@ --- -title: Building Subgraphs on NEAR +title: NEAR Üzerinde Subgraphlar Oluşturma --- -> NEAR support in Graph Node and on the Hosted Service is in beta: please contact near@thegraph.com with any questions about building NEAR subgraphs! +> Graph Düğümü'nde ve Barındırılan Hizmette NEAR desteği beta aşamasındadır: NEAR'da subgraphlar oluşturmayla ilgili sorularınız için lütfen near@thegraph.com adresiyle iletişime geçin! -This guide is an introduction to building subgraphs indexing smart contracts on the [NEAR blockchain](https://docs.near.org/). +Bu rehber, [NEAR blok zincirinde](https://docs.near.org/) akıllı sözleşmeleri indeksleyen subgraphlar oluşturmaya giriş niteliğindedir. -## What is NEAR? +## NEAR Nedir? -[NEAR](https://near.org/) is a smart contract platform for building decentralized applications. Visit the [official documentation](https://docs.near.org/docs/concepts/new-to-near) for more information. +[NEAR](https://near.org/), merkeziyetsiz uygulamalar oluşturmak için akıllı bir sözleşme platformudur. Daha fazla bilgi için [resmi dökümantasyonu](https://docs.near.org/docs/concepts/new-to-near) ziyaret edin. -## What are NEAR subgraphs? +## NEAR subgraphları nedir? -The Graph gives developers tools to process blockchain events and make the resulting data easily available via a GraphQL API, known individually as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process NEAR events, which means that NEAR developers can now build subgraphs to index their smart contracts. +Graph, geliştiricilere blok zinciri olaylarını işlemek ve elde edilen verileri tek tek subgraph olarak bilinen bir GraphQL API aracılığıyla kolayca erişilebilir hale getirmek için araçlar sunar. [Graph Düğümü](https://github.com/graphprotocol/graph-node) artık NEAR olaylarını işleyebiliyor, bu da NEAR geliştiricilerinin artık akıllı sözleşmelerini indekslemek için subgraphlar oluşturabilecekleri anlamına geliyor. -Subgraphs are event-based, which means that they listen for and then process on-chain events. There are currently two types of handlers supported for NEAR subgraphs: +Subgraphlar olay tabanlıdır, yani zincir üzerindeki olayların etkinliklerini gözler ve ardından işlerler. 
Şu anda NEAR subgraphları için desteklenen iki tür işleyici vardır: -- Block handlers: these are run on every new block -- Receipt handlers: run every time a message is executed at a specified account +- Blok işleyicileri: Bunlar her yeni blokta çalışır +- Makbuz işleyicileri: Belirli bir hesapta her mesaj yürütüldüğünde çalışır -[From the NEAR documentation](https://docs.near.org/docs/concepts/transaction#receipt): +[NEAR dokümantasyonundan](https://docs.near.org/docs/concepts/transaction#receipt): -> A Receipt is the only actionable object in the system. When we talk about "processing a transaction" on the NEAR platform, this eventually means "applying receipts" at some point. +> Makbuz, sistemdeki eyleme geçirilebilir tek nesnedir. NEAR platformunda "bir işlemin işlenmesinden" bahsettiğimizde, bu nihayetinde bir noktada "makbuzların uygulanması" anlamına gelir. -## Building a NEAR Subgraph +## NEAR Subgraph'ı Oluşturma -`@graphprotocol/graph-cli` is a command-line tool for building and deploying subgraphs. +`@graphprotocol/graph-cli`, subgraphları oluşturmak ve dağıtmak için kullanılan bir komut satırı aracıdır. -`@graphprotocol/graph-ts` is a library of subgraph-specific types. +`@graphprotocol/graph-ts`, bir subgraph özel türler kütüphanesidir. -NEAR subgraph development requires `graph-cli` above version `0.23.0`, and `graph-ts` above version `0.23.0`. +NEAR subgraph'ı geliştirmek, `0.23.0` sürümünden yüksek `graph-cli` ve `0.23.0` sürümünden yüksek `graph-ts` gerektirir. -> Building a NEAR subgraph is very similar to building a subgraph that indexes Ethereum. +> Bir NEAR subgraph'ı oluşturmak, Ethereum'u indeksleyen bir subgraph oluşturmakla çok benzerdir. -There are three aspects of subgraph definition: +Subgraph tanımının üç yönü vardır: -**subgraph.yaml:** the subgraph manifest, defining the data sources of interest, and how they should be processed. NEAR is a new `kind` of data source. +**subgraph.yaml:** Veri kaynaklarını ve bunların nasıl işleneceğini tanımlayan subgraph manifestidir. NEAR, yeni bir veri kaynağı türüdür(`kind`). -**schema.graphql:** a schema file that defines what data is stored for your subgraph, and how to query it via GraphQL. The requirements for NEAR subgraphs are covered by [the existing documentation](/developing/creating-a-subgraph#the-graphql-schema). +**schema.graphql:** Subgraph'ınız için hangi verilerin depolandığını ve bunlara GraphQL aracılığıyla nasıl sorgu yapılacağını tanımlayan bir şema dosyası. NEAR subgraph gereksinimleri [mevcut belgelendirmede](/developing/creating-a-subgraph#the-graphql-schema) ele alınmıştır. -**AssemblyScript Mappings:** [AssemblyScript code](/developing/assemblyscript-api) that translates from the event data to the entities defined in your schema. NEAR support introduces NEAR-specific data types and new JSON parsing functionality. +**AssemblyScript Eşleştirmeleri:** Olay verilerini şemanızda tanımlanan varlıklara çeviren [AssemblyScript kodu](/developing/assemblyscript-api). NEAR desteği, NEAR'a özgü veri tiplerini ve yeni JSON ayrıştırma fonksiyonelliğini tanıtır. 
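Fikir vermesi açısından, aşağıda bir blok işleyicisinin neye benzeyebileceğine dair küçük bir taslak yer almaktadır. Buradaki `BlockSummary` varlığı ve alanları tamamen varsayımsaldır (şemanızda tanımlandığı ve `graph codegen` ile tiplerin üretildiği varsayılmaktadır); NEAR'a özgü tür tanımları ve manifest ayrıntıları aşağıdaki bölümlerde ele alınmaktadır:

```typescript
import { near } from "@graphprotocol/graph-ts"
// Varsayımsal varlık: schema.graphql dosyanızda tanımlandığı varsayılıyor
import { BlockSummary } from "../generated/schema"

export function handleNewBlock(block: near.Block): void {
  let header = block.header
  // Blok yüksekliğini kimlik olarak kullanan basit bir varlık oluştur
  let summary = new BlockSummary(header.height.toString())
  summary.hash = header.hash
  summary.author = block.author
  summary.save()
}
```

Gerçek varlık adları ve alanlar tamamen sizin şemanıza bağlıdır; bu taslak yalnızca genel akışı göstermek içindir.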
-During subgraph development there are two key commands: +Subgraph geliştirme sırasında iki temel komut bulunmaktadır: ```bash -$ graph codegen # generates types from the schema file identified in the manifest -$ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder +$ graph codegen # manifest'de tanımlanan şema dosyasından tipleri üretir +$ graph build # AssemblyScript dosyalarından Web Assembly oluşturur ve tüm subgraph dosyalarını bir /build klasöründe hazırlar ``` -### Subgraph Manifest Definition +### Subgraph Manifest Tanımı -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: +Subgraph manifesti (`subgraph.yaml`), subgraph için veri kaynaklarını, ilgili tetikleyicileri ve bu tetikleyicilere yanıt olarak çalıştırılması gereken fonksiyonları tanımlar. Bir NEAR subgraph'ı özelinde örnek bir subgraph manifesti için aşağıya bakınız: ```yaml specVersion: 0.0.2 schema: - file: ./src/schema.graphql # link to the schema file + file: ./src/schema.graphql # şema dosyasına bağlantı dataSources: - kind: near network: near-mainnet source: - account: app.good-morning.near # This data source will monitor this account - startBlock: 10662188 # Required for NEAR + account: app.good-morning.near # Bu veri kaynağı bu hesabı izleyecektir + startBlock: 10662188 # NEAR için gereklidir mapping: apiVersion: 0.0.5 language: wasm/assemblyscript blockHandlers: - - handler: handleNewBlock # the function name in the mapping file + - handler: handleNewBlock # eşleştirme dosyasındaki fonksiyon adı receiptHandlers: - - handler: handleReceipt # the function name in the mapping file - file: ./src/mapping.ts # link to the file with the Assemblyscript mappings + - handler: handleReceipt # eşleştirme dosyasındaki fonksiyon adı + file: ./src/mapping.ts # Assemblyscript eşleştirmelerinin bulunduğu dosyaya bağlantı ``` -- NEAR subgraphs introduce a new `kind` of data source (`near`) -- The `network` should correspond to a network on the hosting Graph Node. On the Hosted Service, NEAR's mainnet is `near-mainnet`, and NEAR's testnet is `near-testnet` -- NEAR data sources introduce an optional `source.account` field, which is a human-readable ID corresponding to a [NEAR account](https://docs.near.org/docs/concepts/account). This can be an account or a sub-account. -- NEAR data sources introduce an alternative optional `source.accounts` field, which contains optional suffixes and prefixes. At least prefix or suffix must be specified, they will match the any account starting or ending with the list of values respectively. The example below would match: `[app|good].*[morning.near|morning.testnet]`. If only a list of prefixes or suffixes is necessary the other field can be omitted. +- NEAR subgraphları yeni bir veri kaynağı türü(`kind`) olan `near`'ı sunar +- `network`, barındırılan Graph Düğümündeki bir ağa karşılık gelmelidir. Barındırılan Hizmette, NEAR'ın ana ağı `near-mainnet` ve NEAR'ın test ağı `near-testnet`'tir +- NEAR veri kaynakları, insan tarafından okunabilir bir [NEAR hesabına](https://docs.near.org/docs/concepts/account) ID olan opsiyonel `source.account` alanını sunar. Bu bir hesap veya alt hesap olabilir. +- NEAR veri kaynakları, opsiyonel son ekler ve ön ekler içeren alternatif bir opsiyonel `source.accounts` alanı sunar. 
En azından ön ek veya son ek belirtilmelidir, bunlar sırasıyla değer listesiyle başlayan veya biten herhangi bir hesapla eşleşecektir. Aşağıdaki örnek eşleşecektir: `[app|good].*[morning.near|morning.testnet]`. Yalnızca bir ön ek veya son ek listesi gerekliyse, diğer alan atlanabilir. ```yaml accounts: @@ -87,20 +87,20 @@ accounts: - morning.testnet ``` -NEAR data sources support two types of handlers: +NEAR veri kaynakları iki tür işleyiciyi destekler: -- `blockHandlers`: run on every new NEAR block. No `source.account` is required. -- `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/docs/concepts/account#subaccounts) must be added as independent data sources). +- `blockHandlers`: Her yeni NEAR bloğunda çalıştırılır. `source.account` gerekli değildir. +- `receiptHandlers`: Veri kaynağının `source.account`'unun alıcı olduğu her makbuz üzerinde çalıştırılır. Yalnızca tam eşleşmelerin işlendiğini unutmayın ([alt hesaplar](https://docs.near.org/docs/concepts/account#subaccounts) bağımsız veri kaynakları olarak eklenmelidir). -### Schema Definition +### Şema Tanımı -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). +Şema tanımı, ortaya çıkan subgraph veritabanının yapısını ve varlıklar arasındaki ilişkileri tanımlar. Bu, orijinal veri kaynağından bağımsızdır. Subgraph şema tanımı hakkında daha fazla ayrıntı [burada](/developing/creating-a-subgraph#the-graphql-schema) bulunmaktadır. -### AssemblyScript Mappings +### AssemblyScript Eşleştirmeleri -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Olayları işlemek için işleyiciler [AssemblyScript](https://www.assemblyscript.org/) içinde yazılmıştır. -NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/assemblyscript-api). +NEAR indeksleme, [AssemblyScript API](/developing/assemblyscript-api)'sine NEAR'a özgü veri tipleri ekler. ```typescript @@ -162,50 +162,50 @@ class ReceiptWithOutcome { } ``` -These types are passed to block & receipt handlers: +Bu türler blok & makbuz işleyicilerine aktarılır: -- Block handlers will receive a `Block` -- Receipt handlers will receive a `ReceiptWithOutcome` +- Blok işleyicileri bir `Block` alır +- Makbuz işleyicileri bir `ReceiptWithOutcome` alır -Otherwise, the rest of the [AssemblyScript API](/developing/assemblyscript-api) is available to NEAR subgraph developers during mapping execution. +Aksi takdirde, [AssemblyScript API](/developing/assemblyscript-api)'sinin geri kalanı eşleştirme yürütmesi sırasında NEAR subgraph geliştiricileri tarafından kullanılabilir. -This includes a new JSON parsing function - logs on NEAR are frequently emitted as stringified JSONs. A new `json.fromString(...)` function is available as part of the [JSON API](/developing/assemblyscript-api#json-api) to allow developers to easily process these logs. +Buna yeni bir JSON ayrıştırma fonksiyonu da dahildir - NEAR'daki kayıtlar sıklıkla dizeye dönüştürülmüş (stringified) JSON'lar olarak yayılır. Yeni bir `json.fromString(...)` fonksiyonu, geliştiricilerin bu kayıtları kolayca işlemesine olanak sağlamak için [JSON API](/developing/assemblyscript-api#json-api)'nin bir parçası olarak mevcuttur.
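Örneğin, aşağıdaki küçük taslak, bir makbuz işleyicisinin bu tür kayıtları nasıl ayrıştırabileceğini göstermektedir. Kayıtların `{"event": "..."}` biçiminde JSON içerdiği varsayımı tamamen örnek amaçlıdır; sözleşmenizin gerçekte yaydığı kayıt biçimine göre uyarlanmalıdır:

```typescript
import { near, json, log } from "@graphprotocol/graph-ts"

export function handleReceipt(receipt: near.ReceiptWithOutcome): void {
  let outcome = receipt.outcome
  // Makbuzun yürütme çıktısındaki her kaydı (log) dolaş
  for (let i = 0; i < outcome.logs.length; i++) {
    // Kaydın dizeye dönüştürülmüş JSON olduğu varsayılıyor;
    // JSON olmayan kayıt olasılığına karşı json.try_fromString tercih edilebilir
    let parsed = json.fromString(outcome.logs[i]).toObject()
    let eventName = parsed.get("event")
    if (eventName != null) {
      log.info("İşlenen olay: {}", [eventName.toString()])
    }
  }
}
```

Gerçek bir eşleştirmede, ayrıştırılan değerler yalnızca kayda yazılmak yerine şemanızdaki varlıklara kaydedilir.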
-## Deploying a NEAR Subgraph +## NEAR Subgraph'ını Dağıtma -Once you have a built subgraph, it is time to deploy it to Graph Node for indexing. NEAR subgraphs can be deployed to any Graph Node `>=v0.26.x` (this version has not yet been tagged & released). +Bir subgraph oluşturduktan sonra, artık indeksleme için Graph Düğümü'ne dağıtma zamanı gelmiştir. NEAR subgraphları sürümü `>=v0.26.x` (bu sürüm henüz etiketlenmemiş & yayınlanmamıştır) olan herhangi bir Graph Düğümü'ne dağıtılabilir. -The Graph's Hosted Service currently supports indexing NEAR mainnet and testnet in beta, with the following network names: +Graph'ın Barındırılan Hizmeti şu anda NEAR ana ağı ve test ağı'nı beta olarak aşağıdaki ağ adlarıyla indekslemeyi desteklemektedir: - `near-mainnet` - `near-testnet` -More information on creating and deploying subgraphs on the Hosted Service can be found [here](/deploying/deploying-a-subgraph-to-hosted). +Barındırılan Hizmette subgraph oluşturma ve dağıtma hakkında daha fazla bilgiyi [burada](/deploying/deploying-a-subgraph-to-hosted) bulabilirsiniz. -As a quick primer - the first step is to "create" your subgraph - this only needs to be done once. On the Hosted Service, this can be done from [your Dashboard](https://thegraph.com/hosted-service/dashboard): "Add Subgraph". +Hızlı bir başlangıç olarak - ilk adım yalnızca bir kez yapılması geren, subgraph'ınızı "oluşturmaktır". Barındırılan Hizmette bu işlem [Kontrol Panelinizden](https://thegraph.com/hosted-service/dashboard) yapılabilir: "Subgraph Ekle". -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command: +Subgraph'ınız oluşturulduktan sonra, `graph deploy` CLI komutunu kullanarak subgraph'ınızı dağıtabilirsiniz: ```sh -$ graph create --node subgraph/name # creates a subgraph on a local Graph Node (on the Hosted Service, this is done via the UI) -$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # uploads the build files to a specified IPFS endpoint, and then deploys the subgraph to a specified Graph Node based on the manifest IPFS hash +$ graph create --node subgraph/name # yerel bir Graph Düğümü üzerinde bir subgraph oluşturur (Barındırılan Hizmet üzerinde bu işlem Kullanıcı Arayüzü üzerinden yapılır) +$ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # derleme dosyalarını belirtilen bir IPFS uç noktasına yükler ve ardından subgraph'ı manifest IPFS hash'ına göre belirtilen bir Graph Düğümüne dağıtır ``` -The node configuration will depend on where the subgraph is being deployed. +Düğüm yapılandırması, subgraph'ın nerede dağıtıldığına bağlı olacaktır. -### Hosted Service +### Barındırılan Hizmet ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token ``` -### Local Graph Node (based on default configuration) +### Yerel Graph Düğümü (varsayılan yapılandırmaya göre) ```sh graph deploy --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -Once your subgraph has been deployed, it will be indexed by Graph Node. You can check its progress by querying the subgraph itself: +Subgraph'ınız dağıtıldıktan sonra Graph Düğüme tarafından indekslenecektir. Subgraph'ın kendisini sorgulayarak ilerlemesini kontrol edebilirsiniz: ```graphql { @@ -217,45 +217,45 @@ Once your subgraph has been deployed, it will be indexed by Graph Node. 
You can } ``` -### Indexing NEAR with a Local Graph Node +### NEAR'ı Yerel Graph Düğümü ile İndeksleme -Running a Graph Node that indexes NEAR has the following operational requirements: +NEAR'ı indeksleyen bir Graph Düğümü çalıştırmanın aşağıdaki operasyonel gereksinimleri vardır: -- NEAR Indexer Framework with Firehose instrumentation -- NEAR Firehose Component(s) -- Graph Node with Firehose endpoint configured +- Firehose enstrümantasyonu ile NEAR İndeksleyici Çerçevesi +- NEAR Firehose Bileşen(ler)i +- Firehose uç noktası yapılandırılmış Graph Düğümü -We will provide more information on running the above components soon. +Yukarıdaki bileşenlerin çalıştırılması hakkında yakında daha fazla bilgi vereceğiz. -## Querying a NEAR Subgraph +## NEAR Subgraph'ını Sorgulama -The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. +NEAR subgraphları için GraphQL uç noktası, mevcut API arayüzü ile şema tanımı tarafından belirlenir. Daha fazla bilgi için lütfen [GraphQL API dökümantasyonunu](/querying/graphql-api) ziyaret edin. -## Example Subgraphs +## Örnek Subgraph'ler -Here are some example subgraphs for reference: +İşte referans için bazı örnek subgraph'ler: -[NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) +[NEAR Blokları](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) -[NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) +[NEAR Makbuzları](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## FAQ +## SSS -### How does the beta work? +### Beta nasıl çalışır? -NEAR support is in beta, which means that there may be changes to the API as we continue to work on improving the integration. Please email near@thegraph.com so that we can support you in building NEAR subgraphs, and keep you up to date on the latest developments! +NEAR desteği beta aşamasındadır, bu da entegrasyonu geliştirmek için çalışmaya devam ederken API'de değişiklikler olabileceği anlamına gelir. NEAR subgraphları oluştururken size destek olabilmemiz ve en son gelişmelerden sizi haberdar edebilmemiz için lütfen near@thegraph.com adresine e-posta gönderin! -### Can a subgraph index both NEAR and EVM chains? +### Bir subgraph hem NEAR hem de EVM zincirlerini indeksleyebilir mi? -No, a subgraph can only support data sources from one chain/network. +Hayır, bir subgraph yalnızca bir zincirden/ağdan veri kaynaklarını destekleyebilir. -### Can subgraphs react to more specific triggers? +### Subgraphlar daha spesifik tetikleyicilere tepki verebilir mi? -Currently, only Block and Receipt triggers are supported. We are investigating triggers for function calls to a specified account. We are also interested in supporting event triggers, once NEAR has native event support. +Şu anda yalnızca Blok ve Makbuz tetikleyicileri desteklenmektedir. Belirli bir hesaba yapılan fonksiyon çağrıları için tetikleyicileri araştırma aşamasındayız. NEAR yerel olay desteğine sahip oldu takdirde, olay tetikleyicilerini desteklemekle de ilgileneceğiz. -### Will receipt handlers trigger for accounts and their sub-accounts? +### Makbuz işleyicileri hesaplar ve bunların alt hesapları için tetiklenecek mi? -If an `account` is specified, that will only match the exact account name. 
It is possible to match sub-accounts by specifying an `accounts` field, with `suffixes` and `prefixes` specified to match accounts and sub-accounts, for example the following would match all `mintbase1.near` sub-accounts: +Bir `account` belirtilirse, bu yalnızca tam hesap adıyla eşleşecektir. Hesapları ve alt hesapları eşleştirmek için `suffixes` ve `prefixes` ile birlikte bir `accounts` alanı belirterek alt hesapları eşleştirmek mümkündür, örneğin aşağıdaki tüm `mintbase1.near` alt hesaplarıyla eşleşir: ```yaml accounts: @@ -263,22 +263,22 @@ accounts: - mintbase1.near ``` -### Can NEAR subgraphs make view calls to NEAR accounts during mappings? +### NEAR subgraphları eşleştirmeler sırasında NEAR hesaplarına görünüm çağrıları yapabilir mi? -This is not supported. We are evaluating whether this functionality is required for indexing. +Bu desteklenmemektedir. Bu fonksiyonelliğin indeksleme için gerekli olup olmadığını değerlendiriyoruz. -### Can I use data source templates in my NEAR subgraph? +### NEAR subgraph'ımda veri kaynağı şablonları kullanabilir miyim? -This is not currently supported. We are evaluating whether this functionality is required for indexing. +Bu şu anda desteklenmemektedir. Bu fonksiyonelliğin indeksleme için gerekli olup olmadığını değerlendiriyoruz. -### Ethereum subgraphs support "pending" and "current" versions, how can I deploy a "pending" version of a NEAR subgraph? +### Ethereum subgraphları "beklemedeki" ve "mevcut" sürümleri destekler, bir NEAR subgraph'ının "beklemedeki" sürümünü nasıl dağıtabilirim? -Pending functionality is not yet supported for NEAR subgraphs. In the interim, you can deploy a new version to a different "named" subgraph, and then when that is synced with the chain head, you can redeploy to your primary "named" subgraph, which will use the same underlying deployment ID, so the main subgraph will be instantly synced. +Bekleme fonksiyonelliği henüz NEAR subgraphları için desteklenmemektedir. Bu arada, farklı "adlandırılmış" bir subgraph'a yeni bir sürüm dağıtabilir ve daha sonra bu zincir başı ile senkronize edildiğinde, aynı temel dağıtım ID'sini kullanacak olan birincil "adlandırılmış" subgraph'ınıza yeniden dağıtabilirsiniz. Böylece ana subgraph anında senkronize olur. -### My question hasn't been answered, where can I get more help building NEAR subgraphs? +### Sorum yanıtlanmadı, NEAR subgraphları oluşturma konusunda nereden daha fazla yardım alabilirim? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +Subgraph geliştirme hakkında genel bir soruysa, [Geliştirici dökümantasyonu'nun](/quick-start) geri kalanında çok daha fazla bilgi bulunmaktadır. Aksi durumda lütfen [The Graph Protocol Discord](https://discord.gg/graphprotocol) sunucusuna katılın ve #near kanalında sorunuzu sorun veya near@thegraph.com adresine e-posta gönderin. 
-## References +## Referanslar -- [NEAR developer documentation](https://docs.near.org/docs/develop/basics/getting-started) +- [NEAR geliştirici dökümantasyonu](https://docs.near.org/docs/develop/basics/getting-started) diff --git a/website/pages/tr/cookbook/subgraph-debug-forking.mdx b/website/pages/tr/cookbook/subgraph-debug-forking.mdx index 7ac3bf96ca10..5cab228a3c86 100644 --- a/website/pages/tr/cookbook/subgraph-debug-forking.mdx +++ b/website/pages/tr/cookbook/subgraph-debug-forking.mdx @@ -1,26 +1,26 @@ --- -title: Quick and Easy Subgraph Debugging Using Forks +title: Fork Kullanarak Hızlı ve Kolay Subgraph Hata Ayıklama --- -As with many systems processing large amounts of data, The Graph's Indexers (Graph nodes) may take quite some time to sync-up your subgraph with the target blockchain. The discrepancy between quick changes with the purpose of debugging and long wait times needed for indexing is extremely counterproductive and we are well aware of that. This is why we are introducing **subgraph forking**, developed by [LimeChain](https://limechain.tech/), and in this article I will show you how this feature can be used to substantially speed-up subgraph debugging! +Büyük miktarda veriyi işleyen birçok sistemde olduğu gibi, Graph'ın İndeksleyicilerinin (Graph düğümleri) hedef blok zinciri ile subgraph'ınızı senkronize etmesi oldukça uzun süreler gerektirebilir. Hata ayıklama amaçlı hızlı değişiklikler için gereken kısa bekleme sürelerine karşın indeksleme için gereken uzun bekleme süreleri arasındaki fark son derece verimsizdir ve bunun farkındayız. Bu nedenle, [LimeChain](https://limechain.tech/) tarafından geliştirilen **subgraph forklamayı** tanıtıyoruz ve bu makalede size bu özelliğin subgraph hata ayıklamasını önemli ölçüde hızlandırmak için nasıl kullanılabileceğini göstereceğim! -## Ok, what is it? +## Peki, nedir bu Subgraph Forklama? -**Subgraph forking** is the process of lazily fetching entities from _another_ subgraph's store (usually a remote one). +**Subgraph forklama**, _başka_ bir subgraph'ın deposundan (genellikle uzaktaki birinden) unsurları ihtiyaç duyuldukça (lazily) getirme işlemidir. -In the context of debugging, **subgraph forking** allows you to debug your failed subgraph at block _X_ without needing to wait to sync-up to block _X_. +Hata ayıklama bağlamında, **subgraph forklama**, _X_ bloğunda başarısız olan subgraph'ınızda, _X_ bloğuna kadar senkronizasyonun tamamlanmasını beklemeksizin hata ayıklamanıza olanak tanır. -## What?! How? +## Ne?! Nasıl? -When you deploy a subgraph to a remote Graph node for indexing and it fails at block _X_, the good news is that the Graph node will still serve GraphQL queries using its store, which is synced-up to block _X_. That's great! This means we can take advantage of this "up-to-date" store to fix the bugs arising when indexing block _X_. +İndekslemek için bir subgraph'ı uzak bir Graph düğümüne dağıttığınızda ve bu, _X_ bloğunda başarısız olduğunda, iyi haber şu ki Graph düğümü _X_ bloğuna kadar senkronize edilmiş deposunu kullanarak GraphQL sorgularını sunmaya devam edecektir. Bu harika! Bu, _X_ bloğunu indeksleme sırasında ortaya çıkan hataları düzeltmek için "güncel" depodan yararlanabileceğimiz anlamına geliyor. -In a nutshell, we are going to _fork the failing subgraph_ from a remote Graph node that is guaranteed to have the subgraph indexed up to block _X_ in order to provide the locally deployed subgraph being debugged at block _X_ an up-to-date view of the indexing state.
+Kısacası, _başarısız bir subgraph'ı_ _X_ bloğuna kadar indekslendiği garanti edilen başka uzaktaki bir Graph düğümünden, _X_ bloğunda hata ayıklanan yerel olarak dağıtılan subgraph'ın indeksleme durumundaki güncel bir görünümünü sağlamak adına _forklayacağız_. -## Please, show me some code! +## Lütfen bana biraz kod göster! -To stay focused on subgraph debugging, let's keep things simple and run along with the [example-subgraph](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) indexing the Ethereum Gravity smart contract. +Subgraph hata ayıklamasında konsantrasyonu bozmamak adına işleri basit tutalım ve Ethereum Gravity akıllı sözleşmesini indeksleyen [subgraph örneği](https://github.com/graphprotocol/graph-tooling/tree/main/examples/ethereum-gravatar) ile ilerleyelim. -Here are the handlers defined for indexing `Gravatar`s, with no bugs whatsoever: +Burada hiç hata olmadan `Gravatar`ları indekslemek için tanımlanan işleyiciler şunlardır: ```tsx export function handleNewGravatar(event: NewGravatar): void { @@ -44,43 +44,43 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -Oops, how unfortunate, when I deploy my perfect looking subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) it fails with the _"Gravatar not found!"_ error. +Ah, ne şanssızlık, mükemmel görünen subgraph'ımı [Barındırılan Hizmete](https://thegraph.com/hosted-service/) dağıttığımda _"Gravatar not found!"_ hatasıyla karşılaştım. -The usual way to attempt a fix is: +Genellikle düzeltmeyi denemek için yol şudur: -1. Make a change in the mappings source, which you believe will solve the issue (while I know it won't). -2. Re-deploy the subgraph to the [Hosted Service](https://thegraph.com/hosted-service/) (or another remote Graph node). -3. Wait for it to sync-up. -4. If it breaks again go back to 1, otherwise: Hooray! +1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın (ama ben çözmeyeceğini biliyorum). +2. Subgraph'ı [Barındırılan Hizmet'e](https://thegraph.com/hosted-service/) tekrar dağıtın (yada başka uzaktaki Graph Düğümüne). +3. Senkronize olması için bekleyin. +4. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! -It is indeed pretty familiar to an ordinary debug process, but there is one step that horribly slows down the process: _3. Wait for it to sync-up._ +Bu, sıradan bir hata ayıklama işlemine gerçekten oldukça benzerdir, fakat işlemi korkunç şekilde yavaşlatan bir adım vardır: _3. Senkronize olması için bekleyin._ -Using **subgraph forking** we can essentially eliminate this step. Here is how it looks: +Aslında **subgraph forklama** kullanarak bu adımı ortadan kaldırabiliriz. Nasıl göründüğüne bakalım: -0. Spin-up a local Graph node with the **_appropriate fork-base_** set. -1. Make a change in the mappings source, which you believe will solve the issue. -2. Deploy to the local Graph node, **_forking the failing subgraph_** and **_starting from the problematic block_**. -3. If it breaks again, go back to 1, otherwise: Hooray! +0. **_Uygun fork temelli _** küme ile yerel bir Graph düğümünü başlatın. +1. Eşleştirme kaynağında, sorunu çözeceğine inandığınız bir değişiklik yapın. +2. **_Başarısız subgraph'ı forklayarak_** ve **_sorunlu bloktan başlayarak_** yerel Graph düğümüne dağıtın. +3. Tekrar sorunla karşılaşırsanız 1. aşamaya geri dönün, aksi takdirde: Yaşasın! -Now, you may have 2 questions: +Şimdi, 2 sorunuz olabilir: -1. fork-base what??? -2. Forking who?! +1. 
fork-base ne??? +2. Kimi forkluyoruz?! -And I answer: +Ve ben cevap veriyorum: -1. `fork-base` is the "base" URL, such that when the _subgraph id_ is appended the resulting URL (`/`) is a valid GraphQL endpoint for the subgraph's store. -2. Forking is easy, no need to sweat: +1. `fork-base` (fork temeli), sonuna _subgraph kimliği (id)_ eklendiğinde ortaya çıkan URL'nin (`/`) subgraph'ın deposu için geçerli bir GraphQL uç noktası olacağı "temel" URL'dir. +2. Forklama kolay, ter dökmeye gerek yok: ```bash $ graph deploy --debug-fork --ipfs http://localhost:5001 --node http://localhost:8020 ``` -Also, don't forget to set the `dataSources.source.startBlock` field in the subgraph manifest to the number of the problematic block, so you can skip indexing unnecessary blocks and take advantage of the fork! +Ayrıca, subgraph manifestindeki `dataSources.source.startBlock` alanını sorunlu bloğun numarasına ayarlamayı unutmayın, böylece gereksiz blokları indekslemeyi geçebilir ve forklamanın avantajından yararlanabilirsiniz! -So, here is what I do: +İşte benim yaptıklarım: -0. I spin-up a local graph node ([here is how to do it](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)) with the `fork-base` option set to: `https://api.thegraph.com/subgraphs/id/`, since I will fork a subgraph, the buggy one I deployed earlier, from the [HostedService](https://thegraph.com/hosted-service/). +0. [Barındırılan Hizmette](https://thegraph.com/hosted-service/) daha önceden dağıttığım hatalı olan bir subgraph'ı forklayacağım için `fork-base` seçeneği `https://api.thegraph.com/subgraphs/id/` şeklinde ayarlanmış olan yerel bir graph düğümü başlatıyorum ([nasıl yapılacağına dair bilgiyi buradan bulabilirsiniz](https://github.com/graphprotocol/graph-node#running-a-local-graph-node)). ``` $ cargo run -p graph-node --release -- \ @@ -90,13 +90,13 @@ $ cargo run -p graph-node --release -- \ --fork-base https://api.thegraph.com/subgraphs/id/ ``` -1. After careful inspection I notice that there is a mismatch in the `id` representations used when indexing `Gravatar`s in my two handlers. While `handleNewGravatar` converts it to a hex (`event.params.id.toHex()`), `handleUpdatedGravatar` uses an int32 (`event.params.id.toI32()`) which causes the `handleUpdatedGravatar` to panic with "Gravatar not found!". I make them both convert the `id` to a hex. -2. After I made the changes I deploy my subgraph to the local Graph node, **_forking the failing subgraph_** and setting `dataSources.source.startBlock` to `6190343` in `subgraph.yaml`: +1. Dikkatli bir inceleme sonrasında, iki işleyicimdeki `Gravatar`ları indekslerken kullanılan `kimlik (id)` temsillerinde bir uyuşmazlık olduğunu fark ettim. `handleNewGravatar` onu onaltılık (hex) gösterime (`event.params.id.toHex()`) dönüştürürken, `handleUpdatedGravatar` bir int32 (`event.params.id.toI32()`) kullanır; bu da `handleUpdatedGravatar`'ın "Gravatar not found!" hatasıyla durmasına neden olur. Her ikisinde de `id`'nin onaltılık gösterime dönüştürülmesini sağlarım. +2. Değişiklikleri yaptıktan sonra **_başarısız olan subgraph'ı forklayarak_** ve `subgraph.yaml`'da `dataSources.source.startBlock`'u `6190343` olarak ayarlayarak subgraph'ımı yerel Graph düğümüne dağıtırım: ```bash $ graph deploy gravity --debug-fork QmNp169tKvomnH3cPXTfGg4ZEhAHA6kEq5oy1XDqAxqHmW --ipfs http://localhost:5001 --node http://localhost:8020 ``` -3. I inspect the logs produced by the local Graph node and, Hooray!, everything seems to be working. -4.
I deploy my now bug-free subgraph to a remote Graph node and live happily ever after! (no potatoes tho) -5. The end... +3. Yerel Graph düğümü tarafından sağlanan kayıtları incelerim ve Yaşasın! Her şey sorunsuz çalışıyor gibi. +4. Artık hatasız subgraph'ımı uzaktaki bir Graph düğümüne dağıtıyorum ve bundan sonra mutlu bir şekilde yaşamaya devam edeceğim! (patates olmadan) +5. Son... diff --git a/website/pages/tr/cookbook/subgraph-uncrashable.mdx b/website/pages/tr/cookbook/subgraph-uncrashable.mdx index 989310a3f9a0..015a2720bb6a 100644 --- a/website/pages/tr/cookbook/subgraph-uncrashable.mdx +++ b/website/pages/tr/cookbook/subgraph-uncrashable.mdx @@ -1,29 +1,29 @@ --- -title: Safe Subgraph Code Generator +title: Güvenli Subgraph Kod Oluşturucu --- -[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/) is a code generation tool that generates a set of helper functions from the graphql schema of a project. It ensures that all interactions with entities in your subgraph are completely safe and consistent. +[Subgraph Uncrashable](https://float-capital.github.io/float-subgraph-uncrashable/), bir proje graphql şemasından yardımcı fonksiyonlar kümesi oluşturan bir kod oluşturma aracıdır. Subgraphlarınızdaki unsurlarla olan tüm etkileşimlerin tamamen güvenli ve tutarlı olmasını temin eder. -## Why integrate with Subgraph Uncrashable? +## Neden Subgraph Uncrashable'ı entegre etmelisiniz? -- **Continuous Uptime**. Mishandled entities may cause subgraphs to crash, which can be disruptive for projects that are dependent on The Graph. Set up helper functions to make your subgraphs “uncrashable” and ensure business continuity. +- **Devamlı Çalışma Süresi**. Yanlış işlenen unsurlar subgraphlar'ın çökmesine neden olabilir ve bu da Graph'a bağlı projeler için işleri aksatabilir. Subgraphlar'ınızı "çökmez" hale getirmek ve işinizin devamlılığını sağlamak için yardımcı fonksiyonları kurun. -- **Completely Safe**. Common problems seen in subgraph development are issues of loading undefined entities, not setting or initializing all values of entities, and race conditions on loading and saving entities. Ensure all interactions with entities are completely atomic. +- **Tamamen Güvenli**. Subgraph geliştirme aşamasında sık görülen sorunlar, tanımsız unsurların yüklenmesi, tüm unsur değerlerinin ayarlanmaması veya başlatılmaması ve unsurları yüklerken ve kaydederken yarış koşullarıdır. Unsurlarla olan etkileşimlerin tamamının tamamen atomik(atomic) olduğundan emin olun. -- **User Configurable** Set default values and configure the level of security checks that suits your individual project's needs. Warning logs are recorded indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. +- **Kullanıcı Ayarlı** Varsayılan değerleri ayarlayın ve proje ihtiyaçlarınıza uygun olarak güvenlik kontrolleri düzeyini yapılandırın. Veri doğruluğunu sağlamak için Subgraph mantığında ihlal gerçekleştiğinde uyarı kayıtları kaydedilir ve sorunu düzeltmek için kullanılabilir. -**Key Features** +**Ana Özellikler** -- The code generation tool accommodates **all** subgraph types and is configurable for users to set sane defaults on values. The code generation will use this config to generate helper functions that are to the users specification. +- Kod oluşturma aracı **tüm** subgraph türlerine uygun ve kullanıcıların makul varsayılan değerler ayarlamasına izin verir. 
Kod oluşturma, bu yapılandırmayı kullanarak kullanıcıların belirlediği özelliklere göre yardımcı fonksiyonlar oluşturacaktır. -- The framework also includes a way (via the config file) to create custom, but safe, setter functions for groups of entity variables. This way it is impossible for the user to load/use a stale graph entity and it is also impossible to forget to save or set a variable that is required by the function. +- Framework ayrıca unsur değişkenleri grupları için özel, ancak güvenli ayarlayıcı fonksiyonları oluşturmanın bir yolunu (yapılandırma dosyası aracılığıyla) içerir. Bu sayede, kullanıcının eski bir graph unsurunu yüklemesi/kullanması ve ayrıca fonksiyonun gerektirdiği bir değişkeni kaydetmeyi veya ayarlamayı unutması imkansız hale gelir. -- Warning logs are recorded as logs indicating where there is a breach of subgraph logic to help patch the issue to ensure data accuracy. These logs can be viewed in the The Graph's hosted service under the 'Logs' section. +- Uyarı kayıtları, subgraph mantığında bir ihlal olduğunda veri doğruluğunu sağlamak amacıyla sorunu düzeltmek için kullanılabilecek kayıtlar olarak kaydedilir. Bu kayıtlar, Graph'ın barındırılan hizmetinde 'Kayıtlar' bölümünde görüntülenebilir. -Subgraph Uncrashable can be run as an optional flag using the Graph CLI codegen command. +Subgraph Uncrashable, Graph CLI codegen komutu kullanılarak isteğe bağlı bir bayrak olarak çalıştırılabilir. ```sh graph codegen -u [options] [] ``` -Visit the [subgraph uncrashable documentation](https://float-capital.github.io/float-subgraph-uncrashable/docs/) or watch this [video tutorial](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) to learn more and to get started with developing safer subgraphs. +Daha fazla bilgi edinmek ve daha güvenli subgraphlar geliştirmeye başlamak için [subgraph uncrashable dökümanını](https://float-capital.github.io/float-subgraph-uncrashable/docs/) ziyaret edebilir veya bu [öğretici videoyu](https://float-capital.github.io/float-subgraph-uncrashable/docs/tutorial) izleyebilirsiniz. diff --git a/website/pages/tr/cookbook/substreams-powered-subgraphs.mdx b/website/pages/tr/cookbook/substreams-powered-subgraphs.mdx index 6b84c84358c8..bd2911674994 100644 --- a/website/pages/tr/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/tr/cookbook/substreams-powered-subgraphs.mdx @@ -1,30 +1,30 @@ --- -title: Substreams-powered subgraphs +title: Substreams destekli subgraphlar --- -[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. +[Substreams](/substreams), Graph Ağı için StreamingFast tarafından geliştirilen bir blok zinciri verileri işleme çerçevesidir. Bir substreams modülü, subgraph varlıklarıyla uyumlu olan varlık değişiklikleri çıktısı verebilir. Bir subgraph, böyle bir Substreams modülünü veri kaynağı olarak kullanabilir ve Substreams'in indeksleme hızını ve ek verilerini subgraph geliştiricilere kazandırabilir. 
-## Requirements +## Gereksinimler -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): +Bu cookbook, [yarn](https://yarnpkg.com/), [yerel Substreams geliştirme için gerekli bağımlılıklar](https://substreams.streamingfast.io/developers-guide/installation-requirements) ve Graph CLI'nin en son sürümünü (>=0.52.0) gerektirir: ``` npm install -g @graphprotocol/graph-cli ``` -## Get the cookbook +## Cookbook'u edinin -> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). +> Bu rehber, bu [Substreams destekli subgraph'ı referans](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph) olarak kullanmaktadır. ``` graph init --from-example substreams-powered-subgraph ``` -## Defining a Substreams package +## Bir Substreams paketi tanımlama -A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. +Bir Substreams paketi tiplerden ([Protocol Buffers](https://protobuf.dev/) olarak tanımlanmış olanlar), modüllerden (Rust dilinde yazılmış), tiplere referans veren ve modüllerin nasıl tetikleneceğini belirten bir `substreams.yaml` dosyasından oluşur. [Substreams geliştirme hakkında daha fazla bilgi edinmek için Substreams dökümantasyonunu ziyaret edin](/substreams) ve daha fazla örnek için [awesome-substreams](https://github.com/pinax-network/awesome-substreams) ve [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) sayfalarına göz atın. -The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): +Bahsi geçen Substreams paketi, Ethereum Ana Ağı'nda kontrat dağıtımlarını algılar ve yeni oluşturulan kontratlar için oluşturma bloğunu ve zaman damgasını takip eder. 
Bunun için `/proto/example.proto` içinde buna özel bir `Contract` türü bulunmaktadır ([Protokol Buffers tanımlama hakkında daha fazla bilgi edinin](https://protobuf.dev/programming-guides/proto3/#simple)): ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: +Substreams paketinin temel mantığı, her bloğu işleyen, Create çağrıları için bir filtreleme yapan ve geri dönüş olarak `Contracts` verisini döndüren `lib.rs` içindeki bulunan `map_contract` modülüdür: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. +> `substreams_entity_change` paketi ayrıca varlık değişiklikleri oluşturmak için özel bir `Tables` işlevine sahiptir ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). Oluşturulan Varlık Değişiklikleri, buna karşılık gelen subgraph'ın `subgraph.graphql` dosyasında tanımlanan `schema.graphql` varlıklarıyla uyumlu olmalıdır. ``` #[substreams::handlers::map] @@ -88,7 +88,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: +Bu Substreams paketini bir subgraph tarafından kullanılmak üzere hazırlamak için aşağıdaki komutları çalıştırmanız gerekir: ```bash yarn substreams:protogen # generates types in /src/pb @@ -147,19 +147,19 @@ yarn substreams:package # packages the substreams in a .spkg file # alternatively, yarn substreams:prepare calls all of the above commands ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> Substreams komutlarının temelini anlamak isterseniz, bu komutlar, `package.json` dosyasında tanımlanmıştır -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +Bu,`substreams.yaml` dosyasındaki paket adı ve sürümden hareketle bir `spkg` dosyası oluşturur. `spkg` dosyası, Graph Düğümü'nün bu Substreams paketini alabilmesi için gereken tüm bilgilere sahiptir. -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. +> Substreams paketini güncellerseniz, yaptığınız değişikliklere bağlı olarak `spkg`'yı güncel tutmak için yukarıdaki komutların bazılarını veya tümünü çalıştırmanız gerekebilir. -## Defining a Substreams-powered subgraph +## Substreams Destekli Bir Subgraph Tanımlama -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. +Substreams destekli subgraphlar yeni bir veri kaynağı türü('kind') olan "substreams"'i sunar. Bu tür subgraphlar yalnızca bir veri kaynağına sahip olabilir. 
-This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. +Bu veri kaynağı indekslenen ağı, Substreams paketi (`spkg`) olarak ilgili bir dosya konumu ve Substreams paketinin subgraph uyumlu varlık değişiklikleri üreten modülü belirtmelidir (bu durumda yukarıdaki Substreams paketinden `map_entity_changes`). Eşleştirme belirtilmiştir, fakat yalnızca eşleştirme türünü ("substreams/graph-entities") ve apiVersion'ı tanımlar. -> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). +> Şu anda Subgraph Stüdyo ve Graph Ağı, `mainnet`i (Ethereum Ana Ağı) indeksleyen Substreams destekli subgraphları desteklemektedir. ```yaml specVersion: 0.0.4 @@ -180,7 +180,7 @@ dataSources: apiVersion: 0.0.5 ``` -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. +`subgraph.yaml` dosyası ayrıca bir şema dosyasına referans verir. Bu dosyanın gereksinimleri değişmemiş olmasına rağmen, `subgraph.yaml` içinde referans verilen Substreams modülü tarafından üretilen varlık değişiklikleriyle uyumlu olmalıdır. ```graphql type Contract @entity { @@ -194,9 +194,9 @@ type Contract @entity { } ``` -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. +Yukarıdakiler verildiğinde, subgraph geliştiricileri Graph CLI kullanarak bu Substreams destekli subgraph'ı dağıtabilir. -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). +> Ethereum Ana Ağını indeksleyen Substreams destekli subgraphlar, [Subgraph Stüdyo'ya](https://thegraph.com/studio/) dağıtılabilir. ```bash yarn install # install graph-cli @@ -204,11 +204,11 @@ yarn subgraph:build # build the subgraph yarn subgraph:deploy # deploy the subgraph ``` -That's it! You have built and deployed a Substreams-powered subgraph. +Bu kadar! Bir Substreams destekli subgraph oluşturup dağıttınız. -## Serving Substreams-powered subgraphs +## Substreams destekli subgraphlar'ın sunulması -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. These providers can be configured via a `config.toml` file: +Substreams destekli subgraphlar'ı sunabilmek için Graph Düğümü'nün ilgili ağ için bir Substreams sağlayıcısı ve zincir başını takip etmek için bir Firehose veya RPC yapılandırılması gerekmektedir. 
Bu sağlayıcılar, bir `config.toml` dosyası aracılığıyla yapılandırılabilir: ```toml [chains.mainnet] diff --git a/website/pages/tr/cookbook/upgrading-a-subgraph.mdx b/website/pages/tr/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..2782090d89c5 100644 --- a/website/pages/tr/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/tr/cookbook/upgrading-a-subgraph.mdx @@ -1,25 +1,25 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: Mevcut Bir Subgraph'ı Graph Ağına Yükseltme --- -## Introduction +## Giriş -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +Bu, subgraph'ınızı barındırılan hizmetten Graph'ın merkeziyetsiz ağına nasıl yükselteceğinize yönelik bir rehberdirr. Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido ve daha birçok proje dahil olmak üzere 1.000'den fazla subgraph başarıyla Graph Ağı'na yükseltildi! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. +Yükseltme işlemi hızlıdır ve subgraphlar'ınız yalnızca Graph Ağı'nda elde edebileceğiniz güvenilirlik ve performanstan sonsuza kadar yararlanacaktır. -### Prerequisites +### Ön Koşullar -- You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. -- You have a wallet with ETH to publish your subgraph on-chain. -- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. +- Barındırılan hizmet üzerinde zaten bir subgraph dağıttınız. +- The subgraph is indexing a chain available on The Graph Network. +- Subgraph'ınızı zincir üzerinde yayınlamak için ETH içeren bir cüzdanınız var. +- İndeksleyicilerin indekslemeye başlayabilmesi için subgraph'ınızı düzenlemek üzere ~10.000 GRT'ye sahipsiniz. -## Upgrading an Existing Subgraph to The Graph Network +## Mevcut Bir Subgraph'ı Graph Ağına Yükseltme -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> Subgraph'ınız için özel komutları [Subgraph Stüdyo'da](https://thegraph.com/studio/) bulabilirsiniz. -1. Get the latest version of the graph-cli installed: +1. Graph-cli'nin en son sürümünü yükleyin: ```sh npm install -g @graphprotocol/graph-cli @@ -29,29 +29,29 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +subgraph.yaml dosyasındaki `apiVersion` sürümünüzün `0.0.5` veya daha yüksek olduğundan emin olun. -2. Inside the subgraph's main project repository, authenticate the subgraph to deploy and build on the studio: +2. Subgraph'ın ana proje deposunun içinde, stüdyoda dağıtmak ve derlemek için subgraph'ın doğrulamasını yapın: ```sh graph auth --studio ``` -3. Generate files and build the subgraph: +3. Dosyaları ve subgraph'ı oluşturun: ```sh graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +Subgraph'ınızda oluşturma hataları varsa, [AssemblyScript Geçiş Kılavuzu](/release-notes/assemblyscript-migration-guide/)'na bakın. -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. 
You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. Cüzdanınızla [Subgraph Stüdyo](https://thegraph.com/studio/) adresinde oturum açın ve subgraph'ı dağıtın. Subgraph'ınızın adını temel alan Stüdyo kullanıcı arayüzünde `` öğenizi bulabilirsiniz. ```sh graph deploy --studio ``` -5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): +5. Stüdyo'nun test alanında sorguları test edin. İşte [Sushi - Mainnet Exchange Subgraph'ı](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground) için bazı örnekler: ```sh { @@ -68,69 +68,69 @@ graph deploy --studio } ``` -6. At this point, your subgraph is now deployed on Subgraph Studio, but not yet published to the decentralized network. You can now test the subgraph to make sure it is working as intended using the temporary query URL as seen on top of the right column above. As this name already suggests, this is a temporary URL and should not be used in production. +6. Bu noktada, subgraph'ınız artık Subgraph Stüdyo'da dağıtılmıştır, fakat henüz merkeziyetsiz ağda yayınlanmamıştır. Artık yukarıdaki sağ sütunun üst kısmında görülen geçici sorgu URL'sini kullanarak amaçlandığı gibi çalıştığından emin olmak için subgraph'ı test edebilirsiniz. Bu addan da anlaşılacağı gibi, bu geçici bir URL'dir ve üretimde kullanılmamalıdır. -- Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei. -- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. +- Güncelleme sadece mevcut subgraph'ınızın başka bir versiyonunu zincir üzerinde yayınlamaktır. +- Bunun bir maliyeti olduğundan, yayınlamadan önce "Geliştirme Sorgusu URL'sini" kullanarak Subgraph Stüdyo'da subgraph'ınızı dağıtmanız ve test etmeniz şiddetle tavsiye edilir. Örnek bir işlemi [buradan](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b) görebilirsiniz. Fiyatlar kabaca 100 gwei'de 0,0425 ETH civarındadır. +- Subgraph'ınızı güncellemeniz gerektiğinde, sizden bir güncelleme ücreti alınacaktır. Bu bir maliyet oluşturduğundan, ana ağa dağıtmadan önce subgraph'ınızı Goerli'de dağıtmanız ve test etmeniz şiddetle tavsiye edilir. Bazı durumlarda, o subgraph'ta sinyal yoksa bir miktar GRT de gerektirebilir. Bu subgraph sürümünde sinyal/kürasyon olması durumunda (otomatik geçiş kullanılarak), vergiler bölünecektir. -7. Publish the subgraph on The Graph's decentralized network by hitting the "Publish" button. +7. "Yayınla" düğmesine basarak subgraph'ı Graph'ın merkeziyetsiz ağında yayınlayın. -You should curate your subgraph with GRT to ensure that it is indexed by Indexers.
To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. +İndeksleyiciler tarafından indekslendiğinden emin olmak için subgraph'ınızı GRT ile kürate etmelisiniz. Gaz maliyetlerinden tasarruf etmek için, subgraph'ınızı ağda yayınladığınız işlemle aynı işlemde kürate edebilirsiniz. Yüksek hizmet kalitesi için subgraph'ınızı en az 10.000 GRT ile kürate etmeniz önerilir. -And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +İşte bu kadar! Yayınlamayı tamamladıktan sonra, subgraph'ınızı [Graph Gezgini](https://thegraph.com/explorer) aracılığıyla merkeziyetsiz ağ üzerinde canlı olarak görüntüleyebileceksiniz. -Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. +Küratörlere subgraph'ınızın sinyal vermeye hazır olduğunu bildirmek için Discord'daki [#Curators kanalından](https://discord.gg/s5HfGMXmbW) yararlanabilirsiniz. Beklenen sorgu hacminizi onlarla paylaşmanız da faydalı olacaktır. Böylece, subgraph'ınızda ne kadar GRT sinyali vermeleri gerektiğini kestirebilirler. -### Create an API key +### Bir API anahtarı oluşturun -You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). +Subgraph Stüdyo'da [buradan](https://thegraph.com/studio/apikeys/) bir API anahtarı oluşturabilirsiniz. ![API key creation page](/img/api-image.png) -At the end of each week, an invoice will be generated based on the query fees that have been incurred during this period. This invoice will be paid automatically using the GRT available in your balance. Your balance will be updated after the cost of your query fees are withdrawn. Query fees are paid in GRT via the Arbitrum network. You will need to add GRT to the Arbitrum billing contract to enable your API key via the following steps: +Her haftanın sonunda, bu süre zarfında gerçekleşen sorgu ücretlerine dayalı bir fatura oluşturulacaktır. Bu fatura, bakiyenizde bulunan GRT kullanılarak otomatik olarak ödenecektir. Sorgu ücretlerinizin maliyeti çekildikten sonra bakiyeniz güncellenecektir. Sorgu ücretleri Arbitrum ağı üzerinden GRT olarak ödenir. API anahtarınızı etkinleştirmek için aşağıdaki adımları izleyerek GRT'yi Arbitrum faturalandırma sözleşmesine eklemeniz gerekmektedir: -- Purchase GRT on an exchange of your choice. -- Send the GRT to your wallet. -- On the Billing page in Studio, click on Add GRT. +- Seçtiğiniz bir borsadan GRT satın alın. +- GRT'yi cüzdanınıza gönderin. +- Stüdyo'daki Faturalandırma sayfasında GRT Ekle'ye tıklayın. ![Add GRT in billing](/img/Add-GRT-New-Page.png) -- Follow the steps to add your GRT to your billing balance. -- Your GRT will be automatically bridged to the Arbitrum network and added to your billing balance. +- GRT'nizi fatura bakiyenize eklemek için adımları izleyin. +- GRT'niz otomatik olarak Arbitrum ağına köprülenecek ve fatura bakiyenize eklenecektir. ![Billing pane](/img/New-Billing-Pane.png) -> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance.
+> Not: GRT'nin fatura bakiyenize eklenmesine ilişkin tüm talimatlar için [resmi faturalandırma sayfasına](../billing.mdx) bakın. -### Securing your API key +### API anahtarınızın güvenliğini sağlama -It is recommended that you secure the API by limiting its usage in two ways: +API'nin kullanımını iki şekilde sınırlandırarak güvenliğini sağlamanız önerilir: -1. Authorized Subgraphs -2. Authorized Domain +1. Yetkilendirilmiş Subgraphlar +2. Yetkilendirilmiş Domain -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +API anahtarınızı [buradan](https://thegraph.com/studio/apikeys/test/) güvence altına alabilirsiniz. ![Subgraph lockdown page](/img/subgraph-lockdown.png) -### Querying your subgraph on the decentralized network +### Merkeziyetsiz ağ üzerinde subgraph'ınızı sorgulama -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. +Artık ağdaki İndeksleyicilerin indeksleme durumunu Graph Gezgini'nde kontrol edebilirsiniz (örnek [burada](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). Üstteki yeşil çizgi, gönderme sırasında 8 İndeksleyicinin bu subgraph'ı başarıyla indekslediğini göstermektedir. Ayrıca İndeksleyici sekmesinde hangi İndeksleyicilerin subgraph'ınızı aldığını görebilirsiniz. ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) -As soon as the first Indexer has fully indexed your subgraph you can start to query the subgraph on the decentralized network. In order to retrieve the query URL for your subgraph, you can copy/paste it by clicking on the symbol next to the query URL. You will see something like this: +İlk İndeksleyici subgraph'ınızı tam olarak indekslediğinde, subgraph'ı merkeziyetsiz ağda sorgulamaya başlayabilirsiniz. Subgraph'ınızın sorgu URL'sini almak için, sorgu URL'sinin yanındaki simgeye tıklayarak kopyalayıp yapıştırabilirsiniz. Şunun gibi bir şey göreceksiniz: `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. +Önemli: `[api-key]` yerine yukarıdaki bölümde oluşturulan gerçek API anahtarını kullandığınızdan emin olun. -You can now use that Query URL in your dapp to send your GraphQL requests to. +Artık GraphQL isteklerinizi göndermek için merkeziyetsiz uygulamanızda bu Sorgu URL'sini kullanabilirsiniz. -Congratulations! You are now a pioneer of decentralization! +Tebrikler! Artık merkeziyetsizliğin öncülerinden birisiniz! -> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: +> Not: Ağın dağıtılmış yapısı nedeniyle, farklı İndeksleyicilerin farklı blokları indekslemiş olması söz konusu olabilir.
Yalnızca yeni verileri almak için, aşağıdaki örnekte gösterildiği gibi block: `{ number_gte: $minBlock }` alan bağımsız değişkeniyle sorgunuzu sunmak için bir İndeksleyicinin indekslemesi gereken minimum bloğu belirtebilirsiniz: ```graphql { @@ -140,86 +140,86 @@ Congratulations! You are now a pioneer of decentralization! } ``` -More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). +Ağın doğası ve yeniden düzenlemelerin nasıl ele alınacağı hakkında daha fazla bilgi [Dağıtılmış Sistemler](/querying/distributed-systems/) dokümantasyon makalesinde açıklanmaktadır. -## Updating a Subgraph on the Network +## Ağ Üzerinde Bir Subgraph'ın Güncellenmesi -If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. +Ağdaki mevcut bir subgraph'ı güncellemek isterseniz, bunu Graph CLI aracılığıyla subgraph'ınızın yeni bir sürümünü Subgraph Stüdyo'ya dağıtarak yapabilirsiniz. -1. Make changes to your current subgraph. A good idea is to test small fixes on the Subgraph Studio by publishing to Goerli. -2. Deploy the following and specify the new version in the command (eg. v0.0.1, v0.0.2, etc): +1. Mevcut subgraph'ınızda değişiklikler yapın. Küçük düzeltmeleri Goerli'de yayınlayarak Subgraph Stüdyo'da test etmek iyi bir fikir olabilir. +2. Aşağıdakileri dağıtın ve komutta yeni sürümü belirtin (örn. v0.0.1, v0.0.2, vb.): ```sh graph deploy --studio ``` -3. Test the new version in the Subgraph Studio by querying in the playground -4. Publish the new version on The Graph Network. Remember that this requires gas (as described in the section above). +3. Test alanında (playground) sorgulama yaparak Subgraph Stüdyo'da yeni sürümü test edin +4. Yeni sürümü Graph Ağı'nda yayınlayın. Bunun için gas gerektiğini unutmayınız (yukarıdaki bölümde açıklandığı gibi). -### Owner Update Fee: Deep Dive +### Sahip Güncelleme Ücreti: Derinlemesine İnceleme -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> Not: Arbitrum'da küratörlük bağlanma eğrileri kullanmaz. Arbitrum hakkında daha fazla bilgiyi [buradan](/arbitrum/arbitrum-faq/) edinebilirsiniz. -An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). +Bir güncelleme, GRT'nin subgraph'ın eski versiyonundan yeni versiyonuna taşınmasını gerektirmektedir. Bu, her güncelleme için yeni bir bağlanma eğrisinin oluşturulacağı anlamına gelir (bağlanma eğrileri hakkında daha fazla bilgi [burada](/network/curating#bonding-curve-101)). -The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph. +Yeni bağlanma eğrisi, yeni versiyona taşınan tüm GRT'den %1 kürasyon vergisi almaktadır. Sahip bunun %50'sini veya %1,25'ini ödemek zorundadır. Diğer %1,25'lik kısım ise tüm küratörler tarafından ücret olarak karşılanır.
Bu teşvik tasarımı, bir subgraph sahibinin tekrarlamalı güncelleme çağrılarıyla küratörün tüm fonlarını tüketmesini önlemek için uygulanmaktadır. Herhangi bir küratörlük faaliyeti yoksa, kendi subgraph'ınızı sinyallemek için en az 100 GRT ödemeniz gerekecektir. -Let's make an example, this is only the case if your subgraph is being actively curated on: +Bir örnek verelim, bu yalnızca subgraph'ınızda aktif olarak küratörlük yapılıyorsa geçerlidir: -- 100,000 GRT is signaled using auto-migrate on v1 of a subgraph -- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned -- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. +- 100.000 GRT, bir subgraph'ın birinci versiyonunda otomatik geçiş kullanılarak bildirilir +- Subgraph sahibi, ikinci versiyona güncelleme yapar. 100.000 GRT yeni bir bağlanma eğrisine taşınır, 97.500 GRT yeni eğriye yerleştirilir ve 2.500 GRT yakılır +- Sahip, daha sonra ücretin yarısını ödemek için 1250 GRT yakmış bulunmaktadır. Sahip, güncelleme öncesinde bunu cüzdanlarında bulundurmalıdır; aksi halde güncelleme başarılı olmayacaktır. Bu, güncelleme ile aynı işlemde gerçekleşir. -_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ +_Bu mekanizma şu anda ağda yayında olsa da, topluluk şu anda subgraph geliştiricileri için güncelleme maliyetini azaltmanın yollarını tartışıyor._ -### Maintaining a Stable Version of a Subgraph +### Bir Subgraph'ın Kararlı Bir Sürümünü Koruma -If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. +Subgraph'ınızda çok fazla değişiklik yapıyorsanız, onu sürekli güncellemek ve güncelleme maliyetlerini karşılamak iyi bir fikir değildir. Subgraph'ınız istikrarlı ve tutarlı bir sürümünü korumak, yalnızca maliyet açısından değil, aynı zamanda İndeksleyicilerin senkronizasyon sürelerinden emin olabilmeleri için de kritik öneme sahiptir. İndeksleyicilerin senkronizasyon sürelerinin etkilenmemesi için bir güncelleme planladığınızda indeksleyiciler sinyallenmelidir. Subgraph'ınızı sürümlendirirken İndeksleyicileri bilgilendirmek için Discord'daki [#Indexers kanalını](https://discord.gg/JexvtHa7dq) kullanmaktan çekinmeyin. -Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. +Subgraphlar, harici geliştiricilerin yararlandığı açık API'lerdir. Açık API'lerin harici geliştiricilerin uygulamalarını bozmaması için katı standartlara uyması gerekmektedir. 
Graph Ağı'nda bir subgraph geliştiricisi, İndeksleyicileri, yeni bir subgraph'ı senkronize etmenin onlar için ne kadar sürdüğünü ve **aynı zamanda** subgraph'ı kullanan diğer geliştiricileri de göz önünde bulundurmalıdır. -### Updating the Metadata of a Subgraph +### Bir Subgraph'ın Üst Verisini Güncelleme -You can update the metadata of your subgraphs without having to publish a new version. The metadata includes the subgraph name, image, description, website URL, source code URL, and categories. Developers can do this by updating their subgraph details in the Subgraph Studio where you can edit all applicable fields. +Yeni bir sürüm yayınlamak zorunda kalmadan subgraphlar'ınızın üst verisini güncelleyebilirsiniz. Üst veri subgraph adını, görüntüsünü, açıklamasını, web site URL'sini, kaynak kodu URL'sini ve kategorileri içerir. Geliştiriciler bunu, geçerli tüm alanları düzenleyebilmenize olanak sağlayan Subgraph Stüdyo'da subgraph ayrıntılarını güncelleyerek yapabilirler. -Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +**Gezgin'de Subgraph Ayrıntılarını Güncelle** seçeneğinin işaretli olduğundan emin olun ve **Kaydet** seçeneğine tıklayın. Bunu işaretlediğiniz takdirde, yeni bir dağıtımla, yeni bir sürüm yayınlamak zorunda kalmadan Gezgin'deki subgraph ayrıntılarını güncelleyen bir zincir içi işlem oluşturulacaktır. -## Best Practices for Deploying a Subgraph to The Graph Network +## Bir Subgraph'ı Graph Ağına Dağıtmak için En İyi Uygulamalar -1. Leveraging an ENS name for Subgraph Development: +1. Subgraph Geliştirme için bir ENS adından yararlanma: -- Set up your ENS [here](https://app.ens.domains/) -- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). +- ENS'nizi [buradan](https://app.ens.domains/) oluşturun +- ENS adınızı [buradaki](https://thegraph.com/explorer/settings?view=display-name) ayarlarınıza ekleyin. -2. The more filled out your profiles are, the better the chances for your subgraphs to be indexed and curated. +2. Profilleriniz ne kadar dolu olursa, subgraphlar'ınızın indekslenme ve kürate edilme şansı o kadar artar. -## Deprecating a Subgraph on The Graph Network +## Graph Ağında Bir Subgraph'ın Kullanımdan Kaldırılması -Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. +Subgraph'ınızı kullanımdan kaldırmak ve Graph Ağı'ndan silmek için [buradaki](/managing/deprecating-a-subgraph) adımları izleyin. -## Querying a Subgraph + Billing on The Graph Network +## Bir Subgraph'ı Sorgulama + Graph Ağında Faturalama -The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. +Barındırılan hizmet, geliştiricilerin subgraphlar'ını herhangi bir kısıtlama olmaksızın dağıtmalarına izin verecek şekilde oluşturulmuştur. -In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). +Graph Ağı'nın gerçekten merkeziyetsiz olması için, protokol teşviklerinin temel bir parçası olarak sorgu ücretlerinin ödenmesi gerekir. API'lere abone olma ve sorgu ücretlerini ödeme hakkında daha fazla bilgi için [buradaki](/billing/) faturalandırma belgelerine göz atın.
-### Estimate Query Fees on the Network +### Ağdaki Sorgu Ücretlerini Tahmin Etme -While this is not a live feature in the product UI, you can set your maximum budget per query by taking the amount you're willing to pay per month and dividing it by your expected query volume. +Bu özellik şu anda ürün arayüzünde aktif olmasa da, her ay ödemek istediğiniz tutarı alıp beklenen sorgu hacminize bölerek sorgu başına maksimum bütçenizi ayarlayabilirsiniz. -While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. +Sorgu bütçenize siz karar verirken, bir İndeksleyicinin sorguları karar verdiğiniz fiyattan sunmaya istekli olacağının garantisi yoktur. Bir Ağ Geçidi sizi, ödemeye razı olduğunuz fiyattan veya daha düşük bir fiyattan sorgu sunmaya istekli bir İndeksleyici ile eşleştirebilirse, bütçenizin **ve** onların fiyatının deltasını/farkını ödersiniz. Sonuç olarak, daha düşük bir sorgu fiyatı, kullanabileceğiniz İndeksleyici havuzunu daraltır ve bu da aldığınız hizmetin kalitesini etkileyebilir. Subgraph'ınıza kürasyonu ve büyük isim İndeksleyicileri çekebileceğinden ötürü yüksek sorgu ücretleri belirlemek faydalıdır. -Remember that it's a dynamic and growing market, but how you interact with it is in your control. There is no maximum or minimum price specified in the protocol or the Gateways. For example, you can look at the price paid by a few of the dapps on the network (on a per-week basis), below. See the last column, which shows query fees in GRT. +Bunun dinamik ve büyüyen bir pazar olduğunu, fakat bununla nasıl etkileşim kuracağınızın sizin kontrolünüzde olduğunu unutmayın. Protokolde veya Ağ Geçitlerinde belirtilen maksimum veya minimum bir fiyat yoktur. Örneğin, ağdaki birkaç merkeziyetsiz uygulama tarafından ödenen ücrete (haftalık bazda) aşağıdan bakabilirsiniz. GRT cinsinden sorgu ücretlerini gösteren son sütuna bakın. ![QueryFee](/img/QueryFee.png) -## Additional Resources +## Ek Kaynaklar -If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: +Eğer hala kafanız karışıksa, endişelenmeyin!
Aşağıdaki kaynaklara göz atın veya subgraphları merkeziyetsiz ağa yükseltme hakkındaki video kılavuzumuzu izleyin: -- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) -- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around - - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [Subgraph Studio documentation](/deploying/subgraph-studio) +- [Graph Ağı Kontratları](https://github.com/graphprotocol/contracts) +- [Kürasyon Sözleşmesi](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - GNS'nin sarmaladığı temel sözleşme + - Adres - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` +- [Subgraph Stüdyo dokümantasyonu](/deploying/subgraph-studio) diff --git a/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx b/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx index 3e7fd9021562..a10ee956bc14 100644 --- a/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx +++ b/website/pages/tr/deploying/deploying-a-subgraph-to-hosted.mdx @@ -8,69 +8,69 @@ Bu sayfada barındırılan hizmete bir subgraph'in nasıl deploy edileceği aç ## Bir barındırılan hizmet hesabı oluşturun -Before using the Hosted Service, create an account in our Hosted Service. You will need a [Github](https://github.com/) account for that; if you don't have one, you need to create that first. Then, navigate to the [Hosted Service](https://thegraph.com/hosted-service/), click on the _'Sign up with Github'_ button, and complete Github's authorization flow. +Barındırılan hizmeti kullanmadan önce, barındırılan hizmetimizde bir hesap oluşturun. Bunun için bir [Github](https://github.com/) hesabına ihtiyacınız olacak; eğer sahip değilseniz, önce onu oluşturmanız gerekir. Ardından, [Hosted Service](https://thegraph.com/hosted-service/)'e gidin, _'Github ile Kaydolun'_ düğmesine tıklayın ve Github'ın yetkilendirme akışını tamamlayın. -## Store the Access Token +## Erişim Belirtecinizi Saklayın -After creating an account, navigate to your [dashboard](https://thegraph.com/hosted-service/dashboard). Copy the access token displayed on the dashboard and run `graph auth --product hosted-service `. This will store the access token on your computer. You only need to do this once, or if you ever regenerate the access token. +Bir hesap oluşturduktan sonra, [dashboard](https://thegraph.com/hosted-service/dashboard)'unuza gidin. Dashboard üzerinde görüntülenen erişim belirtecini kopyalayın ve `graph auth --product hosted-service `'yi çalıştırın. Bu, erişim belirtecini bilgisayarınızda saklayacaktır. Bunu yalnızca bir kez veya erişim belirtecini yeniden oluşturursanız yapmanız gerekir. -## Create a Subgraph on the Hosted Service +## Barındırılan Hizmet Üzerinde Subgraph Oluşturun -Before deploying the subgraph, you need to create it in The Graph Explorer. Go to the [dashboard](https://thegraph.com/hosted-service/dashboard) and click on the _'Add Subgraph'_ button and fill in the information below as appropriate: +Subgraph deploy etmeden önce, onu Graph Explorer'da oluşturmanız gerekir. [dashboard](https://thegraph.com/hosted-service/dashboard)'e gidin ve _'Subgraph Ekle'_ düğmesini tıklayın ve aşağıdaki bilgileri uygun şekilde doldurun: -**Image** - Select an image to be used as a preview image and thumbnail for the subgraph. +**Resim** - Subgraph için önizleme resmi ve küçük resmi olarak kullanılacak bir resim seçin.
-**Subgraph Name** - Together with the account name that the subgraph is created under, this will also define the `account-name/subgraph-name`-style name used for deployments and GraphQL endpoints. _This field cannot be changed later._ +**Subgraph Adı** - Subgraph'in altında oluşturulduğu hesap adıyla birlikte bu, dağıtımlar ve GraphQL uç noktaları için kullanılan `account-name/subgraph-name` stili adı da tanımlar. _Bu alan daha sonra değiştirilemez._ -**Account** - The account that the subgraph is created under. This can be the account of an individual or organization. _Subgraphs cannot be moved between accounts later._ +**Hesap** - Subgraph'in altında oluşturulduğu hesap. Bu, bir kişinin veya kuruluşun hesabı olabilir. _Subgraph'ler daha sonra hesaplar arasında taşınamaz._ -**Subtitle** - Text that will appear in subgraph cards. +**Altyazı** -Subgraph kartlarında görünecek metin. -**Description** - Description of the subgraph, visible on the subgraph details page. +**Açıklama** - Subgraph'in açıklaması, subgraph ayrıntıları sayfasında görünür. -**GitHub URL** - Link to the subgraph repository on GitHub. +**GitHub URL** - GitHub'daki subgraph deposuna bağlantı. -**Hide** - Switching this on hides the subgraph in the Graph Explorer. +**Gizle** - Bunu açmak, Graph Gezgini'ndeki subgraph'i gizler. -After saving the new subgraph, you are shown a screen with help on how to install the Graph CLI, how to generate the scaffolding for a new subgraph, and how to deploy your subgraph. The first two steps were covered in the [Defining a Subgraph section](/developing/defining-a-subgraph). +Yeni subgraph'i kaydettikten sonra, Graph CLI'yi nasıl kuracağınız, yeni bir subgraph için yapı iskelesini nasıl oluşturacağınız ve subgraph'inizi nasıl deploy edeceğiniz konusunda yardım içeren bir ekran gösterilir. İlk iki adım [Bir Subgraph Tanımlama bölümünde](/developing/defining-a-subgraph) ele alındı. -## Deploy a Subgraph on the Hosted Service +## Barındırılan Hizmette Bir Subgraph Deploy Edin -Deploying your subgraph will upload the subgraph files that you've built with `yarn build` to IPFS and tell the Graph Explorer to start indexing your subgraph using these files. +Subgraph'inizi deploy etmek, `yarn build` ile oluşturduğunuz subgraph dosyalarını IPFS'ye yükleyecek ve Graph Gezgini'ne bu dosyaları kullanarak subgraph'inizi indekslemeye başlamasını söyleyecektir. -You deploy the subgraph by running `yarn deploy` +`yarn deploy`'ı çalıştırarak subgraph'i deploy edersiniz -After deploying the subgraph, the Graph Explorer will switch to showing the synchronization status of your subgraph. Depending on the amount of data and the number of events that need to be extracted from historical blocks, starting with the genesis block, syncing can take from a few minutes to several hours. +Subgraph'i deploy ettikten sonra, Graph Gezgini, subgraph'inizin senkronizasyon durumunu göstermeye geçiş yapacaktır. Genesis bloğundan başlayarak geçmiş bloklardan çıkarılması gereken veri miktarına ve etkinlik sayısına bağlı olarak senkronizasyon birkaç dakikadan birkaç saate kadar sürebilir. -The subgraph status switches to `Synced` once the Graph Node has extracted all data from historical blocks. The Graph Node will continue inspecting blocks for your subgraph as these blocks are mined. +Graph node'u tarihsel bloklardan tüm verileri çıkardıktan sonra subgraph durumu `Synced`'e geçer. Graph node'u, bu bloklar çıkarılırken subgraph'iniz için blokları incelemeye devam edecektir. 
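Buradaki `yarn deploy` betiği genellikle Graph CLI'nin barındırılan hizmet dağıtım komutunu sarmalayan bir kısayoldur. Aşağıdaki çağrı yalnızca temsili bir taslaktır; gerçek betik projenizin `package.json` dosyasında tanımlıdır ve `<GITHUB_USER>/<SUBGRAPH_NAME>` değeri projenize göre değişen bir yer tutucudur:

```sh
# Illustrative only: the real deploy script is defined in your project's package.json
graph deploy \
  --product hosted-service \
  <GITHUB_USER>/<SUBGRAPH_NAME>
```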
-## Redeploying a Subgraph +## Bir Subgraph'i Yeniden Deploy Etmek -When making changes to your subgraph definition, for example, to fix a problem in the entity mappings, run the `yarn deploy` command above again to deploy the updated version of your subgraph. Any update of a subgraph requires that Graph Node reindexes your entire subgraph, again starting with the genesis block. +Örneğin varlık eşlemelerindeki bir sorunu düzeltmek için subgraph tanımınızda değişiklik yaparken, subgraph'inizin güncellenmiş sürümünü deploy etmek için yukarıdaki `yarn deploy` komutunu tekrar çalıştırın. Bir subgraph'in herhangi bir güncellemesi, Graph node'unun yine genesis bloğundan başlayarak tüm subgraph'inizi yeniden indekslemesi gerektirir. -If your previously deployed subgraph is still in status `Syncing`, it will be immediately replaced with the newly deployed version. If the previously deployed subgraph is already fully synced, Graph Node will mark the newly deployed version as the `Pending Version`, sync it in the background, and only replace the currently deployed version with the new one once syncing the new version has finished. This ensures that you have a subgraph to work with while the new version is syncing. +Önceden deploy edilen subgraph'iniz hala `Syncing` durumundaysa, hemen yeni deploy edilen sürümle değiştirilecektir. Daha önce deploy edilen subgraph zaten tam olarak senkronize edilmişse, Graph node'u yeni deploy edilen sürümü `Pending Version` olarak işaretler, arka planda senkronize eder ve yalnızca yeni sürümün senkronizasyonu bittiğinde mevcut deploy edilen sürümü yenisiyle değiştirir. Bu, yeni sürüm eşitlenirken üzerinde çalışabileceğiniz bir subgraph'inizin olmasını sağlar. -## Deploying the subgraph to multiple networks +## Subgraph'i birden çok ağa deploy etmek -In some cases, you will want to deploy the same subgraph to multiple networks without duplicating all of its code. The main challenge that comes with this is that the contract addresses on these networks are different. +Bazı durumlarda, tüm kodunu çoğaltmadan aynı subgraph'i birden çok ağa deploy etmek isteyeceksiniz. Bununla birlikte gelen temel zorluk, bu ağlardaki sözleşme adreslerinin farklı olmasıdır. -### Using graph-cli +### Graph-cli Kullanımı -Both `graph build` (since `v0.29.0`) and `graph deploy` (since `v0.32.0`) accept two new options: +Hem `graph build` (`v0.29.0`'dan beri) hem de `graph deploy` (`v0.32.0`'dan beri) iki yeni seçeneği kabul eder: ```sh -Options: +Seçenekler: ... --network Network configuration to use from the networks config file --network-file Networks config file path (default: "./networks.json") ``` -You can use the `--network` option to specify a network configuration from a `json` standard file (defaults to `networks.json`) to easily update your subgraph during development. +Geliştirme sırasında subgraph'inizi kolayca güncellemek için bir `json` standart dosyasından (varsayılan olarak `networks.json`'dir) bir ağ yapılandırması belirtmek için `--network` seçeneğini kullanabilirsiniz. -**Note:** The `init` command will now auto-generate a `networks.json` based on the provided information. You will then be able to update existing or add additional networks. +**Not:** `init` komutu artık sağlanan bilgilere göre otomatik olarak bir `networks.json` oluşturacaktır. Ardından mevcut ağları güncelleyebilir veya ek ağlar ekleyebilirsiniz. 
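Örneğin, sözleşme ve ağ bilgileri verilerek çalıştırılan bir `graph init` çağrısı, `networks.json` dosyasını seçilen ağ için otomatik olarak oluşturur. Aşağıdaki komut yalnızca temsilidir; sözleşme adresi, kullanıcı ve subgraph adları varsayımsal yer tutuculardır:

```sh
# Hypothetical values for illustration; init generates networks.json for the chosen network
graph init \
  --product hosted-service \
  --from-contract 0x0000000000000000000000000000000000000000 \
  --network goerli \
  example-user/example-subgraph
```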
-If you don't have a `networks.json` file, you'll need to manually create one with the following structure: +Bir `networks.json` dosyanız yoksa, aşağıdaki yapıya sahip bir dosyayı el ile oluşturmanız gerekir: ```json { @@ -98,9 +98,9 @@ If you don't have a `networks.json` file, you'll need to manually create one wit } ``` -**Note:** You don't have to specify any of the `templates` (if you have any) in the config file, only the `dataSources`. If there are any `templates` declared in the `subgraph.yaml` file, their network will be automatically updated to the one specified with the `--network` option. +**Not:** Yapılandırma dosyasında herhangi bir `templates` (eğer varsa) belirtmeniz gerekmez, yalnızca `dataSources`'ü belirtmeniz gerekir. `subgraph.yaml` dosyasında bildirilen herhangi bir `templates` varsa, ağları otomatik olarak `--network` seçeneği ile belirtilene güncellenir. -Now, let's assume you want to be able to deploy your subgraph to the `mainnet` and `goerli` networks, and this is your `subgraph.yaml`: +Şimdi, subgraph'inizi `mainnet` ve `goerli` ağlarına deploy etmek istediğinizi varsayalım ve bu sizin `subgraph.yaml`'iniz: ```yaml # ... @@ -115,7 +115,7 @@ dataSources: kind: ethereum/events ``` -This is what your networks config file should look like: +Ağ yapılandırma dosyanız şöyle görünmelidir: ```json { @@ -132,7 +132,7 @@ This is what your networks config file should look like: } ``` -Now we can run one of the following commands: +Artık aşağıdaki komutlardan birini çalıştırabiliriz: ```sh # Using default networks.json file @@ -142,7 +142,7 @@ yarn build --network goerli yarn build --network goerli --network-file path/to/config ``` -The `build` command will update your `subgraph.yaml` with the `goerli` configuration and then re-compile the subgraph. Your `subgraph.yaml` file now should look like this: +`build` komutu, `subgraph.yaml` dosyanızı `goerli` yapılandırmasıyla güncelleyecek ve ardından subgraph'i yeniden derleyecektir. `subgraph.yaml` dosyanız artık şöyle görünmelidir: ```yaml # ... @@ -157,9 +157,9 @@ dataSources: kind: ethereum/events ``` -Now you are ready to `yarn deploy`. +Artık `yarn deploy` için hazırsınız. -**Note:** As mentioned earlier, since `graph-cli 0.32.0` you can directly run `yarn deploy` with the `--network` option: +**Note:** Daha önce de belirtildiği gibi, `graph-cli 0.32.0`'dan beri `yarn deploy`'u `--network` seçeneğiyle doğrudan çalıştırabilirsiniz: ```sh # Using default networks.json file @@ -169,11 +169,11 @@ yarn deploy --network goerli yarn deploy --network goerli --network-file path/to/config ``` -### Using subgraph.yaml template +### subgraph.yaml şablonunu kullanma -One solution for older graph-cli versions that allows to parameterize aspects like contract addresses is to generate parts of it using a templating system like [Mustache](https://mustache.github.io/) or [Handlebars](https://handlebarsjs.com/). +Sözleşme adresleri gibi yönleri parametreleştirmeye izin veren eski graph-cli sürümleri için bir çözüm, [Mustache](https://mustache.github.io/) veya [Handlebars](https://handlebarsjs.com/) gibi bir şablonlama sistemi kullanarak bunun parçalarını oluşturmaktır. -To illustrate this approach, let's assume a subgraph should be deployed to mainnet and Goerli using different contract addresses. You could then define two config files providing the addresses for each network: +Bu yaklaşımı göstermek için, farklı sözleşme adresleri kullanılarak mainnet ve Goerli'ye bir subgraph'in deploy edilmesi gerektiğini varsayalım. 
Daha sonra, her ağ için adresleri sağlayan iki yapılandırma dosyası tanımlayabilirsiniz: ```json { @@ -182,7 +182,7 @@ To illustrate this approach, let's assume a subgraph should be deployed to mainn } ``` -and +ve ```json { @@ -191,7 +191,7 @@ and } ``` -Along with that, you would substitute the network name and addresses in the manifest with variable placeholders `{{network}}` and `{{address}}` and rename the manifest to e.g. `subgraph.template.yaml`: +Bununla birlikte, bildirimdeki ağ adını ve adreslerini `{{network}}` ve `{{address}}` değişken yer tutucularıyla değiştirir ve ve bildirimi örnek `subgraph.template.yaml` olarak yeniden adlandırın: ```yaml # ... @@ -208,7 +208,7 @@ dataSources: kind: ethereum/events ``` -In order to generate a manifest to either network, you could add two additional commands to `package.json` along with a dependency on `mustache`: +Her iki ağa da bildirim oluşturmak için `package.json` dosyasına, `mustache` bağımlılığıyla birlikte iki ek komut ekleyebilirsiniz: ```json { @@ -225,7 +225,7 @@ In order to generate a manifest to either network, you could add two additional } ``` -To deploy this subgraph for mainnet or Goerli you would now simply run one of the two following commands: +Bu subgraph'i mainnet veya Goerli için deploy etmek için şimdi aşağıdaki iki komuttan birini çalıştırmanız yeterlidir: ```sh # Mainnet: @@ -235,15 +235,15 @@ yarn prepare:mainnet && yarn deploy yarn prepare:goerli && yarn deploy ``` -A working example of this can be found [here](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759). +Bunun çalışan bir örneğini [burada](https://github.com/graphprotocol/example-subgraph/tree/371232cf68e6d814facf5e5413ad0fef65144759) bulabilirsiniz. -**Note:** This approach can also be applied to more complex situations, where it is necessary to substitute more than contract addresses and network names or where generating mappings or ABIs from templates as well. +**Not:** Bu yaklaşım, sözleşme adresleri ve ağ adlarından daha fazlasını değiştirmenin gerekli olduğu veya şablonlardan eşlemeler veya ABI'ler üretildiği daha karmaşık durumlara da uygulanabilir. -## Checking subgraph health +## Subgraph Sağlığını Kontrol Etme -If a subgraph syncs successfully, that is a good sign that it will continue to run well forever. However, new triggers on the network might cause your subgraph to hit an untested error condition or it may start to fall behind due to performance issues or issues with the node operators. +Bir subgraph başarılı bir şekilde eşitlenirse, bu, sonsuza kadar iyi çalışmaya devam edeceğine dair iyi bir işarettir. Bununla birlikte, ağdaki yeni tetikleyiciler, subgraph'inizin test edilmemiş bir hata durumuna düşmesine neden olabilir veya performans sorunları veya node operatörleriyle ilgili sorunlar nedeniyle geride kalmaya başlayabilir. -Graph Node exposes a graphql endpoint which you can query to check the status of your subgraph. On the Hosted Service, it is available at `https://api.thegraph.com/index-node/graphql`. On a local node, it is available on port `8030/graphql` by default. The full schema for this endpoint can be found [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). Here is an example query that checks the status of the current version of a subgraph: +Graph node'u, subgraph'inizin durumunu kontrol etmek için sorgulayabileceğiniz bir graphql bitiş noktası sunar. 
Barındırılan hizmette, `https://api.thegraph.com/index-node/graphql`'de kullanılabilir. Yerel bir node, varsayılan olarak `8030/graphql` bağlantı noktasında bulunur. Bu uç nokta için tam şema [burada](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql)'de bulunabilir. Aşağıda, bir subgraph'in geçerli sürümünün durumunu kontrol eden örnek bir sorgu verilmiştir: ```graphql { @@ -270,22 +270,22 @@ Graph Node exposes a graphql endpoint which you can query to check the status of } ``` -This will give you the `chainHeadBlock` which you can compare with the `latestBlock` on your subgraph to check if it is running behind. `synced` informs if the subgraph has ever caught up to the chain. `health` can currently take the values of `healthy` if no errors occurred, or `failed` if there was an error which halted the progress of the subgraph. In this case, you can check the `fatalError` field for details on this error. +Bu size, geride olup olmadığını kontrol etmek için subgraph'inizdeki `latestBlock` ile karşılaştırabileceğiniz `chainHeadBlock`'u verecektir. `synced`, subgraph'in zincire yetişip yetişmediğini bildirir. `health`, şu anda hiçbir hata oluşmazsa `healthy`'nin veya subgraph'in ilerlemesini durduran bir hata varsa `failed`'ın değerlerini alabilir. Bu durumda, bu hatayla ilgili ayrıntılar için `fatalError` alanını kontrol edebilirsiniz. -## Hosted service subgraph archive policy +## Barındırılan Hizmet Subgraph Arşiv Politikası -The Hosted Service is a free Graph Node Indexer. Developers can deploy subgraphs indexing a range of networks, which will be indexed, and made available to query via graphQL. +Barındırılan hizmet, ücretsiz bir Graph Node İndeksleyicisi'dir. Geliştiriciler, indekslenecek ve graphQL aracılığıyla sorgulanmak üzere hazır hale getirilecek bir dizi ağı indeksleyen subgraph'leri deploy edebilir. -To improve the performance of the service for active subgraphs, the Hosted Service will archive subgraphs that are inactive. +Etkin subgraph'ler için hizmetin performansını iyileştirmek amacıyla barındırılan hizmet, etkin olmayan subgraph'leri arşivleyecektir. -**A subgraph is defined as "inactive" if it was deployed to the Hosted Service more than 45 days ago, and if it has received 0 queries in the last 45 days.** +**Bir subgraph, barındırılan hizmete 45 günden daha uzun bir süre önce deploy edildiyse ve son 45 gün içinde 0 sorgu aldıysa "etkin değil" olarak tanımlanır.** -Developers will be notified by email if one of their subgraphs has been marked as inactive 7 days before it is removed. If they wish to "activate" their subgraph, they can do so by making a query in their subgraph's Hosted Service graphQL playground. Developers can always redeploy an archived subgraph if it is required again. +Subgraph'lerinden biri kaldırılmadan 7 gün önce etkin değil olarak işaretlendiyse, geliştiriciler e-posta ile bilgilendirilecektir. Subgraph'ini "etkinleştirmek" isterlerse, bunu subgraph'inin barındırılan hizmet graphQL oyun alanında bir sorgu yaparak yapabilirler. Geliştiriciler, tekrar gerekirse arşivlenmiş bir subgraph'i her zaman yeniden deploy edilebilir. -## Subgraph Studio subgraph archive policy +## Subgraph Studio Subgraph Arşiv Politikası -When a new version of a subgraph is deployed, the previous version is archived (deleted from the graph-node DB). This only happens if the previous version is not published to The Graph's decentralized network. 
+Bir subgraph'in yeni bir sürümü deploy edildiğinde, önceki sürüm arşivlenir (graph node'u DB'inden silinir). Bu, yalnızca önceki sürüm Graph'ın merkeziyetsiz ağında yayınlanmadığında gerçekleşir. -When a subgraph version isn’t queried for over 45 days, that version is archived. +Bir subgraph versiyonu 45 günden fazla sorgulanmadığında, o versiyon arşivlenir. -Every subgraph affected with this policy has an option to bring the version in question back. +Bu politikadan etkilenen her subgraph'in, söz konusu sürümü geri getirme seçeneği vardır. diff --git a/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..f0d76c22315a 100644 --- a/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/tr/deploying/deploying-a-subgraph-to-studio.mdx @@ -1,70 +1,70 @@ --- -title: Deploying a Subgraph to the Subgraph Studio +title: Subgraph Stüdyo'ya bir Subgraph Deploy Etme --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Hız sınırlaması olmayan subgraph'ları Subgraph Stüdyo'ya nasıl dağıtabileceğinizi [buradan](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294) öğrenin. -These are the steps to deploy your subgraph to the Subgraph Studio: +Subgraph'inizi Subgraph Stüdyo'ya deploy etme adımları şunlardır: -- Install The Graph CLI (with either yarn or npm) -- Create your Subgraph in the Subgraph Studio -- Authenticate your account from the CLI -- Deploying a Subgraph to the Subgraph Studio +- Graph CLI'ı Kurun (yarn veya npm ile) +- Subgraph'inizi Subgraph Stüdyo'da oluşturun +- CLI'den hesabınızın kimliğini doğrulayın +- Subgraph Stüdyo'ya bir subgraph deploy edin -## Installing Graph CLI +## Graph CLI'ı Yükleme -We are using the same CLI to deploy subgraphs to our [hosted service](https://thegraph.com/hosted-service/) and to the [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install graph-cli. This can be done using npm or yarn. +Subgraph'leri [barındırılan hizmet](https://thegraph.com/hosted-service/)'imize ve [Subgraph Stüdyo](https://thegraph.com/studio/)'ya deploy etmek için aynı CLI'yi kullanıyoruz. İşte graph-cli'ı kurma komutları. Bu, npm veya yarn kullanılarak yapılabilir. -**Install with yarn:** +**yarn ile yükleme:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**npm ile yükleme:** ```bash npm install -g @graphprotocol/graph-cli ``` -## Create your Subgraph in Subgraph Studio +## Subgraph'inizi Subgraph Stüdyo'da Oluşturun -Before deploying your actual subgraph you need to create a subgraph in [Subgraph Studio](https://thegraph.com/studio/). We recommend you read our [Studio documentation](/deploying/subgraph-studio) to learn more about this. +Gerçek subgraph'inizi deploy etmeden önce, [Subgraph Stüdyo](https://thegraph.com/studio/)'da bir subgraph oluşturmanız gerekir. Bu konuda daha fazla bilgi edinmek için [Studio dökümanları](/deploying/subgraph-studio) başlıklı yazımızı okumanızı öneririz. 
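Devam etmeden önce, Graph CLI kurulumunuzun kullanılabilir olduğunu hızlıca doğrulamak isteyebilirsiniz; yazdırılan sürüm numarası kurulumunuza göre değişecektir:

```sh
# Prints the installed graph-cli version to confirm the CLI is on your PATH
graph --version
```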
-## Initialize your Subgraph +## Subgraph'inizi Başlatın -Once your subgraph has been created in Subgraph Studio you can initialize the subgraph code using this command: +Subgraph'iniz Subgraph Stüdyo'da oluşturulduktan sonra, bu komutu kullanarak subgraph kodunu başlatabilirsiniz: ```bash graph init --studio ``` -The `` value can be found on your subgraph details page in Subgraph Studio: +`` değeri, Subgraph Stüdyo'daki subgraph ayrıntıları sayfanızda bulunabilir: -![Subgraph Studio - Slug](/img/doc-subgraph-slug.png) +![Subgraph Stüdyo - Slug](/img/doc-subgraph-slug.png) -After running `graph init`, you will be asked to input the contract address, network, and ABI that you want to query. Doing this will generate a new folder on your local machine with some basic code to start working on your subgraph. You can then finalize your subgraph to make sure it works as expected. +`graph init`'i çalıştırdıktan sonra, sorgulamak istediğiniz sözleşme adresini, ağı ve ABI'ı girmeniz istenecektir. Bunu yapmak, subgraph'iniz üzerinde çalışmaya başlamak için bazı temel kodlarla birlikte yerel makinenizde yeni bir klasör oluşturacaktır. Ardından, beklendiği gibi çalıştığından emin olmak için subgraph'inizi sonlandırabilirsiniz. -## Graph Auth +## Graph Kimlik Doğrulaması -Before being able to deploy your subgraph to Subgraph Studio, you need to login into your account within the CLI. To do this, you will need your deploy key that you can find on your "My Subgraphs" page or your subgraph details page. +Subgraph'inizi Subgraph Stüdyo'ya deploy etmeden önce, CLI'da hesabınıza giriş yapmanız gerekir. Bunu yapmak için, "Subgraph'lerim" sayfanızda veya subgraph ayrıntıları sayfanızda bulabileceğiniz deploy anahtarınıza ihtiyacınız olacak. -Here is the command that you need to use to authenticate from the CLI: +CLI'dan kimlik doğrulaması yapmak için kullanmanız gereken komut şu şekildedir: ```bash graph auth --studio ``` -## Deploying a Subgraph to Subgraph Studio +## Subgraph Stüdyo'ya Subgraph Deploy Etme -Once you are ready, you can deploy your subgraph to Subgraph Studio. Doing this won't publish your subgraph to the decentralized network, it will only deploy it to your Studio account where you will be able to test it and update the metadata. +Hazır olduğunuzda, subgraph'inizi Subgraph Stüdyo'ya deploy edebilirsiniz. Bunu yapmak, subgraph'inizi merkeziyetsiz ağda yayınlamaz, yalnızca onu test edebileceğiniz ve meta verileri güncelleyebileceğiniz Stüdyo hesabınıza deploy eder. -Here is the CLI command that you need to use to deploy your subgraph. +İşte subgraph'inizi deploy etmek için kullanmanız gereken CLI komutu. ```bash graph deploy --studio ``` -After running this command, the CLI will ask for a version label, you can name it however you want, you can use labels such as `0.1` and `0.2` or use letters as well such as `uniswap-v2-0.1`. Those labels will be visible in Graph Explorer and can be used by curators to decide if they want to signal on this version or not, so choose them wisely. +Bu komutu çalıştırdıktan sonra CLI bir sürüm etiketi isteyecektir; etiketi istediğiniz gibi adlandırabilir, `0.1` ve `0.2` gibi etiketler kullanabilir veya `uniswap-v2-0.1` gibi harfler de kullanabilirsiniz. Bu etiketler Graph Gezgini'nde görünür olacak ve küratörler tarafından bu sürümde sinyal verip vermeyeceklerine karar vermek için kullanılabilecektir; bu yüzden etiketlerinizi akıllıca seçin.
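Etkileşimli istemde etiket girmek yerine, bazı graph-cli sürümlerinde bulunan `--version-label` seçeneğiyle etiketi doğrudan da belirtebilirsiniz. Aşağıdaki komut temsilidir; `my-subgraph` varsayımsal bir slug'dır ve bayrağın varlığı CLI sürümünüze bağlıdır:

```sh
# Hypothetical slug; --version-label availability depends on your graph-cli version
graph deploy --studio my-subgraph --version-label v0.0.1
```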
-Once deployed, you can test your subgraph in Subgraph Studio using the playground, deploy another version if needed, update the metadata, and when you are ready, publish your subgraph to Graph Explorer. +Deploy edildikten sonra subgraph'inizi playground'ı kullanarak Subgraph Stüdyo'da test edebilir, gerekirse başka bir sürümü deploy edilebilir, meta verileri güncelleyebilir ve hazır olduğunuzda subgraph'inizi Graph Gezgini'nde yayınlayabilirsiniz. diff --git a/website/pages/tr/deploying/hosted-service.mdx b/website/pages/tr/deploying/hosted-service.mdx index 2e6093531110..017f73f29030 100644 --- a/website/pages/tr/deploying/hosted-service.mdx +++ b/website/pages/tr/deploying/hosted-service.mdx @@ -1,24 +1,24 @@ --- -title: What is the Hosted Service? +title: Barındırılan Hizmet Nedir? --- -> Please note, the hosted service will begin sunsetting in 2023, but it will remain available to networks that are not supported on the decentralized network. Developers are encouraged to [upgrade their subgraphs to The Graph Network](/cookbook/upgrading-a-subgraph) as more networks are supported. Each network will have their hosted service equivalents gradually sunset to ensure developers have enough time to upgrade subgraphs to the decentralized network. Read more about the sunsetting of the hosted service [here](https://thegraph.com/blog/sunsetting-hosted-service). +> Barındırılan hizmetin 2023 yılı içerisinde kullanımdan kalkacağını, ancak merkeziyetsiz ağda desteklenmeyen ağlar için kullanılabilir kalacağını lütfen unutmayın. Daha fazla ağ desteklendikçe geliştiricilerin [subgraphlar'ını The Graph Ağı'na yükseltmeleri](/cookbook/upgrading-a-subgraph) teşvik edilmektedir. Geliştiricilerin subgraphları merkeziyetsiz ağa yükseltmek için yeterli zamana sahip olmalarını sağlamak adına her ağın barındırılan hizmet eşdeğerleri kademeli olarak sonlandırılacaktır. Barındırılan hizmetin kullanımdan kaldırılması hakkında daha fazla bilgiyi [buradan](https://thegraph.com/blog/sunsetting-hosted-service) edinebilirsiniz. -This section will walk you through deploying a subgraph to the [hosted service](https://thegraph.com/hosted-service/). +Bu bölüm, [barındırılan hizmet](https://thegraph.com/hosted-service/)'e bir subgraph dağıtma konusunda size yol gösterecektir. -If you don't have an account on the hosted service, you can sign up with your GitHub account. Once you authenticate, you can start creating subgraphs through the UI and deploying them from your terminal. The hosted service supports a number of networks, such as Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum, and more. +Barındırılan hizmette bir hesabınız yoksa, GitHub hesabınızla kaydolabilirsiniz. Kimliğinizi doğruladıktan sonra, kullanıcı arayüzü aracılığıyla subgraphlar oluşturmaya ve bunları terminalinizden dağıtmaya başlayabilirsiniz. Barındırılan hizmet Polygon, Gnosis Chain, BNB Chain, Optimism, Arbitrum ve daha fazlası gibi bir dizi ağı desteklemektedir. -For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). +Kapsamlı bir liste için [Desteklenen ağlar](/developing/supported-networks/#hosted-service)'a bir göz atın. -## Create a Subgraph +## Subgraph Oluştur -First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` +Graph CLI'yi yüklemek için önce [buradaki](/developing/defining-a-subgraph) talimatları izleyin. 
`graph init --product hosted-service` ile geçerek bir subgraph oluşturun -### From an Existing Contract +### Mevcut Bir Sözleşmeden -If you already have a smart contract deployed to your network of choice, bootstrapping a new subgraph from this contract can be a good way to get started on the hosted service. +Tercih ettiğiniz ağa halihazırda dağıtılmış bir akıllı sözleşmeniz varsa, bu sözleşmeden yeni bir subgraph'ı önyüklemek, barındırılan hizmete başlamak için iyi bir yol olabilir. -You can use this command to create a subgraph that indexes all events from an existing contract. This will attempt to fetch the contract ABI from [Etherscan](https://etherscan.io/). +Mevcut bir sözleşmedeki tüm olayları indeksleyen bir subgraph oluşturmak için bu komutu kullanabilirsiniz. Bu, sözleşme ABI'sini [Etherscan](https://etherscan.io/)'den almaya çalışacak. ```sh graph init \ @@ -27,25 +27,36 @@ graph init \ / [] ``` -Additionally, you can use the following optional arguments. If the ABI cannot be fetched from Etherscan, it falls back to requesting a local file path. If any optional arguments are missing from the command, it takes you through an interactive form. +Ek olarak, aşağıdaki isteğe bağlı bağımsız değişkenleri kullanabilirsiniz. ABI, Etherscan'den getirilemezse, yerel bir dosya yolu istemeye geri döner. Komutta herhangi bir isteğe bağlı argüman eksikse, sizi etkileşimli bir forma götürür. ```sh --network \ --abi \ ``` -The `` in this case is your GitHub user or organization name, `` is the name for your subgraph, and `` is the optional name of the directory where `graph init` will put the example subgraph manifest. The `` is the address of your existing contract. `` is the name of the network that the contract lives on. `` is a local path to a contract ABI file. **Both `--network` and `--abi` are optional.** +Bu durumda ``, GitHub kullanıcı veya kuruluş adınızdır, `` subgraph'inizin adıdır ve ``, `graph init`'in örnek subgraph bildirimini koyacağı dizinin isteğe bağlı adıdır. ``, mevcut sözleşmenizin adresidir. ``, sözleşmenin üzerinde yaşadığı ağın adıdır. ``, bir sözleşme ABI dosyasına giden yerel bir yoldur. **`--network` ve `--abi` ikisi de isteğe bağlıdır.** -### From an Example Subgraph +### Örnek Bir Subgraph'ten -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: +`graph init`'in desteklediği ikinci mod, örnek bir subgraph'ten yeni bir proje yaratmaktır. Aşağıdaki komut bunu yapar: ``` graph init --from-example --product hosted-service / [] ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +Örnek subgraph, kullanıcı avatarlarını yöneten ve avatarlar oluşturulduğunda veya güncellendiğinde `NewGravatar` veya `UpdateGravatar` etkinliklerini yayınlayan Dani Grant'in Gravity sözleşmesine dayanmaktadır. Subgraph, bu olayları `Gravatar` varlıklarını Graph Node depolama alanına yazarak ve bunların olaylara göre güncellenmesini sağlayarak işler. Aşağıdaki bölümlerde, bu örnek için subgraph manifest'ini oluşturan dosyaların üzerinden geçilecektir. 
Akıllı sözleşmelerinizden hangi olaylara, eşlemelere ve daha fazlasına dikkat etmeniz gerektiğini daha iyi anlamak için [subgraph manifestosu](/developing/creating-a-subgraph#the-subgraph-manifest) ile devam edin. -## Supported Networks on the hosted service +### Bir Proxy Sözleşmesinden -You can find the list of the supported networks [Here](/developing/supported-networks). +Bir Proxy sözleşmesini izlemek üzere uyarlanmış bir subgraph oluşturmak için, uygulama sözleşmesinin adresini belirterek subgraph'ı başlatın. Başlatma işlemi tamamlandıktan sonra, son adım subgraph.yaml dosyasındaki ağ adının Proxy sözleşmesinin adresine güncellenmesini kapsar. Aşağıdaki komutu kullanabilirsiniz. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + +## Barındırılan Hizmette Desteklenen Ağlar + +Desteklenen ağların listesini [burada](/developing/supported-networks) bulabilirsiniz. diff --git a/website/pages/tr/deploying/subgraph-studio-faqs.mdx b/website/pages/tr/deploying/subgraph-studio-faqs.mdx index 65217d4b7741..50e5ce2fc28f 100644 --- a/website/pages/tr/deploying/subgraph-studio-faqs.mdx +++ b/website/pages/tr/deploying/subgraph-studio-faqs.mdx @@ -1,31 +1,31 @@ --- -title: Subgraph Studio FAQs +title: Subgraph Stüdyo SSS --- -## 1. What is Subgraph Studio? +## 1. Subgraph Stüdyo Nedir? -[Subgraph Studio](https://thegraph.com/studio/) is a dapp for creating, managing, and publishing subgraphs and API keys. +[Subgraph Stüdyo](https://thegraph.com/studio/), subgraph'ler ve API anahtarları oluşturmak, yönetmek ve yayınlamak için kullanılan merkeziyetsiz bir uygulamadır. -## 2. How do I create an API Key? +## 2. API Anahtarını Nasıl Oluşturabilirim? -To create an API, navigate to the Subgraph Studio and connect your wallet. You will be able to click the API keys tab at the top. There, you will be able to create an API key. +Bir API oluşturmak için Subgraph Stüdyo'ya gidin ve cüzdanınızı bağlayın. Üst kısımdaki API anahtarları sekmesine tıklayabileceksiniz. Orada bir API anahtarı oluşturabileceksiniz. -## 3. Can I create multiple API Keys? +## 3. Birden Çok API Anahtarı Oluşturabilir miyim? -Yes! You can create multiple API Keys to use in different projects. Check out the link [here](https://thegraph.com/studio/apikeys/). +Evet! Farklı projelerde kullanmak için birden çok API anahtarı oluşturabilirsiniz. [buradaki](https://thegraph.com/studio/apikeys/) bağlantıya göz atın. -## 4. How do I restrict a domain for an API Key? +## 4. API Anahtarı için Domain'i Nasıl Kısıtlarım? -After creating an API Key, in the Security section, you can define the domains that can query a specific API Key. +API anahtarı oluşturduktan sonra Güvenlik bölümünde belirli bir API anahtarını sorgulayabilecek domain'leri tanımlayabilirsiniz. -## 5. Can I transfer my subgraph to another owner? +## 5. Subgraph'ımı Başka Birine Devredebilir miyim? -Yes, subgraphs that have been published to Mainnet can be transferred to a new wallet or a Multisig. You can do so by clicking the three dots next to the 'Publish' button on the subgraph's details page and selecting 'Transfer ownership'. +Evet, ana ağda yayınlanan subgraph'ler yeni bir cüzdana veya bir Multisig'e aktarılabilir. Bunu, subgraph'in ayrıntılar sayfasındaki 'Yayınla' düğmesinin yanındaki üç noktayı tıklayıp 'Sahipliği aktar'ı seçerek yapabilirsiniz. -Note that you will no longer be able to see or edit the subgraph in Studio once it has been transferred. 
+Aktarıldıktan sonra subgraph'i artık Stüdyo'da göremeyeceğinizi veya düzenleyemeyeceğinizi unutmayın. -## 6. How do I find query URLs for subgraphs if I’m not the developer of the subgraph I want to use? +## 6. Kullanmak İstediğim Subgraph'ın Geliştiricisi Değilsem, bu Subgraphlar için Sorgu URL'lerini Nasıl Bulabilirim? -You can find the query URL of each subgraph in the Subgraph Details section of The Graph Explorer. When you click on the “Query” button, you will be directed to a pane wherein you can view the query URL of the subgraph you’re interested in. You can then replace the `` placeholder with the API key you wish to leverage in the Subgraph Studio. +Her subgraph'in sorgu URL'sini Graph Gezgini'nin Subgraph Ayrıntıları bölümünde bulabilirsiniz. "Sorgula" düğmesine tıkladığınızda, ilgilendiğiniz subgrpah'in sorgu URL'sini görüntüleyebileceğiniz bir bölmeye yönlendirileceksiniz. Ardından `` yer tutucusunu Subgraph Stüdyo'da kullanmak istediğiniz API anahtarıyla değiştirebilirsiniz. -Remember that you can create an API key and query any subgraph published to the network, even if you build a subgraph yourself. These queries via the new API key, are paid queries as any other on the network. +Kendiniz bir subgraph oluştursanız bile, bir API anahtarı oluşturabileceğinizi ve ağda yayınlanan herhangi bir subgraph'i sorgulayabileceğinizi unutmayın. Yeni API anahtarı aracılığıyla yapılan bu sorgular, ağdaki diğerleri gibi ücretli sorgulardır. diff --git a/website/pages/tr/deploying/subgraph-studio.mdx b/website/pages/tr/deploying/subgraph-studio.mdx index 1406065463d4..f761ac447878 100644 --- a/website/pages/tr/deploying/subgraph-studio.mdx +++ b/website/pages/tr/deploying/subgraph-studio.mdx @@ -1,95 +1,89 @@ --- -title: How to Use the Subgraph Studio +title: Subgraph Stüdyo Nasıl Kullanılır --- -Welcome to your new launchpad 👩🏽‍🚀 +Yeni başlatma panelinize hoş geldiniz 👩🏽‍🚀 -The Subgraph Studio is your place to build and create subgraphs, add metadata, and publish them to the new decentralized Explorer (more on that [here](/network/explorer)). +Subgraph Stüdyo, subgraph'ler oluşturup inşa edebileceğiniz, meta veriler ekleyeceğiniz ve bunları yeni merkeziyetsiz explorer'da yayınlayacağınız yerdir (bu konuda daha fazla bilgi için [buraya](/network/explorer) göz atabilirsiniz). -What you can do in the Subgraph Studio: +Subgraph Stüdyo'da yapabilecekleriniz: -- Create a subgraph through the Studio UI -- Deploy a subgraph using the CLI -- Publish a subgraph with the Studio UI -- Test it in the playground -- Integrate it in staging using the query URL -- Create and manage your API keys for specific subgraphs +- Stüdyo kullanıcı arabirimi aracılığıyla bir subgraph oluşturun +- CLI kullanarak bir subgraph deploy edin +- Stüdyo kullanıcı arayüzü ile bir subgraph yayınlayın +- Oyun alanında test edin +- Sorgu URL'sini kullanarak hazırlamaya entegre edin +- Belirli subgraph'ler için API anahtarlarınızı oluşturun ve yönetin -Here in the Subgraph Studio, you have full control over your subgraphs. Not only can you test your subgraphs before you publish them, but you can also restrict your API keys to specific domains and only allow certain Indexers to query from their API keys. +Burada, subgraph stüdyo'da, subgraph'leriniz üzerinde tam kontrole sahipsiniz. Subgraph'lerinizi yayınlamadan önce test etmenin yanı sıra API anahtarlarınızı belirli domainler ile sınırlayabilir ve yalnızca belirli indeksleyicilerin API anahtarlarından sorgulama yapmasına izin verebilirsiniz. 
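+Örneğin, bir subgraph'in sorgu URL'sini ve bir API anahtarını aldıktan sonra, o subgraph'e aşağıdakine benzer bir GraphQL sorgusu gönderebilirsiniz. Buradaki `gravatars` alanı yalnızca fikir vermesi için kullanılan varsayımsal bir taslaktır; kendi subgraph'inizin sorgu URL'sini ve şemasındaki alanları kullanmanız gerekir.
+
+```graphql
+# Sorgu URL'sine (API anahtarınızı içeren) gönderilebilecek örnek bir sorgu
+{
+  gravatars(first: 1) {
+    id
+  }
+}
+```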
-Querying subgraphs generates query fees, used to reward [Indexers](/network/indexing) on the Graph network. If you’re a dapp developer or subgraph developer, the Studio will empower you to build better subgraphs to power your or your community’s queries. The Studio is comprised of 5 main parts: +Subgraph'leri sorgulamak, Graph ağındaki [indeksleyiciler](/network/indexing)'i ödüllendirmek için kullanılan sorgu ücretleri oluşturur. Bir merkeziyetsiz uygulama geliştiricisi veya subgraph geliştiricisiyseniz, Stüdyo sizin veya topluluğunuzun sorgularını güçlendirmek için daha iyi subgraph'ler oluşturmanıza yardımcı olacaktır. Stüdyo 5 ana bölümden oluşmaktadır: -- Your user account controls -- A list of subgraphs that you’ve created -- A section to manage, view details and visualize the status of a specific subgraph -- A section to manage your API keys that you will need to query a subgraph -- A section to manage your billing +- Kullanıcı hesabı kontrolleriniz +- Oluşturduğunuz subgraph'lerin listesi +- Belirli bir subgraph'in yönetilmesi, ayrıntılarının görüntülenmesi ve durumunun görselleştirilmesi için bir bölüm +- Bir subgraph'i sorgulamak için ihtiyaç duyacağınız API anahtarlarınızı yönetmek için bir bölüm +- Faturalandırmanızı yönetmek için bir bölüm -## How to Create Your Account +## Hesabınızı Nasıl Oluşturursunuz -1. Sign in with your wallet - you can do this via MetaMask or WalletConnect -1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. +1. Cüzdanınızla oturum açın - bunu MetaMask veya WalletConnect aracılığıyla yapabilirsiniz +1. Oturum açtıktan sonra, benzersiz deploy anahtarınızı hesabınızın ana sayfasında göreceksiniz. Bu, subgraph'lerinizi yayınlamanıza veya API anahtarlarınızı + faturalandırmanızı yönetmenize olanak tanır. Güvenliğinin ihlal edildiğini düşünüyorsanız, yeniden oluşturulabilecek benzersiz bir deploy anahtarınız olacaktır. -## How to Create your Subgraph in Subgraph Studio +## Subgraph Stüdyo'da Subgraph Nasıl Oluşturulur -The best part! When you first create a subgraph, you’ll be directed to fill out: + -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website +## Graph Ağı ile Subgraph Uyumluluğu -## Subgraph Compatibility with The Graph Network +Graph Ağı, barındırılan hizmette bulunan tüm veri kaynaklarını & özellikleri henüz destekleyememektedir. Ağdaki indeksleyiciler tarafından desteklenebilmek için subgraph'ler: -The Graph Network is not yet able to support all of the data-sources & features available on the Hosted Service. In order to be supported by Indexers on the network, subgraphs must: - -- Index a [supported network](/developing/supported-networks) -- Must not use any of the following features: +- [Desteklenen ağ](/developing/supported-networks) dizini +- Aşağıdaki özelliklerden herhangi birini kullanmamalısınız: - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting + - Hayati olmayan hatalar + - Graftlama -More features & networks will be added to The Graph Network incrementally. +Graph Network'e kademeli olarak daha fazla özellik & ağ eklenecektir. 
-### Subgraph lifecycle flow +### Subgraph yaşam döngüsü akışı -![Subgraph Lifecycle](/img/subgraph-lifecycle.png) +![Subgraph Yaşam Döngüsü](/img/subgraph-lifecycle.png) -After you have created your subgraph, you will be able to deploy it using the [CLI](https://github.com/graphprotocol/graph-cli), or command-line interface. Deploying a subgraph with the CLI will push the subgraph to the Studio where you’ll be able to test subgraphs using the playground. This will eventually allow you to publish to the Graph Network. For more information on CLI setup, [check this out](/developing/defining-a-subgraph#install-the-graph-cli) (pst, make sure you have your deploy key on hand). Remember, deploying is **not the same as** publishing. When you deploy a subgraph, you just push it to the Studio where you’re able to test it. Versus, when you publish a subgraph, you are publishing it on-chain. +Subgraph'inizi oluşturduktan sonra, onu [CLI](https://github.com/graphprotocol/graph-cli) veya komut satırı arabirimini kullanarak deploy edebileceksiniz. CLI ile bir subgraph'i deploy etmek, subgraph'i, playground'ı kullanarak subgraph'leri test edebileceğiniz Stüdyo'ya gönderir. Bu, sonunda Graph ağında yayınlamanıza izin verecektir. CLI kurulumu hakkında daha fazla bilgi için [bu kısmı kontrol edin](/developing/defining-a-subgraph#install-the-graph-cli) (Bu arada, deploy anahtarınızın elinizde olduğundan emin olun). Unutmayın, deploy etmek, yayınlamak ile **aynı şey** değildir. Bir subgraph'i deploy ettiğinizde, onu test edebileceğiniz Stüdyo'ya göndermeniz yeterlidir. Buna karşılık, bir subgraph yayınladığınızda, onu zincir üzerinde yayınlamış olursunuz. -## Testing your Subgraph in Subgraph Studio +## Subgraph'inizi Subgraph Stüdyo'da Test Etme -If you’d like to test your subgraph before publishing it to the network, you can do this in the Subgraph **Playground** or look at your logs. The Subgraph logs will tell you **where** your subgraph fails in the case that it does. +Subgraph'inizi ağda yayınlamadan önce test etmek isterseniz, bunu Subgraph **Playground** üzerinde yapabilir veya günlüklerinize bakabilirsiniz. Subgraph günlükleri, başarısız olması durumunda subgraph'inizin **nerede** başarısız olduğunu size söyleyecektir. -## Publish your Subgraph in Subgraph Studio +## Subgraph'inizi Subgraph Stüdyo'da Yayınlayın -You’ve made it this far - congrats! +Buraya kadar geldiniz - tebrikler! -In order to publish your subgraph successfully, you’ll need to go through the following steps outlined in this [blog](https://thegraph.com/blog/building-with-subgraph-studio). +Subgraph'inizi başarılı bir şekilde yayınlamak için, bu [blog](https://thegraph.com/blog/building-with-subgraph-studio)'ta özetlenen aşağıdaki adımları uygulamanız gerekir. -Check out the video overview below as well: +Aşağıdaki video genel bakışına da göz atın: -Remember, while you’re going through your publishing flow, you’ll be able to push to either mainnet or Goerli. If you’re a first-time subgraph developer, we highly suggest you start with publishing to Goerli, which is free to do. This will allow you to see how the subgraph will work in The Graph Explorer and will allow you to test curation elements. +Yayın akışınızı gerçekleştirirken ana ağa veya Goerli'ye gönderebileceğinizi unutmayın. İlk kez bir subgraph geliştiricisiyseniz, ücretsiz olarak Goerli'de yayınlayarak başlamanızı önemle tavsiye ederiz. Bu, subgraph'in Graph Gezgini'nde nasıl çalışacağını görmenize ve kürasyon öğelerini test etmenize olanak tanır.
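+Deploy ettikten sonra playground'da, senkronizasyonun ilerlediğini ve indeksleme hatası olup olmadığını hızlıca görmek için örneğin `_meta` alanını sorgulayabilirsiniz; aşağıdaki sorgu yalnızca küçük bir taslaktır:
+
+```graphql
+# Playground'da senkronizasyon durumunu kontrol etmek için örnek bir sorgu
+{
+  _meta {
+    block {
+      number
+    }
+    hasIndexingErrors
+  }
+}
+```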
-Indexers need to submit mandatory Proof of Indexing records as of a specific block hash. Because publishing a subgraph is an action taken on-chain, remember that the transaction can take up to a few minutes to go through. Any address you use to publish the contract will be the only one able to publish future versions. Choose wisely! +İndeksleyicilerin, belirli bir blok karmasından itibaren zorunlu İndeksleme Kanıtı kayıtları sunması gerekir. Bir subgraph'i yayınlamak, zincir üzerinde gerçekleştirilen bir eylem olduğundan, işlemin tamamlanmasının birkaç dakika sürebileceğini unutmayın. Sözleşmeyi yayınlamak için kullandığınız herhangi bir adres, gelecekteki sürümleri yayınlayabilecek tek adres olacaktır. Akıllıca seçim yapın! -Subgraphs with curation signal are shown to Indexers so that they can be indexed on the decentralized network. You can publish subgraphs and signal in one transaction, which allows you to mint the first curation signal on the subgraph and saves on gas costs. By adding your signal to the signal later provided by Curators, your subgraph will also have a higher chance of ultimately serving queries. +Kürasyon sinyaline sahip subgraph'ler, merkeziyetsiz ağda indekslenebilmeleri için indeksleyicilere gösterilir. Subgraph'leri ve sinyali tek bir işlemde yayınlayabilirsiniz, bu da subgraph'teki ilk kürasyon sinyalini basmanıza olanak tanır ve gaz maliyetlerinden tasarruf sağlar. Sinyalinizi daha sonra küratörler tarafından sağlanan sinyale ekleyerek, subgraph'inizin nihai olarak sorguları sunma şansı da artacaktır. -**Now that you’ve published your subgraph, let’s get into how you’ll manage them on a regular basis.** Note that you cannot publish your subgraph to the network if it has failed syncing. This is usually because the subgraph has bugs - the logs will tell you where those issues exist! +**Subgraph'inizi yayınladığınıza göre, bunları düzenli olarak nasıl yöneteceğinize geçelim.** Senkronizasyon başarısız olursa, subgraph'inizi ağda yayınlayamayacağınızı unutmayın. Bunun nedeni genellikle subgraph'te hatalar olmasıdır - günlükler size bu sorunların nerede olduğunu söyleyecektir! -## Versioning your Subgraph with the CLI +## CLI ile Subgraph'inizin Sürümünü Oluşturma -Developers might want to update their subgraph, for a variety of reasons. When this is the case, you can deploy a new version of your subgraph to the Studio using the CLI (it will only be private at this point) and if you are happy with it, you can publish this new deployment to The Graph Explorer. This will create a new version of your subgraph that curators can start signaling on and Indexers will be able to index this new version. +Geliştiriciler, çeşitli nedenlerle subgraph'lerini güncellemek isteyebilir. Böyle bir durumda, CLI'yi kullanarak subgraph'inizin yeni bir sürümünü Stüdyo'ya deploy edebilirsiniz (bu noktada yalnızca özel olacaktır) ve bundan memnunsanız, bu yeni deploy'ı Graph Gezgini'nde yayınlayabilirsiniz. Bu, subgraph'inizin küratörlerin sinyal vermeye başlayabileceği yeni bir sürümünü oluşturacak ve indeksleyiciler bu yeni sürümü indeksleyebilecektir. -Up until recently, developers were forced to deploy and publish a new version of their subgraph to the Explorer to update the metadata of their subgraphs. Now, developers can update the metadata of their subgraphs **without having to publish a new version**.
Developers can update their subgraph details in the Studio (under the profile picture, name, description, etc) by checking an option called **Update Details** in The Graph Explorer. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +Yakın zamana kadar geliştiriciler, subgraph'lerinin meta verilerini güncellemek için subgraph'lerinin yeni bir sürümünü deploy etmeye ve explorer'da yayınlamaya zorlandı. Artık geliştiriciler **yeni bir sürüm yayınlamak zorunda kalmadan** subgraph'lerinin meta verilerini güncelleyebilirler. Geliştiriciler, Graph gezgini'nde **ayrıntıları güncelle** adlı bir seçeneği işaretleyerek subgraph ayrıntılarını Stüdyo'da (profil resmi, ad, açıklama vb. altında) güncelleyebilirler. Bu işaretlenirse, yeni bir deploy ile yeni bir sürüm yayınlamak zorunda kalmadan explorer'daki subgraph ayrıntılarını güncelleyen bir zincir üstü işlem oluşturulur. -Please note that there are costs associated with publishing a new version of a subgraph to the network. In addition to the transaction fees, developers must also fund a part of the curation tax on the auto-migrating signal. You cannot publish a new version of your subgraph if curators have not signaled on it. For more information on the risks of curation, please read more [here](/network/curating). +Bir subgraph'in yeni bir versiyonunun ağda yayınlanmasıyla ilgili maliyetlerin olduğunu lütfen unutmayın. İşlem ücretlerine ek olarak, geliştiricilerin ayrıca otomatik geçiş sinyali üzerindeki düzenleme vergisinin bir kısmını da finanse etmesi gerekir. Küratörler üzerinde işaret vermemişlerse, subgraph'inizin yeni bir versiyonunu yayınlayamazsınız. Küratörlüğün riskleri hakkında daha fazla bilgi için lütfen daha fazlasına [buradan](/network/curating) göz atın. -### Automatic Archiving of Subgraph Versions +### Subgraph Sürümlerinin Otomatik Arşivlenmesi -Whenever you deploy a new subgraph version in the Subgraph Studio, the previous version will be archived. Archived versions won't be indexed/synced and therefore cannot be queried. You can unarchive an archived version of your subgraph in the Studio UI. Please note that previous versions of non-published subgraphs deployed to the Studio will be automatically archived. +Subgraph Stüdyo'da yeni bir subgraph sürümü deploy ettiğinizde, önceki sürüm arşivlenir. Arşivlenen sürümler indekslenmez/senkronize edilmez ve bu nedenle sorgulanamaz. Subgraph'inizin arşivlenmiş bir sürümünü Stüdyo kullanıcı arayüzünde arşivden çıkarabilirsiniz. Stüdyo'ya deploy edilen yayınlanmamış subgraph'lerin önceki sürümlerinin otomatik olarak arşivleneceğini lütfen unutmayın. -![Subgraph Studio - Unarchive](/img/Unarchive.png) +![Subgraph Stüdyo - Arşivden Çıkarmak](/img/Unarchive.png) diff --git a/website/pages/tr/developing/creating-a-subgraph.mdx b/website/pages/tr/developing/creating-a-subgraph.mdx index 1fc288833c35..e8b21ade1d7e 100644 --- a/website/pages/tr/developing/creating-a-subgraph.mdx +++ b/website/pages/tr/developing/creating-a-subgraph.mdx @@ -1,46 +1,46 @@ --- -title: Creating a Subgraph +title: Subgraph Oluşturma --- -A subgraph extracts data from a blockchain, processing it and storing it so that it can be easily queried via GraphQL. +Subgraph, verileri bir blok zincirinden çıkarır, işler ve GraphQL aracılığıyla kolayca sorgulanabilmesi için depolar. 
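+Örneğin, bu sayfadaki örnek Gravatar subgraph'i indekslendikten sonra, depolanan varlıklar aşağıdaki gibi basit bir GraphQL sorgusuyla okunabilir (alan adları, ilerleyen bölümlerdeki örnek şemaya dayanır ve yalnızca fikir vermesi içindir):
+
+```graphql
+# Örnek Gravatar şemasına göre, indekslenen varlıkları listeleyen basit bir sorgu
+{
+  gravatars(first: 5) {
+    id
+    owner
+    displayName
+    imageUrl
+  }
+}
+```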
-![Defining a Subgraph](/img/defining-a-subgraph.png) +![Subgraph Tanımlama](/img/defining-a-subgraph.png) -The subgraph definition consists of a few files: +Subgraph tanımı birkaç dosyadan oluşmaktadır: -- `subgraph.yaml`: a YAML file containing the subgraph manifest +- `subgraph.yaml`: Subgraph manifest'ini içeren bir YAML dosyası -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL +- `schema.graphql`: Subgraph içinde depolanan verileri ve GraphQL üzerinden nasıl sorgulayacağınızı tanımlayan bir GraphQL şeması -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) +- `AssemblyScript Mappings`: Olay verilerinden şemanızda tanımlanan varlıklara çeviri yapan [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) kodu (örneğin bu öğretici içerikte `mapping.ts`) -> In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). +> Subgraph'ınızı Graph'ın merkeziyetsiz ağında kullanmak için [API anahtarı oluşturmanız gerekir](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). Ayrıca subgraph'ınıza en az [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network) [sinyal eklemeniz](/network/curating/#how-to-signal) önerilir. -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. +Manifest dosyasının içeriği hakkında detaylı bilgilere girmeden önce, bir subgraph oluşturmak ve dağıtmak için ihtiyacınız olan [Graph CLI](https://github.com/graphprotocol/graph-cli)'yi yüklemeniz gereklidir. -## Install the Graph CLI +## Graph CLI'ı Yükleyin -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. +Graph CLI, JavaScriptle yazılmıştır ve kullanmak için `yarn` veya `npm` kurmanız gerekir; aşağıdaki içerik yarn yüklediğinizi varsaymaktadır. -Once you have `yarn`, install the Graph CLI by running +`Yarn`'a sahip olduğunuzda, Graph CLI'yi çalıştırarak yükleyin -**Install with yarn:** +**Yarn ile kurulum:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**Npm ile kurulum:** ```bash npm install -g @graphprotocol/graph-cli ``` -Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. +Kurulduktan sonra `graph init` komutu, mevcut bir sözleşmeden veya örnek bir subgraph'ten yeni bir subgraph projesi oluşturmak için kullanılabilir. 
Bu komut, `graph init --product subgraph-studio`'yi geçerek Subgraph Stüdyo'da bir subgraph oluşturmak için kullanılabilir. Halihazırda tercih ettiğiniz ağa konuşlandırılmış bir akıllı sözleşmeniz varsa, bu sözleşmeden yeni bir subgraph'i önyüklemek başlamak için iyi bir yol olabilir. -## From An Existing Contract +## Mevcut Bir Sözleşmeden -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. +Aşağıdaki komut, mevcut bir sözleşmenin tüm olaylarını indeksleyen bir subgraph oluşturur. Sözleşme ABI'sini Etherscan'dan almaya çalışır ve yerel bir dosya yolu istemeye geri döner. İsteğe bağlı argümanlardan herhangi biri eksikse, sizi etkileşimli bir formdan geçirir. ```sh graph init \ @@ -51,49 +51,49 @@ graph init \ [] ``` -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. +``, Subgraph Studio'daki subgraph kimliğidir ve subgraph ayrıntıları sayfanızda bulunabilir. -## From An Example Subgraph +## Örnek Bir Subgraph'dan -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: +`Graph init`'in desteklediği ikinci mod, örnek bir subgraph'dan yeni bir proje oluşturmayı destekler. Aşağıdaki komut bunu yapar: ```sh graph init --studio ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. +Örnek subgraph, kullanıcı avatarlarını yöneten ve avatarlar oluşturulduğunda veya güncellendiğinde `NewGravatar` veya `UpdateGravatar` olaylarını yayınlayan Dani Grant'in Gravity sözleşmesine dayanmaktadır. Subgraph, bu olayları `Gravatar` varlıklarını Graph Node depolama alanına yazarak ve bunların olaylara göre güncellenmesini sağlayarak işler. Aşağıdaki bölümlerde, bu örnek için subgraph manifest'ini oluşturan dosyaların üzerinden geçilecektir. -## Add New dataSources To An Existing Subgraph +## Mevcut Bir Subgraph'a Yeni veriKaynakları(dataSources) Ekleme -Since `v0.31.0` the `graph-cli` supports adding new dataSources to an existing subgraph through the `graph add` command. +`v0.31.0` 'dan itibaren, `graph-cli`, var olan bir subgraph'a `graph add` komutu aracılığıyla yeni veriKaynakları(dataSources) eklemeyi destekler. ```sh graph add
    [] -Options: +Seçenekler: - --abi Path to the contract ABI (default: download from Etherscan) - --contract-name Name of the contract (default: Contract) - --merge-entities Whether to merge entities with the same name (default: false) - --network-file Networks config file path (default: "./networks.json") + --abi Sözleşme ABI'sinin yolu (varsayılan: Etherscan'dan indir) + --contract-name Sözleşme adı (varsayılan: Contract) + --merge-entities Aynı ada sahip varlıkların birleştirilip birleştirilmeyeceği (varsayılan: false) + --network-file Ağ yapılandırma dosyası yolu (varsayılan: "./networks.json") ``` -The `add` command will fetch the ABI from Etherscan (unless an ABI path is specified with the `--abi` option), and will create a new `dataSource` in the same way that `graph init` command creates a `dataSource` `--from-contract`, updating the schema and mappings accordingly. +`add` komutu, ABI'yi Etherscan'den getirecektir (`--abi` seçeneğiyle bir ABI yolu belirtilmedikçe) ve tıpkı `graph init` komutunun şemayı güncelleyerek ve eşleştirerek bir `dataSource` `--from-contract` oluşturması gibi yeni bir `dataSource` oluşturacaktır. -The `--merge-entities` option identifies how the developer would like to handle `entity` and `event` name conflicts: +`--merge-entities` seçeneği, geliştiricinin `entity` ve `event` ad çakışmalarını nasıl ele alacağını belirler: -- If `true`: the new `dataSource` should use existing `eventHandlers` & `entities`. -- If `false`: a new entity & event handler should be created with `${dataSourceName}{EventName}`. +- `true` ise: yeni `dataSource` mevcut `eventHandlers` & `entities`'i kullanmalıdır. +- `false` ise: `${dataSourceName}{EventName}` ile yeni bir entity(varlık) & event handler(olay işleyicisi) oluşturulmalıdır. -The contract `address` will be written to the `networks.json` for the relevant network. +Sözleşme `adresi`, ilgili ağ için `networks.json`'a yazılacaktır. -> **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. +> **Not:** Etkileşimli cli kullanırken, `graph init` başarıyla çalıştırdıktan sonra yeni bir `dataSource` eklemeniz istenecektir. -## The Subgraph Manifest +## Subgraph Manifestosu -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +Subgraph manifest'i `subgraph.yaml`, subgraph'ınız tarafından indekslenen akıllı sözleşmeleri, bu sözleşmelerdeki hangi olaylara dikkat edileceğini ve olay verilerinin Graph Node'un depoladığı ve sorgulamasına izin verdiği varlıklarla nasıl eşleneceğini tanımlar. Subgraph manifestlerinin tüm özelliklerini [burada](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md) bulabilirsiniz. 
-For the example subgraph, `subgraph.yaml` is: +Örnek subgraph için `subgraph.yaml` şöyledir: ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -134,59 +142,63 @@ dataSources: file: ./src/mapping.ts ``` -The important entries to update for the manifest are: +Manifest için güncellenmesi gereken önemli girdiler şunlardır: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. -- `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. +- `repository`: Subgraph manifest'inin bulunabileceği havuz URL'si. Ayrıca Graph Gezgini tarafından da görüntülenir. -- `features`: a list of all used [feature](#experimental-features) names. +- `features`: kullanılan tüm [özellik(feature)](#experimental-features) adlarının bir listesi. -- `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. +- `dataSources.source`: Subgraph kaynaklarının olduğu akıllı sözleşmenin adresi ve kullanılacak akıllı sözleşmenin ABI'si. Adres isteğe bağlıdır; atlanması, tüm sözleşmelerden eşleşen olayları indekslemeyi sağlar. -- `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.startBlock`: veri kaynağının indekslemeye başladığı isteğe bağlı blok numarası. Çoğu durumda, sözleşmenin oluşturulduğu bloğun kullanılmasını öneririz. -- `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.context`: subgraph eşleştirmelerinde kullanılabilen anahtar-değer çiftleridir. `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List` ve `BigInt` gibi çeşitli veri tipleri desteklenir. Her değişkenin `type` ve `data` özelliklerinin belirtilmesi gerekir. Bu bağlam değişkenlerine daha sonra eşleştirme dosyalarından erişilebilir ve böylece subgraph geliştirme için daha yapılandırılabilir seçenekler sunulmuş olur. -- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.entities`: veri kaynağının depoya yazdığı varlıklar. Her varlık için şema, schema.graphql dosyasında tanımlanır. 
-- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.abis`: kaynak sözleşmesinin yanı sıra eşleştirmelerin içinden etkileşimde bulunduğunuz diğer akıllı sözleşmeler için bir veya daha fazla isimlendirilmiş ABI dosyası. -- `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. +- `dataSources.mapping.eventHandlers`: bu subgraph'ın tepki verdiği akıllı sözleşme olaylarını ve bu olayları depodaki varlıklara dönüştüren, eşleştirme içindeki (örnekte ./src/mapping.ts) işleyicileri listeler. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +- `dataSources.mapping.callHandlers`: bu subgraph'ın tepki verdiği akıllı sözleşme fonksiyonlarını ve fonksiyon çağrılarının girdi ve çıktılarını depodaki varlıklara dönüştüren eşleştirmedeki işleyicileri listeler. -The triggers for a data source within a block are ordered using the following process: +- `dataSources.mapping.blockHandlers`: Bu subgraph'ın tepki verdiği blokları ve zincire bir blok eklendiğinde çalışacak eşleştirmedeki işleyicileri listeler. Filtre olmadan, blok işleyici her blokta çalıştırılacaktır. Opsiyonel bir çağrı filtresi, işleyiciye `kind: call` değerine sahip bir `filter` alanı eklenerek sağlanabilir. Bu durumda işleyici yalnızca blok, veri kaynağı sözleşmesine en az bir çağrı içeriyorsa çalıştırılır. -1. Event and call triggers are first ordered by transaction index within the block. -2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. +Bir subgraph birden fazla akıllı sözleşmeden veri indeksleyebilir. Verilerinin indekslenmesi gereken her sözleşme için `dataSources` dizisine bir giriş ekleyin. -These ordering rules are subject to change. +Bir bloktaki veri kaynağı için tetikleyiciler şu süreç izlenerek sıralanır: -### Getting The ABIs +1. Olay ve çağrı tetikleyicileri, öncelikle bloktaki işlem indeksine göre sıralanır. +2. Aynı işlemdeki olay ve çağrı tetikleyicileri, bir kurala göre sıralanır: önce olay tetikleyicileri, ardından çağrı tetikleyicileri olmak üzere her tür manifest'te tanımlandıkları sıraya göre sıralanır. +3. Blok tetikleyicileri, olay ve çağrı tetikleyicilerinden sonra manifest'te tanımlandıkları sıraya göre çalıştırılır. -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: +Bu sıralama kuralları değişebilir. -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile.
-- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. +### ABI'leri Alma -## The GraphQL Schema +ABI dosya(lar)ı sözleşme(ler) inizle uygun olmalıdır. ABI dosyalarını edinmek için birkaç yol vardır: -The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. +- Kendi projenizi oluşturuyorsanız, muhtemelen en güncel ABI'lerinize erişiminiz olacaktır. +- Herkese açık bir proje için bir subgraph oluşturuyorsanız, projeyi bilgisayarınıza indirerek [`truffle compile`](https://truffleframework.com/docs/truffle/overview) veya derlemek için solc kullanarak ABI'yi alabilirsiniz. +- ABI'yi ayrıca [Etherscan](https://etherscan.io/)'de de bulabilirsiniz, ancak bu her zaman güvenilir değildir çünkü yüklenen ABI güncelliğini yitirmiş olabilir. Doğru ABI'ye sahip olduğunuzdan emin olun, aksi takdirde subgraph çalıştırma başarısız olacaktır. -## Defining Entities +## GraphQL Şeması -Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. +Subgraph'ınızın şeması `schema.graphql` dosyasındadır. GraphQL şemaları, GraphQL arayüzü tanımlama dili kullanılarak tanımlanır. Daha önce bir GraphQL şeması yazmadıysanız, GraphQL tipi sisteme yönelik bu içeriği kontrol etmeniz önerilir. GraphQL şema referans belgeleri [GraphQL API](/querying/graphql-api) bölümünde bulunabilir. -With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. +## Varlıkları Tanımlama -### Good Example +Varlıkları tanımlamadan önce, bir adım geri atıp verilerinizin nasıl yapılandırıldığını ve bağlantılı olduğunu düşünmek önemlidir. Tüm sorgular, subgraph şemasında tanımlandığı şekilde veri modeline ve subgraph tarafından indekslenen varlıklara karşı yapılacaktır. Bu nedenle, subgraph şemasını dapp'iniz için ihtiyaçlarınıza uygun şekilde tanımlamanız iyi bir yaklaşım olacaktır. 
Varlıkları olaylar veya fonksiyonlar yerine "veri içeren nesneler" olarak farz etmek faydalı olabilir. -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. +Graph ile `schema.graphql`'de basitçe varlık türlerini tanımlarsınız ve Graph Düğümü bu varlık türünün tek bir örneğini ve koleksiyonunu sorgulamak için üst düzey alanlar oluşturur. Bir varlık olarak kabul edilmesi gereken her tür, `@entity` yönergesi ile işaretlenmelidir. Varsayılan olarak varlıklar değişkendir, yani eşlemeler mevcut varlıkları yükleyebilir, değiştirebilir ve o varlığın yeni bir sürümünü depolayabilir. Değişebilirlik bir bedelle gelir ve örneğin zincirden kelimesi kelimesine çıkarılan verileri içerdiklerinden dolayı asla değiştirilmeyecekleri bilinen varlık türleri için, bunların `@entity(immutable: true)` ile değişmez olarak işaretlenmesi önerilir. Eşleştirmeler, değişiklikler varlığın oluşturulduğu aynı blokta gerçekleştiği sürece değişmez varlıklarda değişiklik yapabilir. Değişmez varlıklar çok daha hızlı yazılıp sorgulanabilir, bu nedenle mümkün olduğunca kullanılmalıdır. + +### İyi Bir Örnek + +Aşağıdaki `Gravatar` varlığı, bir Gravatar nesnesi etrafında yapılandırılmıştır ve bir varlığın nasıl tanımlanabileceğine iyi bir örnektir. ```graphql type Gravatar @entity(immutable: true) { @@ -198,9 +210,9 @@ type Gravatar @entity(immutable: true) { } ``` -### Bad Example +### Kötü Bir Örnek -The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. +Aşağıdaki `GravatarAccepted` ve `GravatarDeclined` örnek varlıkları olayları temel alır. Olayların veya fonksiyon çağrılarının varlıklara birebir eşlenmesi önerilmez. ```graphql type GravatarAccepted @entity { @@ -218,36 +230,37 @@ type GravatarDeclined @entity { } ``` -### Optional and Required Fields +### Opsiyonel ve Zorunlu Alanlar -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: +Varlık alanları zorunlu veya opsiyonel olarak tanımlanabilir. Zorunlu alanlar şemada `!` ile belirtilir. Eğer zorunlu bir alan eşleştirme işlemi sırasında ayarlanmazsa, alanı sorgularken şu hatayı alırsınız: ``` Null value resolved for non-null field 'name' ``` -Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. +Her varlık bir `id` alanına sahip olmalıdır ve bu alan `Bytes!` veya `String!` türünde olmalıdır. Genellikle, `id` insan tarafından okunabilir metin içermediği sürece `Bytes!` kullanılması önerilir çünkü `Bytes!` türündeki id'leri olan varlıklar, `String!` `id`'leri olanlardan daha hızlı yazılıp sorgulanabilir. `id` alanı birincil anahtar olarak hizmet eder ve aynı türdeki tüm varlıklar arasında benzersiz olması gerekir. Tarihi nedenlerden dolayı, `ID!` türü de kabul edilir ve `String!` ile eşanlamlıdır.
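+Aşağıdaki kısa taslak, bu önerilerin şemada nasıl görünebileceğini gösterir; `TransferEvent` ve `Domain` varlık adları yalnızca bu örnek için uydurulmuş varsayımsal adlardır:
+
+```graphql
+# Zincir verisinden türetilen id için Bytes! ve (veri değişmeyecekse) immutable önerilir
+type TransferEvent @entity(immutable: true) {
+  id: Bytes!
+  amount: BigInt!
+}
+
+# İnsan tarafından okunabilir bir kimlik kullanılıyorsa String! id tercih edilebilir
+type Domain @entity {
+  id: String!
+  owner: Bytes!
+}
+```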
-For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. +Bazı varlık türleri için `id`, iki diğer varlığın id'lerinden oluşturulur; bunu `concat` kullanarak yapmak mümkündür, örneğin `let id = left.id.concat(right.id)`, `left` ve `right` id'lerinden id'yi oluşturmak için kullanılır. Benzer şekilde, var olan bir varlığın id'si ve bir `count` sayacı kullanılarak bir id oluşturmak için `let id = left.id.concatI32(count)` kullanılabilir. Birleştirme işleminin, `left`'in uzunluğu bu tür tüm varlıklar için aynı olduğu sürece benzersiz id'ler üretmesi garanti edilir; örneğin `left.id`'nin bir `Address` olması durumunda olduğu gibi. -### Built-In Scalar Types +### Yerleşik Skaler (Scalar) Türler -#### GraphQL Supported Scalars +#### GraphQL'in Desteklediği Skalerler -We support the following scalars in our GraphQL API: +GraphQL API'mizde aşağıdaki skalerleri destekliyoruz: -| Type | Description | +| Tür | Tanım | | --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | -| `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | -| `Boolean` | Scalar for `boolean` values. | -| `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | -| `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | -| `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | +| `Bytes` | Byte dizisi, onaltılık bir dizgi olarak temsil edilir. Ethereum hash değerleri ve adresleri için yaygın olarak kullanılır. | +| `String` | `string` değerleri için skaler. Null karakterleri desteklenmez ve otomatik olarak kaldırılır. | +| `Boolean` | `boolean` değerleri için skaler. | +| `Int` | GraphQL spesifikasyonu, `Int`'in 32 bayt boyutunda olduğunu tanımlar. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | +| `BigInt` | Büyük tamsayılar. Ethereum'un `uint32`, `int64`, `uint64`, ..., `uint256` türleri için kullanılır. Not: `int32`, `uint24` veya `int8` gibi `uint32`'nin altındaki her şey `i32` olarak temsil edilir. | +| `BigDecimal` | `BigDecimal` Yüksek hassasiyetli ondalık sayılar, bir anlamlı kısım ve bir üs ile temsil edilir. Üs aralığı -6143 ila +6144 arasındadır. 34 anlamlı rakama yuvarlanır. | -#### Enums +#### Numaralandırmalar -You can also create enums within a schema. Enums have the following syntax: +Ayrıca bir şema içinde numaralandırmalar da oluşturabilirsiniz. Numaralandırmalar aşağıdaki sözdizimine sahiptir: ```graphql enum TokenStatus { @@ -257,19 +270,19 @@ enum TokenStatus { } ```
For example, you can set the `tokenStatus` to `SecondOwner` by first defining your entity and subsequently setting the field with `entity.tokenStatus = "SecondOwner"`. The example below demonstrates what the Token entity would look like with an enum field: +Numaralandırma şemada tanımlandıktan sonra, bir varlık üzerinde numaralandırma alanını ayarlamak için numaralandırma değerinin dizgi gösterimini kullanabilirsiniz Örneğin, önce varlığınızı tanımlayarak ve ardından alanı `entity.tokenStatus = "SecondOwner"` ile ayarlayarak `tokenStatus`'u `SecondOwner` olarak ayarlayabilirsiniz. Aşağıdaki örnek, Token varlığının bir numaralandırma alanıyla nasıl görüneceğini göstermektedir: -More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). +Numaralandırma yazmakla alakalı daha fazla ayrıntıyı [GraphQL belgelerinde](https://graphql.org/learn/schema/) bulabilirsiniz. -#### Entity Relationships +#### Varlık İlişkileri -An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. +Bir varlık, şemanızdaki bir veya daha fazla başka varlıkla ilişkili olabilir. Bu ilişkiler, sorgularınızda çaprazlanabilir. Graph'taki ilişkiler tek yönlüdür. İki yönlü ilişkileri simüle etmek, ilişkinin herhangi biri "son" üzerinde tek yönlü bir ilişki tanımlayarak mümkündür. -Relationships are defined on entities just like any other field except that the type specified is that of another entity. +İlişkiler, belirtilen türün başka bir varlığın türü olması dışında, diğer tüm alanlarda olduğu gibi varlıklar üzerinde tanımlanır. -#### One-To-One Relationships +#### Bire Bir İlişkiler -Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: +Bir `TransactionReceipt` varlık türüyle isteğe bağlı bire bir ilişkiye sahip bir `İşlem(Transaction)` varlık türü tanımlayın: ```graphql type Transaction @entity(immutable: true) { @@ -283,9 +296,9 @@ type TransactionReceipt @entity(immutable: true) { } ``` -#### One-To-Many Relationships +#### Birden Çoğa İlişkiler -Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: +Bir Token varlık türü ile zorunlu birden çoğa ilişkisi olan bir `TokenBalance` varlık türü tanımlayın: ```graphql type Token @entity(immutable: true) { @@ -299,15 +312,15 @@ type TokenBalance @entity { } ``` -#### Reverse Lookups +#### Tersine Aramalar -Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. +Ters aramalar, `@derivedFrom` alanı aracılığıyla bir varlıkta tanımlanabilir. Bu, varlık üzerinde sorgulanabilecek ancak eşleştirme API'si aracılığıyla manuel olarak ayarlanamayacak bir sanal alan oluşturur. Aksine, diğer varlık üzerinde tanımlanan ilişkiden türetilir. 
Bu ilişkiler için, genellikle ilişkinin her iki tarafını da depolamak anlamsızdır ve hem indeksleme hem de sorgu performansı, sadece bir tarafta depolanması ve diğerinde türetilmesi durumunda daha iyi olacaktır. -For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. +Birden çoğa ilişkileriiçin, ilişki her zaman 'birden' tarafında depolanmalı ve her zaman 'çoğa' tarafında türetilmelidir. İlişkinin 'çoğa' tarafında bir dizi varlık depolamak yerine bu şekilde saklanması, subgraph indeksleme ve sorgulaması adına önemli ölçüde daha iyi performans sağlayacaktır. Genel olarak, varlık dizilerini depolamaktan mümkün olduğunca sakınılması gerekmektedir. -#### Example +#### Örnek -We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: +Bir token bakiyelerini token'den erişilebilir hale getirebiliriz. Bunun için bir `tokenBalances` alanı türetmemiz gerekir: ```graphql type Token @entity(immutable: true) { @@ -322,13 +335,13 @@ type TokenBalance @entity { } ``` -#### Many-To-Many Relationships +#### Çoktan Çoğa İlişkiler -For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. +Kullanıcıların her birinin birden çok kuruluşa mensup olabileceği gibi çoktan çoğa ilişkilerde, ilişkiyi modellemenin en basit fakat pek verimli olmayan yolu, ilişkide yer alan iki varlıkta da bir dizi olarak saklamaktır. İlişki simetrik ise, ilişkinin yalnızca bir tarafının saklanması gerekir ve diğer taraf türetilebilir. -#### Example +#### Örnek -Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. +`User` varlık türünden `Organization` varlık türüne bir tersine arama tanımlayın. Aşağıdaki örnekte bu, `Organization` varlığı içindeki `members` özniteliğini arayarak elde edilir. Sorgularda, `User` üzerindeki `organizations` alanı, kullanıcının kimliğini(id) içeren tüm `Organization` varlıklarını bulmak suretiyle çözümlenir. ```graphql type Organization @entity { @@ -344,7 +357,7 @@ type User @entity { } ``` -A more performant way to store this relationship is through a mapping table that has one entry for each `User` / `Organization` pair with a schema like +Bu ilişkiyi daha performanslı bir şekilde depolamanın yolu, bunu her `User`/`Organization` çifti için bir girişe sahip bir eşleştirme tablosu aracılığıyla yapmaktır. 
Şema olarak şu şekilde olabilir ```graphql type Organization @entity { @@ -366,7 +379,7 @@ type UserOrganization @entity { } ``` -This approach requires that queries descend into one additional level to retrieve, for example, the organizations for users: +Bu yaklaşım, örneğin kullanıcılar için kuruluşları almak için sorguların ek bir seviyeye inmesini gerektirir: ```graphql query usersWithOrganizations { @@ -381,11 +394,11 @@ query usersWithOrganizations { } ``` -This more elaborate way of storing many-to-many relationships will result in less data stored for the subgraph, and therefore to a subgraph that is often dramatically faster to index and to query. +Çoktan çoğa ilişkileri depolamanın daha ayrıntılı bu yolu, subgraph için depolanan veri miktarının azalmasına ve bu sonucunda genellikle indekslenmesi ve sorgulanması önemli ölçüde daha hızlı olan bir subgraph sağlayacaktır. -#### Adding comments to the schema +#### Şemaya notlar/yorumlar ekleme -As per GraphQL spec, comments can be added above schema entity attributes using double quotations `""`. This is illustrated in the example below: +GraphQL spesifikasyonuna göre, şema varlık özniteliklerinin üzerinde çift tırnak işaretleri `""` kullanarak yorumlar eklenebilir. Bu aşağıdaki örnekte gösterilmiştir: ```graphql type MyFirstEntity @entity { @@ -395,13 +408,13 @@ type MyFirstEntity @entity { } ``` -## Defining Fulltext Search Fields +## Tam Metinde Arama Alanlarını Tanımlama -Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. +Tam metinde arama sorguları, metin arama girdisine dayanarak varlıkları filtreler ve sıralar. Tam metin sorguları, sorgu metni girişini indekslenmiş metin verileriyle karşılaştırmadan önce köklere işleyerek benzer kelimeler için eşleşmeler döndürebilir. -A fulltext query definition includes the query name, the language dictionary used to process the text fields, the ranking algorithm used to order the results, and the fields included in the search. Each fulltext query may span multiple fields, but all included fields must be from a single entity type. +Tam metin sorgusu tanımı, sorgu adı, metin alanlarını işlemek için kullanılan dil sözlüğü, sonuçları sıralamak için kullanılan sıralama algoritması ve aramaya dahil edilen alanları içerir. Her tam metin sorgusu birden fazla alana yayılabilir, ancak dahil edilen tüm alanlar tek bir varlık türünden olmalıdır. -To add a fulltext query, include a `_Schema_` type with a fulltext directive in the GraphQL schema. +Tam metin sorgusu eklemek için, GraphQL şemasına tam metin yönergesi içeren bir `_Schema_` türü ekleyin. ```graphql type _Schema_ @@ -424,7 +437,7 @@ type Band @entity { } ``` -The example `bandSearch` field can be used in queries to filter `Band` entities based on the text documents in the `name`, `description`, and `bio` fields. Jump to [GraphQL API - Queries](/querying/graphql-api#queries) for a description of the fulltext search API and more example usage. +`bandSearch` örnek alanı, `name`,`description` ve `bio` alanlarındaki metin belgelerine göre `Band` varlıklarını filtrelemek için sorgularda kullanılabilir. Tam metin arama API'si ve daha fazla örnek kullanımı için [GraphQL API - Sorgulama](/querying/graphql-api#queries)'ya geçin. 
```graphql query { @@ -437,49 +450,49 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Özellik Yönetimi](#experimental-features):** `specVersion` `0.0.4` ve sonrasında, `fullTextSearch`, subgraph manifestinde `features` bölümü altında belirtilmelidir. -### Languages supported +### Desteklenen diller -Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". +Farklı bir dil seçmek, tam metin arama API'si üzerinde bazen fark edilmesi güç olsa da kesin bir etkiye sahip olacaktır. Tam metin sorgu alanının kapsadığı alanlar, seçilen dilin bağlamında incelenir; bu nedenle analiz ve arama sorguları tarafından üretilen sözlükbirimleri dilden dile değişir. Örneğin: desteklenen Türkçe sözlük kullanıldığında "token" kelimesi "toke" olarak kök alınırken, elbette İngilizce sözlük "token" olarak kök alacaktır. -Supported language dictionaries: +Desteklenen dil sözlükleri: -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | +| Kod | Sözlük | +| ------ | ---------- | +| simple | Genel | +| da | Danca | +| nl | Flemenkçe | +| en | İngilizce | +| fi | Fince | +| fr | Fransızca | +| de | Almanca | +| hu | Macarca | +| it | İtalyanca | +| no | Norveççe | +| pt | Portekizce | +| ro | Romence | +| ru | Rusça | +| es | İspanyolca | +| sv | İsveççe | +| tr | Türkçe | -### Ranking Algorithms +### Sıralama Algoritmaları -Supported algorithms for ordering results: +Sonuçları sıralamak için desteklenen algoritmalar: -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Algoritma | Tanım | +| ------------- | ----------------------------------------------------------------------------------- | +| rank | Sonuçları sıralamak için tam metin sorgusunun eşleştirme kalitesini (0-1) kullanın. | +| proximityRank | Rank'a benzer ancak eşleşmelerin yakınlığını da hesaba katar. | -## Writing Mappings +## Eşleştirmeleri Yazma -The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. +Eşlemeler, verileri belirli bir kaynaktan alır ve şemanızda tanımlanan varlıklara dönüştürür.
Eşlemeler, WASM ([WebAssembly](https://webassembly.org/))'ye derlenebilen [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) adlı bir [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) alt kümesinde yazılır. AssemblyScript, normal TypeScript'ten daha katıdır, ancak yine de tanıdık bir sözdizimi sağlar. -For each event handler that is defined in `subgraph.yaml` under `mapping.eventHandlers`, create an exported function of the same name. Each handler must accept a single parameter called `event` with a type corresponding to the name of the event which is being handled. +`subgraph.yaml`'da `mapping.eventHandlers` altında tanımlanan her olay işleyicisi için, aynı isimde dışa aktarılmış bir fonksiyon oluşturun. Her işleyici, işlenen olayın adına karşılık gelen bir türde `event` adında tek bir parametre kabul etmelidir. -In the example subgraph, `src/mapping.ts` contains handlers for the `NewGravatar` and `UpdatedGravatar` events: +Örnek subgraph'ta,`src/mapping.ts` dosyası `NewGravatar` ve `UpdatedGravatar` olayları için işleyiciler içerir: ```javascript import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity' @@ -506,31 +519,31 @@ export function handleUpdatedGravatar(event: UpdatedGravatar): void { } ``` -The first handler takes a `NewGravatar` event and creates a new `Gravatar` entity with `new Gravatar(event.params.id.toHex())`, populating the entity fields using the corresponding event parameters. This entity instance is represented by the variable `gravatar`, with an id value of `event.params.id.toHex()`. +İlk işleyici, `NewGravatar` olayını alır ve karşılık gelen olay parametrelerini kullanarak varlık alanlarını dolduran `new Gravatar(event.params.id.toHex())` ile yeni bir `Gravatar` varlığı oluşturur. Bu varlık örneği, `event.params.id.toHex()` kimlik değeri olan `gravatar` değişkeni tarafından temsil edilir. -The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. +İkinci işleyici, mevcut `Gravatar`'ı Graph Düğümü deposundan yüklemeye çalışır. Henüz mevcut değilse, talep üzerine oluşturulur. Varlık daha sonra `gravatar.save()` kullanılarak mağazaya geri kaydedilmeden önce yeni olay parametreleriyle eşleşecek şekilde güncellenir. -### Recommended IDs for Creating New Entities +### Yeni Varlıklar Oluşturmak için Önerilen Kimlikler(IDs) -Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. +Her varlığın aynı türdeki tüm varlıklar arasında benzersiz olan bir `id`'si olması gerekir. Bir varlığın `id` değeri, varlık oluşturulduğunda belirlenir. Aşağıda, yeni varlıklar oluştururken hesaba katılması gereken bazı önerilen `id` değerleri verilmiştir. NOT: `id` değeri bir `dizgi` olmalıdır. - `event.params.id.toHex()` - `event.transaction.from.toHex()` - `event.transaction.hash.toHex() + "-" + event.logIndex.toString()` -We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilies for interacting with the Graph Node store and conveniences for handling smart contract data and entities. 
You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. +Graph Düğümü deposuyla etkileşim için yardımcı programlar, akıllı sözleşme verileri ve varlıklarını işlemek için kolaylık sağlaması açısından [Graph Typescript Kütüphanesi](https://github.com/graphprotocol/graph-ts)'ni sunuyoruz. `mapping.ts`'de `@graphprotocol/graph-ts` 'yi içe aktararak bu kitaplığı eşleştirmelerinizde kullanabilirsiniz. -## Code Generation +## Kod Oluşturma -In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. +Akıllı sözleşmeler, olaylar ve varlıklarla çalışmayı kolay ve tip güvenli hale getirmek amacıyla Graph CLI, subgraph'ın GraphQL şemasından ve veri kaynaklarında bulunan sözleşme ABI'lerinden AssemblyScript türleri oluşturabilir. -This is done with +Bununla yapılır ```sh graph codegen [--output-dir ] [] ``` -but in most cases, subgraphs are already preconfigured via `package.json` to allow you to simply run one of the following to achieve the same: +ancak çoğu durumda, subgraphlar zaten `package.json` aracılığıyla önceden yapılandırılmıştır, bu nedenle aşağıdakilerden birini çalıştırarak aynı sonucu elde etmek mümkündür: ```sh # Yarn @@ -540,37 +553,37 @@ yarn codegen npm run codegen ``` -This will generate an AssemblyScript class for every smart contract in the ABI files mentioned in `subgraph.yaml`, allowing you to bind these contracts to specific addresses in the mappings and call read-only contract methods against the block being processed. It will also generate a class for every contract event to provide easy access to event parameters, as well as the block and transaction the event originated from. All of these types are written to `//.ts`. In the example subgraph, this would be `generated/Gravity/Gravity.ts`, allowing mappings to import these types with. +Bu, `subgraph.yaml`'da belirtilen ABI dosyalarındaki her akıllı sözleşme için bir AssemblyScript sınıfı oluşturacak ve bu sözleşmeleri eşleştirmelerle belirli adreslere bağlamanıza ve işlenen bloğa karşı salt okunur sözleşme yöntemlerini çağırmanıza olanak tanıyacaktır. Ayrıca, her sözleşme olayı için bir sınıf oluşturacak ve olay parametrelerine kolay erişim sağlayacak, ayrıca olayın kaynaklandığı blok ve işlemi sağlayacaktır. Tüm bu tipler `//.ts` dosyasına yazılmaktadır. Örnek subgraph'ta, bu `generated/Gravity/Gravity.ts` olur, böylece eşleştirmelerin bu tipleri iç aktarmasına izin verilir. ```javascript import { - // The contract class: + // Kontrat sınıfı: Gravity, - // The events classes: + // Olayların sınıfları: NewGravatar, UpdatedGravatar, } from '../generated/Gravity/Gravity' ``` -In addition to this, one class is generated for each entity type in the subgraph's GraphQL schema. These classes provide type-safe entity loading, read and write access to entity fields as well as a `save()` method to write entities to store. All entity classes are written to `/schema.ts`, allowing mappings to import them with +Bunun yanı sıra, subgraph'taki GraphQL şemasında bulunan her varlık türü için bir sınıf oluşturulur. Bu sınıflar, tip güvenli varlık yükleme, varlık alanlarına okuma ve yazma erişimi sağlar ve ayrıca bir `save()` yöntemi ile varlıkları depoya yazarlar. 
Tüm varlık sınıfları `/schema.ts`'ye yazılır, böylece eşleştirmeler şu şekilde bunları içe aktarabilir ```javascript import { Gravatar } from '../generated/schema' ``` -> **Note:** The code generation must be performed again after every change to the GraphQL schema or the ABIs included in the manifest. It must also be performed at least once before building or deploying the subgraph. +> **Not:** Kod oluşturma, GraphQL şemasındaki veya manifeste dahil edilen ABI'lerdeki her değişiklikten sonra tekrar yapılmalıdır. Ayrıca, subgraph oluşturulmadan önce en az bir kez yapılmalıdır. -Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. +Kod oluşturma, `src/mapping.ts`'deki eşleştirme kodunuzu kontrol etmez. Subgraph'ınızı Graph Gezginine dağıtmadan önce bunu kontrol etmek isterseniz, `yarn build` çalıştırabilir ve TypeScript derleyicisinin bulabileceği herhangi bir sözdizimi hatasını düzeltebilirsiniz. -## Data Source Templates +## Veri Kaynağı Şablonları -A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. +EVM uyumlu akıllı sözleşmelerdeki yaygın bir model, bir sözleşmenin her birinin kendi durumu ve olayı olan rastgele sayıda başka sözleşmeler oluşturduğu, yönettiği veya bunlara atıfta bulunduğu kayıt defteri veya fabrika sözleşmelerinin kullanılmasıdır. -The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. +Bu alt sözleşmelerin adresleri önceden bilinebilir veya bilinmeyebilir ve bu sözleşmelerin çoğu zaman içinde oluşturulabilir ve/veya eklenebilir. Bu nedenle, bu gibi durumlarda tek bir veri kaynağı veya sabit sayıda veri kaynağı tanımlamak imkansızdır ve daha dinamik bir yaklaşıma ihtiyaç vardır: _data source templates_. -### Data Source for the Main Contract +### Ana Sözleşme için Veri Kaynağı -First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. +İlk olarak, ana sözleşme için düzenli bir veri kaynağı tanımlarsınız. Aşağıdaki örnek, [Uniswap](https://uniswap.org) borsa(exchange) factory sözleşmesi için basitleştirilmiş bir veri kaynağı göstermektedir. `NewExchange(address,address)` olay işleyicisine dikkat edin. Bu, factory sözleşmesi tarafından zincir üstünde yeni bir takas sözleşmesi oluşturulduğunda yayınlanır. ```yaml dataSources: @@ -595,9 +608,9 @@ dataSources: handler: handleNewExchange ``` -### Data Source Templates for Dynamically Created Contracts +### Dinamik Olarak Oluşturulan Sözleşmeler için Veri Kaynağı Şablonları -Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. 
Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. +Ardından, manifest'e _veri kaynağı şablonları_ eklersiniz. Bunlar, `source` altında önceden tanımlanmış bir sözleşme adresi olmayan düzenli veri kaynaklarıyla aynıdır. Genellikle, ana sözleşme tarafından yönetilen veya başvurulan her alt-sözleşme türü için bir şablon tanımlarsınız. ```yaml dataSources: @@ -631,27 +644,27 @@ templates: handler: handleRemoveLiquidity ``` -### Instantiating a Data Source Template +### Bir Veri Kaynağı Şablonunun Örneklenmesi -In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. +Son adımda, ana sözleşme eşleştirmesini güncelleyerek bir şablondan dinamik bir veri kaynağı örneği oluşturursunuz. Bu örnekte, ana sözleşme eşlemesini değiştirerek `Exchange` şablonunu içe aktarır ve yeni takas sözleşmesini indekslemek için `Exchange.create(address)` yöntemini çağırırsınız. ```typescript import { Exchange } from '../generated/templates' export function handleNewExchange(event: NewExchange): void { - // Start indexing the exchange; `event.params.exchange` is the - // address of the new exchange contract + // Borsayı indekslemeye başlayın; "event.params.exchange" + // yeni borsa sözleşmesinin adresi Exchange.create(event.params.exchange) } ``` -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> **Not:** Yeni bir veri kaynağı, oluşturulduğu blok ve tüm takip eden bloklar için yalnızca çağrıları ve olayları işleyecektir, ancak önceki bloklarda bulunan geçmiş verileri işlemeyecektir. > -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. +> Eğer önceki bloklar, yeni veri kaynağı için ilgili veri içeriyorsa, o veriyi indekslemek için sözleşmenin mevcut durumunu okuyarak ve yeni veri kaynağı oluşturulurken o zaman dilimindeki durumu temsil eden varlıklar oluşturarak yapmak en iyisidir. -### Data Source Context +### Veri Kaynağı Bağlamı -Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so: +Veri kaynağı bağlamları, bir şablonu anında özelleştirmek için ek yapılandırma geçişine izin verir. Örneğimizde, borsalar belirli bir alım-satım çifti ile ilişkilendirilir ve bu bilgi `NewExchange` olayına dahil edilir. 
Bu bilgi, oluşturulan veri kaynağına şöyle aktarılabilir: ```typescript import { Exchange } from '../generated/templates' @@ -663,7 +676,7 @@ export function handleNewExchange(event: NewExchange): void { } ``` -Inside a mapping of the `Exchange` template, the context can then be accessed: +`Exchange` şablonunun eşleştirmesi içinde, bağlama şu şekilde erişilebilir: ```typescript import { dataSource } from '@graphprotocol/graph-ts' @@ -672,11 +685,11 @@ let context = dataSource.context() let tradingPair = context.getString('tradingPair') ``` -There are setters and getters like `setString` and `getString` for all value types. +Tüm değer tipleri için `setString` ve `getString` gibi ayarlayıcılar ve alıcılar vardır. -## Start Blocks +## Başlangıç Blokları -The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created. +`startBlock`, veri kaynağının indekslemeye başlayacağı zincirdeki hangi bloktan başlayacağını belirlemenize olanak tanıyan isteğe bağlı bir ayarlamadır. Başlangıç bloğunu belirlemek, veri kaynağının ilgisiz olabilecek potansiyel milyonlarca bloğu atlamasına olanak tanır. Tipik olarak, bir subgraph geliştiricisi, veri kaynağı akıllı sözleşmesinin oluşturulduğu bloğa `startBlock` ayarlar. ```yaml dataSources: @@ -702,23 +715,23 @@ dataSources: handler: handleNewEvent ``` -> **Note:** The contract creation block can be quickly looked up on Etherscan: +> **Not:** Sözleşme oluşturma bloğu hızlı bir şekilde Etherscan'da aranabilir: > -> 1. Search for the contract by entering its address in the search bar. -> 2. Click on the creation transaction hash in the `Contract Creator` section. -> 3. Load the transaction details page where you'll find the start block for that contract. +> 1. Arama çubuğuna adresini girerek sözleşmeyi arayın. +> 2. `Contract Creator` bölümünde oluşturma işlemi hash'ına tıklayın. +> 3. İşlem detayları sayfasını yükleyin ve bu sözleşme için başlangıç bloğunu bulacaksınız. -## Call Handlers +## Çağrı İşleyicileri -While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured. +Etkinliğin bir sözleşmenin durumunda ilgili değişiklikleri toplamak için etkili bir yol sağlamasıyla birlikte, birçok sözleşme gaz maliyetlerini optimize etmek için günlük oluşturmaktan kaçınır. Bu durumlarda, bir subgraph veri kaynağı sözleşmesine yapılan çağrılara abone olabilir. Bunun için, işlev imzasına ve bu işlevi işleyecek eşleme işleyicisine başvurularak çağrı işleyicileri tanımlanır. Bu çağrıları işlemek için eşleme işleyicisi, `ethereum.Call` olarak adlandırılan ve çağrıya ilişkin yazılım girdileri ve çıktıları olan bir argüman alır. 
İşlem'in çağrı zincirinin herhangi bir derinliğinde yapılan çağrılar eşleştirmeyi tetikleyecektir, bu sayede veri kaynağı sözleşmesi aracılığıyla proxy sözleşmeleri aracılığıyla gerçekleştirilen faaliyetler yakalanabilir. -Call handlers will only trigger in one of two cases: when the function specified is called by an account other than the contract itself or when it is marked as external in Solidity and called as part of another function in the same contract. +Çağrı işleyicileri yalnızca iki durumdan birinde tetiklenir: belirtilen işlevin sözleşme tarafından değil, başka bir hesap tarafından çağrılması durumunda veya Solidity'de harici olarak işaretlenip aynı sözleşmenin başka bir işlevinin bir parçası olarak çağrılması durumunda yalnızca tetiklenir. -> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every evm network. +> **Not:** Çağrı işleyicileri şu anda Parity izleme API'sine bağlıdır. BNB zinciri ve Arbitrum gibi bazı ağlar bu API'yı desteklemez. Bu ağlardan birini indeksleyen bir subgraph, bir veya daha fazla çağrı işleyicisi içeriyorsa senkronizasyon başlatılmaz. Subgraph geliştiricileri bunun yerine etkinlik işleyicilerini kullanmalıdır. Bunlar çağrı işleyicilerinden çok daha performanslıdır ve her EVM ağı tarafından desteklenir. -### Defining a Call Handler +### Bir Çağrı İşleyici Tanımlama -To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to. +Manifestinizde bir çağrı işleyicisi tanımlamak için sadece abone olmak istediğiniz veri kaynağı altında bir `callHandlers` dizisi ekleyin. ```yaml dataSources: @@ -743,11 +756,11 @@ dataSources: handler: handleCreateGravatar ``` -The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract. +`function`, çağrıları filtrelemek için normalleştirilmiş işlev imzasıdır. `handler` özelliği, veri kaynağı sözleşmesinde hedef işlev çağrıldığında yürütmek istediğiniz işlevin adıdır. -### Mapping Function +### Eşleştirme fonksiyonu -Each call handler takes a single parameter that has a type corresponding to the name of the called function. In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: +Her çağrı işleyicisi, çağrılan işlevin adına karşılık gelen bir tipe sahip tek bir parametre alır. Yukarıdaki örnek subgraphta eşleme, `createGravatar` işlevi çağrıldığında ve bir `CreateGravatarCall` parametresi olarak alındığında işleyici içerir: ```typescript import { CreateGravatarCall } from '../generated/Gravity/Gravity' @@ -762,24 +775,26 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { } ``` -The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. 
+`handleCreateGravatar` fonksiyonu, `@graphprotocol/graph-ts` tarafından sağlanan `ethereum.Call` alt sınıfı olan ve çağrının tipli girdilerini ve çıktılarını içeren yeni bir `CreateGravatarCall`'u alır. `CreateGravatarCall` türü, `graph codegen` çalıştırıldığında sizin için oluşturulur. + +## Blok İşleyicileri -## Block Handlers +Bir subgraph, sözleşme olaylarına veya işlev çağrılarına abone olmanın yanı sıra, zincire yeni bloklar eklendikçe verilerini güncellemek isteyebilir. Bunu gerçekleştirmek için bir subgraph, her bloktan sonra veya önceden tanımlanmış bir filtreyle eşleşen bloklardan sonra bir fonksiyon çalıştırabilir. -In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. +### Desteklenen Filtreler -### Supported Filters +#### Call Filtresi ```yaml filter: kind: call ``` -_The defined handler will be called once for every block which contains a call to the contract (data source) the handler is defined under._ +_Tanımlanan işleyici, işleyicinin altında tanımlandığı sözleşmeye (veri kaynağı) çağrı içeren her blok için bir kez çağrılacaktır._ -> **Note:** The `call` filter currently depend on the Parity tracing API. Certain networks, such as BNB chain and Arbitrum, does not support this API. If a subgraph indexing one of these networks contain one or more block handlers with a `call` filter, it will not start syncing. +> **Not:** `call` filtresi şu anda Parity izleme API'sine bağlıdır. BNB zinciri ve Arbitrum gibi bazı ağlar bu API'yi desteklemez. Bu ağlardan birini indeksleyen bir subgraph, `call` filtresi olan bir veya daha fazla blok işleyici içeriyorsa, senkronizasyon başlatılmaz. -The absence of a filter for a block handler will ensure that the handler is called every block. A data source can only contain one block handler for each filter type. +Bir blok işleyicisi için filtre olmaması, işleyicinin her blok için çağrılacağı anlamına gelir. Bir veri kaynağı, her filtre türü için yalnızca bir blok işleyicisi içerebilir. ```yaml dataSources: @@ -806,9 +821,48 @@ dataSources: kind: call ``` -### Mapping Function +#### Polling Filtresi + +> **`specVersion` >= 0.0.8 gerektirir** + +> **Not:** Polling filtreleri yalnızca `kind: ethereum` olan dataSources üzerinde kullanılabilir. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +Tanımlanan işleyici her `n` blok için bir kez çağrılacaktır; burada `n`, `every` alanında sağlanan değerdir. Bu yapılandırma, subgraph'ın düzenli blok aralıklarında belirli işlemleri gerçekleştirmesini sağlar. + +#### Once Filtresi + +> **`specVersion` >= 0.0.8 gerektirir** + +> **Not:** Once filtreleri yalnızca `kind: ethereum` olan dataSources üzerinde kullanılabilir. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +Once filtresi ile tanımlanan işleyici, diğer tüm işleyiciler çalışmadan önce yalnızca bir kez çağrılacaktır. Bu yapılandırma, işleyicinin, indekslemenin başlangıcında belirli görevleri yerine getiren bir başlatma işleyicisi olarak kullanılmasına olanak tanır. -The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities.
+```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Eşleştirme fonksiyonu + +Eşleştirme fonksiyonu, argüman olarak yalnızca bir `ethereum.Block` alacaktır. Olaylar için eşleştirme işlevleri gibi, bu işlev depodaki mevcut subgraph varlıklarına erişebilir, akıllı sözleşmeleri çağırabilir ve varlıkları oluşturabilir veya güncelleyebilir. ```typescript import { ethereum } from '@graphprotocol/graph-ts' @@ -820,9 +874,9 @@ export function handleBlock(block: ethereum.Block): void { } ``` -## Anonymous Events +## Anonim Olaylar -If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: +Solidity'de anonim olayları işlemeniz gerekiyorsa, örnekte olduğu gibi, olayın topic 0'ını sağlayarak bunu başarabilirsiniz: ```yaml eventHandlers: @@ -831,13 +885,13 @@ eventHandlers: handler: handleGive ``` -An event will only be triggered when both the signature and topic 0 match. By default, `topic0` is equal to the hash of the event signature. +Bir olay, yalnızca imza ve topic 0 eşleştiğinde tetiklenir. Varsayılan olarak `topic0`, olay imzasının hash değerine eşittir. -## Transaction Receipts in Event Handlers +## Olay İşleyicilerinde İşlem Makbuzları -Starting from `specVersion` `0.0.5` and `apiVersion` `0.0.7`, event handlers can have access to the receipt for the transaction which emitted them. +`specVersion` `0.0.5` ve `apiVersion` `0.0.7`'den itibaren olay işleyicileri, onları yayınlayan işlemin makbuzuna erişebilir. -To do so, event handlers must be declared in the subgraph manifest with the new `receipt: true` key, which is optional and defaults to false. +Bunun için olay işleyicileri, subgraph manifest dosyasında isteğe bağlı ve varsayılan olarak false olan yeni `receipt: true` anahtarı kullanılarak belirtilmelidir. ```yaml eventHandlers: @@ -846,20 +900,20 @@ eventHandlers: receipt: true ``` -Inside the handler function, the receipt can be accessed in the `Event.receipt` field. When the `receipt` key is set to `false` or omitted in the manifest, a `null` value will be returned instead. +İşleyici işlevi içinde makbuza `Event.receipt` alanı üzerinden erişilebilir. Manifestte `receipt` anahtarı `false` olarak ayarlandığında veya atlandığında, bunun yerine `null` değeri döndürülür.
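Aşağıdaki kısa taslak, makbuzun bir olay işleyicisi içinde nasıl kullanılabileceğini göstermektedir. Taslak, dokümandaki örnek Gravatar subgraph'ını ve manifestte `receipt: true` ayarlandığını varsayar; günlüğe yazılan `gasUsed` alanı yalnızca gösterim amaçlı seçilmiştir:

```typescript
import { log } from '@graphprotocol/graph-ts'
import { NewGravatar } from '../generated/Gravity/Gravity'

export function handleNewGravatar(event: NewGravatar): void {
  // Manifestte `receipt: true` ayarlanmadıysa bu alan null döner
  let receipt = event.receipt
  if (receipt != null) {
    // Örnek: olayı yayınlayan işlemin kullandığı gazı günlüğe yaz
    log.info('handleNewGravatar gasUsed: {}', [receipt.gasUsed.toString()])
  }
}
```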
-## Experimental features +## Deneysel özellikler -Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: +`specVersion` `0.0.4`'ten itibaren subgraph özellikleri, manifest dosyasının en üst düzeyindeki özellikler(`features`) bölümünde, aşağıdaki tabloda listelendiği gibi `camelCase` adlarıyla açıkça belirtilmelidir: -| Feature | Name | -| --------------------------------------------------------- | --------------------------------------------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | +| Özellik | İsim | +| ------------------------------------------------------------ | --------------------------------------------------- | +| [Ölümcül Olmayan Hatalar](#non-fatal-errors) | `nonFatalErrors` | +| [Tam Metin Arama](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Graftlama](#grafting-onto-existing-subgraphs) | `grafting` | +| [Ethereum Sözleşmelerinde IPFS](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | -For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: +Örneğin, bir subgraph Tam Metin Arama(**Full-Text Search**) ve Ölümcül Olmayan Hatalar(**Non-fatal Errors**) özelliklerini kullanıyorsa, özellikler(`features`) alanı manifestte şöyle olmalıdır: ```yaml specVersion: 0.0.4 @@ -870,27 +924,27 @@ features: dataSources: ... ``` -Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. +Bir özelliği bildirmeden kullanmanın, subgraph dağıtımı sırasında bir **validation error**'a yol açacağını, ancak bir özellik bildirilmiş ancak kullanılmamışsa herhangi bir hata oluşmayacağını unutmayın. -### IPFS on Ethereum Contracts +### Ethereum Sözleşmelerinde IPFS -A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts. +IPFS'yi Ethereum ile birleştirerek yaygın bir kullanım durumu, zincir üstünde tutmanın maliyetli olacağı verileri IPFS'te depolamak ve IPFS hash değerine Ethereum sözleşmelerinde referans vermektir. -Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing. +Bu tür IPFS hash değerleri verildiğinde, subgraphlar `ipfs.cat` ve `ipfs.map` kullanarak IPFS'ten ilgili dosyaları okuyabilir. Bunu güvenilir bir şekilde yapmak için, Barındırılan hizmet([hosted service](https://thegraph.com/hosted-service)) IPFS düğümü bunları indeksleme sırasında bulabilecek kadar yüksek erişilebilirliğe sahip bir IPFS düğümüne sabitlenmelidir. -> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio. 
+> **Not:** Graph Ağı henüz `ipfs.cat` ve `ipfs.map`'i desteklemiyor ve geliştiricilerin bu işlevselliği kullanarak subgraphları Studio aracılığıyla ağa dağıtması önerilmez. -> **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose. +> **[Özellik Yönetimi](#experimental-features):** `ipfsOnEthereumContracts`, subgraph manifestinde özellikler (`features`) altında bildirilmelidir. EVM dışı zincirler için, aynı amaçla `nonDeterministicIpfs` takma adı da kullanılabilir. -When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality. +Yerel Graph Düğümü çalıştırılırken, bu deneysel işlevselliği kullanarak subgraphları indekslemek için `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` ortam değişkeni ayarlanmalıdır. -### Non-fatal errors +### Ölümcül Olmayan Hatalar -Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. +Halihazırda senkronize edilmiş subgraphlardaki indeksleme hataları, varsayılan olarak subgraph'ın başarısız olmasına ve senkronizasyonun durmasına neden olur. Alternatif olarak subgraphlar, hatayı tetikleyen işleyicinin yaptığı değişiklikleri yok sayarak hatalara rağmen senkronizasyona devam edecek şekilde yapılandırılabilir. Bu, subgraph yazarlarına subgraphlarını düzeltmeleri için zaman kazandırırken sorguların en son bloka karşı sunulmaya devam etmesini sağlar; ancak hataya yol açan kusur nedeniyle sonuçlar tutarsız olabilir. Bazı hataların yine de her zaman ölümcül olduğunu unutmayın. Bir hatanın ölümcül olmaması için deterministik olduğunun bilinmesi gerekir. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Not:** Graph Ağı, henüz ölümcül olmayan hataları desteklemiyor ve geliştiricilerin bu işlevselliği kullanarak subgraphları Studio aracılığıyla ağa dağıtması önerilmez. -Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: +Ölümcül olmayan hataların etkinleştirilmesi, subgraph manifestinde aşağıdaki özellik bayrağının ayarlanmasını gerektirir: ```yaml specVersion: 0.0.4 @@ -900,7 +954,7 @@ features: ... ``` -The query must also opt-in to querying data with potential inconsistencies through the `subgraphError` argument. It is also recommended to query `_meta` to check if the subgraph has skipped over errors, as in the example: +Sorgunun da, `subgraphError` argümanı aracılığıyla olası tutarsızlıklar içeren verileri sorgulamayı kabul etmesi gerekir.
Subgraph'ta hataların atlandığını kontrol etmek için `_meta`'yı sorgulamak da önerilir, örnekte olduğu gibi: ```graphql foos(first: 100, subgraphError: allow) { @@ -912,7 +966,7 @@ _meta { } ``` -If the subgraph encounters an error, that query will return both the data and a graphql error with the message `"indexing_error"`, as in this example response: +Subgraph bir hatayla karşılaşırsa bu sorgu, hem verileri hem de `"indexing_error"` mesajıyla birlikte bir graphql hatasını döndürecektir, örnekte olduğu gibi: ```graphql "data": { @@ -932,11 +986,13 @@ If the subgraph encounters an error, that query will return both the data and a ] ``` -### Grafting onto Existing Subgraphs +### Mevcut Subgraph'ta Graftlama -When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. +> **Not:** Graph Ağı'na ilk yükseltme yapılırken graftlama kullanılması önerilmez. Daha fazla bilgi için[buraya](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network) bakın. -A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: +Bir subgraph ilk olarak dağıtıldığında, ilgili zincirin başlangıç bloğundan (veya her veri kaynağı için belirlenen `startBlock`'tan) olayları indekslemeye başlar. Bazı durumlarda, mevcut bir subgraph'tan verilerin yeniden kullanılması ve çok daha sonraki bir blokta indekslemeye başlanması yararlı olabilir. Bu indeksleme yöntemi _Grafting_ olarak adlandırılır. Grafting, örneğin, eşleştirmelerdeki basit hataları hızlı bir şekilde geçmek veya bir subgraph başarısız olduktan sonra geçici olarak tekrar çalıştırmak için kullanışlıdır. + +Bir subgraph temel bir subgraph üzerine graft edildiğinde, `subgraph.yaml`'daki subgraph belirtimi en üst düzeyde bir `graft` bloğu içerir: ```yaml description: ... @@ -945,49 +1001,49 @@ graft: block: 7345624 # Block number ``` -When a subgraph whose manifest contains a `graft` block is deployed, Graph Node will copy the data of the `base` subgraph up to and including the given `block` and then continue indexing the new subgraph from that block on. The base subgraph must exist on the target Graph Node instance and must have indexed up to at least the given block. Because of this restriction, grafting should only be used during development or during an emergency to speed up producing an equivalent non-grafted subgraph. +Manifesti `graft` bloğu içeren bir subgraph dağıtıldığında, Graph Düğümü verilen bloğa(`block`) kadar olan temel`base` subgraph verilerini kopyalar ve ardından yeni subgraph'a o bloktan devam eder. Temel subgraph, hedef Graph Düğüm örneğinde mevcut olmalı ve en azından verilen bloka kadar indekslemiş olmalıdır. Bu kısıtlama nedeniyle, graftlama yalnızca geliştirme sırasında veya acil durumlarda, eşdeğer graftlanmamış bir subgraph oluşturmaya hız kazandırmak için kullanılmalıdır. -Because grafting copies rather than indexes base data, it is much quicker to get the subgraph to the desired block than indexing from scratch, though the initial data copy can still take several hours for very large subgraphs. 
While the grafted subgraph is being initialized, the Graph Node will log information about the entity types that have already been copied. +Graftlama, temel verileri indekslemek yerine kopyaladığından, subgraph'ı istenen bloğa getirmek sıfırdan indekslemeye nazaran çok daha hızlıdır, ancak ilk veri kopyası çok büyük subgraphlar için yine birkaç saat sürebilir. Graftlanmış subgraph başlatılırken, Graph Düğümü halihazırda kopyalanmış olan varlık türleri hakkında bilgileri kaydedecektir. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Graftlanan subgraph, temel subgraphla tamamen aynı olmayan, ancak onunla uyumlu olan bir GraphQL şeması kullanabilir. Kendi başına geçerli bir subgraph şeması olmalıdır, ancak şu şekillerde temel subgraph şemasından sapabilir: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Varlık türlerini ekler veya kaldırır +- Varlık türlerinden öznitelikleri kaldırır +- Varlık türlerine null yapılabilir öznitelikler ekler +- Null yapılamayan öznitelikleri null yapılabilir özniteliklere dönüştürür +- Numaralandırmalara değerler ekler +- Arayüzleri ekler veya kaldırır +- Arayüzün hangi varlık türleri için uygulandığını değiştirir -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Özellik Yönetimi](#experimental-features):** graftlama(`grafting`) subgraph manifestindeki özellikler(`features`) altında bildirilmelidir. -## File Data Sources +## Dosya Veri Kaynakları -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +Dosya veri kaynakları, indeksleme sırasında zincir dışı verilere sağlam ve genişletilebilir bir şekilde erişmek için yeni bir subgraph fonksiyonudur. Dosya veri kaynakları IPFS'den ve Arweave'den dosya getirmeyi desteklemektedir. -> This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. +> Bu aynı zamanda zincir dışı verilerinin belirlenebilir indekslenmesi için zemin hazırlar ve keyfi HTTP kaynaklı verilerin tanıtılma potansiyelini de beraberinde getirir. -### Overview +### Genel Bakış -Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. +İşleyici yürütme sırasında dosyaları "sıralı" olarak getirmek yerine bu, belirli bir dosya tanımlayıcısı için yeni veri kaynakları olarak üretilebilecek şablonları sunar. Bu yeni veri kaynakları dosyaları alır, başarısız olursa yeniden denener ve dosya bulunduğunda ayrılmış bir işleyici çalıştırır. -This is similar to the [existing data source templates](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates), which are used to dynamically create new chain-based data sources. 
+Bu, yeni zincir tabanlı veri kaynaklarını dinamik olarak oluşturmak için kullanılan [mevcut veri kaynağı şablonlarına](https://thegraph.com/docs/en/developing/creating-a-subgraph/#data-source-templates) benzer. -> This replaces the existing `ipfs.cat` API +> Bu, mevcut `ipfs.cat` API'sinin yerini alır -### Upgrade guide +### Yükseltme rehberi -#### Update `graph-ts` and `graph-cli` +#### `graph-ts` ve `graph-cli`'yi güncelleyin -File data sources requires graph-ts >=0.29.0 and graph-cli >=0.33.1 +Dosya veri kaynakları, graph-ts >=0.29.0 and graph-cli >=0.33.1 gerektirir -#### Add a new entity type which will be updated when files are found +#### Dosyalar bulunduğunda güncellenecek yeni bir varlık türü ekleyin -File data sources cannot access or update chain-based entities, but must update file specific entities. +Dosya veri kaynakları zincir tabanlı varlıklara erişemez veya bunları güncelleyemez, ancak dosya belirli varlıkları güncellemelidir. -This may mean splitting out fields from existing entities into separate entities, linked together. +Bu, mevcut varlıklardaki alanları ayrı varlıklara bölmeyi gerektirebilir ve bunlar birbirine bağlanabilir. -Original combined entity: +Özgün birleştirilmiş varlık: ```graphql type Token @entity { @@ -1005,7 +1061,7 @@ type Token @entity { } ``` -New, split entity: +Yeni, ayrılmış varlık: ```graphql type Token @entity { @@ -1026,13 +1082,13 @@ type TokenMetadata @entity { } ``` -If the relationship is 1:1 between the parent entity and the resulting file data source entity, the simplest pattern is to link the parent entity to a resulting file entity by using the IPFS CID as the lookup. Get in touch on Discord if you are having difficulty modelling your new file-based entities! +Ana varlık ve sonuç dosya veri kaynak varlığı arasındaki ilişki bire bir ise, en basit kalıp, IPFS CID'yi arama anahtarı olarak kullanarak ana varlığını sonuç dosya varlığına bağlamaktır. Yeni dosya tabanlı varlıklarınızın modellemesiyle ilgili sorun yaşarsanız Discord üzerinden iletişime geçin! -> You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. +> Ana varlıkları bu iç içe geçmiş varlıklar temelinde filtrelemek için [iç içe filtreleri](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) kullanabilirsiniz. -#### Add a new templated data source with `kind: file/ipfs` +#### `kind: file/ipfs` veya `kind: file/arweave` ile yeni bir şablonlu veri kaynağı ekleyin -This is the data source which will be spawned when a file of interest is identified. +Bu, ilgi alanı dosyası tespit edildiğinde oluşturulacak veri kaynağıdır. ```yaml templates: @@ -1050,21 +1106,21 @@ templates: file: ./abis/Token.json ``` -> Currently `abis` are required, though it is not possible to call contracts from within file data sources +> Şu anda `abis` gerekli olsa da, dosya veri kaynaklarından sözleşmeleri çağırmak mümkün değildir -The file data source must specifically mention all the entity types which it will interact with under `entities`. See [limitations](#Limitations) for more details. +Dosya veri kaynağı, varlıklar(`entities`) altında etkileşimde bulunacağı tüm varlık türlerini özellikle belirtmelidir. Daha fazla ayrıntı için sınırlamalara([limitations](#Limitations)) bakın. 
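Arweave'den dosya getirmek için şablonun `kind` alanı, yukarıdaki başlıkta belirtildiği gibi `file/arweave` olarak ayarlanır. Aşağıdaki kısa taslak varsayımsaldır ve `mapping` bölümünün IPFS örneğiyle aynı şekilde tanımlandığını varsayar:

```yaml
templates:
  - name: TokenMetadata
    kind: file/arweave # IPFS yerine Arweave'den dosya getirmek için
    # mapping bölümü, yukarıdaki file/ipfs şablonuyla aynı alanları içerir
```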
-#### Create a new handler to process files +#### Dosyaları işlemek için yeni bir işleyici oluşturun -This handler should accept one `Bytes` parameter, which will be the contents of the file, when it is found, which can then be processed. This will often be a JSON file, which can be processed with `graph-ts` helpers ([documentation](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)). +Bu işleyici, bulunduğunda dosyanın içeriği olacak ve daha sonra işlenebilecek bir `Bytes` parametresini kabul etmelidir. Bu genellikle, `graph-ts` yardımcıları ([dökümantasyon](https://thegraph.com/docs/en/developing/assemblyscript-api/#json-api)) ile işlenebilen bir JSON dosyası olacaktır. -The CID of the file as a readable string can be accessed via the `dataSource` as follows: +Dosyanın okunabilir bir dize olarak CID'sine `dataSource` aracılığıyla şu şekilde erişilebilir: ```typescript const cid = dataSource.stringParam() ``` -Example handler: +Örnek işleyici: ```typescript import { json, Bytes, dataSource } from '@graphprotocol/graph-ts' @@ -1091,22 +1147,24 @@ export function handleMetadata(content: Bytes): void { } ``` -#### Spawn file data sources when required +#### Gerektiğinde dosya veri kaynakları oluşturun + +Artık zincir tabanlı işleyicilerin yürütülmesi sırasında dosya veri kaynakları oluşturabilirsiniz: -You can now create file data sources during execution of chain-based handlers: +- Otomatik olarak oluşturulmuş şablonları(`templates`) içe aktarın +- cid'nin IPFS veya Arweave için geçerli içerik tanımlayıcısı olduğu bir eşleştirme içinden `TemplateName.create(cid: string)` öğesini çağırın -- Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +Graph Düğümü, IPFS için [v0 ve v1 içerik tanımlayıcılarını](https://docs.ipfs.tech/concepts/content-addressing/), ve dizinli içerik tanımlayıcılarını desteklemektedir. (örneğin `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +Arweave için, 0.33.0 sürümünden itibaren Graph Düğümü, bir Arweave ağ geçidinden [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) öğelerine göre Arweave'de depolanan dosyaları getirebilir ([örnek dosya](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave, Bundlr aracılığıyla yüklenen işlemleri destekler ve Graph Düğümü, [Bundlr manifestlerine](https://docs.bundlr.network/learn/gateways#indexing) dayalı olarak dosyaları da getirebilir. -Example: +Örnek: ```typescript import { TokenMetadata as TokenMetadataTemplate } from '../generated/templates' const ipfshash = 'QmaXzZhcYnsisuue5WRdQDH6FDvqkLQX1NckLqBYeYYEfm' -//This example code is for a Crypto coven subgraph. The above ipfs hash is a directory with token metadata for all crypto coven NFTs. +//Bu örnek kod, bir Crypto coven subgraph'ı içindir. Yukarıdaki ipfs hash'ı, tüm kripto NFT'leri için token üst verilerine sahip bir dizindir. 
export function handleTransfer(event: TransferEvent): void { let token = Token.load(event.params.tokenId.toString()) @@ -1116,7 +1174,7 @@ export function handleTransfer(event: TransferEvent): void { token.tokenURI = '/' + event.params.tokenId.toString() + '.json' const tokenIpfsHash = ipfshash + token.tokenURI - //This creates a path to the metadata for a single Crypto coven NFT. It concats the directory with "/" + filename + ".json" + //Bu, tek bir Crypto coven NFT için üst verilere giden bir yol oluşturur. Dizini "/" + dosya adı + ".json" ile birleştirir. token.ipfsURI = tokenIpfsHash @@ -1129,50 +1187,50 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +Bu, Graph Düğümü'nün yapılandırılmış IPFS veya Arweave uç noktasını sorgulayacak yeni bir veri kaynağı dosyası oluşturacak ve bulunamazsa yeniden deneyecek. Dosya bulunduğunda, dosya veri kaynağı işleyicisi çalıştırılacaktır. -This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. +Bu örnek, ana `Token` varlığı ile sonuç `TokenMetadata` varlığı arasındaki arama olarak CID'i kullanmaktadır. -> Previously, this is the point at which a subgraph developer would have called `ipfs.cat(CID)` to fetch the file +> Bu, daha önce bir subgraph geliştiricisi'nin `ipfs.cat(CID)` çağrısını yaparak dosyayı aldığı noktadır -Congratulations, you are using file data sources! +Tebrikler, dosya veri kaynaklarını kullanıyorsunuz! -#### Deploying your subgraphs +#### Subgraph'ınızı dağıtma -You can now `build` and `deploy` your subgraph to any Graph Node >=v0.30.0-rc.0. +Artık subgraph'ınızı oluşturabilir(`build`) ve herhangi bir Graph Düğümüne >=v0.30.0-rc.0 dağıtabilirsiniz(`deploy`). -#### Limitations +#### Sınırlamalar -File data source handlers and entities are isolated from other subgraph entities, ensuring that they are deterministic when executed, and ensuring no contamination of chain-based data sources. To be specific: +Dosya veri kaynağı işleyicileri ve varlıkları yürütüldüklerinde belirleyici olmaları ve zincir tabanlı veri kaynaklarının bozulmasını önlemeleri için, diğer subgraph varlıklarından izole edilir,. Açıkça şunlardır: -- Entities created by File Data Sources are immutable, and cannot be updated -- File Data Source handlers cannot access entities from other file data sources -- Entities associated with File Data Sources cannot be accessed by chain-based handlers +- Dosya Veri Kaynakları tarafından oluşturulan varlıklar değiştirilemez ve güncellenemez +- Dosya Veri Kaynağı işleyicileri, diğer dosya veri kaynaklarından varlıklara erişemez +- Dosya Veri Kaynaklarıyla ilişkili varlıklara zincir tabanlı işleyicilerden erişilemez -> While this constraint should not be problematic for most use-cases, it may introduce complexity for some. Please get in touch via Discord if you are having issues modelling your file-based data in a subgraph! +> Bu kısıtlama çoğu kullanım durumu için sorun oluşturmamalıdır, ancak bazı durumlarda karmaşıklıklığa sebep olabilir. Dosya tabanlı verilerinizi bir subgraph'ta modellemekte zorluk yaşarsanız, lütfen Discord üzerinden bizimle iletişime geçin! -Additionally, it is not possible to create data sources from a file data source, be it an onchain data source or another file data source. This restriction may be lifted in the future. 
+Ek olarak, zincir üstü bir veri kaynağı veya başka bir dosya veri kaynağı olsun, bir dosya veri kaynağından veri kaynakları oluşturmak mümkün değildir. Bu kısıtlama gelecekte kaldırılabilir. -#### Best practices +#### En iyi uygulamalar -If you are linking NFT metadata to corresponding tokens, use the metadata's IPFS hash to reference a Metadata entity from the Token entity. Save the Metadata entity using the IPFS hash as an ID. +NFT meta verilerini ilgili tokenleri bağlarken, Üst veri varlığına Token varlığından başvurmak için üst verinin IPFS hash değerini kullanın. Üst veri varlığını IPFS hash değerini bir kimlik olarak kullanarak kaydedin. -You can use [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context) when creating File Data Sources to pass extra information which will be available to the File Data Source handler. +Dosya Veri Kaynakları oluştururken, File Data Source işleyicisine kullanılabilir olan ekstra bilgileri geçmek için [DataSource context](https://thegraph.com/docs/en/developing/assemblyscript-api/#entity-and-data-source-context)'i kullanabilirsiniz. -If you have entities which are refreshed multiple times, create unique file-based entities using the IPFS hash & the entity ID, and reference them using a derived field in the chain-based entity. +Birden çok kez yenilenen varlıklarınız varsa, IPFS hash değeri & varlık kimliğini kullanarak benzersiz dosya tabanlı varlıklar oluşturun ve bunları zincir tabanlı bir varlıkta türetilmiş alanda referans gösterin. -> We are working to improve the above recommendation, so queries only return the "most recent" version +> Yukarıdaki öneriyi geliştirmeye çalışıyoruz, bu nedenle sorgular yalnızca "en son" sürümü döndürür -#### Known issues +#### Bilinen Sorunlar -File data sources currently require ABIs, even though ABIs are not used ([issue](https://github.com/graphprotocol/graph-cli/issues/961)). Workaround is to add any ABI. +Dosya veri kaynakları şu anda ABI'leri gerektirir, ancak ABI'ler kullanılmaz ([github issue](https://github.com/graphprotocol/graph-cli/issues/961)). Geçici çözüm, herhangi bir ABI eklemektir. -Handlers for File Data Sources cannot be in files which import `eth_call` contract bindings, failing with "unknown import: `ethereum::ethereum.call` has not been defined" ([issue](https://github.com/graphprotocol/graph-cli/issues/4309)). Workaround is to create file data source handlers in a dedicated file. +Dosya Veri Kaynakları için işleyiciler, "unknown import: `ethereum::ethereum.call` has not been defined" ([github issue](https://github.com/graphprotocol/graph-cli/issues/4309)) ile başarısız olan `eth_call` sözleşme bağlamlarını içe aktaran dosyalarda olamaz. Geçici çözüm yolu, dosya veri kaynağı işleyicilerini ayrılmış bir dosyada oluşturmaktır. 
-#### Examples +#### Örnekler -[Crypto Coven Subgraph migration](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) +[Crypto Coven Subgraph taşınması](https://github.com/azf20/cryptocoven-api/tree/file-data-sources-refactor) -#### References +#### Referanslar -[GIP File Data Sources](https://forum.thegraph.com/t/gip-file-data-sources/2721) +[GIP Dosyası Veri Kaynakları](https://forum.thegraph.com/t/gip-file-data-sources/2721) diff --git a/website/pages/tr/developing/developer-faqs.mdx b/website/pages/tr/developing/developer-faqs.mdx index 0b925a79dce2..be1af1d3fb83 100644 --- a/website/pages/tr/developing/developer-faqs.mdx +++ b/website/pages/tr/developing/developer-faqs.mdx @@ -1,74 +1,74 @@ --- -title: Developer FAQs +title: Geliştirici SSS --- -## 1. What is a subgraph? +## 1. Subgraph nedir? -A subgraph is a custom API built on blockchain data. Subgraphs are queried using the GraphQL query language and are deployed to a Graph Node using the Graph CLI. Once deployed and published to The Graph's decentralized network, Indexers process subgraphs and make them available to be queried by subgraph consumers. +Bir subgraph, blockchain verileri üzerine inşa edilmiş özel bir API'dır. Subgraph'ler, GraphQL sorgulama dili kullanılarak sorgulanır ve Graph CLI kullanılarak bir Graph node'una deploy edilir. İndeksleyiciler, Graph'in merkeziyetsiz ağına deploy edilip yayınlandıktan sonra subgraph'leri işler ve subgraph tüketicileri tarafından sorgulanmak üzere kullanılabilir hale getirir. -## 2. Can I delete my subgraph? +## 2. Subgraph'ımı silebilir miyim? -It is not possible to delete subgraphs once they are created. +Subgraph'ler oluşturulduktan sonra silinemez. -## 3. Can I change my subgraph name? +## 3. Subgraph ismimi değiştirebilir miyim? -No. Once a subgraph is created, the name cannot be changed. Make sure to think of this carefully before you create your subgraph so it is easily searchable and identifiable by other dapps. +Hayır. Bir subgraph oluşturulduktan sonra adı değiştirilemez. Subgraph'inizi inşa etmeden önce bunu dikkatlice düşündüğünüzden emin olun, böylece diğer merkeziyetsiz uygulamalar tarafından kolayca aranabilir ve tanımlanabilir. -## 4. Can I change the GitHub account associated with my subgraph? +## 4. Subgraph'ımla ilişkili GitHub hesabını değiştirebilir miyim? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Make sure to think of this carefully before you create your subgraph. +Hayır. Bir subgraph oluşturulduktan sonra ilişkili GitHub hesabı değiştirilemez. Subgraph'inizi oluşturmadan önce bunu dikkatlice düşündüğünüzden emin olun. -## 5. Am I still able to create a subgraph if my smart contracts don't have events? +## 5. Akıllı sözleşmelerimin olayları yoksa yine de bir subgraph oluşturabilir miyim? -It is highly recommended that you structure your smart contracts to have events associated with data you are interested in querying. Event handlers in the subgraph are triggered by contract events and are by far the fastest way to retrieve useful data. +Akıllı sözleşmelerinizi, sorgulamak istediğiniz verilerle ilişkili olaylara sahip olacak şekilde yapılandırmanız önemle tavsiye edilir. Subgraph'teki etkinlik işleyicileri, sözleşme etkinlikleri tarafından tetiklenir ve faydalı verileri almanın açık ara en hızlı yoludur. -If the contracts you are working with do not contain events, your subgraph can use call and block handlers to trigger indexing. 
Although this is not recommended, as performance will be significantly slower. +Çalıştığınız sözleşmeler olay içermiyorsa, subgraph'iniz indekslemeyi tetiklemek için arama ve engelleme işleyicilerini kullanabilir. Performans önemli ölçüde daha yavaş olacağından bu önerilmez. -## 6. Is it possible to deploy one subgraph with the same name for multiple networks? +## 6. Birden fazla ağ için aynı isme sahip bir subgraph'ı dağıtmak mümkün mü? -You will need separate names for multiple networks. While you can't have different subgraphs under the same name, there are convenient ways of having a single codebase for multiple networks. Find more on this in our documentation: [Redeploying a Subgraph](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) +Birden çok ağ için ayrı adlara ihtiyacınız olacak. Aynı ad altında farklı subgraph'lere sahip olamasanız da, birden çok ağ için tek bir kod tabanına sahip olmanın uygun yolları vardır. Bununla ilgili daha fazla bilgiyi belgelerimizde bulabilirsiniz: [Bir Subgraph'i Yeniden Deploy Etme](/deploying/deploying-a-subgraph-to-hosted#redeploying-a-subgraph) -## 7. How are templates different from data sources? +## 7. Şablonların veri kaynaklarından farkı nedir? -Templates allow you to create data sources on the fly, while your subgraph is indexing. It might be the case that your contract will spawn new contracts as people interact with it, and since you know the shape of those contracts (ABI, events, etc) upfront you can define how you want to index them in a template and when they are spawned your subgraph will create a dynamic data source by supplying the contract address. +Şablonlar, subgraph'inizin indekslenirken anında veri kaynakları oluşturmanıza olanak tanır. İnsanlar onunla etkileşime girdikçe sözleşmenizin yeni sözleşmeler oluşturması söz konusu olabilir ve bu sözleşmelerin şeklini (ABI, etkinlikler, vb.) önceden bildiğiniz için, bunları bir şablonda nasıl dizine eklemek istediğinizi tanımlayabilirsiniz. Ortaya çıktığında, subgraph'iniz sözleşme adresini sağlayarak dinamik bir veri kaynağı oluşturacaktır. -Check out the "Instantiating a data source template" section on: [Data Source Templates](/developing/creating-a-subgraph#data-source-templates). +Şu konudaki "Veri kaynağı şablonunu başlatma" bölümüne göz atın: [Veri Kaynağı Şablonları](/developing/creating-a-subgraph#data-source-templates). -## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? +## 8. Yerel dağıtımlarım için graph-node'un en son sürümünü kullandığımdan nasıl emin olabilirim? -You can run the following command: +Aşağıdaki komutu çalıştırabilirsiniz: ```sh docker pull graphprotocol/graph-node:latest ``` -**NOTE:** docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so it is important to do this to make sure you are up to date with the latest version of graph-node. +**NOT:** docker / docker-compose her zaman ilk çalıştırdığınızda çizilen graph node'u sürümünü kullanır, bu nedenle graph node'unun en son sürümüyle güncel durumda olduğunuzdan emin olmak için bunu yapmanız önemlidir. -## 9. How do I call a contract function or access a public state variable from my subgraph mappings? +## 9. Subgraph eşleştirmelerimden bir sözleşme fonksiyonunu nasıl çağırabilirim veya genel bir durum değişkenine nasıl erişebilirim? -Take a look at `Access to smart contract` state inside the section [AssemblyScript API](/developing/assemblyscript-api). 
+[AssemblyScript API](/developing/assemblyscript-api) bölümünün içindeki `Access to smart contract` durumuna bir göz atın. -## 10. Is it possible to set up a subgraph using `graph init` from `graph-cli` with two contracts? Or should I manually add another datasource in `subgraph.yaml` after running `graph init`? +## 10. İki sözleşme ile `graph-cli`den `graph init` kullanarak bir subgraph oluşturmak mümkün mü? Yoksa `graph init`'i çalıştırdıktan sonra `subgraph.yaml` dosyasına manuel olarak başka bir veri kaynağı mı eklemeliyim? -Unfortunately, this is currently not possible. `graph init` is intended as a basic starting point, from which you can then add more data sources manually. +Ne yazık ki, bu şu anda mümkün değil. `graph init`, daha sonra manuel olarak daha fazla veri kaynağı ekleyebileceğiniz temel bir başlangıç noktası olarak tasarlanmıştır. -## 11. I want to contribute or add a GitHub issue. Where can I find the open source repositories? +## 11. Katkıda bulunmak veya bir GitHub sorunu eklemek istiyorum. Açık kaynak depolarını nerede bulabilirim? - [graph-node](https://github.com/graphprotocol/graph-node) - [graph-cli](https://github.com/graphprotocol/graph-cli) - [graph-ts](https://github.com/graphprotocol/graph-ts) -## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? +## 12. Olayları işlerken bir varlık için "otomatik oluşturulan" kimlikler oluşturmanın önerilen yolu nedir? -If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. +Olay sırasında yalnızca bir varlık oluşturuluyorsa ve daha iyi bir seçenek yoksa, işlem hash'ı + günlük dizini (log index) benzersiz olacaktır. Bunları Bytes'a dönüştürüp `crypto.keccak256` üzerinden geçirerek gizleyebilirsiniz, ancak bu onları daha benzersiz yapmaz. -## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? +## 13. Birden fazla sözleşme dinlenirken, olayları dinlemek için sözleşme sırasını seçmek mümkün mü? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Bir subgraph içinde, birden çok sözleşmede olup olmadığına bakılmaksızın olaylar her zaman bloklarda göründükleri sırayla işlenir. -## 14. Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? +## 14. Olay işleyicileri içinden ağlar (mainnet, Goerli, local) arasında ayrım yapmak mümkün mü? -Yes. You can do this by importing `graph-ts` as per the example below: +Evet. Bunu, aşağıdaki örneğe göre `graph-ts`'i içe aktararak yapabilirsiniz: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -77,66 +77,62 @@ dataSource.network() dataSource.address() ``` -## 15. Do you support block and call handlers on Goerli? +## 15. Goerli'de blok ve çağrı işleyicilerini destekliyor musunuz? -Yes. Goerli supports block handlers, call handlers and event handlers. It should be noted that event handlers are far more performant than the other two handlers, and they are supported on every EVM-compatible network. +Evet. Goerli, blok işleyicileri, çağrı işleyicileri ve olay işleyicileri destekler.
Olay işleyicilerin diğer iki işleyiciden çok daha yüksek performans gösterdiğine ve EVM uyumlu her ağda desteklendiğine dikkat edilmelidir. -## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? +## 16. Subgraph eşleştirmelerime ethers.js veya diğer JS kütüphanelerini aktarabilir miyim? -Not currently, as mappings are written in AssemblyScript. One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. +Eşlemeler AssemblyScript'te yazıldığı için şu anda değildir. Buna olası bir alternatif çözüm, ham verileri varlıklarda depolamak ve istemcide JS kitaplıkları gerektiren mantık gerçekleştirmektir. -## 17. Is it possible to specify what block to start indexing on? +## 17. İndekslemeye hangi bloktan başlanacağını belirtmek mümkün mü? -Yes. `dataSources.source.startBlock` in the `subgraph.yaml` file specifies the number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created: Start blocks +Evet. `dataSources.source.startBlock` in the `subgraph.yaml` dosyası, veri kaynağının indekslemeye başladığı bloğun numarasını belirtir. Çoğu durumda, sözleşmenin oluşturulduğu bloğu kullanmanızı öneririz: Başlangıç blokları -## 18. Are there some tips to increase the performance of indexing? My subgraph is taking a very long time to sync +## 18. İndeksleme performansını artırmak için bazı ipuçları var mı? Subgraph'ımın senkronize edilmesi çok uzun zaman alıyor -Yes, you should take a look at the optional start block feature to start indexing from the block that the contract was deployed: [Start blocks](/developing/creating-a-subgraph#start-blocks) +Evet, sözleşmenin deploy edildiği bloktan indekslemeye başlamak için isteğe bağlı blok başlatma özelliğine bir göz atmalısınız: [Başlangıç blokları](/developing/creating-a-subgraph#start-blocks) -## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? +## 19. Subgraph üzerinde doğrudan sorgulama yaparak indekslediği en son blok numarasını belirlemenin bir yol var mı? -Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: +Evet! Aşağıdaki komutu, "organization/subgraphName", altındaki organizasyon ve subgraph'inizin adıyla değiştirerek deneyin: ```sh curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql ``` -## 20. What networks are supported by The Graph? +## 20. Graph hangi ağları destekliyor? -You can find the list of the supported networks [here](/developing/supported-networks). +Desteklenen ağların listesini [burada](/developing/supported-networks) bulabilirsiniz. -## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? +## 21. Bir subgraph'ı yeniden dağıtmadan başka bir hesaba veya uç noktaya çoğaltmak mümkün mü? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +Subgraph'i yeniden deploy etmeniz gerekir, ancak subgraph kimliği (IPFS hash) değişmezse baştan eşitlenmesi gerekmez. -## 22. Is this possible to use Apollo Federation on top of graph-node? +## 22. Apollo Federation'ı graph-node üzerinde kullanmak mümkün mü? 
-Federation is not supported yet, although we do want to support it in the future. At the moment, something you can do is use schema stitching, either on the client or via a proxy service. +Gelecekte desteklemek istememize rağmen federasyon henüz desteklenmiyor. Şu anda yapabileceğiniz bir şey, istemcide veya bir proxy hizmeti aracılığıyla şema birleştirme kullanmaktır. -## 23. Is there a limit to how many objects The Graph can return per query? +## 23. Graph'ın sorgu başına kaç nesne döndürebileceğine dair bir sınır var mı? -By default, query responses are limited to 100 items per collection. If you want to receive more, you can go up to 1000 items per collection and beyond that, you can paginate with: +Varsayılan olarak, sorgu yanıtları koleksiyon başına 100 öğeyle sınırlıdır. Daha fazlasını almak istiyorsanız koleksiyon başına 1000 öğeye kadar çıkabilirsiniz ve bunun ötesinde aşağıdakilerle sayfalandırabilirsiniz: ```graphql someCollection(first: 1000, skip: ) { ... } ``` -## 24. If my dapp frontend uses The Graph for querying, do I need to write my query key into the frontend directly? What if we pay query fees for users – will malicious users cause our query fees to be very high? +## 24. Dapp önyüzüm sorgulama için Graph'ı kullanıyorsa, sorgu anahtarını önyüze doğrudan yazmam gerekiyor mu? Kullanıcılar için sorgu ücreti ödersek, kötü niyetli kullanıcılar sorgu ücretlerimizin çok yüksek olmasına neden olabilir mi? -Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. +Şu anda, bir merkeziyetsiz uygulama için önerilen yaklaşım, anahtarı ön uca eklemek ve son kullanıcılara göstermektir. Bununla birlikte, bu anahtarı _yourdapp.io_ gibi bir ana bilgisayar adı ve belirli bir subgraph ile sınırlayabilirsiniz. Ağ geçidi şu anda Edge & Node tarafından çalıştırılıyor. Bir ağ geçidinin sorumluluğunun bir kısmı, kötü amaçlı davranışları izlemek ve kötü niyetli istemcilerden gelen trafiği engellemektir. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? -Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). +Sizin veya başkalarının barındırılan hizmete dağıttığı subgraphları bulmak için barındırılan hizmete gidin. [Burada](https://thegraph.com/hosted-service) bulabilirsiniz. -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? -The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. +Graph, barındırılan hizmet için asla ücret talep etmeyecektir. Graph merkeziyetsiz bir protokoldür ve merkezi bir hizmet için ücret almak Graph'in değerleriyle uyuşmamaktadır. Barındırılan hizmet, merkeziyetsiz ağa ulaşmaya yardımcı olmak için her zaman geçici bir adım olmuştur.
Geliştiriciler, merkeziyetsiz ağa rahatça yükseltebilmek için yeterli süreye sahip olacaklardır. -## 27. When will the Hosted Service be shut down? +## 27. How do I update a subgraph on mainnet? -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? - -If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. +Bir subgraph geliştiricisiyseniz, CLI'yi kullanarak subgraph'ınızın yeni bir sürümünü Subgraph Stüdyo'ya dağıtabilirsiniz. Bu noktada gizli olarak kalacaktır, ancak isterseniz, merkeziyetsiz Graph Gezgini'nde yayınlayabilirsiniz. Bu, subgraph'ınızın Küratörlerin üzerinde sinyal iletmeye başlayabileceği yeni bir sürümünü oluşturacaktır. diff --git a/website/pages/tr/developing/graph-ts/api.mdx b/website/pages/tr/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..5107de4579f6 --- /dev/null +++ b/website/pages/tr/developing/graph-ts/api.mdx @@ -0,0 +1,855 @@ +--- +title: AssemblyScript API'si +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +Bu sayfa subgraph eşleştirmelerini yazarken bullanılabilen yerleşik API'leri belgelemektedir. Hazır olarak iki çeşit API mevcuttur: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Referansı + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Ethereum, JSON, GraphQL ve AssemblyScript gibi farklı tip sistemler arası çeviri yapmak için düşük seviyeli yazılımlar. + +### Sürümler + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Sürüm | Sürüm Notları | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Dahili Türler + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Baytlar + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Adres(Address) + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Unsurların Oluşturulması + +Aşağıdaki, Ethereum olaylarından varlıklar oluşturmak için yaygın bir modeldir. + +```typescript +// ERC20 ABI'dan oluşturulan Transfer olay sınıfını içe aktarın +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// GraphQL şemasından oluşturulan Transfer varlık türünü içe aktarın +import { Transfer } from '../generated/schema' + +// Transfer olayı işleyicisi +export function handleTransfer(event: TransferEvent): void { + // İşlem hash'ını olay kimliği olarak kullanarak bir Transfer varlığı oluşturun + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Olay parametrelerini kullanarak varlığın özelliklerini ayarlayın + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Varlığı depoya kaydedin + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Çakışmaları önlemek için her varlık benzersiz bir kimliğe sahip olmalıdır. 
Genellikle olay parametreleri, kullanılabilecek benzersiz bir tanımlayıcı içerir. Not: Kimlik olarak işlem hash'ını kullanmak aynı işlemdeki başka hiçbir olayın bu hash'ı kullanarak kimlik olarak varlık oluşturmayacağını varsayar. + +#### Depodan varlık yükleme + +Bir varlık mevcutsa aiağıdaki kod kullanılarak depodan yüklenebilir: + +```typescript +let id = event.transaction.hash // veya kimlik(ID) nasıl oluşturulmuşsa +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Transfer varlığı önceki gibi kullanılır +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Bir blok içinde oluşturulan varlıkları arama + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +Store API, mevcut blokta oluşturulan veya güncellenen varlıkların alınmasını kolaylaştırır. Bunun için tipik bir durum, bir işleyicinin zincir üzerindeki bir etkinlikten bir İşlem oluşturması ve daha sonraki bir işleyicinin varsa bu işleme erişmek istemesidir. İşlemin mevcut olmadığı durumda, subgraph sadece varlığın mevcut olmadığını öğrenmek için veritabanına gitmek zorunda kalacaktır; eğer subgraph yazarı varlığın aynı blokta yaratılmış olması gerektiğini zaten biliyorsa, loadInBlock kullanmak bu veritabanı gidiş gelişini önler. Bazı subgraphlar için, bu kaçırılan aramalar indeksleme süresine önemli ölçüde katkıda bulunabilir. + +```typescript +let id = event.transaction.hash // veya ID nasıl oluşturulurmuşsa +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Transfer varlığını daha önce olduğu gibi kullanın +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Türetilmiş varlıkları arama + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +Bu, türetilmiş varlık alanlarının bir olay işleyicisi içinden yüklenmesini sağlar. Örneğin, aşağıdaki şema göz önüne alındığında: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Belirli bir sahiple ilişkili Token unsurlarını yükleyin +let tokens = holder.tokens.load() +``` + +#### Mevcut varlıkları güncelleme + +Mevcut bir varlığı güncellemenin iki yolu vardır: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Oluşturulan özellik ayarlayıcılar sayesinde çoğu durumda özellikerin değiştirilmesi kolaydır: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +Ayrıca aşağıdaki iki talimattan biriyle özellikleri kaldırmakta mümkündür: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// Bu işe yaramaz +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// Bu çalışır +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Depodan varlık kaldırma + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +Ethereum API'si, akıllı sözleşmelere, genel durum değişkenlerine, sözleşme fonksiyonlarına, olaylara, işlemlere, bloklara ve Ethereum verilerinin kodlama/çözme işlemlerine erişim sağlar. + +#### Ethereum Türleri İçin Destek + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +Aşağıdaki örnek bunu açıklar. Aşağıdaki gibi bir subgraph şeması verildiğinde + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Olaylar ve Blok/İşlem Verileri + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Akıllı Sözleşme Durumuna Erişim + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +Yaygın bir model, bir olayın kaynaklandığı sözleşmeye erişmektir. Bu, aşağıdaki kodla elde edilir: + +```typescript +// Oluşturulan sözleşme sınıfı ve Transfer olayı sınıfını içe aktarın +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Oluşturulan varlık sınıfını içe aktarın +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Sözleşmeyi olayı yayınlayan adresle bağlayın + let contract = ERC20Contract.bind(event.address) + + // Durum değişkenlerine ve işlevlere erişmek için çağrı yapın + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Subgraph parçası olan diğer tüm sözleşmelerde oluşturulan koddan içe aktarılabilir ve geçerli bir adrese bağlanabilir. + +#### Geri Dönen Çağrıları Yönetme + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Bir Geth veya Infura istemcisine bağlı bir Graph düğümünün tüm geri dönüşleri algılamayabileceğini unutmayın, bu durumda Parity istemcisine bağlı bir Graph düğümü kullanmanızı öneririz. 
+ +#### ABI Kodlama/Çözme + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +Daha fazla bilgi için: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Bir veya daha fazla değerin loglanması + +##### Tek bir değerin loglanması + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Görüntüler : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Mevcut bir diziden tek bir girişi loglama + +Aşağıdaki örnekte, bağımsız değişken dizisi üç değer içermesine rağmen dizinin yalnızca ilk değeri loglanır. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Görüntüler: "My value is: A" ('log.info'ya üç değer iletilmiş olsa da) + log.info('My value is: {}', myArray) +} +``` + +#### Mevcut bir diziden birden çok girişi kaydetme + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Görüntüler: "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Mevcut bir diziden belirli bir girişi loglama + +Dizide belirli bir değeri görüntülemek için dizinlenmiş değer bulunmalıdır. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Görüntüler : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Olay bilgilerinin loglanması + +Aşağıdaki örnek, bir olaydan blok numarasını, blok hash'ını ve işlem hash'ını loglar: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +IPFS hash'ı veya yolu verildiğinde, bir dosyayı IPFS'den okuma şu şekilde yapılır: + +```typescript +// Bunu eşleştirmedeki bir olay işleyicinin içine koyun +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` gibi, +// bu dizinlerdeki dosyaları içeren yollar da desteklenir +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // JSON değerleriyle işlem yapmayla ilgili ayrıntılar için + // JSONValue belgelerine bakın + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Geri çağırmalar da varlık oluşturabilir + let newItem = new Item(id) + newItem.title = title.toString() + newItem.parent = userData.toString() // Üst öğeyi "parentId" olarak ayarla + newItem.save() +} + +// Bunu eşleştirmedeki bir olay işleyicisinin içine koyun +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatif olarak `ipfs.mapJSON` kullanın +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them.
The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Kripto(Crypto) API'si + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Tip Dönüşümleri Referansı + +| Source(s) | Destination | Conversion function | +| ----------------- | ----------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | Dizgi (onaltılık) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | 
BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| Dizgi (onaltılık) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Veri Kaynağı Meta Verileri + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Varlık ve VeriKaynağıBağlamı + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### Manifest'teki DataSourceContext + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +Bu bağlama daha sonra subgraph eşleştirme dosyalarınızdan erişilebilir ve böylece daha dinamik ve yapılandırılabilir subgraphlar elde edebilirsiniz. diff --git a/website/pages/tr/developing/graph-ts/common-issues.mdx b/website/pages/tr/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..892ab40dce7f --- /dev/null +++ b/website/pages/tr/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Genel AssemblyScript Sorunları +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. 
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/tr/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/tr/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..5c828dc8abec 100644 --- a/website/pages/tr/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/tr/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: Substreams destekli subgraphlar SSS --- -## What are Substreams? +## Substreams nedir? -Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. +[StreamingFast](https://www.streamingfast.io/) tarafından geliştirilen Substreams, zengin blok zinciri veri akışlarını tüketebilen son derece güçlü bir işleme motorudur. Substreams, blok zinciri verilerini son kullanıcı uygulamaları tarafından hızlı ve sorunsuz bir şekilde kullanılabilecek biçimde rafine etmenize ve şekillendirmenize olanak tanır. Daha spesifik olarak Substreams, blok zincirinden bağımsız (agnostik), paralelleştirilmiş ve akış öncelikli bir motor olup, blok zinciri verileri için bir dönüştürme katmanı görevi görür. [Firehose](https://firehose.streamingfast.io/) tarafından desteklenmektedir ve Rust modülleri yazmanıza, topluluk modülleri üzerine inşa etmenize, son derece yüksek performanslı indeksleme sağlamanıza ve verilerinizi herhangi bir yere göndermenize ([sink](https://substreams.streamingfast.io/developers-guide/sink-targets)) imkan tanır. -Go to the [Substreams Documentation](/substreams) to learn more about Substreams. +Substreams hakkında daha fazla bilgi için [Substreams Dökümantasyonuna](/substreams) gidin. -## What are Substreams-powered subgraphs? +## Substreams destekli subgraphlar nelerdir? -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +[Substreams destekli subgraphlar](/cookbook/substreams-powered-subgraphs/), Substreams'in gücünü subgraphlar'ın sorgulanabilirliği ile birleştirir. Substreams ile desteklenen bir subgraph yayınladığınızda, Substreams dönüşümleri tarafından üretilen veriler, subgraph varlıkları ile uyumlu olan [varlık değişikliklerini çıktı](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs) olarak verebilir.
-If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +Eğer subgraph geliştirme konusun aşina iseniz, aklınızda bulundurun ki, Substreams destekli subgraphlar AssemblyScript dönüştürme katmanı tarafından üretilmiş gibi sorgulanabilir ve dinamik ve esnek bir GraphQL API sağlayarak Subgraph avantajlarından yararlanabilir. -## How are Substreams-powered subgraphs different from subgraphs? +## Substreams destekli subgraphlar'ın normal subgraphlar'dan farkı nedir? -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. +Subgraphlar, zincir üstü olayları ve bu olayların nasıl AssemblyScript işleyicileri aracılığıyla dönüştürüleceğini belirleyen veri kaynaklarından oluşur. Bu olaylar, olayların zincir üstünde meydana geldikleri sıraya göre işlenir. -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. +Buna karşılık, Substreams destekli subgraphlar, Substreams paketini referans alan tek bir veri kaynağına sahiptir ve bu veri kaynağı Graph Düğümü tarafından işlenir. Substreams, geleneksel subgraphlara kıyasla daha detaylı zincir üstü verilere erişebilir ve ayrıca büyük ölçekli paralel işlemden yararlanarak çok daha hızlı işleme süreleri elde edebilir. -## What are the benefits of using Substreams-powered subgraphs? +## Substreams destekli subgraphlar kullanmanın avantajları nelerdir? -Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. +Substreams destekli subgraphlar, Substreams'in tüm avantajlarını subgraphlar'ın sorgulanabilirliği ile birleştirir. Bunlar, Graph'a daha fazla birleştirilebilirlik ve yüksek performanslı indeksleme sağlar. Aynı zamanda yeni veri kullanım durumlarına olanak sağlar. Örneğin, Substreams destekli subgraph'ınızı oluşturduktan sonra, PostgreSQL, MongoDB ve Kafka gibi farklı [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) çıktı almak için [Substreams modüllerinizi](https://substreams.streamingfast.io/developers-guide/modules) yeniden kullanabilirsiniz. -## What are the benefits of Substreams? +## Substreams'in faydaları nelerdir? -There are many benefits to using Substreams, including: +Substreams kullanmanın birçok faydası vardır, bunlar şunlardır: -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. 
+- Birleştirilebilir: Substreams modüllerini LEGO blokları gibi birleştirebilir ve topluluk modüllerine dayanarak açık verileri daha da ayrıntılayabilirsiniz. -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- Yüksek performanslı indeksleme: Büyük ölçekli paralel işlemler sayesinde sıradan işlemlere göre onlarca kat daha hızlı indeksleme sağlar (BigQuery gibi). -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- Her yere veri gönderme: Verilerinizi PostgreSQL, MongoDB, Kafka, subgraphlar, düz dosyalar, Google Sheets gibi herhangi bir yere gönderebilirsiniz. -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- Programlanabilir: Kod kullanarak çıkarma işlemlerini özelleştirmek, dönüşüm zamanında toplamalar yapmak ve çıktınızı birden çok hedef için modelleyebilirsiniz. -- Access to additional data which is not available as part of the JSON RPC +- JSON RPC'nin parçası olmayan ek verilere erişim sağlar -- All the benefits of the Firehose. +- Firehose'un tüm faydalarından yararlanır. -## What is the Firehose? +## Firehose nedir? -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +[StreamingFast](https://www.streamingfast.io/) tarafından geliştirilen Firehose, daha önce görülmemiş hızlarda blok zincirinin baştan sona, tam geçmişini işlemek için tasarlanmış bir blok zinciri veri çıkarma katmanıdır. Dosya tabanlı ve akışa odaklı bir yaklaşım sunarak, StreamingFast'in açık kaynaklı teknolojilerinin temel bileşenlerinden biridir ve Substreamler'in temelini oluşturur. -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Firehose hakkında daha fazla bilgi için [documentation](https://firehose.streamingfast.io/) gidin. -## What are the benefits of the Firehose? +## Firehose'un faydaları nelerdir? -There are many benefits to using Firehose, including: +Firehose kullanmanın birçok faydası vardır, bunlar şunlardır: -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. +- En düşük gecikme ve sorgulama yok: Akışa odaklı bir şekilde, Firehose düğümleri blok verilerini ilk olarak dışarıya göndermek üzere tasarlanmıştır. -- Prevents downtimes: Designed from the ground up for High Availability. +- Kesintisiz çalışma: Yüksek Erişilebilirlik için baştan sona tasarlanmıştır. -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. +- Hiçbir şeyi kaçırmaz: Firehose akış imleci, fork durumlarını ele almak ve herhangi bir durumda kaldığınız yerden devam etmek için tasarlanmıştır. -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- En zengin veri modeli: Bakiye değişikliklerini, tam çağrı ağacını, dahili işlemleri, kayıtları, depolama değişikliklerini, gaz maliyetlerini ve daha fazlasını içeren en iyi veri modeli. 
-- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- Düz dosyalardan yararlanma: Blok zinciri verileri düz dosyalara çıkarılır, en ucuz ve en optimize hesaplama kaynağı kullanılır. -## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## Geliştiriciler, Substreams destekli subgraphlar ve Substreams hakkında daha fazla bilgiye nereden erişebilir? -The [Substreams documentation](/substreams) will teach you how to build Substreams modules. +[Substreams dökümantasyonu](/substreams), Substreams modüllerinin nasıl oluşturulacağını öğretecektir. -The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +[Substreams destekli subgraphlar belgeleri](/cookbook/substreams-powered-subgraphs/), onları Graph üzerinde dağıtmak için nasıl paketleyeceğinizi gösterecektir. -## What is the role of Rust modules in Substreams? +## Rust modüllerinin Substreams içindeki rolü nedir? -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +Rust modülleri, subgraphlardaki AssemblyScript eşleştiricilerinin karşılığıdır. Benzer şekilde WASM'ye derlenirler, ancak programlama modelleri paralel yürütmeye olanak sağlar. Rust modülleri, ham blok zinciri verilerine uygulamak istediğiniz dönüşümleri ve birleştirmeleri tanımlar. -See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. +Detaylar için [modüller dökümantasyonuna](https://substreams.streamingfast.io/developers-guide/modules) göz atın. -## What makes Substreams composable? +## Substreams'i birleştirilebilir yapan nedir? -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +Substreams kullanırken, kompozisyon dönüşüm katmanında gerçekleşir ve önbelleğe alınmış modüllerin tekrar kullanılmasına olanak sağlar. -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. +Örnek olarak, Alice bir merkeziyetsiz borsa fiyat modülü oluşturabilir, Bob bunu kullanarak ilgisini çeken bazı tokenler için bir hacim toplayıcısı (aggregator) inşa edebilir ve Lisa dört bireysel merkeziyetsiz borsa fiyat modülünü bir araya getirerek bir fiyat oracle'ı oluşturabilir. Tek bir Substreams talebi, tüm bu bireylerin modüllerini bir araya getirir, birleştirir ve çok daha sofistike bir veri akışı sunar. Bu akış daha sonra bir subgraph'ı doldurmak ve tüketiciler tarafından sorgulanmak için kullanılabilir. -## How can you build and deploy a Substreams-powered Subgraph? +## Bir Substreams destekli Subgraph nasıl oluşturulur ve dağıtılır? -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/).
+Substreams destekli bir Subgraph'ı [tanımladıktan](/cookbook/substreams-powered-subgraphs/) sonra, onu [Subgraph Stüdyo](https://thegraph.com/studio/) üzerinde dağıtmak için Graph CLI'yi kullanabilirsiniz. -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## Substreams ve Substreams destekli subgraphlar ile ilgili örnekleri nerede bulabilirim? -You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. +Substreams ve Substreams destekli subgraphlar ile ilgili örnekleri bulmak için [bu Github deposunu](https://github.com/pinax-network/awesome-substreams) ziyaret edebilirsiniz. -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? +## Substreams ve Substreams destekli subgraphlar, Graph Ağı için ne anlam ifade etmektedir? -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +Bu entegrasyon, topluluk modüllerinden yararlanıp bunların üzerine inşa ederek son derece yüksek performanslı indeksleme ve daha fazla birleştirilebilirlik dahil birçok avantaj vadetmektedir. diff --git a/website/pages/tr/developing/supported-networks.json b/website/pages/tr/developing/supported-networks.json index 5e12392b8c7d..5a1ab2608072 100644 --- a/website/pages/tr/developing/supported-networks.json +++ b/website/pages/tr/developing/supported-networks.json @@ -1,9 +1,9 @@ { - "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "network": "Ağ", + "cliName": "CLI Adı", + "chainId": "Zincir Kimliği", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "Merkeziyetsiz Ağ", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/tr/developing/supported-networks.mdx b/website/pages/tr/developing/supported-networks.mdx index 58ce56345f7c..8d710a33a098 100644 --- a/website/pages/tr/developing/supported-networks.mdx +++ b/website/pages/tr/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Desteklenen Ağlar --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,16 +9,16 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli.
Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. -For a full list of which features are supported on the decentralized network, see [this page](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md). +Merkeziyetsiz ağda hangi özelliklerin desteklendiğinin tam listesi için [bu sayfaya](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) göz atın. -Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Subgraph Studio and decentralized network. +`mainnet` Ethereum'u indeksleyen substreams destekli subgraphlar, Subgraph Stüdyo ve merkeziyetsiz ağ üzerinde desteklenmektedir. ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. -Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. +Graph node'u, bir Firehose entegrasyonu yoluyla diğer protokolleri de indeksleyebilir. NEAR, Arweave ve Cosmos tabanlı ağlar için Firehose entegrasyonları oluşturulmuştur. diff --git a/website/pages/tr/developing/unit-testing-framework.mdx b/website/pages/tr/developing/unit-testing-framework.mdx index 8ffc66465e3a..b6229c498b5c 100644 --- a/website/pages/tr/developing/unit-testing-framework.mdx +++ b/website/pages/tr/developing/unit-testing-framework.mdx @@ -1,30 +1,30 @@ --- -title: Unit Testing Framework +title: Birim Testi Framework'ü --- -Matchstick is a unit testing framework, developed by [LimeChain](https://limechain.tech/), that enables subgraph developers to test their mapping logic in a sandboxed environment and deploy their subgraphs with confidence! +Matchstick, [LimeChain](https://limechain.tech/) tarafından geliştirilen, subgraph geliştiricilerinin eşleştirme mantıklarını bir korumalı alan ortamında test etmelerine ve subgraphlarını güvenle dağıtmalarına olanak tanıyan bir birim testi framework'üdür! -## Getting Started +## Buradan Başlayın -### Install dependencies +### Bağımlılıkları yükleyin -In order to use the test helper methods and run the tests, you will need to install the following dependencies: +Test yardımcı yöntemlerini kullanmak ve testleri çalıştırmak için aşağıdaki bağımlılıkları yüklemeniz gerekecektir: ```sh yarn add --dev matchstick-as ``` -❗ `graph-node` depends on PostgreSQL, so if you don't already have it, you will need to install it. We highly advise using the commands below as adding it in any other way may cause unexpected errors! +❗ `graph-node`, PostgreSQL'ye bağlıdır, bu nedenle henüz yüklemediyseniz PostgreSQL yüklemeniz gerekecektir. Aşağıdaki komutları kullanmanızı şiddetle tavsiye ediyoruz, çünkü başka bir şekilde eklemeniz beklenmedik hatalara neden olabilir! 
#### MacOS -Postgres installation command: +Postgres yükleme komutu: ```sh brew install postgresql ``` -Create a symlink to the latest libpq.5.lib _You may need to create this dir first_ `/usr/local/opt/postgresql/lib/` +En son libpq.5.lib'e bir sembolik bağ oluşturun. _Bu dizini önce oluşturmanız gerekebilir_ `/usr/local/opt/postgresql/lib/` ```sh ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/opt/postgresql/lib/libpq.5.dylib @@ -32,33 +32,33 @@ ln -sf /usr/local/opt/postgresql@14/lib/postgresql@14/libpq.5.dylib /usr/local/o #### Linux -Postgres installation command (depends on your distro): +Postgres yükleme komutu (dağıtımınıza bağlı olarak değişir): ```sh sudo apt install postgresql ``` -### WSL (Windows Subsystem for Linux) +### WSL (Linux için Windows Alt Sistemi) -You can use Matchstick on WSL both using the Docker approach and the binary approach. As WSL can be a bit tricky, here's a few tips in case you encounter issues like +Matchstick'i WSL'de hem Docker yaklaşımını hem de ikili dosya yaklaşımını kullanarak çalıştırabilirsiniz. WSL biraz alengirli olabileceğinden, aşağıdaki gibi sorunlarla karşılaşırsanız işinize yarayacak birkaç ipucu verelim ``` static BYTES = Symbol("Bytes") SyntaxError: Unexpected token = ``` -or +ya da ``` /node_modules/gluegun/build/index.js:13 throw up; ``` -Please make sure you're on a newer version of Node.js graph-cli doesn't support **v10.19.0** anymore, and that is still the default version for new Ubuntu images on WSL. For instance Matchstick is confirmed to be working on WSL with **v18.1.0**, you can switch to it either via **nvm** or if you update your global Node.js. Don't forget to delete `node_modules` and to run `npm install` again after updating you nodejs! Then, make sure you have **libpq** installed, you can do that by running +Lütfen daha yeni bir Node.js sürümünde olduğunuzdan emin olun. graph-cli artık **v10.19.0**'ı desteklemiyor ve bu sürüm, hala WSL'deki yeni Ubuntu görüntülerinin varsayılan sürümüdür. Örneğin, Matchstick'in **v18.1.0** ile WSL'de çalıştığı doğrulandı; **nvm** aracılığıyla veya global Node.js'inizi güncelleyerek bu sürüme geçebilirsiniz. Node.js'inizi güncelledikten sonra `node_modules` klasörünü silmeyi ve `npm install` komutunu tekrar çalıştırmayı unutmayın! Daha sonra, **libpq** yüklü olduğundan emin olun; bunu aşağıdaki komutu çalıştırarak yapabilirsiniz ``` sudo apt-get install libpq-dev ``` -And finally, do not use `graph test` (which uses your global installation of graph-cli and for some reason that looks like it's broken on WSL currently), instead use `yarn test` or `npm run test` (that will use the local, project-level instance of graph-cli, which works like a charm). For that you would of course need to have a `"test"` script in your `package.json` file which can be something as simple as +Son olarak, `graph test`'i kullanmayın (global graph-cli yüklemenizi kullanmaktadır ve bazı nedenlerden dolayı şu anda WSL'de bozuk gibi görünüyor), bunun yerine `yarn test` veya `npm run test` kullanın (bu, proje düzeyindeki yerel graph-cli örneğini kullanacaktır, bu da harika çalışır).
Bunun için tabiki `package.json` dosyanızda bir `"test"` script'i olması gerektiğini unutmayın, bunun gibi basit bir şey olabilir ```json { @@ -76,57 +76,57 @@ And finally, do not use `graph test` (which uses your global installation of gra } ``` -### Usage +### Kullanış -To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). +**Matchstick**'i subgraph proje'nizde kullanmak için sadece bir terminal açın, proje'nizin kök(root) klasörüne gidin ve basitçe `graph test [options] ` - komutunu çalıştırın - bu en son **Matchstick** ikili dosyasını indirir ve belirtilen testi veya test klasöründeki tüm testleri çalıştırır (verikaynağı bayrağı belirtilmezse mevcut tüm testler). -### CLI options +### CLI seçenekleri -This will run all tests in the test folder: +Bu, test klasöründeki tüm testleri çalıştıracaktır: ```sh graph test ``` -This will run a test named gravity.test.ts and/or all test inside of a folder named gravity: +Bu, gravity.test.ts adında bir testi ve/veya gravity adında bir klasördeki tüm testleri çalıştıracaktır: ```sh graph test gravity ``` -This will run only that specific test file: +Bu sadece belirtilen test dosyasını çalıştıracaktır: ```sh graph test path/to/file.test.ts ``` -**Options:** +**Seçenekler:** ```sh --c, --coverage Run the tests in coverage mode --d, --docker Run the tests in a docker container (Note: Please execute from the root folder of the subgraph) --f, --force Binary: Redownloads the binary. Docker: Redownloads the Dockerfile and rebuilds the docker image. --h, --help Show usage information --l, --logs Logs to the console information about the OS, CPU model and download url (debugging purposes) --r, --recompile Forces tests to be recompiled --v, --version Choose the version of the rust binary that you want to be downloaded/used +-c, --coverage Testleri kapsama modunda çalıştırır +-d, --docker Testleri bir docker konteynerinde çalıştırır (Not: Subgraph kök klasöründen çalıştırın) +-f, --force İkili: İkilinin yeniden indirilmesini sağlar. Docker: Dockerfile'ın yeniden indirilmesi ve docker görüntüsünün yeniden oluşturulması. +-h, --help Kullanım bilgilerini gösterir +-l, --logs İşletim sistemi, CPU modeli ve indirme URL'si hakkında konsola günlük bilgilerini yazar (hata ayıklama amaçlıdır) +-r, --recompile Testlerin yeniden derlenmesini zorlar +-v, --version İndirmek/kullanmak istediğiniz rust ikilisinin sürümünü seçmenize yarar ``` ### Docker -From `graph-cli 0.25.2`, the `graph test` command supports running `matchstick` in a docker container with the `-d` flag. The docker implementation uses [bind mount](https://docs.docker.com/storage/bind-mounts/) so it does not have to rebuild the docker image every time the `graph test -d` command is executed. Alternatively you can follow the instructions from the [matchstick](https://github.com/LimeChain/matchstick#docker-) repository to run docker manually. +`graph-cli 0.25.2`'den itibaren `graph test` `-d` bayrağı ile `matchstick`'i bir docker konteynerinde çalıştırmayı desteklemektedir. Docker uygulaması, [bind mount](https://docs.docker.com/storage/bind-mounts/) kullandığından, `graph test -d` komutu her çalıştırıldığında docker görüntüsünü yeniden oluşturmak zorunda değildir. 
Alternatif olarak, [matchstick](https://github.com/LimeChain/matchstick#docker-) deposundan docker'ı manuel olarak çalıştırmak için talimatları izleyebilirsiniz. -❗ If you have previously ran `graph test` you may encounter the following error during docker build: +❗ Daha önce `graph test` çalıştırdıysanız, docker build sırasında aşağıdaki hatayla karşılaşabilirsiniz: ```sh error from sender: failed to xattr node_modules/binary-install-raw/bin/binary-: permission denied ``` -In this case create a `.dockerignore` in the root folder and add `node_modules/binary-install-raw/bin` +Bu durumda, kök klasör içinde bir `.dockerignore` dosyası oluşturun ve `node_modules/binary-install-raw/bin`'i ekleyin -### Configuration +### Yapılandırma -Matchstick can be configured to use a custom tests, libs and manifest path via `matchstick.yaml` config file: +Matchstick, `matchstick.yaml` yapılandırma dosyası aracılığıyla özel testler, kütüphaneler ve manifest yolunu kullanacak şekilde yapılandırılabilir: ```yaml testsFolder: path/to/tests @@ -136,25 +136,25 @@ manifestPath: path/to/subgraph.yaml ### Demo subgraph -You can try out and play around with the examples from this guide by cloning the [Demo Subgraph repo](https://github.com/LimeChain/demo-subgraph) +[Demo Subgraph deposunu](https://github.com/LimeChain/demo-subgraph) klonlayarak bu kılavuzdaki örnekleri deneyebilir ve istediğiniz gibi kurcalayabilirsiniz -### Video tutorials +### Öğretici videolar -Also you can check out the video series on ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) +Ayrıca ["How to use Matchstick to write unit tests for your subgraphs"](https://www.youtube.com/playlist?list=PLTqyKgxaGF3SNakGQwczpSGVjS_xvOv3h) konulu video serisine göz atabilirsiniz -## Tests structure (>=0.5.0) +## Testlerin yapısı (>=0.5.0) -_**IMPORTANT: Requires matchstick-as >=0.5.0**_ +_**ÖNEMLİ: matchstick-as >=0.5.0 gerektirir**_ ### describe() -`describe(name: String , () => {})` - Defines a test group. +`describe(name: String , () => {})` - Bir test grubunu tanımlar. -**_Notes:_** +**_Notlar:_** -- _Describes are not mandatory. You can still use test() the old way, outside of the describe() blocks_ +- _describe() blokları zorunlu değildir. test() fonksiyonunu describe() bloklarının dışında, eski yöntemle kullanmaya devam edebilirsiniz_ -Example: +Örnek: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -167,7 +167,7 @@ describe("handleNewGravatar()", () => { }) ``` -Nested `describe()` example: +İç içe `describe()` örneği: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -192,9 +192,9 @@ describe("handleUpdatedGravatar()", () => { ### test() -`test(name: String, () =>, should_fail: bool)` - Defines a test case. You can use test() inside of describe() blocks or independently. +`test(name: String, () =>, should_fail: bool)` - Bir test durumu tanımlar. test() fonksiyonunu describe() blokları içinde veya bağımsız olarak kullanabilirsiniz. -Example: +Örnek: ```typescript import { describe, test } from "matchstick-as/assembly/index" @@ -207,7 +207,7 @@ describe("handleNewGravatar()", () => { }) ``` -or +ya da ```typescript test("handleNewGravatar() should create a new entity", () => { @@ -221,11 +221,11 @@ test("handleNewGravatar() should create a new entity", () => { ### beforeAll() -Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block.
+Dosyadaki tüm testlerden önce bir kod bloğu çalıştırır. `beforeAll`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun başında çalışır. -Examples: +Örnekler: -Code inside `beforeAll` will execute once before _all_ tests in the file. +`beforeAll` içindeki kod, dosyadaki _tüm_ testlerden önce bir kez çalıştırılacaktır. ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -252,7 +252,7 @@ describe("When entity already exists", () => { }) ``` -Code inside `beforeAll` will execute once before all tests in the first describe block +`beforeAll` içindeki kod, ilk describe bloğundaki tüm testlerden önce bir kez çalıştırılacaktır ```typescript import { describe, test, beforeAll } from "matchstick-as/assembly/index" @@ -281,11 +281,11 @@ describe("handleUpdatedGravatar()", () => { ### afterAll() -Runs a code block after all of the tests in the file. If `afterAll` is declared inside of a `describe` block, it runs at the end of that `describe` block. +Dosyadaki tüm testlerden sonra bir kod bloğu çalıştırır. `afterAll`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun sonunda çalışır. -Example: +Örnek: -Code inside `afterAll` will execute once after _all_ tests in the file. +`afterAll` içindeki kod, dosyadaki _tüm_ testlerden sonra bir kez çalıştırılacaktır. ```typescript import { describe, test, afterAll } from "matchstick-as/assembly/index" @@ -310,7 +310,7 @@ describe("handleUpdatedGravatar", () => { }) ``` -Code inside `afterAll` will execute once after all tests in the first describe block +`afterAll` içindeki kod, ilk describe bloğundaki tüm testlerden sonra bir kez çalıştırılacaktır ```typescript import { describe, test, afterAll, clearStore } from "matchstick-as/assembly/index" @@ -342,9 +342,9 @@ describe("handleUpdatedGravatar", () => { ### beforeEach() -Runs a code block before every test. If `beforeEach` is declared inside of a `describe` block, it runs before each test in that `describe` block. +Her testten önce bir kod bloğu çalıştırır. `beforeEach`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun her testinden önce çalıştırılır. -Examples: Code inside `beforeEach` will execute before each tests. +Örnekler: `beforeEach` içindeki kod, her testten önce çalıştırılacaktır. ```typescript import { describe, test, beforeEach, clearStore } from "matchstick-as/assembly/index" @@ -367,7 +367,7 @@ describe("handleNewGravatars, () => { ... ``` -Code inside `beforeEach` will execute only before each test in the that describe +`beforeEach` içindeki kod, yalnızca o describe bloğundaki her testten önce çalıştırılacaktır ```typescript import { describe, test, beforeEach } from 'matchstick-as/assembly/index' @@ -384,7 +384,7 @@ describe('handleUpdatedGravatars', () => { test('Upates the displayName', () => { assert.fieldEquals('Gravatar', '0x0', 'displayName', 'First Gravatar') - // code that should update the displayName to 1st Gravatar + // displayName'i 1. 
Gravatar olarak güncellemesi gereken kod assert.fieldEquals('Gravatar', '0x0', 'displayName', '1st Gravatar') store.remove('Gravatar', '0x0') @@ -393,7 +393,7 @@ describe('handleUpdatedGravatars', () => { test('Updates the imageUrl', () => { assert.fieldEquals('Gravatar', '0x0', 'imageUrl', '') - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod assert.fieldEquals('Gravatar', '0x0', 'imageUrl', 'https://www.gravatar.com/avatar/0x0') store.remove('Gravatar', '0x0') @@ -405,11 +405,11 @@ describe('handleUpdatedGravatars', () => { ### afterEach() -Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. +Her testten sonra bir kod bloğu çalıştırır. `afterEach`, `describe` bloklarının içinde tanımlanırsa, o `describe` bloğunun her testinden sonra çalıştırılır. -Examples: +Örnekler: -Code inside `afterEach` will execute after every test. +`afterEach` içindeki kod, her testten sonra çalıştırılacaktır. ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -433,7 +433,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // displayName'i 1. Gravatar olarak güncellemesi gereken kod assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -441,14 +441,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -Code inside `afterEach` will execute after each test in that describe +`afterEach` içindeki kod, yalnızca o describe bloğundaki her testten sonra çalıştırılacaktır ```typescript import { describe, test, beforeEach, afterEach } from "matchstick-as/assembly/index" @@ -473,7 +473,7 @@ describe("handleUpdatedGravatar", () => { test("Upates the displayName", () => { assert.fieldEquals("Gravatar", "0x0", "displayName", "First Gravatar") - // code that should update the displayName to 1st Gravatar + // displayName'i 1. Gravatar olarak güncellemesi gereken kod assert.fieldEquals("Gravatar", "0x0", "displayName", "1st Gravatar") }) @@ -481,14 +481,14 @@ describe("handleUpdatedGravatar", () => { test("Updates the imageUrl", () => { assert.fieldEquals("Gravatar", "0x0", "imageUrl", "") - // code that should changes the imageUrl to https://www.gravatar.com/avatar/0x0 + // imageUrl'yi https://www.gravatar.com/avatar/0x0 olarak değiştirmesi gereken kod assert.fieldEquals("Gravatar", "0x0", "imageUrl", "https://www.gravatar.com/avatar/0x0") }) }) ``` -## Asserts +## Doğrulamalar (Asserts) ```typescript fieldEquals(entityType: string, id: string, fieldName: string, expectedVal: string) @@ -522,11 +522,11 @@ assertNotNull(value: T) entityCount(entityType: string, expectedCount: i32) ``` -## Write a Unit Test +## Bir Birim Testi Yazın -Let's see how a simple unit test would look like using the Gravatar examples in the [Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts).
+[Demo Subgraph](https://github.com/LimeChain/demo-subgraph/blob/main/src/gravity.ts)'taki Gravatar örneklerini kullanarak nasıl basit bir birim test görüneceğini görelim. -Assuming we have the following handler function (along with two helper functions to make our life easier): +Aşağıdaki işleyici fonksiyonuna sahip olduğumuzu varsayarsak (iki yardımcı işlevle birlikte): ```typescript export function handleNewGravatar(event: NewGravatar): void { @@ -577,7 +577,7 @@ export function createNewGravatarEvent( } ``` -We first have to create a test file in our project. This is an example of how that might look like: +Önce projemizde bir test dosyası oluşturmamız gerekiyor. Bunun nasıl görünebileceğine dair bir örnek: ```typescript import { clearStore, test, assert } from 'matchstick-as/assembly/index' @@ -586,23 +586,23 @@ import { NewGravatar } from '../../generated/Gravity/Gravity' import { createNewGravatarEvent, handleNewGravatars } from '../mappings/gravity' test('Can call mappings with custom events', () => { - // Create a test entity and save it in the store as initial state (optional) + // Bir test varlığı oluşturun ve bunu depoya başlangıç durumu olarak kaydedin (isteğe bağlı) let gravatar = new Gravatar('gravatarId0') gravatar.save() - // Create mock events + // Mock etkinlikleri oluşturun let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') let anotherGravatarEvent = createNewGravatarEvent(3546, '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7', 'cap', 'pac') - // Call mapping functions passing the events we just created + // Az önce oluşturduğumuz olayları geçiren çağrı eşleştirme fonksiyonları handleNewGravatars([newGravatarEvent, anotherGravatarEvent]) - // Assert the state of the store + // Deponun durumunu doğrulayın assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') assert.fieldEquals('Gravatar', '12345', 'owner', '0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') assert.fieldEquals('Gravatar', '3546', 'displayName', 'cap') - // Clear the store in order to start the next test off on a clean slate + // Bir sonraki testi temiz bir sayfa üzerinde başlatmak için depoyu boşaltın clearStore() }) @@ -611,38 +611,38 @@ test('Next test', () => { }) ``` -That's a lot to unpack! First off, an important thing to notice is that we're importing things from `matchstick-as`, our AssemblyScript helper library (distributed as an npm module). You can find the repository [here](https://github.com/LimeChain/matchstick-as). `matchstick-as` provides us with useful testing methods and also defines the `test()` function which we will use to build our test blocks. The rest of it is pretty straightforward - here's what happens: +Analiz edilecek çok fazla şey var! Öncelikle fark etmemiz gereken önemli şey AssemblyScript yardımcı kütüphanemiz (npm modülü olarak dağıtılır) `matchstick-as`'den işleri içe aktardığımız. Repositoriyi [burada](https://github.com/LimeChain/matchstick-as) bulabilirsiniz. `matchstick-as` bize yararlı test yöntemleri sağlar ve ayrıca test blokları oluşturmak için kullanacağımız `test()` işlevini tanımlar. Geri kalanı oldukça açık - şöyle olur: -- We're setting up our initial state and adding one custom Gravatar entity; -- We define two `NewGravatar` event objects along with their data, using the `createNewGravatarEvent()` function; -- We're calling out handler methods for those events - `handleNewGravatars()` and passing in the list of our custom events; -- We assert the state of the store. How does that work? 
- We're passing a unique combination of Entity type and id. Then we check a specific field on that Entity and assert that it has the value we expect it to have. We're doing this both for the initial Gravatar Entity we added to the store, as well as the two Gravatar entities that gets added when the handler function is called; -- And lastly - we're cleaning the store using `clearStore()` so that our next test can start with a fresh and empty store object. We can define as many test blocks as we want. +- İlk durumumuzu ayarlıyor ve bir özel Gravatar varlığı ekliyoruz; +- `createNewGravatarEvent()` fonksiyonunu kullanarak verileriyle birlikte iki `NewGravatar` olay nesnesini tanımlıyoruz; +- `handleNewGravatars()` yöntemlerimizi bu olaylar için çağırıyoruz ve özel olay listemizi geçiyoruz; +- Depo durumunu doğruluyoruz. Bu nasıl çalışır? - Bir varlık türü ve kimliğinin benzersiz bir kombinasyonunu geçiriyoruz. Ardından, bu varlıkta belirli bir alanı kontrol ediyoruz ve beklediğimiz değeri almasını sağlıyoruz. Hem depoya eklediğimiz ilk Gravatar Varlığı için hem de işleyici işlevi çağrıldığında eklenen iki Gravatar varlığı için bunu yapıyoruz; +- Ve son olarak `clearStore()` kullanarak depoyu temizliyoruz, böylece bir sonraki testimiz temiz ve boş bir depo nesnesiyle başlayabilir. İstediğimiz kadar test bloğu tanımlayabiliriz. -There we go - we've created our first test! 👏 +İşte bu kadar - ilk testimizi oluşturduk! 👏 -Now in order to run our tests you simply need to run the following in your subgraph root folder: +Şimdi testlerimizi çalıştırmak için subgraph kök klasörünüzde şunu çalıştırmanız yeterlidir: `graph test Gravity` -And if all goes well you should be greeted with the following: +Ve her şey yolunda giderse aşağıdakiyle karşılaşacaksınız: -![Matchstick saying “All tests passed!”](/img/matchstick-tests-passed.png) +![Matchstick "Tüm testler geçildi!" diyor](/img/matchstick-tests-passed.png) -## Common test scenarios +## Çok rastlanan test senaryoları -### Hydrating the store with a certain state +### Belirli bir durumla depoyu doldurma -Users are able to hydrate the store with a known set of entities. Here's an example to initialise the store with a Gravatar entity: +Kullanıcılar bilinen bir varlık kümesiyle depoyu doldurabilirler. Aşağıdaki örnek depoyu Gravatar varlığıyla başlatmak için kullanılabilir: ```typescript let gravatar = new Gravatar('entryId') gravatar.save() ``` -### Calling a mapping function with an event +### Olayla bir eşleştirme fonksiyonu çağırmak -A user can create a custom event and pass it to a mapping function that is bound to the store: +Kullanıcı, özel bir olay oluşturup bunu depoya bağlı bir eşleştirme fonksiyonuna iletebilir: ```typescript import { store } from 'matchstick-as/assembly/store' @@ -654,9 +654,9 @@ let newGravatarEvent = createNewGravatarEvent(12345, '0x89205A3A3b2A69De6Dbf7f01 handleNewGravatar(newGravatarEvent) ``` -### Calling all of the mappings with event fixtures +### Tüm eşleştirmeleri olay fikstürleriyle çağırmak -Users can call the mappings with test fixtures. +Kullanıcılar test fikstürleriyle eşleştirmeleri çağırabilirler.
```typescript import { NewGravatar } from '../../generated/Gravity/Gravity' @@ -678,9 +678,9 @@ export function handleNewGravatars(events: NewGravatar[]): void { } ``` -### Mocking contract calls +### Kontrat çağrılarını mocklama -Users can mock contract calls: +Kullanıcılar sözleşme çağrılarını mocklayabilir: ```typescript import { addMetadata, assert, createMockedFunction, clearStore, test } from 'matchstick-as/assembly/index' @@ -700,9 +700,9 @@ let result = gravity.gravatarToOwner(bigIntParam) assert.equals(ethereum.Value.fromAddress(expectedResult), ethereum.Value.fromAddress(result)) ``` -As demonstrated, in order to mock a contract call and hardcore a return value, the user must provide a contract address, function name, function signature, an array of arguments, and of course - the return value. +Gösterildiği gibi, bir sözleşme çağrısını mocklamak ve bir dönüş değeri sabitlemek için kullanıcı, bir sözleşme adresi, işlev adı, işlev imzası, bir argüman dizisi ve elbette dönüş değerini sağlamalıdır. -Users can also mock function reverts: +Kullanıcılar ayrıca fonksiyon geri çevirilerinide mocklayabilirler: ```typescript let contractAddress = Address.fromString('0x89205A3A3b2A69De6Dbf7f01ED13B2108B2c43e7') @@ -711,20 +711,20 @@ createMockedFunction(contractAddress, 'getGravatar', 'getGravatar(address):(stri .reverts() ``` -### Mocking IPFS files (from matchstick 0.4.1) +### IPFS dosyalarını mocklama (Matchstick 0.4.1'den) -Users can mock IPFS files by using `mockIpfsFile(hash, filePath)` function. The function accepts two arguments, the first one is the IPFS file hash/path and the second one is the path to a local file. +Kullanıcılar `mockIpfsFile(hash, filePath)` fonksiyonunu kullanarak IPFS dosyalarını mocklama yeteneğine sahiptirler. Fonksiyon, ilk argümanı IPFS dosya hash/yol'u ve ikinci argümanı yerel bir dosyanın yolu olmak üzere iki argüman kabul eder. -NOTE: When testing `ipfs.map/ipfs.mapJSON`, the callback function must be exported from the test file in order for matchstck to detect it, like the `processGravatar()` function in the test example bellow: +NOT: `ipfs.map/ipfs.mapJSON`'u test ederken matchstck'in bunu algılaması için geri çağrıma işlevinin test dosyasından dışa aktarılması gerekiyor, örneğin aşağıdaki test örneğindeki `processGravatar()` fonksiyonu gibi: -`.test.ts` file: +`.test.ts` dosyası: ```typescript import { assert, test, mockIpfsFile } from 'matchstick-as/assembly/index' import { ipfs } from '@graphprotocol/graph-ts' import { gravatarFromIpfs } from './utils' -// Export ipfs.map() callback in order for matchstck to detect it +// Matchstck'in algılaması için ipfs.map() geri çağrısını dışa aktarın export { processGravatar } from './utils' test('ipfs.cat', () => { @@ -754,7 +754,7 @@ test('ipfs.map', () => { }) ``` -`utils.ts` file: +`utils.ts` dosyası: ```typescript import { Address, ethereum, JSONValue, Value, ipfs, json, Bytes } from "@graphprotocol/graph-ts" @@ -762,10 +762,10 @@ import { Gravatar } from "../../generated/schema" ... 
-// ipfs.map callback +// ipfs.map geri çağrısı export function processGravatar(value: JSONValue, userData: Value): void { - // See the JSONValue documentation for details on dealing - // with JSON values + // İşlem yapmayla ilgili ayrıntılar için JSONValue belgelerine bakın + // JSON değerleri ile let obj = value.toObject() let id = obj.get('id') @@ -773,13 +773,13 @@ export function processGravatar(value: JSONValue, userData: Value): void { return } - // Callbacks can also created entities + // Geri çağrılar ayrıca varlıklar oluşturabilir let gravatar = new Gravatar(id.toString()) gravatar.displayName = userData.toString() + id.toString() gravatar.save() } -// function that calls ipfs.cat +// ipfs.cat'i çağıran fonksiyon export function gravatarFromIpfs(): void { let rawData = ipfs.cat("ipfsCatfileHash") @@ -802,9 +802,9 @@ export function gravatarFromIpfs(): void { } ``` -### Asserting the state of the store +### Depo durumunu doğrulama -Users are able to assert the final (or midway) state of the store through asserting entities. In order to do this, the user has to supply an Entity type, the specific ID of an Entity, a name of a field on that Entity, and the expected value of the field. Here's a quick example: +Kullanıcılar, varlıkları doğrulayarak deponun nihai (veya orta) durumunu doğrulayabilirler. Bunun için, kullanıcı bir Varlık türü, bir Varlığın belirli bir kimliği, o Varlıktaki bir alanın adı ve alanın beklenen değerini sağlamalıdır. Hızlıca bir örneğe bakalım: ```typescript import { assert } from 'matchstick-as/assembly/index' @@ -816,38 +816,38 @@ gravatar.save() assert.fieldEquals('Gravatar', 'gravatarId0', 'id', 'gravatarId0') ``` -Running the assert.fieldEquals() function will check for equality of the given field against the given expected value. The test will fail and an error message will be outputted if the values are **NOT** equal. Otherwise the test will pass successfully. +assert.fieldEquals () fonksiyonunu çalıştırmak, verilen alanın verilen beklenen değere karşı eşitliğini kontrol edecektir. Değerler eşit **DEĞİLSE** test başarısız olacak ve bir hata mesajı verecektir. Aksi takdirde, test başarılı bir şekilde geçecektir. -### Interacting with Event metadata +### Olay üst verileriyle etkileşim -Users can use default transaction metadata, which could be returned as an ethereum.Event by using the `newMockEvent()` function. The following example shows how you can read/write to those fields on the Event object: +Kullanıcılar `newMockEvent()` fonksiyonunu kullanarak ethereum.Event döndürebilen varsayılan işlem üst verilerini kullanabilir. Aşağıdaki örnek, Olay nesnesindeki bu alanlara nasıl okuma/yazma yapabileceğinizi gösterir: ```typescript -// Read +// Okuma let logType = newGravatarEvent.logType -// Write +// Yazma let UPDATED_ADDRESS = '0xB16081F360e3847006dB660bae1c6d1b2e17eC2A' newGravatarEvent.address = Address.fromString(UPDATED_ADDRESS) ``` -### Asserting variable equality +### Değişken eşitliğini doğrulama ```typescript assert.equals(ethereum.Value.fromString("hello"); ethereum.Value.fromString("hello")); ``` -### Asserting that an Entity is **not** in the store +### Bir varlığın depoda olmadığını(**not**) doğrulama -Users can assert that an entity does not exist in the store. The function takes an entity type and an id. If the entity is in fact in the store, the test will fail with a relevant error message. Here's a quick example of how to use this functionality: +Kullanıcılar, bir varlığın depoda olmadığını doğrulayabilirler. 
Bu fonksiyon, bir varlık türü ve bir kimlik alır. Eğer varlık gerçekten depoda ise, test ilgili bir hata mesajı vererek başarısız olacaktır. Fonksiyonun nasıl kullanılacağına dair hızlıca bir örneğe bakalım: ```typescript assert.notInStore('Gravatar', '23') ``` -### Printing the whole store (for debug purposes) +### Deponun tamamını yazdırma (hata ayıklama amaçlı) -You can print the whole store to the console using this helper function: +Bu yardımcı fonksiyon kullanılarak deponun tamamı konsola yazdırılabilir: ```typescript import { logStore } from 'matchstick-as/assembly/store' @@ -855,9 +855,9 @@ import { logStore } from 'matchstick-as/assembly/store' logStore() ``` -### Expected failure +### Beklenen başarısızlık -Users can have expected test failures, using the shouldFail flag on the test() functions: +Kullanıcılar, shouldFail bayrağını test() fonksiyonlarında kullanarak beklenen test başarısızlıklarına sahip olabilirler: ```typescript test( @@ -869,11 +869,11 @@ test( ) ``` -If the test is marked with shouldFail = true but DOES NOT fail, that will show up as an error in the logs and the test block will fail. Also, if it's marked with shouldFail = false (the default state), the test executor will crash. +Test, shouldFail = true olarak işaretlenirse ama BAŞARISIZ olmazsa, bu kayıtlarda bir hata olarak görünecek ve test bloğu başarısız olacaktır. Ayrıca, shouldFail = false (varsayılan durum) olarak işaretlenirse, test yürütücüsü çökecektir. -### Logging +### Kayıt tutma -Having custom logs in the unit tests is exactly the same as logging in the mappings. The difference is that the log object needs to be imported from matchstick-as rather than graph-ts. Here's a simple example with all non-critical log types: +Birim testlerinde özel kayıt tutmak, eşleştirmelerde kayıt tutmakla tamamen aynıdır. Fark, günlük nesnesinin graph-ts yerine matchstick-as'dan içe aktarılması gerekmesidir. İşte tüm kritik olmayan kayıt türleriyle ilgili basit bir örnek: ```typescript import { test } from "matchstick-as/assembly/index"; @@ -896,7 +896,7 @@ test("Warning", () => { }); ``` -Users can also simulate a critical failure, like so: +Kullanıcılar ayrıca şu şekilde kritik bir hatayı simüle edebilirler: ```typescript test('Blow everything up', () => { @@ -904,11 +904,11 @@ test('Blow everything up', () => { }) ``` -Logging critical errors will stop the execution of the tests and blow everything up. After all - we want to make sure you're code doesn't have critical logs in deployment, and you should notice right away if that were to happen. +Kritik hataları kayıt altına almak, testlerin yürütülmesini durduracak ve her şeyi mahvedecektir. Sonuçta, kodunuzun dağıtımda kritik kayıtları içermediğinden emin olmak istiyoruz ve bunun olması durumunda hemen fark etmeniz gerekiyor. -### Testing derived fields +### Türetilmiş alanların test edilmesi -Testing derived fields is a feature which (as the example below shows) allows the user to set a field in a certain entity and have another entity be updated automatically if it derives one of its fields from the first entity. Important thing to note is that the first entity needs to be reloaded as the automatic update happens in the store in rust of which the AS code is agnostic. +Türetilmiş alanların test edilmesi (aşağıdaki örnekte gösterildiği gibi), kullanıcının belirli bir varlıkta bir alanı ayarlamasına ve eğer alanlarından birini ilk varlıktan türetiyorsa diğer varlığın otomatik olarak güncellenmesine olanak sağlar.
Not edilmesi gereken önemli nokta, otomatik güncelleme AS kodunun agnostik olduğu rust'daki depoda gerçekleştiği için ilk varlığın yeniden yüklenmesi gerektiğidir. ```typescript test('Derived fields example test', () => { @@ -931,13 +931,13 @@ test('Derived fields example test', () => { }) ``` -### Testing dynamic data sources +### Dinamik Veri Kaynaklarının Test Edilmesi -Testing dynamic data sources can be be done by mocking the return value of the `context()`, `address()` and `network()` functions of the dataSource namespace. These functions currently return the following: `context()` - returns an empty entity (DataSourceContext), `address()` - returns `0x0000000000000000000000000000000000000000`, `network()` - returns `mainnet`. The `create(...)` and `createWithContext(...)` functions are mocked to do nothing so they don't need to be called in the tests at all. Changes to the return values can be done through the functions of the `dataSourceMock` namespace in `matchstick-as` (version 0.3.0+). +Dinamik veri kaynaklarının test edilmesi dataSource ad alanının `context()`, `address()` ve `network()` fonksiyonlarının geri dönüş değerlerinin mocklanmasıyla yapılabilir. Bu fonksiyonlar şu anda şunları döndürmektedir: `context()` - boş bir varlık döndürür (DataSourceContext), `address()` - `0x0000000000000000000000000000000000000000` döndürür, `network()` - `mainnet` döndürür. `create(...)` ve `createWithContext(...)` fonksiyonları hiçbir şey yapmamak için mocklanmıştır bu nedenle testlerde çağrılmaları gerekmez. Dönüş değerlerinde yapılacak değişiklikler `matchstick-as`'deki (version 0.3.0+)`dataSourceMock` ad alanının fonksiyonlarıyla yapılabilir. -Example below: +Aşağıdaki örnekte: -First we have the following event handler (which has been intentionally repurposed to showcase datasource mocking): +Öncelikle aşağıdaki olay işleyicisine sahibiz (veri kaynağı mocklamasını göstermek için kasıtlı olarak amaç değişikliği): ```typescript export function handleApproveTokenDestinations(event: ApproveTokenDestinations): void { @@ -953,7 +953,7 @@ export function handleApproveTokenDestinations(event: ApproveTokenDestinations): } ``` -And then we have the test using one of the methods in the dataSourceMock namespace to set a new return value for all of the dataSource functions: +Ardından, dataSourceMock ad alanındaki yöntemlerden birini kullanarak tüm veri kaynağı fonksiyonları için yeni bir geri dönüş değeri ayarlayan testi görüyoruz: ```typescript import { assert, test, newMockEvent, dataSourceMock } from 'matchstick-as/assembly/index' @@ -986,41 +986,41 @@ test('Data source simple mocking example', () => { }) ``` -Notice that dataSourceMock.resetValues() is called at the end. That's because the values are remembered when they are changed and need to be reset if you want to go back to the default values. +dataSourceMock.resetValues()'in en sonda çağrıldığına dikkat edin. Bunun nedeni değerler değiştirildiğinde hatırlanır ve varsayılan değerlere geri dönmek istiyorsanız sıfırlanmaları gerekir. -## Test Coverage +## Test Kapsamı -Using **Matchstick**, subgraph developers are able to run a script that will calculate the test coverage of the written unit tests. +Subgraph geliştiricileri **Matchstick'i** kullanarak, yazılan birim testlerinin test kapsamını hesaplayacak bir komut dosyası çalıştırabilirler. 
-The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. +Test kapsama aracı derlenmiş test `wasm` ikililerini alır ve bunları daha sonra `subgraph.yaml` dosyasında tanımlanan işleyicilerin çağrılıp çağrılmadığını görmek için kolayca incelenebilen `wat` dosyalarına dönüştürür. Kod kapsamı (ve bir bütün olarak test) AssemblyScript ve WebAssembly'de çok erken aşamalarda olduğundan, **Matchstick** dallanma kapsamını kontrol edemez. Bunun yerine, belirli bir işleyici çağrılmışsa, bunun için olay/fonksiyonun uygun şekilde taklit edildiği savına güveniyoruz. -### Prerequisites +### Ön Koşullar -To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: +**Matchstick** tarafından sağlanan test kapsama fonksiyonlarını çalıştırmak için önceden hazırlamanız gereken birkaç şey bulunmaktadır: -#### Export your handlers +#### İşleyicilerinizi dışa aktarın -In order for **Matchstick** to check which handlers are being run, those handlers need to be exported from the **test file**. So for instance in our example, in our gravity.test.ts file we have the following handler being imported: +**Matchstick**'in hangi işleyicilerin çalıştığını kontrol etmesi için, bu işleyicilerin **test dosyasından** dışa aktarılması gerekir. Mesela, bizim örneğimizde gravity.test.ts dosyamızda aşağıdaki işleyici içe aktarılır: ```typescript import { handleNewGravatar } from '../../src/gravity' ``` -In order for that function to be visible (for it to be included in the `wat` file **by name**) we need to also export it, like this: +Bu fonksiyonun görünür olması için (**adıyla** `wat` dosyasına dahil edilmesi için) ayrıca onu da şöyle dışa aktarmamız gerekmektedir: ```typescript export { handleNewGravatar } ``` -### Usage +### Kullanış -Once that's all set up, to run the test coverage tool, simply run: +Her şey hazır olduğunda, test kapsama aracını çalıştırmak için basitçe şunu çalıştırın: ```sh graph test -- -c ``` -You could also add a custom `coverage` command to your `package.json` file, like so: +Ayrıca `package.json` dosyanıza şu şekilde özel bir kapsama (`coverage`) komutu ekleyebilirsiniz: ```typescript "scripts": { @@ -1029,7 +1029,7 @@ You could also add a custom `coverage` command to your `package.json` file, like }, ``` -That will execute the coverage tool and you should see something like this in the terminal: +Bu, kapsama aracını çalıştıracak ve terminalde buna benzer bir şey göreceksiniz: ```sh $ graph test -c @@ -1068,17 +1068,17 @@ Test coverage: 0.0% (0/6 handlers). Global test coverage: 22.2% (2/9 handlers). ``` -### Test run time duration in the log output +### Tutulan kayıt çıktısında test çalışma süresi -The log output includes the test run duration. Here's an example: +Tutulan kayıt çıktısı test çalışma süresini içerir.
İşte buna bir örnek: `[Thu, 31 Mar 2022 13:54:54 +0300] Program executed in: 42.270ms.` -## Common compiler errors +## Çok rastlanan derleyici hataları > Critical: Could not create WasmInstance from valid module with context: unknown import: wasi_snapshot_preview1::fd_write has not been defined -This means you have used `console.log` in your code, which is not supported by AssemblyScript. Please consider using the [Logging API](/developing/assemblyscript-api/#logging-api) +Bu, AssemblyScript tarafından desteklenmeyen `console.log`'u kullandığınız anlamına gelmektedir. Lütfen [Logging API](/developing/assemblyscript-api/#logging-api) kullanmayı düşünün > ERROR TS2554: Expected ? arguments, but got ?. > @@ -1092,8 +1092,8 @@ This means you have used `console.log` in your code, which is not supported by A > > in ~lib/matchstick-as/assembly/defaults.ts(24,12) -The mismatch in arguments is caused by mismatch in `graph-ts` and `matchstick-as`. The best way to fix issues like this one is to update everything to the latest released version. +Argümanlardaki uyumsuzluk, `graph-ts` ve `matchstick-as` arasındaki uyumsuzluktan kaynaklanır. Bu gibi sorunları düzeltmenin en iyi yolu her şeyi en son yayınlanan sürüme güncellemektir. -## Feedback +## Geribildirim -If you have any questions, feedback, feature requests or just want to reach out, the best place would be The Graph Discord where we have a dedicated channel for Matchstick, called 🔥| unit-testing. +Soru, geri bildirim, özellik istekleri veya sadece iletişim kurmak istiyorsanız en iyi yer Matchstick için ayrılmış bir kanal olan 🔥| unit-testing kanalına sahip olduğumuz The Graph Discord'udur. diff --git a/website/pages/tr/docsearch.json b/website/pages/tr/docsearch.json index 8cfff967936d..b6a88dee1c5b 100644 --- a/website/pages/tr/docsearch.json +++ b/website/pages/tr/docsearch.json @@ -1,42 +1,42 @@ { "button": { - "buttonText": "Search", - "buttonAriaLabel": "Search" + "buttonText": "Arama", + "buttonAriaLabel": "Arama" }, "modal": { "searchBox": { - "resetButtonTitle": "Clear the query", - "resetButtonAriaLabel": "Clear the query", - "cancelButtonText": "Cancel", - "cancelButtonAriaLabel": "Cancel" + "resetButtonTitle": "Sorguyu temizle", + "resetButtonAriaLabel": "Sorguyu temizle", + "cancelButtonText": "İptal", + "cancelButtonAriaLabel": "İptal" }, "startScreen": { - "recentSearchesTitle": "Recent", - "noRecentSearchesText": "No recent searches", - "saveRecentSearchButtonTitle": "Save this search", - "removeRecentSearchButtonTitle": "Remove this search from history", - "favoriteSearchesTitle": "Favorite", - "removeFavoriteSearchButtonTitle": "Remove this search from favorites" + "recentSearchesTitle": "Son", + "noRecentSearchesText": "Son arama bulunmamakta", + "saveRecentSearchButtonTitle": "Aramayı kaydet", + "removeRecentSearchButtonTitle": "Bu aramayı geçmişten kaldır", + "favoriteSearchesTitle": "Favori", + "removeFavoriteSearchButtonTitle": "Bu aramayı favorilerden kaldır" }, "errorScreen": { - "titleText": "Unable to fetch results", - "helpText": "You might want to check your network connection." + "titleText": "Sonuçlar getirilemiyor", + "helpText": "Ağ bağlantınızı kontrol etmek isteyebilirsiniz." 
}, "footer": { - "selectText": "to select", - "selectKeyAriaLabel": "Enter key", - "navigateText": "to navigate", - "navigateUpKeyAriaLabel": "Arrow up", - "navigateDownKeyAriaLabel": "Arrow down", - "closeText": "to close", - "closeKeyAriaLabel": "Escape key", - "searchByText": "Search by" + "selectText": "Seç", + "selectKeyAriaLabel": "Anahtarı girin", + "navigateText": "yönlendirmek", + "navigateUpKeyAriaLabel": "Yukarı ok", + "navigateDownKeyAriaLabel": "Aşağı ok", + "closeText": "Kapat", + "closeKeyAriaLabel": "Kaçış anahtarı", + "searchByText": "Şuna göre ara" }, "noResultsScreen": { - "noResultsText": "No results for", - "suggestedQueryText": "Try searching for", - "reportMissingResultsText": "Believe this query should return results?", - "reportMissingResultsLinkText": "Let us know." + "noResultsText": "için sonuç yok", + "suggestedQueryText": "Aramayı dene", + "reportMissingResultsText": "Bu sorgunun sonuç getirmesi gerektiğine inanıyor musunuz?", + "reportMissingResultsLinkText": "Bize haber ver." } } } diff --git a/website/pages/tr/firehose.mdx b/website/pages/tr/firehose.mdx index 5e2b37ee4bb6..13641c5b57aa 100644 --- a/website/pages/tr/firehose.mdx +++ b/website/pages/tr/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logosu](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose, StreamingFast tarafından Graph Vakfı ile birlikte geliştirdiği yeni bir teknolojidir. Ürün, dosya tabanlı ve akış öncelikli bir yaklaşım kullanarak **blok zinciri verilerini indekslemek için daha önce görülmemiş olanaklar ve hızlar** sağlamaktadır. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose, blok zinciri verilerini yüksek performanslı dosya tabanlı bir stratejiyle çıkarır, dönüştürür ve kaydeder. Blok zinciri geliştiricileri daha sonra Firehose tarafından çıkarılan verilere ikili veri akışları üzerinden erişebilir. Firehose'un Graph'ın orijinal blok zinciri veri çıkarma katmanının yerine geçmesi amaçlanmıştır. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Dökümantasyonu + +Firehose dökümantasyonu şu anda StreamingFast ekibi tarafından [StreamingFast web sitesinde](https://firehose.streamingfast.io/) tutulmaktadır. + +### Buradan Başlayın + +- Firehose'un ne olduğu ve neden oluşturulduğu hakkında genel bilgi edinmek adına bu [Firehose tanıtım yazısını](https://firehose.streamingfast.io/introduction/firehose-overview) okuyun. +- Firehose'u yüklemek ve dağıtmak için [Ön Koşullar](https://firehose.streamingfast.io/introduction/prerequisites) hakkında bilgi edinin. + +### Bilgi Dağarcığınızı Genişletin + +- Mevcut farklı [Firehose bileşenleri](https://firehose.streamingfast.io/architecture/components) hakkında bilgi edinin. 
diff --git a/website/pages/tr/global.json b/website/pages/tr/global.json index 6a3eb234bfce..4754a8d03971 100644 --- a/website/pages/tr/global.json +++ b/website/pages/tr/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "collapse": "Çöküş", + "expand": "Genişletme", + "previous": "Önceki", + "next": "Sonraki", + "editPage": "Sayfayı Düzenle", + "pageSections": "Sayfa Bölümleri", + "linkToThisSection": "Bu bölüme bağlantı", + "technicalLevelRequired": "Gerekli Teknik Seviye", + "notFoundTitle": "Hata! Bu sayfa kayboldu gitti...", + "notFoundSubtitle": "Doğru adresi kullanıp kullanmadığınızı kontrol edin veya aşağıdaki bağlantıya tıklayarak web sitemize göz atın.", + "goHome": "Anasayfaya Git", "video": "Video" } diff --git a/website/pages/tr/glossary.mdx b/website/pages/tr/glossary.mdx index 2e840513f1ea..20b1a9ca12ba 100644 --- a/website/pages/tr/glossary.mdx +++ b/website/pages/tr/glossary.mdx @@ -1,89 +1,85 @@ --- -title: Glossary +title: Sözlük --- -- **The Graph**: A decentralized protocol for indexing and querying data. +- **Graph**: Verileri indekslemek ve sorgulamak için merkeziyetsiz bir protokol. -- **Query**: A request for data. In the case of The Graph, a query is a request for data from a subgraph that will be answered by an Indexer. +- **Sorgu**: Veri talebi. Graph söz konusu olduğunda, sorgu, indeksleyici tarafından yanıtlanacak olan bir subgraph'ten gelen veri talebidir. -- **GraphQL**: A query language for APIs and a runtime for fulfilling those queries with your existing data. The Graph uses GraphQL to query subgraphs. +- **GraphQL**: API'lar için bir sorgu dili ve bu sorguları mevcut verilerinizle yerine getirmek için bir çalışma zamanı. Graph, subgraph'leri sorgulamak için GraphQL kullanır. -- **Endpoint**: A URL that can be used to query a subgraph. The testing endpoint for Subgraph Studio is `https://api.studio.thegraph.com/query///` and the Graph Explorer endpoint is `https://gateway.thegraph.com/api//subgraphs/id/`. The Graph Explorer endpoint is used to query subgraphs on The Graph's decentralized network. +- **Uç Nokta**: Bir subgraph'ı sorgulamak için kullanılabilecek bir URL'dir. Subgraph Stüdyo için test uç noktası `https://api.studio.thegraph.com/query///` ve Graph Gezgini uç noktası `https://gateway.thegraph.com/api//subgraphs/id/` şeklindedir. Graph Gezgini uç noktası, Graph'ın merkeziyetsiz ağındaki subgraphları sorgulamak için kullanılır. -- **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. +- **Subgraph**: [GraphQL](https://graphql.org/) kullanılarak sorgulanabilen blok zinciri verileri üzerine kurulu özel bir API. Geliştiriciler, Graph'ın merkeziyetsiz ağında subgraph'ler inşa edebilir, deploy edebilir ve yayınlayabilir. Ardından indeksleyiciler, subgraph tüketicileri tarafından sorgulanmak üzere kullanılabilir hale getirmek için subgraph'leri indekslemeye başlayabilir. 
-- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. -- **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. +- **İndeksleyiciler**: Blok zincirlerinden gelen verileri indekslemek ve GraphQL sorguları sunmak için indeksleme node'larını çalıştıran ağ katılımcıları. -- **Indexer Revenue Streams**: Indexers are rewarded in GRT with two components: query fee rebates and indexing rewards. +- **İndeksleyici Gelir Akışları**: İndeksleyiciler, GRT'de iki bileşenle ödüllendirilir: Sorgu ücreti indirimleri ve İndeksleme ödülleri. - 1. **Query Fee Rebates**: Payments from subgraph consumers for serving queries on the network. + 1. **Sorgu Ücreti İadeleri**: Ağda sorgular sunmak için subgraph tüketicilerinden yapılan ödemelerdir. - 2. **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are generated via new issuance of 3% GRT annually. + 2. **İndeksleme Ödülleri**: İndeksleyicilerin subgraph'leri indeksleme karşılığında aldığı ödüller. İndeksleme ödülleri, yıllık %3 GRT'nin yeni ihracı yoluyla oluşturulur. -- **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **İndeksleyicinin Kendi Stake'i**: İndeksleyicilerin merkeziyetsiz ağa katılmak için stake ettikleri GRT miktarıdır. Minimum 100.000 GRT'dir ve üst sınır yoktur. -- **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. -- **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. +- **Delegatörler**: GRT'ye sahip olan ve GRT'lerini indeksleyicilere stake eden ağ katılımcıları. Bu, indeksleyicilerin ağdaki subgraph'lerde mevcut paylarını artırmalarına olanak tanır. Buna karşılık, delegatörler, indeksleyicilerin subgraph'leri işlemek için aldıkları indeksleme ödüllerinin bir kısmını alırlar. -- **Curators**: Network participants that identify high-quality subgraphs, and “curate” them (i.e., signal GRT on them) in exchange for curation shares. When Indexers claim query fees on a subgraph, 10% is distributed to the Curators of that subgraph. Indexers earn indexing rewards proportional to the signal on a subgraph. We see a correlation between the amount of GRT signalled and the number of Indexers indexing a subgraph. +- **Delegasyon Vergisi**: GRT'yi indeksleyicilere stake ettiklerinde delegatörler tarafından ödenen %0,5'lik bir ücret. Ücreti ödemek için kullanılan GRT yakılır. 
-- **Curation Tax**: A 1% fee paid by Curators when they signal GRT on subgraphs. The GRT used to pay the fee is burned. +- **Küratörler**: Yüksek kaliteli subgraph'leri belirleyen ve bunları küratörlük paylaşımları karşılığında "düzenleyen" (yani üzerlerinde GRT sinyali veren) ağ katılımcılarıdır. İndeksleyiciler bir subgraph'te sorgulama ücreti talep ettiğinde, o subgraph'in küratörlerine %10 dağıtılır. İndeksleyiciler, bir subgraph'teki sinyalle orantılı indeksleme ödülleri kazanır. Sinyal verilen GRT miktarı ile bir subgraph'i indeksleyen indeksleyicilerin sayısı arasında bir korelasyon görüyoruz. -- **Subgraph Consumer**: Any application or user that queries a subgraph. +- **Kurasyon Vergisi**: Küratörler tarafından subgraph'lerde GRT sinyali verildiğinde ödenen %1'lik bir ücrettir. Ücreti ödemek için kullanılan GRT yakılır. -- **Subgraph Developer**: A developer who builds and deploys a subgraph to The Graph's decentralized network. +- **Subgraph Tüketicisi**: Bir subgraph'ği sorgulayan herhangi bir uygulama veya kullanıcı. -- **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. +- **Subgraph Geliştiricisi**: Graph'in merkeziyetsiz ağına bir subgraph inşa eden ve dağıtan bir geliştirici. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. +- **Subgraph Manifestosu**: Subgraph'in GraphQL şemasını, veri kaynaklarını ve diğer meta verileri açıklayan bir JSON dosyası. [Burada bir örneğini](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) bulabilirsiniz. -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. -- **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. +- **Tahsis**: Bir indeksleyici, toplam GRT payını (delegatörlerin payı dahil) Graph'in merkeziyetsiz ağında yayınlanan subgraph'lere tahsis edebilir. Tahsisler dört aşamadan birinde mevcuttur. - 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. + 1. **Aktif**: Bir tahsis, zincir üzerinde oluşturulduğunda aktif kabul edilir. Buna tahsis açma denir ve ağa, indeksleyicinin belirli bir subgraph için sorguları aktif olarak indekslediğini ve sunduğunu gösterir. Aktif tahsisler, subgraph'teki sinyal ve tahsis edilen GRT miktarı ile orantılı olarak indeksleme ödülleri tahakkuk ettirir. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. 
When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. +- **Subgraph Stüdyo**: Subgraph'ler oluşturmak, deploy etmek ve yayınlamak için güçlü bir merkeziyetsiz uygulamadır. - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data.
The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Stake Ödülleri**: Delegatörlerin GRT'yi indeksleyicilere stake etme karşılığında aldığı ödüllerdir. Stake ödülleri GRT şeklinde dağıtılır. -- **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. +- **GRT**: Graph'in çalışma yardımcı programı belirtecidir. GRT, ağ katılımcılarına ağa katkıda bulunmaları için ekonomik teşvikler sağlar. -- **Delegation Rewards**: The rewards that Delegators receive for delegating GRT to Indexers. Delegation rewards are distributed in GRT. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. -- **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. +- **Graph Node**: Graph Node, subgraph'leri indeksleyen ve elde edilen verileri bir GraphQL API aracılığıyla sorgulanabilir hale getiren bileşendir. Bu nedenle, indeksleyici yığınının merkezinde yer alır ve Graph node'unun doğru çalışması, başarılı bir indeksleyici olabilmek için çok önemlidir. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **İndeksleyici Aracı**: İndeksleyici aracı, indeksleyici yığının bir parçasıdır. Ağa kaydolma, Graph node'larına subgraph deploy sürecini ve tahsisleri yönetme dahil olmak üzere indeksleyicinin zincir üzerindeki etkileşimlerini kolaylaştırır. -- **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +- **Graph Tüketicileri**: Merkeziyetsiz bir şekilde GraphQL tabanlı merkeziyetsiz uygulamalar inşa etmeye yönelik bir kitaplık. -- **Indexer agent**: The Indexer agent is part of the indexer stack. It facilitates the Indexer's interactions on-chain, including registering on the network, managing subgraph deployments to its Graph Node(s), and managing allocations. +- **Graph Gezgini**: Ağ katılımcılarının subgraph'leri keşfetmesi ve protokolle etkileşim kurması için tasarlanmış bir merkeziyetsiz uygulamadır. -- **The Graph Client**: A library for building GraphQL-based dapps in a decentralized way. +- **Graph CLI**: Graph üzerinde inşa ve deploy etmek için bir komut satırı arabirim aracı. -- **Graph Explorer**: A dapp designed for network participants to explore subgraphs and interact with the protocol. +- **Soğuma Süresi**: Yetki parametrelerini değiştiren indeksleyicinin bunu tekrar yapabilmesi için kalan süre. -- **Graph CLI**: A command line interface tool for building and deploying to The Graph. 
+- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. -- **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. +- **Bir subgraph'ı Graph Ağı'na _yükseltme_**: Bir subgraph'ı barındırılan hizmetten Graph Ağı'na taşıma işlemi. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **Bir subgraph'ın _güncellenmesi_**: Subgraph manifestosunda, şemasında veya eşleştirmelerinde yapılan güncellemelerle yeni bir subgraph sürümü yayınlama işlemi. -- **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - -- **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. - -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/tr/graphcast.mdx b/website/pages/tr/graphcast.mdx index e397aad36e43..73e85f3970d1 100644 --- a/website/pages/tr/graphcast.mdx +++ b/website/pages/tr/graphcast.mdx @@ -2,20 +2,20 @@ title: Graphcast --- -## Introduction +## Giriş -Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? +İndeksleyici olan arkadaşlarınızdan otomatik olarak öğrenmek veya onlarla paylaşmak istediğiniz bir şey var, ancak bu çok fazla güçlük veya çok fazla gas fee'ye mi mal oluyor? -Currently, the cost to broadcast information to other network participants is determined by gas fees on the Ethereum blockchain. Graphcast solves this problem by acting as an optional decentralized, distributed peer-to-peer (P2P) communication tool that allows Indexers across the network to exchange information in real time. The cost of exchanging P2P messages is near zero, with the tradeoff of no data integrity guarantees. Nevertheless, Graphcast aims to provide message validity guarantees (i.e. that the message is valid and signed by a known protocol participant) with an open design space of reputation models. +Şu anda, diğer ağ katılımcılarına bilgi yayınlama maliyeti, Ethereum blok zincirindeki gaz ücretleri tarafından belirlenmektedir. Graphcast, ağ genelinde indeksleyicilerin gerçek zamanlı olarak bilgi alışverişinde bulunmasına izin veren isteğe bağlı merkeziyetsiz, dağıtılmış eşler arası (P2P) iletişim aracı olarak hareket ederek bu sorunu çözer. P2P mesaj alışverişinin maliyeti sıfıra yakındır ve veri bütünlüğü garantisi olmamasından ödün verilir. Yine de Graphcast, itibar modellerinin açık bir tasarım alanıyla mesaj geçerlilik garantileri (yani mesajın geçerli olduğu ve bilinen bir protokol katılımcısı tarafından imzalandığı) sağlamayı amaçlar. 
-The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: +Graphcast SDK (Yazılım Geliştirme Kiti), geliştiricilerin, belirli bir amaca hizmet etmek için indeksleyicilerin çalıştırabileceği, sürekli iletişim halinde olan Radio uygulamalarını inşa etmesine olanak tanır. Ayrıca, aşağıdaki kullanım durumları için birkaç Radio oluşturmayı (veya Radio oluşturmak isteyen diğer geliştiricilere/ekiplere destek sağlamayı) amaçlıyoruz: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). -- Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. -- Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. -- Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. -- Self-reporting on stack information including graph-node version, Postgres version, Ethereum client version, etc. +- Subgraph veri bütünlüğünün gerçek zamanlı çapraz kontrolü ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). +- Diğer indeksleyicilerden warp senkronizasyonu subgraph'leri, substream'ler ve Firehose verileri için açık artırmalar ve koordinasyon yürütmek. +- Subgraph istek hacimleri, ücret hacimleri vb. dahil olmak üzere aktif sorgu analitiği hakkında kendi kendini raporlama. +- Subgraph indeksleme süresi, işleyici gaz maliyetleri, karşılaşılan indeksleme hataları vb. dahil olmak üzere indeksleme analitiği hakkında kendi kendini raporlama. +- Graph node sürümü, Postgres sürümü, Ethereum istemci sürümü vb. dahil olmak üzere yığın bilgileri hakkında kendi kendini raporlama. 
-### Learn More +### Daha fazla bilgi edin -If you would like to learn more about Graphcast, [check out the documentation here.](https://docs.graphops.xyz/graphcast/intro) +Graphcast hakkında daha fazla bilgi edinmek isterseniz, [buradaki belgelere göz atın.](https://docs.graphops.xyz/graphcast/intro) diff --git a/website/pages/tr/index.json b/website/pages/tr/index.json index 9e28e13d5001..b2562b57d6d1 100644 --- a/website/pages/tr/index.json +++ b/website/pages/tr/index.json @@ -1,77 +1,76 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "title": "Başlayalım", + "intro": "Blok zincirlerinden verileri indekslemek ve sorgulamak için merkeziyetsiz bir protokol olan Graph Protokol hakkında bilgi edinin.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Graph Hakkında", + "description": "Graph hakkında daha fazla bilgi edinin" }, "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "title": "Hızlı Başlangıç", + "description": "Atlayın ve Graph ile başlayın" }, "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "title": "Geliştirici SSS", + "description": "Sıkça Sorulan Sorular" }, "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "title": "Bir Uygulama Üzerinden Sorgulama", + "description": "Bir uygulama üzerinden sorgulamayı öğrenin" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Subgraph Oluştur", + "description": "Subgraph'ler oluşturmak için Studio'yu kullanın" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", + "title": "Ağ Rolleri", + "description": "Graph Protokol'ün ağ rolleri hakkında bilgi edinin.", "roles": { "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "title": "Geliştirici", + "description": "Bir subgraph oluşturun veya bir dApp'de mevcut subgraph'leri kullanın" }, "indexer": { - "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "title": "Dizin Oluşturucu", + "description": "Verileri endekslemek ve sorguları sunmak için bir node çalıştırın" }, "curator": { - "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "title": "Küratör", + "description": "Subgraph'lerde sinyal vererek verileri düzenleyin" }, "delegator": { - "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "title": "Yetkilendiren", + "description": "GRT'yi Dizin Oluşturuculara devrederek ağı güvenli hale getirin" } } }, - "readMore": "Read more", + "readMore": "Daha fazlasını okuyun", "products": { - "title": "Products", + "title": "Ürünler", "products": { "subgraphStudio": { - "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" + "title": "Subgraph Stüdyosu", + "description": "Subgraph'ler ve API anahtarları oluşturun, yönetin ve yayınlayın" }, "graphExplorer": { - "title": "Graph Explorer", - "description": 
"Explore subgraphs and interact with the protocol" + "title": "Graph Gezgini", + "description": "Subgraph'leri keşfedin ve protokolle etkileşime girin" }, "hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "Barındırılan Hizmet", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Desteklenen Ağlar", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/tr/managing/deprecating-a-subgraph.mdx b/website/pages/tr/managing/deprecating-a-subgraph.mdx index e6adfccad368..b98b8922f534 100644 --- a/website/pages/tr/managing/deprecating-a-subgraph.mdx +++ b/website/pages/tr/managing/deprecating-a-subgraph.mdx @@ -1,18 +1,18 @@ --- -title: Deprecating a Subgraph +title: Bir Subgraph'i Kullanımdan Kaldırma --- -So you'd like to deprecate your subgraph on The Graph Explorer. You've come to the right place! Follow the steps below: +Diyelim ki Graph Gezgini'nde subgraph'inizi kullanımdan kaldırmak istiyorsunuz. Evet, doğru yere geldiniz! Aşağıdaki adımları lütfen takip edin: -1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) -2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. -3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. +1. [Buradan](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) sözleşme adresini ziyaret edin +2. Bağımsız değişkeniniz olarak `SubgraphID` ile `deprecateSubgraph` öğesini çağırın. +3. İşte bu kadar! Subgraph'iniz artık Graph Gezgini'ndeki aramalarda görünmeyecek. -Please note the following: +Lütfen aşağıdaki noktalara dikkat edin: -- The `deprecateSubgraph` function should be called by the owner's wallet. -- Curators will not be able to signal on the subgraph anymore. -- Curators that already signaled on the subgraph will be able to withdraw their signal at an average share price. -- Deprecated subgraphs will be indicated with an error message. +- `deprecateSubgraph` işlevi, sahibinin cüzdanı tarafından çağrılmalıdır. +- Küratörler artık ilgili subgraph'te sinyal veremeyecektir. +- Subgraph'te hali hazırda sinyal vermiş olan küratörler, sinyallerini ortalama bir hisse fiyatından geri çekebilecekler. +- Kullanımdan kaldırılan subgraph'ler bir hata mesajıyla belirtilir. -If you interacted with the deprecated subgraph, you'll be able to find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. +Kullanımdan kaldırılan subgraph'le etkileşimde bulunduysanız, kullanıcı profilinizde sırasıyla "Subgraph'ler", "İndeksleyiciler" veya "Kürasyon" sekmesi altında bulabilirsiniz. 
diff --git a/website/pages/tr/managing/transferring-subgraph-ownership.mdx b/website/pages/tr/managing/transferring-subgraph-ownership.mdx index 1ca1c621a9c9..4c91e5db7f42 100644 --- a/website/pages/tr/managing/transferring-subgraph-ownership.mdx +++ b/website/pages/tr/managing/transferring-subgraph-ownership.mdx @@ -1,39 +1,39 @@ --- -title: Transferring Subgraph Ownership +title: Subgraph Sahipliğini Aktarma --- -The Graph supports the transfer of the ownership of a subgraph. +Graph, bir subgraph'in mülkiyetinin devrini destekler. -When you deploy a subgraph to mainnet, an NFT will be minted to the address that deployed the subgraph. The NFT is based on a standard ERC721, so it can be easily transferred to different accounts. +Bir subgraph'i mainnet'te deploy ettiğinizde, subgraph'i deploy eden adrese bir NFT mintlenecektir. NFT, standart bir ERC721'i temel alır, dolayısıyla farklı hesaplara kolayca aktarılabilir. -Whoever owns the NFT controls the subgraph. If the owner decides to sell the NFT, or transfer it, they will no longer be able to make edits or updates to that subgraph on the network. +NFT'nin sahibi kim olursa olsun, subgraph'i kontrol eder. Sahibi, NFT'yi satmaya veya devretmeye karar verirse, artık ağ üzerinde o subgraph'te düzenleme veya güncelleme yapamaz. -In addition to adding more flexibility to the development lifecycle, this functionality makes certain use cases more convenient, such as moving your control to a multisig or a community member creating it on behalf of a DAO. +Geliştirme yaşam döngüsüne daha fazla esneklik eklemenin yanı sıra, bu işlevsellik, kontrolünüzü bir multisig'e veya onu DAO adına oluşturan bir topluluk üyesine taşımak gibi belirli kullanım durumlarını daha kolay hale getirir. -## Viewing your subgraph as an NFT +## Subgraph'ınızı NFT olarak görüntüleme -To view your subgraph as an NFT, you can visit an NFT marketplace like OpenSea: +Subgraph'inizi bir NFT olarak görüntülemek için OpenSea gibi bir NFT pazar yerini ziyaret edebilirsiniz: ``` https://opensea.io/your-wallet-address ``` -Or a wallet explorer like **Rainbow.me**: +Veya **Rainbow.me** gibi bir cüzdan gezgini: ``` https://rainbow.me/your-wallet-addres ``` -## Transferring ownership of a subgraph +## Bir subgraph'ın sahipliğini devretme -To transfer ownership of a subgraph, you can use the UI built into Subgraph Studio: +Bir subgraph'in sahipliğini aktarmak için, Subgraph Studio'da yerleşik olarak bulunan kullanıcı arayüzünü kullanabilirsiniz: -![Subgraph Ownership Transfer](/img/subgraph-ownership-transfer-1.png) +![Subgraph Sahipliği Aktarımı](/img/subgraph-ownership-transfer-1.png) -And then choose the address that you would like to transfer the subgraph to: +Ardından, subgraph'i aktarmak istediğiniz adresi seçin: -![Subgraph Ownership Trasfer](/img/subgraph-ownership-transfer-2.png) +![Subgraph Sahipliği Aktarımı](/img/subgraph-ownership-transfer-2.png) -You can also use the built-in UI of NFT marketplaces like OpenSea: +OpenSea gibi NFT pazar yerlerinin yerleşik kullanıcı arayüzünü de kullanabilirsiniz: -![Subgraph Ownership Trasfer from NFT marketplace](/img/subgraph-ownership-transfer-nft-marketplace.png) +![NFT pazarından Subgraph Sahipliği Aktarımı](/img/subgraph-ownership-transfer-nft-marketplace.png) diff --git a/website/pages/tr/mips-faqs.mdx b/website/pages/tr/mips-faqs.mdx index 73efe82662cb..9b0c4451e9b6 100644 --- a/website/pages/tr/mips-faqs.mdx +++ b/website/pages/tr/mips-faqs.mdx @@ -1,125 +1,127 @@ --- -title: MIPs FAQs +title: MIP SSS --- -## Introduction +## Giriş 
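Relating to the subgraph ownership transfer section above: because the ownership NFT follows the standard ERC721 interface, the transfer can also be scripted instead of going through Subgraph Studio or a marketplace UI. A rough ethers.js sketch under stated assumptions follows; the NFT contract address is a placeholder to be replaced with the actual subgraph ownership NFT contract for your network, and `safeTransferFrom` is the standard ERC721 call rather than an API documented above.

```typescript
import { ethers } from 'ethers'

// Placeholder address: substitute the actual subgraph ownership NFT (ERC721)
// contract for the network you are on; it is not specified in the doc above.
const SUBGRAPH_NFT_ADDRESS = '0x0000000000000000000000000000000000000000'

// Standard ERC721 transfer function; the token ID identifies the subgraph NFT.
const ERC721_ABI = ['function safeTransferFrom(address from, address to, uint256 tokenId)']

async function transferSubgraphNft(
  tokenId: bigint,
  newOwner: string,
  rpcUrl: string,
  currentOwnerKey: string,
) {
  const provider = new ethers.JsonRpcProvider(rpcUrl)
  const currentOwner = new ethers.Wallet(currentOwnerKey, provider)
  const nft = new ethers.Contract(SUBGRAPH_NFT_ADDRESS, ERC721_ABI, currentOwner)

  // After the transfer confirms, only the new owner can edit or update
  // the subgraph on the network, as described above.
  const tx = await nft.safeTransferFrom(currentOwner.address, newOwner, tokenId)
  await tx.wait()
}
```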
-It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. +> Not: MIPs programı Mayıs 2023 itibariyle kapanmıştır. Katılan tüm İndeksleyicilere teşekkür ederiz! -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). +Graph ekosistemine katılmak için heyecan verici bir zaman! Yaniv Tal, [Graph Day 2022](https://thegraph.com/graph-day/2022/) sırasında Graph ekosisteminin uzun yıllardır üzerinde çalıştığı bir an olan [barındırılan hizmetin kullanımdan kaldırılacağını](https://thegraph.com/blog/sunsetting-hosted-service/) duyurdu. -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. +Barındırılan hizmetin kullanımdan kaldırılması ve tüm faaliyetlerinin merkeziyetsiz ağa taşınmasını desteklemek için Graph Vakfı [Geçiş Altyapısı Sağlayıcıları (MIPs) programını](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program) duyurdu. -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. +MIPs programı, Ethereum ana ağının dışındaki zincirleri indekslemek ve Graph protokolü'nün merkeziyetsiz ağı çok zincirli bir altyapı katmanına genişletmesine yardımcı olmak için kaynaklarla İndeksleyicileri desteklemeyi amaçlayan bir teşvik programıdır. -### Useful Resources +MIPs programı, GRT arzının %0,75'ini (75 milyon GRT), %0,5'ini ağın önyüklenmesine katkıda bulunan İndeksleyicileri ödüllendirmek ve %0,25'ini çok zincirli subgraph'ler kullanan subgraph geliştiricileri için Ağ Hibelerine tahsis etmiştir. -- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) -- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) -- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) -- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) -- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +### Yararlı Kaynaklar -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? +- [Vincent (Victor) Taglia'dan İndeksleyici 2ools](https://indexer-2ools.vincenttaglia.com/#/) +- [Graph Ağı'nda Nasıl Etkili Bir İndeksleyici Olunur?](https://thegraph.com/blog/how-to-become-indexer/) +- [İndeksleyici Bilgi Merkezi](https://thegraph.academy/indexers/) +- [Tahsis Optimizatörü](https://github.com/graphprotocol/allocationopt.jl) +- [Tahsis Optimizasyon Aracı](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -Yes, it is indeed. +### 1. Bir subgraph başarısız olsa bile geçerli bir indeksleme kanıtı (POI) oluşturmak mümkün müdür?
-For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. +Evet, gerçekten de öyle. -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). +Bağlam için, tahkim tüzüğü [tüzük hakkında daha fazla bilgiyi buradan edinebilirsiniz](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), başarısız bir subgraph için POI oluşturma metodolojisini belirtir. -### 2. Which chain will the MIPs program incentivise first? +Topluluk üyelerinden [SunTzu](https://github.com/suntzu93), bu süreci tahkim tüzüğünün metodolojisine uygun olarak otomatikleştirmek için bir script oluşturdu. Github deposuna [buradan](https://github.com/suntzu93/get_valid_poi_subgraph) göz atın. -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. +### 2. MIPs programı ilk olarak hangi zinciri teşvik edecek? -### 3. How will new chains be added to the MIPs program? +Merkeziyetsiz ağ üzerinde desteklenecek ilk zincir Gnosis Zinciri! Eskiden xDAI olarak bilinen Gnosis Zinciri, EVM tabanlı bir zincirdir. Gnosis Zinciri, çalışan düğümlerin kullanıcı dostu olması, İndeksleyici hazırlığı, Graph ile uyumu ve web3 içinde benimsenmesi dolayısıyla ilk olarak seçilmiştir. -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. +### 3. MIPs programına yeni zincirler nasıl eklenecek? -### 4. How will we know when the network is ready for a new chain? +Yeni zincirler, MIPs programı boyunca İndeksleyici hazırlığı, talep ve topluluk görüşüne dayalı olarak duyurulacaktır. Zincirler ilk olarak test ağında desteklenecek ve daha sonra bu zinciri ana ağda desteklemek için bir GIP başarılı şekilde geçecektir. MIPs programına katılan indeksleyiciler hangi zincirleri desteklemek istediklerini seçecek ve subgraph'lere hizmet vererek ağda sorgu ücretleri ve indeksleme ödülleri kazanmanın yanı sıra zincir başına ödüller kazanacaklardır. MIPs katılımcıları performanslarına, ağ ihtiyaçlarına hizmet etme becerilerine ve topluluk desteğine göre puanlanacaktır. -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. +### 4. Ağın yeni bir zincir için hazır olduğunu nasıl anlayacağız? -### 5. How are rewards divided per chain?
+Graph Vakfı, hazır olma durumunu en iyi şekilde değerlendirmek adına QoS performans ölçümlerini, ağ performansını ve topluluk kanallarını izleyecektir. Öncelik, çok zincirli merkeziyetsiz uygulamaların subgraph'lerini geçirebilmeleri için ağın performans ihtiyaçlarını karşılamasını sağlamaktır. -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. +### 5. Ödüller zincir başına nasıl paylaştırılır? -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? +Zincirlerin düğümleri senkronize etme gereksinimleri, sorgu hacmi ve benimseme açısından farklılık gösterdiği göz önüne alındığında, tüm geri bildirimlerin ve öğrenimlerin elde edilmesini sağlamak için zincir başına ödüller o zincirin döngüsünün sonunda kararlaştırılacaktır. Aynı zamanda İndeksleyiciler, zincir ağ üzerinde desteklendiğinde sorgu ücretleri ve indeksleme ödülleri de kazanabileceklerdir. -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. +### 6. MIPs programındaki tüm zincirleri indekslememiz mi gerekiyor yoksa sadece bir zincir seçip onu indeksleyebilir miyiz? +İstediğiniz zinciri indeksleyebilirsiniz! MIPs programının amacı, İndeksleyicileri istedikleri zincirleri indekslemeleri ve ilgilendikleri web3 ekosistemlerini desteklemeleri için ihtiyaç duydukları araç ve bilgilerle donatmaktır. Bununla birlikte, her zincir için test ağından ana ağa kadar aşamalar bulunmaktadır. İndekslediğiniz zincirler için tüm aşamaları tamamladığınızdan emin olun. Aşamalar hakkında daha fazla bilgi edinmek için [MIPs notion sayfasına](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) göz atın. -### 7. When will rewards be distributed? -### 8. How does scoring work? +MIPs ödülleri zincir başına, performans ölçütleri karşılandığında ve geçirilen subgraph'ler bu İndeksleyiciler tarafından desteklendiğinde dağıtılacaktır. Zincir döngüsünün ortalarında zincir başına toplam ödüller hakkında paylaşılacak bilgilere dikkat edin. +### 7. Ödüller ne zaman dağıtılacak? -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: -**Subgraph Coverage** +İndeksleyiciler, program boyunca puanlamaya dayalı ödüller için liderlik tablosunda yarışacaklardır. Program puanlaması aşağıdakilere göre yapılacaktır: -- Are you providing maximal support for subgraphs per chain? +**Subgraph Kapsamı** -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support.
+- Zincir başına subgraphlar için maksimum destek sağlıyor musunuz? -**Quality Of Service** +- MIP'ler sırasında, büyük İndeksleyicilerin destekledikleri zincir başına subgraphlar'ın %50'sinden fazlasını stake etmeleri beklenir. -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? +**Hizmet Kalitesi** -- Is the Indexer supporting dapp developers being reactive to their needs? +- İndeksleyici zincire iyi bir Hizmet Kalitesi (gecikme süresi, güncel veri, çalışma süresi, vb.) ile hizmet veriyor mu? -Is Indexer allocating efficiently, contributing to the overall health of the network? +- İndeksleyici, merkeziyetsiz uygulama geliştiricilerinin ihtiyaçlarına karşı reaktif olmalarını destekliyor mu? -**Community Support** +İndeksleyici ağın genel sağlığına katkıda bulunarak verimli bir şekilde tahsis ediyor mu? -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? +**Topluluk Desteği** -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? +- İndeksleyici, diğer İndeksleyicilerle çoklu zincire hazırlanmalarına yardımcı olmak için işbirliği yapıyor mu? -### 9. How will the Discord role be assigned? +- İndeksleyici program boyunca çekirdek geliştiricilere geri bildirim sağlıyor mu veya Forum'daki İndeksleyicilerle bilgi paylaşıyor mu? -Moderators will assign the roles in the next few days. +### 9. Discord rolü nasıl verilecek? -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? +Moderatörler önümüzdeki birkaç gün içinde rolleri verecektir. -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. +### 10. Programı bir test ağı üzerinde başlatmak ve daha sonra ana ağa geçmek sorun olur mu? Düğümümü tanımlayabilecek ve ödülleri dağıtırken dikkate alabilecek misiniz? -### 11. At what point do you expect participants to add a mainnet deployment? +Evet, aslında sizden bunu yapmanız bekleniyor. Birkaç aşama Görli'de ve bir tanesi de ana ağda. -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +### 11. Katılımcıların hangi noktada bir ana ağ dağıtımı eklemesini bekliyorsunuz? -### 12. Will rewards be subject to vesting? +Aşama 3 sırasında bir ana ağ indeksleyicisine sahip olma gereksinimi olacaktır. Bu konuda daha fazla bilgi [yakında bu notion sayfasında paylaşılacaktır.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. +### 12. Ödüller hak edişe(vesting) tabi olacak mı? -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? +Programın sonunda dağıtılacak yüzde, hak edişe tabi olacaktır. Bu konuda daha fazla bilgi İndeksleyici Sözleşmesinde paylaşılacaktır. -Yes +### 13. Birden fazla üyesi olan ekipler için, tüm ekip üyelerine bir MIPs Discord rolü verilecek mi? -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? +Evet -Yes +### 14. MIPs test ağına katılmak için graph küratör programından kilitli tokenleri kullanmak mümkün mü? -### 15. 
During the MIPs program, will there be a period to dispute invalid POI? +Evet -To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation +### 15. MIPs programı sırasında, geçersiz POI'ye itiraz etmek için bir süre olacak mı? -### 17. Can we combine two vesting contracts? +Henüz kararlaştırılmadı. Daha fazla bilgi için lütfen bu sayfayı düzenli aralıklarla ziyaret edin veya talebiniz acilse lütfen info@thegraph.foundation adresine e-posta gönderin -No. The options are: you can delegate one to the other one or run two separate indexers. +### 17. İki hakediş sözleşmesini birleştirebilir miyiz? -### 18. KYC Questions? +Hayır. Seçenekler şunlardır: Birini diğerine devredebilir veya iki ayrı indeksleyici çalıştırabilirsiniz. -Please email info@thegraph.foundation +### 18. KYC Soruları? -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? +Lütfen info@thegraph.foundation adresine e-posta gönderin -Yes +### 19. Gnosis zincirini indekslemeye hazır değilim, hazır olduğumda başka bir zincirden ona geçip indekslemeye başlayabilir miyim? -### 20. Are there recommended regions to run the servers? +Evet -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. +### 20. Sunucuları çalıştırmak için önerilen bölgeler var mı? -### 21. What is “handler gas cost”? +Bölgeler hakkında tavsiyelerde bulunmuyoruz. Konum seçerken, kripto para birimleri için büyük pazarların nerede olduğunu göz önünde bulundurmayı düşünebilirsiniz. -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. +### 21. "İşleyici gas maliyeti" nedir? + +Bir işleyiciyi yürütmenin maliyetinin deterministik ölçüsüdür. Adından da anlaşılabileceği gibi, blok zincirlerindeki gas maliyetiyle ilgisi yoktur. diff --git a/website/pages/tr/network/benefits.mdx b/website/pages/tr/network/benefits.mdx index 839a0a7b9cf7..633b6a7740c8 100644 --- a/website/pages/tr/network/benefits.mdx +++ b/website/pages/tr/network/benefits.mdx @@ -1,96 +1,96 @@ --- -title: The Graph Network vs. Self Hosting +title: Graph Ağı vs Kendi Kendine Barındırma socialImage: https://thegraph.com/docs/img/seo/benefits.jpg --- -The Graph’s decentralized network has been engineered and refined to create a robust indexing and querying experience—and it’s getting better every day thanks to thousands of contributors around the world. +Graph'ın merkeziyetsiz ağı, sağlam bir indeksleme ve sorgulama deneyimi oluşturmak için tasarlanıp iyileştirildi ve dünya genelindeki binlerce katılımcı sayesinde her geçen gün daha da iyiye gidiyor. -The benefits of this decentralized protocol cannot be replicated by running a `graph-node` locally. The Graph Network is more reliable, more efficient, and less expensive. +Bu merkeziyetsiz protokolün faydaları, yerel olarak bir `graph node`'u çalıştırılarak arttırılamaz. Graph ağı daha güvenilir, daha verimli ve daha ucuzdur. 
-Here is an analysis: +İşte bir analiz: -## Why You Should Use The Graph Network +## Neden Graph Ağını Kullanmalısınız -- 60-98% lower monthly cost -- $0 infrastructure setup costs -- Superior uptime -- Access to 438 Indexers (and counting) -- 24/7 technical support by global community +- %60-98 arası daha düşük aylık maliyet +- 0$ altyapı kurulum maliyeti +- Üstün çalışma süresi +- Dünya çapındaki yüzlerce bağımsız İndeksleyiciye erişim +- Küresel topluluk tarafından 7/24 teknik destek -## The Benefits Explained +## Faydaların Açıklaması -### Lower & more Flexible Cost Structure +### Daha Düşük & Daha Esnek Maliyet Yapısı -No contracts. No monthly fees. Only pay for the queries you use—with an average cost-per-query of $0.0002. Queries are priced in USD and paid in GRT. +Sözleşme yok. Aylık ücret yok. Yalnızca kullandığınız sorgular için ödeme yaparsınız; ortalama sorgu başına maliyet 0,0002 USD'dir. Sorgular USD cinsinden fiyatlandırılır ve GRT olarak ödenir. -Query costs may vary; the quoted cost is the average at time of publication (December 2022). +Sorgu maliyetleri değişebilir; alıntılanan maliyet, yayın tarihindeki (Aralık 2022) ortalama maliyettir. -## Low Volume User (less than 30,000 queries per month) +## Düşük Hacimli Kullanıcı (ayda 30.000'den az sorgu) -| Cost Comparison | Self Hosted | Graph Network | +| Maliyet Karşılaştırması | Kendi Kendine Barındırılan | Graph Ağı | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $0+ | ~$15 per month | -| Engineering time | $400 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | -| Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $750+ per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $750+ | ~$15 | - -## Medium Volume User (3,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Aylık sunucu maliyeti\* | Aylık 350$ | 0$ | +| Sorgu maliyetleri | 0$ + | Aylık yaklaşık 15$ | +| Mühendislik süresi | Aylık 400$ | Yok, küresel olarak dağıtılmış indeksleyiciler ile ağa entegre edilmiştir | +| Aylık sorgular | Alt yeteneklerle sınırlı | 30.000 (otomatik ölçeklendirme) | +| Sorgu başına maliyet | 0$ | 0.0005$ | +| Altyapı | Merkezi | Merkeziyetsiz | +| Coğrafi fazlalıklar | Ek node başına 750$+ | Dahil | +| Çalışma süresi | Değişmekte | 99.9%+ | +| Toplam Aylık Maliyet | $750+ | ~$15 | + +## Orta Hacimli Kullanıcı (ayda 3.000.000'den fazla sorgu) + +| Maliyet Karşılaştırması | Kendi Kendine Barındırılan | Graph Ağı | | :-: | :-: | :-: | -| Monthly server cost\* | $350 per month | $0 | -| Query costs | $500 per month | $750 per month | -| Engineering time | $800 per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 3,000,000+ | -| Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | -| Engineering expense | $200 per hour | Included | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $1,650+ | $750 | - -## High Volume User (30,000,000+ queries per month) - -| Cost Comparison | Self Hosted | Graph Network | +| Aylık sunucu maliyeti\* | Aylık 350$ | 0$ | +| Sorgu maliyetleri | Aylık 500$ | Aylık 750$ | +| Mühendislik süresi | Aylık 800$ | Yok, küresel olarak dağıtılmış indeksleyiciler ile 
ağa entegre edilmiştir | +| Aylık sorgular | Alt yeteneklerle sınırlı | 3,000,000+ | +| Sorgu başına maliyet | 0$ | 0.00025$ | +| Altyapı | Merkezi | Merkeziyetsiz | +| Mühendislik gideri | Saatlik 200$ | Dahil | +| Coğrafi yedekleme | Ek node başına 1200$ | Dahil | +| Çalışma süresi | Değişmekte | 99.9%+ | +| Toplam Aylık Maliyet | 1,650$+ | 750$ | + +## Yüksek Hacimli Kullanıcı (ayda 30.000.000'den fazla sorgu) + +| Maliyet Karşılaştırması | Kendi Kendine Barındırılan | Graph Ağı | | :-: | :-: | :-: | -| Monthly server cost\* | $1100 per month, per node | $0 | -| Query costs | $4000 | $4,500 per month | -| Number of nodes needed | 10 | Not applicable | -| Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | -| Queries per month | Limited to infra capabilities | 30,000,000+ | -| Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | -| Geographic redundancy | $1,200 in total costs per additional node | Included | -| Uptime | Varies | 99.9%+ | -| Total Monthly Costs | $11,000+ | $4,500 | +| Aylık sunucu maliyeti\* | Node başına aylık 1100$ | 0$ | +| Sorgu maliyetleri | 4000$ | Aylık 4500$ | +| Gerekli node sayısı | 10 | Şart yok | +| Mühendislik süresi | Ayda 6.000 $ veya daha fazlası | Yok, küresel olarak dağıtılmış indeksleyiciler ile ağa entegre edilmiştir | +| Aylık sorgular | Alt yeteneklerle sınırlı | 30,000,000+ | +| Sorgu başına maliyet | 0$ | 0.00015$ | +| Altyapı | Merkezi | Merkeziyetsiz | +| Coğrafi yedekleme | Ek node başına 1200$ | Dahil | +| Çalışma süresi | Değişmekte | 99.9%+ | +| Toplam Aylık Maliyet | 11,000$+ | 4500$ | -\*including costs for backup: $50-$100 per month +\*yedekleme maliyetleri dahil: aylık $50-$100 -Engineering time based on $200 per hour assumption +Mühendislik süresi, saat başına 200$ varsayımına dayalı -using the max query budget function in the budget billing tab, while maintaining high quality of service +yüksek hizmet kalitesini korurken bütçe faturalandırma sekmesindeki maksimum sorgu bütçe işlevini kullanma -Estimated costs are only for Ethereum Mainnet subgraphs — costs are even higher when self hosting a `graph-node` on other networks. +Tahmini maliyetler yalnızca Ethereum Mainnet subgraph'leri içindir - diğer ağlarda `graph-node`'unu kendi kendine barındırırken maliyetleriniz daha da yüksektir. -Curating signal on a subgraph is an optional one-time, net-zero cost (e.g., $1k in signal can be curated on a subgraph, and later withdrawn—with potential to earn returns in the process). +Bir subgraph'te kürasyon sinyali, isteğe bağlı, tek seferlik, net sıfır maliyettir (örneğin, 1.000$ değerindeki sinyal bir subgraph'te gönderilebilir ve daha sonra geri çekilebilir; bu süreçte getiri elde etme potansiyeli vardır). -Some users may need to update their subgraph to a new version. Due to Ethereum gas fees, an update costs ~$50 at time of writing. +Bazı kullanıcıların subgraphlerini yeni bir sürüme güncellemeleri gerekebilir. Ethereum gaz ücretleri nedeniyle, bir güncelleme bu içeriği yazdığımız tarihe göre yaklaşık 50 $ 'a mal olmaktadır. -Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower than Ethereum mainnet. +[Arbitrum'daki](/arbitrum/arbitrum-faq) gaz ücretlerinin Ethereum ana ağından önemli ölçüde daha düşük olduğunu unutmayın. -## No Setup Costs & Greater Operational Efficiency +## Kurulum Maliyeti Yok & Daha Fazla Operasyonel Verimlilik -Zero setup fees. Get started immediately with no setup or overhead costs. 
No hardware requirements. No outages due to centralized infrastructure, and more time to concentrate on your core product . No need for backup servers, troubleshooting, or expensive engineering resources.
+Sıfır kurulum ücreti. Kurulum veya genel gider maliyeti olmadan hemen başlayın. Donanım gereksinimi de yok. Merkezi altyapı nedeniyle kesinti yok ve temel ürününüze odaklanmak için daha fazla zamana sahipsiniz. Yedekleme sunucularına, sorun gidermeye veya pahalı mühendislik kaynaklarına gerek duymazsınız.
-## Reliability & Resiliency
+## Güvenilirlik & Dayanıklılık
-The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally.
+Graph'ın merkeziyetsiz ağı, kullanıcılara bir `graph-node`'u kendi kendine barındırırken sahip olmadıkları coğrafi yedeklemeye de erişim sağlar. Ağın küresel güvenliğini sağlayan yüzlerce bağımsız indeksleyici tarafından ulaşılan %99,9+ çalışma süresi sayesinde sorgular güvenilir bir şekilde sunulur.
-Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally.
+Özetle: Graph Ağı, yerel olarak bir `graph-node` çalıştırmaya kıyasla daha ucuz, kullanımı daha kolay ve üstün sonuçlar üretir.
-Start using The Graph Network today, and learn how to [upgrade your subgraph to The Graph's decentralized network](/cookbook/upgrading-a-subgraph).
+Graph Ağını bugün kullanmaya başlayın ve [subgraph'ınızı Graph'in merkeziyetsiz ağına nasıl yükselteceğinizi](/cookbook/upgrading-a-subgraph) öğrenin.
diff --git a/website/pages/tr/network/curating.mdx b/website/pages/tr/network/curating.mdx index 797d9b9dd896..32b601cd7550 100644 --- a/website/pages/tr/network/curating.mdx +++ b/website/pages/tr/network/curating.mdx @@ -1,96 +1,96 @@ ---
-title: Curating
+title: Kürasyon
---
-Curators are critical to the Graph decentralized economy. They use their knowledge of the web3 ecosystem to assess and signal on the subgraphs that should be indexed by The Graph Network. Through the Explorer, curators are able to view network data to make signaling decisions. The Graph Network rewards curators who signal on good quality subgraphs with a share of the query fees that subgraphs generate. Curators are economically incentivized to signal early. These cues from curators are important for Indexers, who can then process or index the data from these signaled subgraphs.
+Küratörler, Graph merkeziyetsiz ekonomisi için kritik öneme sahiptir. Graph Ağı tarafından indekslenmesi gereken subgraph’leri değerlendirmek ve bunlar hakkında sinyal vermek için web3 ekosistemi hakkındaki bilgilerini kullanırlar. Küratörler, gezgin aracılığıyla sinyal verme kararları almak için ağ verilerini görüntüleyebilir. Graph Ağı, kaliteli subgraph’lerde sinyal veren küratörleri, bu subgraph’lerin oluşturduğu sorgu ücretlerinden bir pay ile ödüllendirir. Küratörler ekonomik olarak erken sinyal vermeye teşvik edilir. Küratörlerden gelen bu ipuçları, daha sonra bu işaretlenmiş subgraphlerden verileri işleyebilen veya dizine ekleyebilen indeksleyiciler için önemlidir.
-When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. 
If you decide to signal on a specific version instead, shares will always stay on this specific version. +Küratörler sinyal verirken, subgraph'ın belirli bir sürümünde sinyal vermeye veya otomatik taşıma kullanarak sinyal vermeye karar verebilirler. Otomatik geçiş kullanılarak sinyal verildiğinde, bir Küratörün payları her zaman geliştirici tarafından yayınlanan en son sürüme taşınacaktır. Bunun yerine belirli bir sürümde sinyal vermeye karar verirseniz, paylar her zaman bu belirli sürümde kalacaktır. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Kürasyonun riskli olduğunu unutmayın. Lütfen güvendiğiniz subgraph’lerin küratörlüğünü yaptığınızdan emin olmak için gereken özeni gösterin. Bir subgraph oluşturmak izin gerektirmez, bu nedenle insanlar subgraph’ler oluşturabilir ve onlara istedikleri adı verebilirler. Seçme riskleri hakkında daha fazla bilgi için [Graph Akademi'nin Kürasyon Kılavuzu](https://thegraph.academy/curators/)'na göz atın. -## Bonding Curve 101 +## Bağlanma Eğrisi 101 -First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. +Önce bir adım geri atıyoruz. Her bir subgraph’in, bir kullanıcı eğriye sinyal **eklediğinde** kürasyon paylarının basıldığı bir bağlanma eğrisi vardır. Her subgraph’in bağlanma eğrisi benzersizdir. Bağlanma eğrileri, bir subgraph’te kürasyon payı basma fiyatı, basılan hisse sayısına göre doğrusal olarak artacak şekilde tasarlanmıştır. -![Price per shares](/img/price-per-share.png) +![Hisse başına fiyat](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +Sonuç olarak, fiyat doğrusal olarak artar, bu da hisse satın almanın zaman içinde daha pahalı hale geleceği anlamına gelir. İşte ne demek istediğimize dair bir örnek, aşağıdaki bağlanma eğrisine bir göz atın: -![Bonding curve](/img/bonding-curve.png) +![Bağlanma eğrisi](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Bir subgraph için paylaşımlar yapan iki küratörümüz olduğunu düşünelim: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. -- **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. 
Signaling early means you put in less GRT for each share. By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. +- Küratör A, subgraph’te ilk sinyal verendir. Eğriye 120.000 GRT ekleyerek 2000 adet hisse basabiliyorlar. +- Küratör B'nin sinyali daha sonra bir noktada subgraph’tedir. Küratör A ile aynı miktarda hisse almak için eğriye 360.000 GRT eklemek zorunda kalacaklardı. +- Her iki küratör de toplam küratörlük hisselerinin yarısına sahip olduğundan, eşit miktarda küratör telif hakkı alacaklardır. +- Küratörlerden herhangi biri şimdi 2000 kürasyon hissesini yakacak olsaydı, 360.000 GRT alacaktı. +- Kalan küratör artık o subgraph için tüm küratör telif ücretlerini alacaktı. GRT'yi çekmek için hisselerini yakacak olsalardı, 120.000 GRT alacaklardı. +- **ÖZETLE:** Küratörlük hisselerinin GRT değeri, bağlanma eğrisi tarafından belirlenir ve değişken olabilir. Büyük kayıplar verme potansiyeli vadır. Erken sinyal vermek, her paylaşım için daha az GRT koymanız anlamına gelir. Ek olarak, bu, aynı subgraph için sonraki küratörlerden GRT başına daha fazla küratör telif hakkı kazandığınız anlamına gelir. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +Genel olarak, bir bağlanma eğrisi, belirteç arzı ile varlık fiyatı arasındaki ilişkiyi tanımlayan matematiksel bir eğridir. Spesifik subgraph düzenleme durumunda, **her bir subgraph hissesinin fiyatı yatırılan her token ile artar** ve **her bir hissenin fiyatı satılan her token ile düşer.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +Graph söz konusu olduğunda, [Bancor'un bir bağlanma eğrisi formülü](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) uygulamasından yararlanılır. -## How to Signal +## Nasıl Sinyal Gönderilir -Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) +Artık bağlanma eğrisinin nasıl çalıştığıyla ilgili temel bilgileri ele aldığımıza göre, bir subgraph’te sinyal vermeye bu şekilde devam edeceksiniz. Graph Gezgini'ndeki Küratör sekmesinde, küratörler ağ istatistiklerine dayalı olarak belirli subgraph’lerde sinyal verebilir ve sinyali iptal edebilir. Bunu Explorer(Gezgin)'da nasıl yapacağınıza ilişkin adım adım genel kılavuz için [burayı](/network/explorer) tıklayın. -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Bir küratör, belirli bir subgraph versiyonunda sinyal vermeyi seçebilir veya sinyallerinin o subgraph’in en yeni üretim yapısına otomatik olarak taşınmasını seçebilir. Her ikisi de geçerli stratejilerdir ve kendi artıları ve eksileri vardır. 
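The Curator A/B walkthrough above can be reproduced with a simple linear price curve. The sketch below is only an illustration of the linear-pricing idea described here; the live protocol uses Bancor's bonding-curve formula, and the slope value is chosen purely so that the first 2,000 shares cost 120,000 GRT, matching the example.

```typescript
// Linear bonding-curve sketch: the price per share grows linearly with shares minted.
// Illustrative only — the protocol itself relies on Bancor's bonding-curve formula.
const SLOPE_GRT_PER_SHARE = 0.06; // assumed slope: the s-th share costs ~0.06 * s GRT

// Cost to mint (or value returned when burning) the shares between `from` and `to`
// is the area under the linear price curve.
function mintCostGrt(from: number, to: number): number {
  return (SLOPE_GRT_PER_SHARE / 2) * (to * to - from * from);
}

console.log(mintCostGrt(0, 2_000));     // 120000 -> Curator A mints the first 2,000 shares
console.log(mintCostGrt(2_000, 4_000)); // 360000 -> Curator B pays 3x as much for the next 2,000 shares
console.log(mintCostGrt(2_000, 4_000)); // 360000 -> burning 2,000 shares at this point returns 360,000 GRT
```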
-Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. +Belirli bir sürümde sinyal verme, özellikle bir subgraph birden fazla dApp tarafından kullanıldığında kullanışlıdır. Bir dApp'in subgraph’i yeni özelliklerle düzenli olarak güncellemesi gerekebilir. Başka bir dApp, daha eski, iyi test edilmiş bir subgraph sürümünü kullanmayı tercih edebilir. İlk iyileştirmenin ardından, %1'lik bir standart vergi tahakkuk ettirilir. -Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. +Sinyalinizin otomatik olarak en yeni üretim yapısına taşınması, sorgu ücretleri tahakkuk etmeye devam etmenizi sağlamak için değerli olabilir. Her kürasyon yaptığınızda, %1 kürasyon vergisi alınır. Ayrıca her geçişte %0,5 kürasyon vergisi ödersiniz. Subgraph geliştiricilerin sık sık yeni sürümler yayınlamaları önerilmez - otomatik olarak taşınan tüm derleme paylaşımları için %0,5'lik bir iyileştirme vergisi ödemeleri gerekir. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Not:** Belirli bir subgraph’e sinyal gönderen ilk adres, ilk küratör olarak kabul edilir ve sonraki küratörlerin geri kalanından çok daha fazla yoğun çalışma yapması gerekir çünkü ilk küratör kürasyon paylaşım belirteçlerini başlatır, bağlanma eğrisini başlatır ve ayrıca belirteçleri Graph proxy'sine aktarır. -## What does Signaling mean for The Graph Network? +## Sinyal Gönderme, Graph Ağı için Ne Anlama Geliyor? -For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +Nihai tüketicilerin bir subgraph’i sorgulayabilmesi için öncelikle subgraph’in indekslenmesi gerekir. İndeksleme, sonuçların daha hızlı bulunabilmesi için dosyalara, verilere ve meta verilere bakıldığı, kataloglandığı ve ardından indekslendiği bir süreçtir. Bir subgraph’in verilerinin aranabilir olması için düzenlenmesi gerekir. -And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. +Ve bu nedenle, indeksleyicilerin hangi subgraph’leri indekslemeleri gerektiğini tahmin etmeleri gerekseydi, hangi subgraph’lerin kaliteli olduğunu doğrulamalarının hiçbir yolu olmayacağından dolgun sorgu ücretleri kazanma şansları düşük olurdu. Küratörlüğe giriş yapalım. 
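To make the tax figures above concrete, here is a small sketch of what signaling and auto-migration might cost a Curator, using the percentages stated on this page (1% curation tax on signaling, 0.5% on each auto-migration). The GRT amounts are hypothetical, and the migration tax is applied to the GRT value of the migrated signal purely for illustration.

```typescript
// Curation tax sketch using the rates described above.
const CURATION_TAX = 0.01;        // 1% on each curation
const AUTO_MIGRATION_TAX = 0.005; // 0.5% on each auto-migration

function signalCost(grt: number) {
  return { taxBurned: grt * CURATION_TAX, signalAfterTax: grt * (1 - CURATION_TAX) };
}

function migrationCost(signalGrt: number): number {
  return signalGrt * AUTO_MIGRATION_TAX;
}

// Hypothetical example: signaling 1,000 GRT, then one auto-migration to a new version.
console.log(signalCost(1_000));  // { taxBurned: 10, signalAfterTax: 990 }
console.log(migrationCost(990)); // 4.95 GRT owed on the auto-migrated signal
```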
-Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! +Küratörler Graph ağını verimli hale getirir ve sinyal verme, küratörlerin indeksleyicilere bir subgraph’in dizine eklenmesinin iyi olduğunu bildirmek için kullandıkları süreçtir; burada GRT, bir subgraph için bir bağlanma eğrisine eklenir. İndeksleyiciler, doğası gereği bir küratörden gelen sinyale güvenebilir, çünkü sinyalleme üzerine, küratörler subgraph için bir kürasyon payı basar ve onlara subgraph’in yönlendireceği gelecekteki sorgu ücretlerinin bir kısmına hak verir. Küratör sinyali, Graph Küratörlük Paylaşımları (GCS) adı verilen ERC20 belirteçleri olarak temsil edilir. Daha fazla sorgu ücreti kazanmak isteyen küratörler, GRT'lerini ağa güçlü bir ücret akışı oluşturacağını tahmin ettikleri subgraph’lere sinyal vermelidir. Küratörlerin kötü davranışlarından dolayı kesinti yapılamaz, ancak ağın bütünlüğüne zarar verebilecek kötü karar vermeyi caydırmak için küratörlere bir depozito vergisi uygulanır. Küratörler ayrıca, düşük kaliteli bir subgraph üzerinde küratörlük yapmayı seçerlerse daha az sorgu ücreti alırlar, çünkü işlenecek daha az sorgu veya bu sorguları işlemek için daha az indeksleyici olacaktır. Aşağıdaki şemaya bir göz atın! -![Signaling diagram](/img/curator-signaling.png) +![Sinyal diyagramı](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +İndeksleyiciler, Graph Gezgini'nde gördükleri iyileştirme sinyallerine dayalı olarak dizine eklenecek subgraph’leri bulabilirler (ekran görüntüsü alt kısımda yer alıyor). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Subgraph gezgini](/img/explorer-subgraphs.png) -## Risks +## Riskler -1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. -2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. -3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. 
A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. - - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. +1. Graph'ta sorgu pazarı doğası gereği gençtir ve gelişmekte olan pazar dinamikleri nedeniyle %APY'nizin beklediğinizden daha düşük olma riski vardır. +2. Kürasyon Ücreti - bir Küratör subgraph üzerinde GRT sinyali verdiğinde, %1 kürasyon kesintisine uğrar. Bu ücret yakılır ve geri kalanı bağlanma eğrisinin rezerv arzına eklenir. +3. Küratörler GRT'yi çekmek için hisselerini yaktığında, kalan hisselerin GRT değeri düşecektir. Bazı durumlarda küratörlerin hisselerini **bir anda** yakmaya karar verebileceğini unutmayın. Bu durum, bir dApp geliştiricisinin sürüm oluşturma/geliştirme ve subgraph’ini sorgulamayı bırakması veya bir subgraph’in başarısız olması durumunda yaygın olabilir. Sonuç olarak, geri kalan küratörler ilk GRT'lerinin yalnızca bir kısmını geri alabilir. Daha düşük risk profiline sahip bir ağ rolü için bkz [Delegatörler](/network/delegating). +4. Bir subgraph, bir hata nedeniyle başarısız olabilir. Başarısız bir subgraph, sorgu ücreti tahakkuk ettirmez. Sonuç olarak, geliştiricinin hatayı düzeltip yeni bir sürüm dağıtmasını beklemeniz gerekecek. + - Bir subgraph’in en yeni sürümüne abone olduysanız, paylaşımlarınız o yeni sürüme otomatik olarak taşınacaktır. Bu, %0,5 kürasyon vergisine tabi olacaktır. + - Belirli bir subgraph sürümünde sinyal verdiyseniz ve başarısız olursa, küratörlük paylaşımlarınızı manuel olarak yakmanız gerekecektir. Küratör olmakla ilişkili bir risk olan, küratörlük eğrisine başlangıçta yatırdığınızdan daha fazla veya daha az GRT alabileceğinizi unutmayın. Daha sonra yeni subgraph versiyonunda sinyal verebilirsiniz, böylece %1'lik bir kürasyon vergisine tabi olursunuz. -## Curation FAQs +## Sıkça Sorulan Sorular -### 1. What % of query fees do Curators earn? +### 1. Küratörler sorgu ücretlerinin yüzde kaçını kazanıyor? -By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. +Bir subgraph’te sinyal vererek, bu subgraph’in ürettiği tüm sorgu ücretlerinden pay alacaksınız. Tüm sorgu ücretlerinin %10'u, küratörlerin kürasyon paylarına orantılı olarak gider. Bu %10 yönetişime tabidir. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Hangi subgraph’lerin yüksek kalitede olduğuna nasıl karar verebilirim? -Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. 
As a result: +Yüksek kaliteli subgraph’ler bulmak karmaşık bir iştir, ancak buna pek çok farklı şekilde yaklaşılabilir. Bir küratör olarak, sorgu hacmini artıran güvenilir subgraph’ler aramak istersiniz. Güvenilir bir subgraph, eksiksiz, doğruysa ve bir dApp'in veri ihtiyaçlarını destekliyorsa değerli olabilir. Kötü tasarlanmış bir subgraph’in gözden geçirilmesi veya yeniden yayınlanması gerekebilir ve bu da başarısız olabilir. Küratörlerin bir subgraph’in değerli olup olmadığını değerlendirmek için bir subgraph’in mimarisini veya kodunu gözden geçirmesi çok önemlidir. Sonuç olarak: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future -- Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. +- Küratörler, tek bir subgraph’in gelecekte nasıl daha yüksek veya daha düşük bir sorgu hacmi oluşturabileceğini denemek ve tahmin etmek için bir ağ hakkındaki anlayışlarını kullanabilirler +- Küratörler, Graph Gezgini aracılığıyla kullanılabilen metrikleri de anlamalıdır. Geçmiş sorgu hacmi ve subgraph geliştiricisinin kim olduğu gibi ölçümler, bir subgraph’in sinyal vermeye değer olup olmadığını belirlemeye yardımcı olabilir. -### 3. What’s the cost of updating a subgraph? +### 3. Bir subgraph'ı güncellemenin maliyeti nedir? -Migrating your curation shares to a new subgraph version incurs a curation tax of 1%. Curators can choose to subscribe to the newest version of a subgraph. When curation shares get auto-migrated to a new version, Curators will also pay half curation tax, ie. 0.5%, because updating subgraphs is an on-chain action that costs gas. +Kürasyon paylarınızı yeni bir subgraph sürümüne geçirmek %1'lik bir kürasyon kesintisini doğurur. Küratörler bir subgraph'ın en yeni sürümüne abone olmayı seçebilirler. Kürasyon payları yeni bir sürüme otomatik olarak taşındığında, Küratörler de kürasyon kesintisinin yarısını, yani %0,5'ini ödeyecektir, çünkü subgraphları güncellemek gas maliyeti olan bir zincir içi eylemdir. -### 4. How often can I update my subgraph? +### 4. Subgraph'ımı ne sıklıkla güncelleyebilirim? -It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. +Subgraph'ınızı çok sık güncellememeniz önerilir. Daha fazla ayrıntı için yukarıdaki soruya bakın. -### 5. Can I sell my curation shares? +### 5. Kürasyon hisselerimi satabilir miyim? -Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. +Kürasyon hisseleri, aşina olabileceğiniz diğer ERC20 tokenleri gibi "satın alınamaz" veya "satılamaz"dır. Yalnızca belirli bir subgraph için bağlanma eğrisi boyunca basılabilir (oluşturulabilir) veya yakılabilir (yok edilebilir). Yeni bir sinyal basmak için gereken GRT miktarı ve mevcut sinyali yaktığınızda alacağınız GRT miktarı, bu bağlanma eğrisi tarafından belirlenir. 
Bir küratör olarak, küratörlük hisselerinizi GRT'yi çekmek için yaktığınızda, başlangıçta yatırdığınızdan daha fazla veya daha az GRT alabileceğinizi bilmelisiniz. -Still confused? Check out our Curation video guide below: +Kafanız hala karışık mı? Endişelenmeyin. Aşağıdaki kürasyon video kılavuzumuza lütfen bir göz atın: diff --git a/website/pages/tr/network/delegating.mdx b/website/pages/tr/network/delegating.mdx index 4a6d6e00b73e..ba7c066ce8a2 100644 --- a/website/pages/tr/network/delegating.mdx +++ b/website/pages/tr/network/delegating.mdx @@ -1,98 +1,98 @@ --- -title: Delegating +title: Delegasyon --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +Delegatörler, GRT'yi bir veya daha fazla indeksleyiciye devreden (yani stake eden) ağ katılımcılarıdır. Delegatörler, kendileri bir Graph Node’u çalıştırmadan ağın güvenliğini sağlamaya katkıda bulunurlar. -By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +Delegatörler, bir indeksleyiciye yetki vererek, indeksleyicinin sorgu ücretlerinin ve ödüllerinin bir kısmını kazanır. Bir indeksleyicinin işleyebileceği sorgu miktarı, indeksleyicinin kendi (ve stake edilen) payına ve indeksleyicinin her sorgu için aldığı ücrete bağlıdır, bu nedenle, bir indeksleyiciye ne kadar pay tahsis edilirse, o kadar çok potansiyel sorgu işleyebilir. -## Delegator Guide +## Delegatör Kılavuzu -This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: +Bu kılavuz, Graph ağında nasıl etkili bir delegatör olunacağını açıklayacaktır. Delegatörler, stake ettikleri paylarına göre tüm indeksleyiciler ile birlikte protokolün kazançlarını paylaşır. Bir delegatör, indeksleyicileri birden çok faktöre dayalı olarak seçmek için en iyi muhakemesini kullanmalıdır. Bilgiler internette yaygın olarak bulunduğundan, bu kılavuzun Metamask'ı doğru şekilde ayarlamak gibi adımların üzerinden geçmeyeceğini lütfen unutmayın. Bu kılavuzda üç bölüm bulunmaktadır: -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator -- A video guide showing the steps to delegate in the Graph Network UI +- Graph Network'te tokenları stake etmenin riskleri +- Delegatör olarak beklenen getiriler nasıl hesaplanır +- Graph Network kullanıcı arayüzünde stake etme adımlarını gösteren bir video kılavuzu ve rehber makale -## Delegation Risks +## Delegasyon Riskleri -Listed below are the main risks of being a Delegator in the protocol. +Protokolde delegatör olmanın ana riskleri aşağıda listelenmiştir. -### The delegation tax +### Delegasyon Vergisi -Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. 
+Delegatörler kötü davranış nedeniyle cezalandırılamaz, ancak ağın bütünlüğüne zarar verebilecek kötü kararlar vermeyi caydırmak için delegatörler üzerinde bir vergi vardır.
-It is important to understand that every time you delegate, you will be charged 0.5%. This means if you are delegating 1000 GRT, you will automatically burn 5 GRT.
+Her stake ettiğinizde sizden %0,5 ücret alınacağını anlamak önemlidir. Bu, 1000 GRT'yi stake ediyorsanız, otomatik olarak 5 GRT yakacağınız anlamına gelir.
-This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation.
+Bu, güvende olmak için, bir delegatörün bir indeksleyiciye stake ederek geri dönüşlerinin ne olacağını hesaplaması gerektiği anlamına gelir. Örneğin, bir delegatör, delegasyonuna ilişkin %0,5 vergiyi geri kazanmasının kaç gün süreceğini hesaplayabilir.
-### The delegation unbonding period
+### Delegasyonun Sonlandırılma Süresi
-Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. This means they cannot transfer their tokens, or earn any rewards for 28 days.
+Bir delegatör, stake ettiği tokenları geri almak istediğinde, tokenları 28 günlük bir soğuma süresine tabidir. Bu, 28 gün boyunca tokenlarını transfer edemeyecekleri veya herhangi bir ödül kazanamayacakları anlamına gelir.
-One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT.
+Dikkate alınması gereken bir şey de akıllıca bir indeksleyici seçmektir. Güvenilir olmayan veya iyi bir iş çıkarmayan bir indeksleyici seçerseniz, delegasyonunuzu geri çekmek isteyeceksiniz; bu da ödül kazanmak için birçok fırsatı kaybedeceğiniz anlamına gelir ve GRT yakmak kadar kötü olabilir.
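The break-even reasoning mentioned above (how long until rewards cover the 0.5% delegation tax) can be sketched numerically. The annual reward rate below is a made-up assumption for illustration only, not a protocol figure; actual returns depend entirely on the chosen Indexer.

```typescript
// Days until delegation rewards cover the 0.5% delegation tax.
const DELEGATION_TAX = 0.005;

function daysToRecoverTax(delegatedGrt: number, assumedAnnualRewardRate: number): number {
  const taxBurned = delegatedGrt * DELEGATION_TAX;                  // e.g. 1,000 GRT -> 5 GRT burned
  const dailyRewards = (delegatedGrt * assumedAnnualRewardRate) / 365;
  return taxBurned / dailyRewards;
}

// Hypothetical: delegating 1,000 GRT at an assumed 10% annual reward rate.
console.log(daysToRecoverTax(1_000, 0.1).toFixed(1)); // ≈ 18.3 days to earn back the tax
```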
    - ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Delegation unbonding](/img/Delegation-Unbonding.png) _Delegasyon Kullanıcı Arayüzündeki %0,5 ücreti ve 28 günlük + ayrılma süresini bir yere not edin._
-### Choosing a trustworthy Indexer with a fair reward payout for Delegators
+### Delegatörler İçin Adil Bir Ödül Ödemesi Olan Güvenilir Bir İndeksleyici Seçme
-This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters.
+Bu anlaşılması gereken önemli bir kısımdır. Öncelikle, Delegasyon Parametreleri olan çok önemli üç değeri tartışalım.
-Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards.
+İndeksleme Ödül Kesintisi - İndeksleme ödül kesintisi, ödüllerin indeksleyicinin kendisi için tutacağı kısmıdır. Bu, %100'e ayarlanırsa, delegatör olarak 0 indeksleme ödülü alacağınız anlamına gelir. Kullanıcı arayüzünde %80 görürseniz, bu bir delegatör olarak %20 alacağınız anlamına gelir. Önemli bir not - ağın başlangıcında, indeksleme ödülleri, ödüllerin çoğunluğunu oluşturacaktır.
    - ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *En iyi indeksleyici, delegatörlere ödüllerin %90'ını veriyor. + Ortadaki, delegatörlere %20 veriyor. Alttaki, delegatörlere ~%83 veriyor.*
    -- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Sorgu Ücreti Kesintisi - Bu, tam olarak indeksleme ödül kesintisi gibi çalışır. Ancak bu, özellikle indeksleyicinin topladığı sorgu ücretlerinin getirileri içindir. Ağın başlangıcında, sorgu ücretlerinden elde edilen getirilerin, indeksleme ödülüne kıyasla çok küçük olacağı belirtilmelidir. Ağdaki sorgu ücretlerinin ne zaman daha belirgin olmaya başlayacağını belirlemek için ağa dikkat edilmesi önerilir. -As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. +Gördüğünüz gibi, doğru indeksleyiciyi seçerken üzerinde düşünülmesi gereken çok şey var. Bu nedenle, delegatörleri tutarlı bir şekilde ödüllendirmek için en iyi sosyal itibara ve teknik itibara sahip indeksleyicilerin kim olduğunu belirlemek için Graph Discord üzerinde vakit geçirmenizi önemle tavsiye ederiz. İndeksleyicilerin çoğu Discord'da çok aktiftir ve sorularınızı yanıtlamaktan mutluluk duyacaktır. Birçoğu test ağında aylardır indeksleme yapıyor ve delagatörlerin ağın sağlığını ve başarısını iyileştirdiği için iyi bir getiri elde etmesine yardımcı olmak için ellerinden gelenin en iyisini yapıyor. -### Calculating Delegators expected return +### Delegatörlerin Tahmini Getirisini Hesaplama -A Delegator has to consider a lot of factors when determining the return. These include: +Bir delegatör, getiriyi belirlerken birçok faktörü dikkate almak zorundadır. Bunlar: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- Teknik bir delegatör, indeksleyicinin kendileri için mevcut olan stake edilmiş tokenları kullanma becerisine de bakabilir. Bir indeksleyici, mevcut tüm tokenları tahsis etmiyorsa, kendileri veya delegatörleri için olabilecekleri maksimum kârı elde etmiyorlardır. +- Şu anda ağda bulunan bir indeksleyici, 1 ile 28 gün arasında herhangi bir zamanda bir tahsisi kapatmayı ve ödülleri toplamayı seçebilir. Bu nedenle, bir indeksleyicinin henüz toplamadığı birçok ödülü olabilir ve bu nedenle toplam ödülleri düşük olabilir. Bu ilk günlerde dikkate alınmalıdır. 
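Pulling together the reward-cut and allocation points above, the sketch below shows how a Delegator might estimate the share of indexing rewards that actually reaches the delegation pool. The 80%/20% split mirrors the UI example given earlier; the reward amount itself is hypothetical.

```typescript
// Delegation pool's share of indexing rewards after the Indexer's reward cut.
// An 80% indexing reward cut (as in the UI example above) leaves 20% for Delegators.
function delegatorPoolRewards(totalIndexingRewardsGrt: number, indexingRewardCut: number): number {
  return totalIndexingRewardsGrt * (1 - indexingRewardCut);
}

// Hypothetical: 10,000 GRT of indexing rewards in a period, with an 80% reward cut.
console.log(delegatorPoolRewards(10_000, 0.8)); // 2000 GRT flows to the delegation pool
```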
-### Considering the query fee cut and indexing fee cut +### Sorgu Ücreti ve İndeksleme Ücreti Kesintilerini Dikkate Almak -As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: +Yukarıdaki bölümlerde açıklandığı gibi, sorgu ücreti kesintisi ve indeksleme ücreti kesintilerini ayarlama konusunda şeffaf ve dürüst bir indeksleyici seçmelisiniz. Bir delegatör, ne kadar bir zaman arabelleğine sahip olduklarını görmek için parametrelerin soğuma süresine de bakmalıdır. Bu yapıldıktan sonra, delegatörlerin aldığı ödüllerin miktarını hesaplamak oldukça basittir: -![Delegation Image 3](/img/Delegation-Reward-Formula.png) +![3. Delegasyon Görseli](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### İndeksleyicinin Stake Havuzunu Dikkate Almak -Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: +Bir delegatörün göz önünde bulundurması gereken bir başka şey de, stake havuzunun ne kadarına sahip olduklarıdır. Tüm stake ödülleri, delegatörün havuza yatırdığı miktara göre belirlenen havuzun basit bir şekilde yeniden dengelenmesiyle eşit olarak paylaşılır. Bu, delagatöre havuzdan bir pay verir: -![Share formula](/img/Share-Forumla.png) +![Formül paylaş](/img/Share-Forumla.png) -Using this formula, we can see that it is actually possible for an Indexer who is offering only 20% to Delegators, to actually be giving Delegators an even better reward than an Indexer who is giving 90% to Delegators. +Bu formülü kullanarak, delegatörlere sadece %20 teklif eden bir indeksleyicinin, delegatörlere %90 veren bir indeksleyiciden daha iyi bir ödül vermesinin gerçekten mümkün olduğunu görebiliriz. -A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. +Bu nedenle bir delegatör, delegatörlere %20 sunan indeksleyicinin daha iyi bir getiri sunduğunu belirlemek için minik bir matematik yapabilir. -### Considering the delegation capacity +### Delegasyon Kapasitesini Dikkate Almak -Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. +Dikkate alınması gereken bir diğer husus da delegasyon kapasitesidir. Şu anda delegasyon oranı 16 olarak ayarlanmıştır. Bu, bir indeksleyici 1.000.000 GRT stake ettiyse, delegasyon kapasitesinin protokolde kullanabilecekleri 16.000.000 GRT stake edilmiş token olduğu anlamına gelir. Bu miktarı aşan herhangi bir stake edilmiş token, tüm delegatör ödüllerini azaltacaktır. -Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. 
+Bir indeksleyicinin kendisine atanmış 100.000.000 GRT'ye sahip olduğunu ve kapasitelerinin yalnızca 16.000.000 GRT olduğunu hayal edin. Bu, etkili bir şekilde, 84.000.000 GRT token’ının token kazanmak için kullanılmadığı anlamına gelir. Ve tüm delegatör ve indeksleyiciler, olabileceğinden çok daha az ödül kazanacaktır.
-Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making.
+Bu nedenle, bir delegatör, her zaman bir indeksleyicinin delegasyon kapasitesini göz önünde bulundurmalı ve karar verme süreçlerinde bunu hesaba katmalıdır.
-## Delegator FAQs and Bugs
+## Delegatörler için Sıkça Sorulan Sorular ve Hatalar
-### MetaMask "Pending Transaction" Bug
+### MetaMask "Bekleyen İşlem" Hatası
-**When I try to delegate my transaction in MetaMask appears as "Pending" or "Queued" for longer than expected. What should I do?**
+**İşlemimi MetaMask'te gerçekleştirirken, beklenenden daha uzun süre "Beklemede" veya "Sıraya Alındı" olarak görünüyor. Ne yapmalıyım?**
-At times, attempts to delegate to indexers via MetaMask can fail and result in prolonged periods of "Pending" or "Queued" transaction attempts. For example, a user may attempt to delegate with an insufficient gas fee relative to the current prices, resulting in the transaction attempt displaying as "Pending" in their MetaMask wallet for 15+ minutes. When this occurs, subsequent transactions can be attempted by a user, but these will not be processed until the initial transaction is mined, as transactions for an address must be processed in order. In such cases, these transactions can be cancelled in MetaMask, but the transactions attempts will accrue gas fees without any guarantee that subsequent attempts will be successful. A simpler resolution to this bug is restarting the browsesr (e.g., using "abort:restart" in the address bar), which will cancel all previous attempts without gas being subtracted from the wallet. Several users that have encountered this issue and have reported successful transactions after restarting their browser and attempting to delegate.
+Zaman zaman, MetaMask aracılığıyla indeksleyicilere stake girişimleri başarısız olabilir ve uzun süreli "Beklemede" veya "Sıraya Alınmış" işlem uyarılarıyla sonuçlanabilir. Örneğin, bir kullanıcı, mevcut fiyatlara göre yetersiz bir gaz ücreti ile stake işlemi girişiminde bulunabilir ve bu da işlem girişiminin MetaMask cüzdanında 15+ dakika boyunca "Beklemede" olarak görüntülenmesine neden olabilir. Bu gerçekleştiğinde, bir kullanıcı tarafından sonraki işlemler denenebilir, ancak bunlar, bir adres için işlemlerin sırayla işlenmesi gerektiğinden, ilk işlem çıkarılıncaya kadar işlenmez. Bu gibi durumlarda MetaMask'te bu işlemler iptal edilebilir ancak işlem denemeleri, sonraki girişimlerin başarılı olacağına dair herhangi bir garanti olmaksızın gas ücreti tahakkuk edecektir. Bu hatanın daha basit bir çözümü, tarayıcıyı yeniden başlatmaktır (örneğin, adres çubuğunda "abort:restart" komutunu kullanarak); bu, cüzdandan gas düşülmeden önceki tüm denemeleri iptal eder. Bu sorunla karşılaşan birçok kullanıcı, tarayıcısını yeniden başlatıp delegasyonu tekrar denedikten sonra işlemlerinin başarıyla gerçekleştiğini bildirmiştir.
-## Video guide for the network UI
+## Ağ kullanıcı arayüzü için video kılavuzu
-This guide provides a full review of this document, and how to consider everything in this document while interacting with the UI.
+Bu kılavuz, bu belgeyi baştan sona gözden geçirir ve kullanıcı arayüzüyle etkileşim kurarken bu belgedeki her şeyin nasıl dikkate alınacağını gösterir.
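Before moving on, the pool-share and delegation-capacity points discussed in this section can also be sketched numerically. The 16x delegation ratio comes from this page; the stake and reward amounts are hypothetical.

```typescript
// Delegation pool share and over-delegation sketch (delegation ratio 16, per this page).
const DELEGATION_RATIO = 16;

// A Delegator's share of pool rewards is proportional to their share of the pool.
function delegatorShareOfRewards(myDelegation: number, totalPoolDelegation: number, poolRewardsGrt: number): number {
  return (myDelegation / totalPoolDelegation) * poolRewardsGrt;
}

// Delegation beyond the capacity (16x the Indexer's own stake) dilutes everyone's rewards.
function delegationCapacity(indexerSelfStakeGrt: number): number {
  return indexerSelfStakeGrt * DELEGATION_RATIO;
}

console.log(delegationCapacity(1_000_000));                      // 16,000,000 GRT, as in the example above
console.log(delegatorShareOfRewards(10_000, 1_000_000, 5_000));  // hypothetical: 50 GRT of a 5,000 GRT pool payout
```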
diff --git a/website/pages/tr/network/developing.mdx b/website/pages/tr/network/developing.mdx index 9c543348259d..cd3f95a246af 100644 --- a/website/pages/tr/network/developing.mdx +++ b/website/pages/tr/network/developing.mdx @@ -1,53 +1,53 @@ ---
-title: Developing
+title: Geliştirme
---
-Developers are the demand side of The Graph ecosystem. Developers build subgraphs and publish them to The Graph Network. Then, they query live subgraphs with GraphQL in order to power their applications.
+Geliştiriciler, Graph ekosisteminin talep tarafıdır. Geliştiriciler subgraph'ler inşa eder ve bunları Graph ağında yayınlar. Ardından, uygulamalarını güçlendirmek için aktif subgraph'leri GraphQL ile sorgularlar.
-## Subgraph Lifecycle
+## Subgraph Yaşam Döngüsü
-Subgraphs deployed to the network have a defined lifecycle.
+Ağa deploy edilen subgraph'ler tanımlanmış bir yaşam döngüsüne sahiptir.
-### Build locally
+### Yerel olarak geliştirme
-As with all subgraph development, it starts with local development and testing. Developers can use the same local setup whether they are building for The Graph Network, the hosted service or a local Graph Node, leveraging `graph-cli` and `graph-ts` to build their subgraph. Developers are encouraged to use tools such as [Matchstick](https://github.com/LimeChain/matchstick) for unit testing to improve the robustness of their subgraphs.
+Tüm subgraph geliştirmelerinde olduğu gibi, süreç yerel geliştirme ve test etme ile başlar. Geliştiriciler, subgraph'lerini oluşturmak için `graph-cli` ve `graph-ts`'den yararlanarak Graph Network, barındırılan hizmet veya yerel bir Graph node'u için aynı yerel kurulumu kullanabilirler. Geliştiricilerin, subgraph'lerinin sağlamlığını artırmak amacıyla birim testleri için [Matchstick](https://github.com/LimeChain/matchstick) gibi araçları kullanmaları önerilir.
-> There are certain constraints on The Graph Network, in terms of feature and network support. Only subgraphs on [supported networks](/developing/supported-networks) will earn indexing rewards, and subgraphs which fetch data from IPFS are also not eligible.
+> Graph Network'te özellik ve ağ desteği açısından belirli kısıtlamalar vardır. Yalnızca [desteklenen ağlar](/developing/supported-networks) üzerindeki subgraph'ler indeksleme ödülleri kazanır ve IPFS'den veri getiren subgraph'ler uygun değildir.
-### Deploy to the Subgraph Studio
+### Subgraph Stüdyo'ya dağıtma
-Once defined, the subgraph can be built and deployed to the [Subgraph Studio](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/). The Subgraph Studio is a sandbox environment which will index the deployed subgraph and make it available for rate-limited development and testing. This gives developers an opportunity to verify that their subgraph does not encounter any indexing errors, and works as expected.
+Tanımlandıktan sonra subgraph inşa edilebilir ve [Subgraph Stüdyo](https://thegraph.com/docs/en/deploying/subgraph-studio-faqs/)'ya deploy edilebilir. Subgraph Studio, deploy edilen subgraph'i indeksleyip belirli oranda geliştirme ve test için uygun hale getirecek sanal bir ortamdır. Bu, geliştiricilere subgraph'lerinin herhangi bir indeksleme hatasıyla karşılaşmadığını ve beklendiği gibi çalıştığını doğrulama fırsatı verir.
-### Publish to the Network
+### Ağda Yayınlama
-When the developer is happy with their subgraph, they can publish it to The Graph Network. This is an on-chain action, which registers the subgraph so that it is discoverable by Indexers. 
Published subgraphs have a corresponding NFT, which is then easily transferable. The published subgraph has associated metadata, which provides other network participants with useful context and information. +Geliştirici subgraph'inden memnun kaldığında, onu Graph ağında yayınlayabilir. Bu, indeksleyiciler tarafından keşfedilebilmesi için subgraph'i kaydeden zincir üstü bir eylemdir. Yayınlanan subgraph'lere karşılık gelen ve kolayca transfer edilebilen bir NFT mevcuttur. Yayınlanan subgraph, diğer ağ katılımcılarına yararlı bağlam ve bilgiler sağlayan ilişkili meta verilere sahiptir. -### Signal to Encourage Indexing +### İndekslemeyi Teşvik Eden Sinyal -Published subgraphs are unlikely to be picked up by Indexers without the addition of signal. Signal is locked GRT associated with a given subgraph, which indicates to Indexers that a given subgraph will receive query volume, and also contributes to the indexing rewards available for processing it. Subgraph developers will generally add signal to their subgraph, in order to encourage indexing. Third party Curators may also signal on a given subgraph, if they deem the subgraph likely to drive query volume. +Yayınlanan subgraph'lerin, sinyal eklenmeden indeksleyiciler tarafından dikkate alınması olası değildir. Sinyal, belirli bir subgraph'le ilişkili kilitli GRT'dir; bu, indeksleyicilere belirli bir subgraph'in sorgu hacmi alacağını belirtir ve ayrıca onu işlemek için mevcut indeksleme ödüllerine katkıda bulunur. Subgraph geliştiricileri, indekslemeyi teşvik etmek için genellikle subgraph'lerine sinyal ekler. Diğer yandan küratörler, bir subgraph'in sorgu hacminin artıracağını düşünürlerse, belirli bir subgraph'te de sinyal verebilirler. -### Querying & Application Development +### Sorgulama & Uygulama Geliştirme -Once a subgraph has been processed by Indexers and is available for querying, developers can start to use the subgraph in their applications. Developers query subgraphs via a gateway, which forwards their queries to an Indexer who has processed the subgraph, paying query fees in GRT. +Bir subgraph indeksleyiciler tarafından işlenip sorgulama için uygun hale geldikten sonra, geliştiriciler subgraph'i uygulamalarında kullanmaya başlayabilir. Geliştiriciler, sorgularını GRT cinsinden sorgu ücretleri ödeyerek subgraph'i işleyen indeksleyiciye ileten bir ağ geçidi aracılığıyla subgraph'leri sorgular. -In order to make queries, developers must generate an API key, which can be done in the Subgraph Studio. This API key must be funded with GRT, in order to pay query fees. Developers can set a maximum query fee, in order to control their costs, and limit their API key to a given subgraph or origin domain. The Subgraph Studio provides developers with data on their API key usage over time. +Sorgu için geliştiricilerin, Subgraph Stüdyo'da yapılabilecek bir API anahtarı oluşturması gerekir. Sorgu ücretlerinin ödenmesi için bu API anahtarının GRT ile finanse edilmesi gerekir. Geliştiriciler, maliyetlerini kontrol etmek için bir maksimum sorgu ücreti belirleyebilir ve API anahtarlarını belirli bir subgraph veya kaynak alan adı ile sınırlayabilir. Subgraph Stüdyo, geliştiricilere zaman içinde API anahtarı kullanımlarıyla ilgili veriler sağlar. -Developers are also able to express an Indexer preference to the gateway, for example preferring Indexers whose query response is faster, or whose data is most up to date. These controls are set in the Subgraph Studio. +Geliştiriciler ayrıca ağ geçidine bir indeksleyici tercihlerini iletebilirler. 
Örneğin, sorgu yanıtı daha hızlı olan veya verileri en güncel olan indeksleyicileri tercih edebilirler. Bu kontroller ise Subgraph Stüdyo'da ayarlanır. -### Updating Subgraphs +### Subgraphları Güncelleme -After a time a subgraph developer may want to update their subgraph, perhaps fixing a bug or adding new functionality. The subgraph developer may deploy new version(s) of their subgraph to the Subgraph Studio for rate-limited development and testing. +Bir süre sonra bir subgraph geliştiricisi, belki bir hatayı düzelterek veya yeni işlevler ekleyerek subgraph'ini güncellemek isteyebilir. Subgraph geliştiricisi, belirli oranda geliştirme ve test için kendi subgraph'inin yeni sürüm/sürümlerini Subgraph Stüdyo'ya deploy edebilir. -Once the Subgraph Developer is ready to update, they can initiate a transaction to point their subgraph at the new version. Updating the subgraph migrates any signal to the new version (assuming the user who applied the signal selected "auto-migrate"), which also incurs a migration tax. This signal migration should prompt Indexers to start indexing the new version of the subgraph, so it should soon become available for querying. +Subgraph Geliştiricisi yükseltmeye hazır olduğunda, subgraphlarını yeni sürüme yönlendirmek için bir işlem başlatabilir. Subgraph'ın güncellenmesi, her sinyali yeni sürüme geçirir (sinyali uygulayan kullanıcının "otomatik geçiş" seçeneğini seçtiğini varsayarsak) ve bu da bir geçiş kesintisine neden olur. Bu sinyal geçişi, İndeksleyicilerin subgraph'ı yeni sürümünü indekslemeye başlamasını sağlamalıdır, böylece yakında sorgulama için kullanılabilir hale gelecektir. -### Deprecating Subgraphs +### Subgraphları Kullanımdan Kaldırma -At some point a developer may decide that they no longer need a published subgraph. At that point they may deprecate the subgraph, which returns any signalled GRT to the Curators. +Bir noktada bir geliştirici, artık yayınlanmış bir subgraph'e ihtiyaç duymadığına karar verebilir. Bu noktada, herhangi bir sinyal verilmiş GRT'yi küratörlere döndüren subgraph'i kullanımdan kaldırabilirler. -### Diverse Developer Roles +### Çeşitli Geliştirici Rolleri -Some developers will engage with the full subgraph lifecycle on the network, publishing, querying and iterating on their own subgraphs. Some may be focused on subgraph development, building open APIs which others can build on. Some may be application focused, querying subgraphs deployed by others. +Bazı geliştiriciler, kendi subgraph'lerini yayınlayarak, sorgulayarak ve yineleyerek ağdaki tam subgraph yaşam döngüsüyle ilgileneceklerdir. Bazıları, diğerlerinin üzerine inşa edebileceği açık API'lar oluşturarak subgraph geliştirmeye odaklanabilir. Bazıları uygulama odaklı olabilir, başkaları tarafından deploy edilen subgraph'leri sorgulayabilir. -### Developers and Network Economics +### Geliştiriciler ve Ağ Ekonomisi -Developers are a key economic actor in the network, locking up GRT in order to encourage indexing, and crucially querying subgraphs, which is the network's primary value exchange. Subgraph developers also burn GRT whenever a subgraph is updated. +Geliştiriciler ağda önemli bir ekonomik aktördür, indekslemeyi teşvik amacıyla GRT kilitlerler ve en önemlisi ağın birincil değer değişimi olan subgraphları sorgularlar. Subgraph geliştiricileri ayrıca bir subgraph güncellendiğinde GRT yakarlar. 
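As a concrete illustration of the query flow described above, the sketch below sends a GraphQL query through a gateway using an API key funded with GRT. The endpoint shape and the subgraph ID are placeholders — check Subgraph Studio for the exact query URL and API key for your own subgraph.

```typescript
// Minimal GraphQL query through a gateway, paid for via an API key funded with GRT.
// The URL format and subgraph ID below are illustrative placeholders.
const API_KEY = process.env.GRAPH_API_KEY ?? "<your-api-key>";
const SUBGRAPH_ID = "<subgraph-id-from-the-explorer>";
const url = `https://gateway.thegraph.com/api/${API_KEY}/subgraphs/id/${SUBGRAPH_ID}`;

async function querySubgraph(): Promise<void> {
  const response = await fetch(url, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    // Ask for the latest indexed block as a simple smoke test.
    body: JSON.stringify({ query: "{ _meta { block { number } } }" }),
  });
  const { data, errors } = await response.json();
  console.log(data ?? errors);
}

querySubgraph();
```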
diff --git a/website/pages/tr/network/explorer.mdx b/website/pages/tr/network/explorer.mdx index b3a549900b83..cea8c4afd564 100644 --- a/website/pages/tr/network/explorer.mdx +++ b/website/pages/tr/network/explorer.mdx @@ -1,203 +1,203 @@ ---
-title: Graph Explorer
+title: Graph Gezgini
---
-Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below):
+Graph Gezgini'ne ya da bizim deyimimizle subgraphlar ve ağ verileri dünyasına açılan merkeziyetsiz portalınıza hoş geldiniz. 👩🏽‍🚀 Graph Gezgini, diğer subgraph geliştiricileri, merkeziyetsiz uygulama geliştiricileri, Küratörler, İndeksleyiciler ve Delegatörlerle etkileşime girebileceğiniz birden fazla bölümden oluşur. Graph Gezgini'ne genel bir bakış için aşağıdaki videoya göz atın (veya aşağıdan okumaya devam edin):
-## Subgraphs
+## Subgraph'ler
-First things first, if you just finished deploying and publishing your subgraph in the Subgraph Studio, the Subgraphs tab on the top of the navigation bar is the place to view your own finished subgraphs (and the subgraphs of others) on the decentralized network. Here, you’ll be able to find the exact subgraph you’re looking for based on the date created, signal amount, or name.
+Her şeyden önce, Subgraph Stüdyo'da subgraph'ınızı dağıtmayı ve yayınlamayı yeni tamamladıysanız, gezinme çubuğunun üst kısmındaki Subgraphlar sekmesi, merkeziyetsiz ağda kendi tamamlanmış subgraphlar'ınızı (ve başkalarının subgraphlar'ını) görüntüleyebileceğiniz yerdir. Burada, oluşturulma tarihine, sinyal miktarına veya adına göre tam olarak aradığınız subgraph'ı bulabileceksiniz.
-![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png)
+![Gezgin Görüntüsü 1](/img/Subgraphs-Explorer-Landing.png)
-When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries.
+Bir subgraph'a tıkladığınızda, test alanında sorguları test edebilecek ve bilinçli kararlar vermek için ağ ayrıntılarından yararlanabileceksiniz. Ayrıca, kendi subgraph'ınız veya başkalarının subgraphlar'ında GRT sinyali vererek indeksleyicilerin bunun önemi ve kalitesinden haberdar olmasını sağlayabileceksiniz. Bu oldukça önemlidir, çünkü bir subgraph'ta sinyal vermek, onun indekslenmesini teşvik eder, bu da onların nihayetinde sorguları sunmak için ağda görünmeleri anlamına gelir.
-![Explorer Image 2](/img/Subgraph-Details.png)
+![Gezgin Görüntüsü 2](/img/Subgraph-Details.png)
-On each subgraph’s dedicated page, several details are surfaced. These include:
+Her bir subgraph'ın özel sayfasında, çeşitli ayrıntılar ortaya çıkmaktadır. 
Bunlar şunları içerir: -- Signal/Un-signal on subgraphs -- View more details such as charts, current deployment ID, and other metadata -- Switch versions to explore past iterations of the subgraph -- Query subgraphs via GraphQL -- Test subgraphs in the playground -- View the Indexers that are indexing on a certain subgraph -- Subgraph stats (allocations, Curators, etc) -- View the entity who published the subgraph +- Subgraphlar üzerinde sinyal/sinyalsizlik +- Grafikler, mevcut dağıtım kimliği ve diğer üst veri gibi daha fazla ayrıntı görüntüleme +- Subgraph'ın geçmiş yinelemelerini keşfetmek için sürümleri değiştirme +- GraphQL aracılığıyla subgraphlar'ı sorgulama +- Test alanında(playground) subgraphlar'ı test etme +- Belirli bir subgraph üzerinde indeksleme yapan İndeksleyicileri görüntüleme +- Subgraph istatistikleri (tahsisler, Küratörler, vb.) +- Subgraph'ı yayınlayan varlığı görüntüleme -![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) +![Gezgin Gürüntüsü 3](/img/Explorer-Signal-Unsignal.png) -## Participants +## Katılımcılar -Within this tab, you’ll get a bird’s eye view of all the people that are participating in the network activities, such as Indexers, Delegators, and Curators. Below, we’ll go into an in-depth review of what each tab means for you. +Bu kısımda İndeksleyiciler, Delegatörler ve Küratörler gibi ağ faaliyetlerine katılan tüm kişilerin kuş bakışı bir görüş elde edeceksiniz. Aşağıda, her bir kısmın sizin için ne anlama geldiğini derinlemesine inceleyeceğiz. -### 1. Indexers +### 1. İndeksleyiciler -![Explorer Image 4](/img/Indexer-Pane.png) +![Gezgin Gürüntüsü 4](/img/Indexer-Pane.png) -Let’s start with the Indexers. Indexers are the backbone of the protocol, being the ones that stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. In the Indexers table, you’ll be able to see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made off of query fees and indexing rewards. Deep dives below: +İndeksleyiciler ile başlayalım. İndeksleyiciler protokolün bel kemiğidir, subgraphlar'a stake eden, indeksleyen ve subgraphlar'ı kullanan herkese sorgu sunan kişilerdir. İndeksleyiciler tablosunda, bir İndeksleyicinin temsilci parametrelerini, hisselerini, her bir subgraph'a ne kadar stake ettiklerini, sorgu ücretlerini ve indeksleme ödüllerinden ne kadar gelir elde ettiklerini görebileceksiniz. Derinlemesine incelemeler aşağıda: -- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators -- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. If it’s positive, it means that the Indexer is keeping some of their rewards -- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters. 
Cooldown periods are set up by Indexers when they update their delegation parameters -- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior -- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed -- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing -- Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated -- Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. An excess delegated stake cannot be used for allocations or rewards calculations. -- Query Fees - this is the total fees that end users have paid for queries from an Indexer over all time -- Indexer Rewards - this is the total indexer rewards earned by the Indexer and their Delegators over all time. Indexer rewards are paid through GRT issuance. +- Sorgu Ücreti Kesintisi - İndeksleyici'nin Delegatörlerle bölüşürken tuttuğu sorgu ücreti indirimlerinin %'si +- Efektif Ödül Kesintisi - delegasyon havuzuna uygulanan indeksleme ödülü kesintisi. Eğer negatifse, indeksleyicinin ödüllerinin bir kısmını verdiği anlamına gelir. Pozitifse, İndeksleyicinin ödüllerinin bir kısmını elinde tuttuğu anlamına gelir +- Kalan Bekleme Süresi - İndeksleyici'nin yukarıdaki delegatör parametrelerini değiştirebilmesi için kalan süre. Bekleme süreleri, İndeksleyiciler tarafından delegatör parametrelerini güncellediklerinde ayarlanır +- Depozito - Bu, İndeksleyici'nin kötü niyetli veya yanlış davranışı sonucunda kesilebilecek yatırılmış payıdır +- Delege edilmiş - İndeksleyici tarafından tahsis edilebilen ancak kesilemeyen Delegatörler'in payları +- Tahsis edilmiş - İndeksleyiciler'in indeksledikleri subgraphlar'a aktif olarak ayırdıkları paydır +- Mevcut Delegasyon Kapasitesi - İndekslendiricilerin aşırı delege edilmiş duruma gelmeden önce alabilecekleri delege edilebilecek pay miktarı +- Maksimum Delegasyon Kapasitesi - Endekserin verimli bir şekilde kabul edebileceği en yüksek delegasyon miktarıdır. Fazla delege edilmiş pay, tahsisler veya ödül hesaplamaları için kullanılamaz. +- Sorgu Ücretleri - bu, son kullanıcıların bir İndeksleyiciden gelen sorgular için tüm zaman içinde ödediği toplam ücretlerdir +- İndeksleyici Ödülleri - bu, İndeksleyici ve Delegatörler tarafından tüm zaman boyunca kazanılan toplam indeksleyici ödülleridir. İndeksleyici ödülleri GRT ihracı yoluyla ödenir. -Indexers can earn both query fees and indexing rewards. Functionally, this happens when network participants delegate GRT to an Indexer. This enables Indexers to receive query fees and rewards depending on their Indexer parameters. Indexing parameters are set by clicking on the right-hand side of the table, or by going into an Indexer’s profile and clicking the “Delegate” button. +İndeksleyiciler hem sorgu ücretleri hem de indeksleme ödülleri kazanabilir. İşlevsel olarak bu, ağ katılımcıları GRT'yi bir İndeksleyiciye delege ettiğinde gerçekleşir. Bu, İndeksleyicilerin İndeksleyici parametrelerine bağlı olarak sorgu ücretleri ve ödüller almasına olanak sağlar. İndeksleme parametreleri, tablonun sağ tarafına tıklanarak veya bir İndeksleyicinin profiline girip "Delegate" düğmesine tıklanarak ayarlanır. 
-To learn more about how to become an Indexer, you can take a look at the [official documentation](/network/indexing) or [The Graph Academy Indexer guides.](https://thegraph.academy/delegators/choosing-indexers/) +Nasıl İndeksleyici olunacağı hakkında daha fazla bilgi edinmek için [resmi dökümantasyona](/network/indexing) veya [Graph Akademi İndeksleyici kılavuzlarına](https://thegraph.academy/delegators/choosing-indexers/) göz atabilirsiniz. -![Indexing details pane](/img/Indexing-Details-Pane.png) +![İndeksleme ayrıntıları bölmesi](/img/Indexing-Details-Pane.png) -### 2. Curators +### 2. Küratörler -Curators analyze subgraphs to identify which subgraphs are of the highest quality. Once a Curator has found a potentially attractive subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. +Küratörler, hangi subgraphlar'ın en yüksek kalitede olduğunu belirlemek için subgraphlar'ı analiz eder. Bir Küratör potansiyel olarak cazip bir subgraph bulduğunda, bağlanma eğrisi üzerinde sinyal vererek onu kürate edebilir. Küratörler bunu yaparak, İndeksleyicilere hangi subgraphlar'ın yüksek kaliteli olduğunu ve indekslenmesi gerektiğini bildirir. -Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. By depositing GRT, Curators mint curation shares of a subgraph. As a result, Curators are eligible to earn a portion of the query fees that the subgraph they have signaled on generates. The bonding curve incentivizes Curators to curate the highest quality data sources. The Curator table in this section will allow you to see: +Küratörler topluluk üyeleri, veri kullanıcıları ve hatta GRT tokenlerini bir bağlanma eğrisine yatırarak kendi subgraphlar'ı hakkında sinyal veren subgraph geliştiricileri olabilir. Küratörler GRT yatırarak bir subgraph'ın kürasyon paylarını basarlar. Sonuç olarak Küratörler, sinyal verdikleri subgraph'ın ürettiği sorgu ücretlerinin bir kısmını almaya hak kazanırlar. Bağlanma eğrisi, Küratörleri en yüksek kaliteli veri kaynaklarının küratörlüğünü yapmaya teşvik eder. Bu bölümdeki Küratör tablosu şunları görmenizi sağlayacaktır: -- The date the Curator started curating -- The number of GRT that was deposited -- The number of shares a Curator owns +- Küratör'ün küratörlüğe başladığı tarih +- Yatırılan GRT sayısı +- Küratör'ün sahip olduğu hisse sayısı -![Explorer Image 6](/img/Curation-Overview.png) +![Gezgin Gürüntüsü 6](/img/Curation-Overview.png) -If you want to learn more about the Curator role, you can do so by visiting the following links of [The Graph Academy](https://thegraph.academy/curators/) or [official documentation.](/network/curating) +Küratör rolü hakkında daha fazla bilgi edinmek istiyorsanız, bunu [Graph Akademi](https://thegraph.academy/curators/) 'nin aşağıdaki bağlantılarını veya [resmi dökümantasyonunu](/network/curating) ziyaret ederek yapabilirsiniz. -### 3. Delegators +### 3. Delegatörler -Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers seek to attract Delegators by offering them a portion of the indexing rewards and query fees that they earn. 
+Delegatörler, Graph Ağı'nın güvenliğinin ve merkeziyetsizliğinin korunmasında kilit bir rol oynar. GRT tokenlerini bir veya birden fazla indeksleyiciye delege ederek (yani "stake ederek") ağa katılırlar. Delegatörler olmadan, İndeksleyicilerin önemli ödüller ve ücretler kazanma olasılığı daha düşüktür. Bu nedenle İndeksleyiciler, kazandıkları indeksleme ödüllerinin ve sorgu ücretlerinin bir kısmını Delegatörlere sunarak onları kendilerine delege etmeye teşvik etmeye çalışırlar. -Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! +Delegatörler ise İndeksleyicileri geçmiş performans, indeksleme ödül oranları ve sorgu ücreti kesintileri gibi bir dizi farklı faktöre göre seçerler. Topluluk içinde sahip oldukları itibar da bu konuda bir faktör olabilir! Seçilen indeksleyicilerle [Graph Discord sunucusu](https://discord.gg/graphprotocol) veya [Graph Forum](https://forum.thegraph.com/)'u üzerinden bağlantı kurmanız önerilir! -![Explorer Image 7](/img/Delegation-Overview.png) +![Gezgin Görüntüsü 7](/img/Delegation-Overview.png) -The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: +Delegatörler tablosu, topluluktaki aktif Delegatörleri ve aşağıdaki gibi metrikleri görmenizi sağlayacaktır: -- The number of Indexers a Delegator is delegating towards -- A Delegator’s original delegation -- The rewards they have accumulated but have not withdrawn from the protocol -- The realized rewards they withdrew from the protocol -- Total amount of GRT they have currently in the protocol -- The date they last delegated at +- Bir Delegatör'ün delege ettiği İndeksleyici sayısı +- Bir Delegatör'ün orijinal delegasyonu +- Biriktirdikleri ancak protokolden çekmedikleri ödüller +- Protokolden çektikleri gerçekleşmiş ödüller +- Şu anda protokolde sahip oldukları toplam GRT miktarı +- En son delegasyon yaptıkları tarih -If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). +Nasıl Delegatör olunacağı hakkında daha fazla bilgi edinmek istiyorsanız, başka yere bakmanıza gerek yok! Tek yapmanız gereken [resmi dökümantasyona](/network/delegating) veya [Graph Akademi](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers)'ye bakmak. -## Network +## Ağ -In the Network section, you will see global KPIs as well as the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. +Ağ bölümünde, küresel APG'lerin yanı sıra her bir dönem bazına geçme ve ağ metriklerini daha ayrıntılı olarak analiz etme olanaklarını göreceksiniz. Bu ayrıntılar size ağın zaman içinde nasıl performans gösterdiğine dair bir fikir verecektir. -### Activity +### Aktivite -The activity section has all the current network metrics as well as some cumulative metrics over time. Here you can see things like: +Aktivite bölümünde tüm mevcut ağ ölçümlerinin yanı sıra zaman içindeki bazı kümülatif metrikler de yer almaktadır.
Burada aşağıdaki gibi şeyleri görebilirsiniz: -- The current total network stake -- The stake split between the Indexers and their Delegators -- Total supply, minted, and burned GRT since the network inception -- Total Indexing rewards since the inception of the protocol -- Protocol parameters such as curation reward, inflation rate, and more -- Current epoch rewards and fees +- Mevcut toplam ağ payı +- İndeksleyiciler ve Delegatörler arasındaki pay paylaşımı +- Ağın başlangıcından bu yana toplam arz, basılan ve yakılan GRT +- Protokolün başlangıcından bu yana toplam İndeksleme ödülleri +- Kürasyon ödülü, enflasyon oranı ve daha fazlası gibi protokol parametreleri +- Mevcut dönem ödülleri ve ücretleri -A few key details that are worth mentioning: +Bahsetmeye değer birkaç önemli ayrıntı: -- **Query fees represent the fees generated by the consumers**, and they can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. -- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once the Indexers close their allocations towards the subgraphs they’ve been indexing. Thus the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). +- **Sorgu ücretleri kullanıcılar tarafından üretilen ücretleri temsil eder** ve subgraphlara yönelik tahsisleri kapatıldıktan ve sundukları veriler tüketiciler tarafından doğrulandıktan sonra en az 7 dönemlik bir sürenin ardından (aşağıya bakınız) İndeksleyiciler tarafından talep edilebilir (veya edilemez). +- **İndeksleme ödülleri, İndeksleyicilerin dönem boyunca ağ ihracından talep ettikleri ödül miktarını temsil eder.** Protokol ihracı sabit olmasına rağmen, ödüller yalnızca İndeksleyiciler indeksledikleri subgraphlara yönelik tahsislerini kapattıklarında basılır. Bu nedenle, her dönem başına ödül sayısı değişir (yani, bazı dönemler boyunca, İndeksleyiciler günlerce açık olan tahsisatları toplu olarak kapatmış olabilir). -![Explorer Image 8](/img/Network-Stats.png) +![Gezgin Gürüntüsü 8](/img/Network-Stats.png) -### Epochs +### Dönemler -In the Epochs section, you can analyze on a per-epoch basis, metrics such as: +Dönemler bölümünde, aşağıdaki gibi metrikleri dönem bazında analiz edebilirsiniz: -- Epoch start or end block -- Query fees generated and indexing rewards collected during a specific epoch -- Epoch status, which refers to the query fee collection and distribution and can have different states: - - The active epoch is the one in which Indexers are currently allocating stake and collecting query fees - - The settling epochs are the ones in which the state channels are being settled. This means that the Indexers are subject to slashing if the consumers open disputes against them. - - The distributing epochs are the epochs in which the state channels for the epochs are being settled and Indexers can claim their query fee rebates. - - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers, thus being finalized. 
+- Dönem başlangıç veya bitiş bloğu +- Belirli bir dönem boyunca oluşturulan sorgu ücretleri ve toplanan indeksleme ödülleri +- Sorgu ücreti toplama ve dağıtımını ifade eden ve farklı durumlara sahip olabilen dönem durumu: + - Aktif dönem, İndeksleyicilerin halihazırda pay tahsis ettiği ve sorgu ücretlerini topladığı dönemdir + - Uzlaşma dönemleri, bildirim kanallarının uzlaştırıldığı dönemlerdir. Bu, kullanıcıların kendilerine karşı itirazda bulunması halinde İndeksleyicilerin kesintiye maruz kalacağı anlamına gelir. + - Dağıtım dönemleri, bildirim kanallarının dönemler için yerleştiği ve İndeksleyicilerin sorgu ücreti iadelerini talep edebildiği dönemlerdir. + - Sonlandırılmış dönemler, İndeksleyiciler tarafından talep edilecek sorgu ücreti iadesi kalmamış, dolayısıyla sonlandırılmış dönemlerdir. -![Explorer Image 9](/img/Epoch-Stats.png) +![Gezgin Gürüntüsü 9](/img/Epoch-Stats.png) -## Your User Profile +## Kullanıcı Profiliniz -Now that we’ve talked about the network stats, let’s move on to your personal profile. Your personal profile is the place for you to see your network activity, no matter how you’re participating on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see: +Ağ istatistiklerinden bahsettiğimize göre, şimdi kişisel profilinize geçelim. Kişisel profiliniz, ağa nasıl katılıyor olursanız olun, ağ etkinliğinizi görebileceğiniz yerdir. Kripto cüzdanınız kullanıcı profiliniz olarak işlev görecek ve Kullanıcı Panosu ile şunları görebileceksiniz: -### Profile Overview +### Profile Genel Bakış -This is where you can see any current actions you took. This is also where you can find your profile information, description, and website (if you added one). +Burası yaptığınız tüm mevcut eylemleri görebileceğiniz yerdir. Ayrıca profil bilgilerinizi, açıklamanızı ve web sitenizi de (eğer eklediyseniz) burada bulabilirsiniz. -![Explorer Image 10](/img/Profile-Overview.png) +![Gezgin Gürüntüsü 10](/img/Profile-Overview.png) -### Subgraphs Tab +### Subgraphlar Sekmesi -If you click into the Subgraphs tab, you’ll see your published subgraphs. This will not include any subgraphs deployed with the CLI for testing purposes – subgraphs will only show up when they are published to the decentralized network. +Subgraphlar sekmesine tıklarsanız, yayınlanmış subgraphlar'ı göreceksiniz. Bu, test amacıyla CLI ile dağıtılan herhangi bir subgraph'ı içermeyecektir - subgraphlar yalnızca merkeziyetsiz ağda yayınlandıklarında görünecektir. -![Explorer Image 11](/img/Subgraphs-Overview.png) +![Gezgin Gürüntüsü 11](/img/Subgraphs-Overview.png) -### Indexing Tab +### İndeksleme Sekmesi -If you click into the Indexing tab, you’ll find a table with all the active and historical allocations towards the subgraphs, as well as charts that you can analyze and see your past performance as an Indexer. +İndeksleme sekmesine tıklarsanız, subgraplar'a yönelik tüm aktif ve geçmiş tahsisleri içeren bir tablonun yanı sıra bir İndeksleyici olarak geçmiş performansınızı analiz edebileceğiniz ve görebileceğiniz grafikler bulacaksınız. -This section will also include details about your net Indexer rewards and net query fees. You’ll see the following metrics: +Bu bölümde ayrıca net İndeksleyici ödülleriniz ve sorgu ücretlerinizle ilgili ayrıntılar da yer alacaktır. 
Aşağıdaki metrikleri göreceksiniz: -- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed -- Total Query Fees - the total fees that users have paid for queries served by you over time -- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT -- Fee Cut - the % of query fee rebates that you will keep when you split with Delegators -- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators -- Owned - your deposited stake, which could be slashed for malicious or incorrect behavior +- Delege Edilen Pay - delegatörler'in sizin tarafınızdan tahsis edilebilen fakat kesilemeyen payları +- Toplam Sorgu Ücretleri - kullanıcıların zaman içinde sizin tarafınızdan sunulan sorgular için ödedikleri toplam ücretler +- İndeksleyici Ödülleri - GRT olarak aldığınız İndeksleyici ödüllerinin toplam tutarı +- Ücret Kesintisi - Delegatörlerle ayrıldığınızda elinizde kalacak sorgu ücreti iadelerinin yüzdesi +- Ödül Kesintisi - Delegatörlerle ayrılırken İndeksleyici ödüllerinin elinizde kalacak yüzdesi +- Depozito - kötü niyetli veya yanlış davranışlarınız sonucu kesilebilecek yatırılmış payınız -![Explorer Image 12](/img/Indexer-Stats.png) +![Gezgin Gürüntüsü 12](/img/Indexer-Stats.png) -### Delegating Tab +### Delegasyon Sekmesi -Delegators are important to the Graph Network. A Delegator must use their knowledge to choose an Indexer that will provide a healthy return on rewards. Here you can find details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. +Delegatörler Graph Ağı için önem arz etmektedir. Bir Delegatör, sağlıklı ödül getirisi sağlayacak bir İndeksleyici seçmek için bildiklerini kullanmalıdır. Burada, aktif ve geçmiş delegasyonlarınızın ayrıntılarını ve delege ettiğiniz İndeksleyicilerin metriklerini bulabilirsiniz. -In the first half of the page, you can see your delegation chart, as well as the rewards-only chart. To the left, you can see the KPIs that reflect your current delegation metrics. +Sayfanın ilk yarısında, delegasyon grafiğinizin yanı sıra yalnızca ödül grafiğini de görebilirsiniz. Sol tarafta, mevcut delegasyon metriklerinizi yansıtan APG'leri görebilirsiniz. -The Delegator metrics you’ll see here in this tab include: +Bu sekmede göreceğiniz Delegatör metrikleri şunları içermektedir: -- Total delegation rewards -- Total unrealized rewards -- Total realized rewards +- Toplam delegasyon ödülleri +- Toplam gerçekleşmemiş ödüller +- Toplam gerçekleşmiş ödüller -In the second half of the page, you have the delegations table. Here you can see the Indexers that you delegated towards, as well as their details (such as rewards cuts, cooldown, etc). +Sayfanın ikinci yarısında delegasyonlar tablosu yer alır. Burada delege ettiğiniz İndeksleyicileri ve ayrıntılarını (ödül kesintileri, bekleme süreleri vb.) görebilirsiniz. -With the buttons on the right side of the table, you can manage your delegation - delegate more, undelegate, or withdraw your delegation after the thawing period. +Tablonun sağ tarafındaki düğmelerle delegasyonunuzu yönetebilirsiniz - daha fazla delege edebilir, delegasyonunuzu geri alabilir veya serbest kalma döneminden sonra delegasyonunuzu geri çekebilirsiniz. -Keep in mind that this chart is horizontally scrollable, so if you scroll all the way to the right, you can also see the status of your delegation (delegating, undelegating, withdrawable). 
+Bu grafiğin yatay olarak kaydırılabilir olduğunu unutmayın, bu nedenle sağa doğru kaydırırsanız, delegasyonunuzun durumunu da görebilirsiniz (delege edilen, delegeden çıkarılan, geri çekilebilir). -![Explorer Image 13](/img/Delegation-Stats.png) +![Gezgin Görüntüsü 13](/img/Delegation-Stats.png) -### Curating Tab +### Kürasyon Sekmesi -In the Curation tab, you’ll find all the subgraphs you’re signaling on (thus enabling you to receive query fees). Signaling allows Curators to highlight to Indexers which subgraphs are valuable and trustworthy, thus signaling that they need to be indexed on. +Kürasyon sekmesinde, sinyal verdiğiniz (sonucunda sorgu ücreti almanızı sağlayan) tüm subgraphlar'ı bulacaksınız. Sinyalleme, Küratörlerin İndeksleyicilere hangi subgraphlar'ın değerli ve güvenilir olduğunu belirtmesine ve böylece indekslenmeleri gerektiğinin belirtilmesine olanak tanır. -Within this tab, you’ll find an overview of: +Bu sekmede, aşağıdakilerin genel bir bakışını bulacaksınız: -- All the subgraphs you're curating on with signal details -- Share totals per subgraph -- Query rewards per subgraph -- Updated at date details +- Sinyal ayrıntılarıyla birlikte küratörlüğünü yaptığınız tüm subgraphlar +- Subgraph başına pay toplamları +- Subgraph başına sorgu ödülleri +- Güncellenme tarihi detayları -![Explorer Image 14](/img/Curation-Stats.png) +![Gezgin Görüntüsü 14](/img/Curation-Stats.png) -## Your Profile Settings +## Profil Ayarlarınız -Within your user profile, you’ll be able to manage your personal profile details (like setting up an ENS name). If you’re an Indexer, you have even more access to settings at your fingertips. In your user profile, you’ll be able to set up your delegation parameters and operators. +Kullanıcı profilinizde, kişisel profil ayrıntılarınızı yönetebileceksiniz (bir ENS adı almak gibi). Eğer bir İndeksleyiciyseniz, daha fazla ayara erişiminiz olacaktır. Kullanıcı profilinizde, delegasyon parametrelerinizi ve operatörlerinizi ayarlayabileceksiniz. -- Operators take limited actions in the protocol on the Indexer's behalf, such as opening and closing allocations. Operators are typically other Ethereum addresses, separate from their staking wallet, with gated access to the network that Indexers can personally set -- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. +- Operatörler, İndeksleyici adına protokolde tahsislerin açılması ve kapatılması gibi sınırlı işlemlerde bulunur. Operatörler tipik olarak, stake cüzdanlarından ayrı, İndeksleyicilerin kişisel olarak ayarlayabileceği ağa kontrollü bir şekilde erişime sahip diğer Ethereum adresleridir +- Delegasyon parametreleri, GRT'nin siz ve Delegatörleriniz arasındaki dağılımını kontrol etmenizi sağlar. -![Explorer Image 15](/img/Profile-Settings.png) +![Gezgin Görüntüsü 15](/img/Profile-Settings.png) -As your official portal into the world of decentralized data, The Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. +Merkeziyetsiz veri dünyasındaki resmi portalınız olan Graph Gezgini, ağdaki rolünüz ne olursa olsun çeşitli eylemlerde bulunmanıza olanak tanır. Profil ayarlarınıza adresinizin yanındaki açılır menüyü açıp Ayarlar düğmesine tıklayarak ulaşabilirsiniz.
    ![Wallet details](/img/Wallet-Details.png)
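The profile tabs described above (Indexing, Delegating, Curating) surface data that also lives in the network subgraph, so a wallet's activity can be fetched programmatically as well. The following is only a hedged sketch: the `delegator` entity and its field names (`stakes`, `stakedTokens`, `shareAmount`) are assumptions and may not match the live schema exactly.

```graphql
# Illustrative sketch: list the Indexers a wallet has delegated towards,
# mirroring the Delegating tab. Entity and field names are assumed.
query delegatorProfile {
  delegator(id: "<WALLET_ADDRESS>") {
    stakes {
      stakedTokens # GRT delegated to this Indexer
      shareAmount  # delegation pool shares held
      indexer {
        id
        indexingRewardCut
        queryFeeCut
      }
    }
  }
}
```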
    diff --git a/website/pages/tr/network/indexing.mdx b/website/pages/tr/network/indexing.mdx index c40fd87a22fe..797eb551966f 100644 --- a/website/pages/tr/network/indexing.mdx +++ b/website/pages/tr/network/indexing.mdx @@ -1,48 +1,48 @@ --- -title: Indexing +title: İndeksleme --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +İndeksleyiciler, indeksleme ve sorgu işleme hizmetleri sağlamak için Graph Token'leri (GRT) stake eden Graph Ağındaki düğüm operatörleridir. İndeksleyiciler, hizmetleri karşılığında sorgu ücretleri ve indeksleme ödülleri kazanırlar. Ayrıca üstel bir indirim fonksiyonuna göre geri ödenen sorgu ücretleri de kazanırlar. -GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. +Ağ üzerinde stake edilen GRT'ler, bir çözülme süresine tabidir ve indeksleyiciler kötü niyetliyse ve uygulamalara yanlış veriler sunar veya hatalı şekilde indekslenir ise kesilebilir. İndeksleyiciler ayrıca, ağa katkıda bulunmak üzere delegatörlerden stake dilen pay için ödüller kazanır. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +İndeksleyiciler, subgraph'in kürasyon sinyaline dayalı olarak indekslenecek subgraph'leri seçer; burada küratörler, hangi subgraph'lerin yüksek kaliteli olduğunu ve önceliklendirilmesi gerektiğini belirtmek için GRT'yi paylaşır. Tüketiciler (örn. uygulamalar), indeksleyicilerin kendi subgraph'leri için sorguları işlediği parametreleri ve sorgu ücreti fiyatlandırması için tercihleri de ayarlayabilir. -## FAQ +## SSS -### What is the minimum stake required to be an Indexer on the network? +### Ağda indeksleyici olmak için gereken minimum gereksinim nedir? -The minimum stake for an Indexer is currently set to 100K GRT. +Bir indeksleyici için minimum stake tutarı şu anda 100K GRT olarak ayarlanmıştır. -### What are the revenue streams for an Indexer? +### Bir indeksleyici için gelir akışları nelerdir? -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. +**Sorgu ücreti ödülleri** - Ağda sorgu sunma karşılığında yapılan ödemeler. Bu ödemeler, bir indeksleyici ile bir ağ geçidi arasındaki durum kanalları aracılığıyla aracılık edilir. Bir ağ geçidinden gelen her sorgu isteği, bir ödeme ve karşılık gelen yanıt, sorgu sonucunun geçerliliğinin bir kanıtını içerir. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. 
+**İndeksleme ödülleri** - Protokol genelinde yıllık %3'lük bir enflasyonla oluşturulan indeksleme ödülleri, ağ için subgraph dağıtımlarını indeksleyen indeksleyicilere dağıtılır. -### How are indexing rewards distributed? +### İndeksleme ödülleri nasıl dağıtılır? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +İndeksleme ödülleri, yıllık %3 ihraç olarak belirlenen protokol enflasyonundan gelir. Her birindeki kürasyon sinyalinin oranına göre subgraph'ler arasında dağıtılırlar, ardından o subgraph'te tahsis edilen paylarına göre indeksleyicilere orantılı olarak dağıtılırlar. **Ödül almaya hak kazanabilmek için bir tahsisin, tahkim sözleşmesi tarafından belirlenen standartları karşılayan geçerli bir indeksleme kanıtı (POI) ile kapatılması gerekir.** -Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. +Ödülleri hesaplamak için topluluk tarafından çok sayıda araç oluşturulmuştur. Bunların bir koleksiyonunu [Topluluk Kılavuzları koleksiyonu](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c) bölümünde düzenlenmiş olarak bulabilirsiniz. Ayrıca [Discord sunucusundaki](https://discord.gg/graphprotocol) #Delegators ve #Indexers kanallarında araçların güncel bir listesini bulabilirsiniz. Burada, indeksleyici yazılım yığınına entegre edilmiş bir [önerilen tahsis optimizatörüne](https://github.com/graphprotocol/AllocationOpt.jl) bağlantı sağlıyoruz. -### What is a proof of indexing (POI)? +### İndeksleme kanıtı (POI) nedir? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POI'ler ağda, bir indeksleyicinin tahsis ettikleri subgraph'leri indekslediğini doğrulamak için kullanılır. Mevcut dönemin ilk bloğu için bir POI, indeksleme ödüllerine hak kazanabilmek için söz konusu tahsis için bir tahsisi kapatırken sunulmalıdır. Bir blok için POI, o bloğa kadar olan belirli bir subgraph konuşlandırması için tüm varlık depolama işlemlerinin özetidir. -### When are indexing rewards distributed? +### İndeksleme ödülleri ne zaman dağıtılır? -Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 
28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). +Tahsisler, aktif oldukları ve 28 dönem içinde tahsis edildikleri sürece sürekli olarak ödül toplamaya devam eder.İndeksleme ödülleri, indeksleyiciler tahsislerini kapattığında toplanır ve dağıtılır. Aktif oldukları sürece tahsisler sürekli olarak ödül kazanmaya devam eder. İndeksleyiciler, tahsislerini manuel olarak kapatmaya zorlayabilir veya maksimum tahsis ömründen sonra (her 28 dönemde bir) otomatik olarak kapanmalarına izin verebilir (1 dönem = ~24 saat). -### Can pending indexing rewards be monitored? +### Bekleyen indeksleme ödülleri izlenebilir mi? -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +RewardsManager sözleşmesi, belirli bir tahsis için bekleyen ödülleri kontrol etmek adına kullanılabilen salt okunur bir [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) işlevine sahiptir. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +Topluluk yapımı gösterge panolarının çoğu, bekleyen ödül değerleri içerir ve bunlar, aşağıdaki adımlar izlenerek manuel olarak kolayca kontrol edilebilir: -1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: +1. Tüm etkin tahsislerin kimliklerini almak için [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet)'i sorgulayın: ```graphql query indexerAllocations { @@ -58,139 +58,139 @@ query indexerAllocations { } ``` -Use Etherscan to call `getRewards()`: +`getRewards()` öğesini çağırmak için Etherscan'i kullanın: -- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- [Ödül sözleşmesine Etherscan arayüzü](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract)'ne gidin -* To call `getRewards()`: - - Expand the **10. getRewards** dropdown. - - Enter the **allocationID** in the input. - - Click the **Query** button. +* `getRewards()`'ı çağırmak için: + - **10. getRewards** açılır menüsünü genişletin. + - Girişte **allocationID**'yi girin. + - **Sorgu** düğmesini tıklayın. -### What are disputes and where can I view them? +### Anlaşmazlıklar nelerdir ve bunları nerede görebilirim? -Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. +İndeksleyicinin sorguları ve tahsisleri, itiraz süresi boyunca Graph üzerinde tartışılabilir. Anlaşmazlık süresi, anlaşmazlığın türüne göre değişir. Sorgular/onaylar 7 dönem ihtilaf penceresine sahipken, tahsisler 56 dönem içerir. Bu süreler geçtikten sonra tahsislere veya sorgulara karşı ihtilaf açılamaz. 
Bir anlaşmazlık açıldığında, fishermen tarafından minimum 10.000 GRT tutarında bir depozito talep edilir ve bu, anlaşmazlık sonuçlanana ve bir çözüm sağlanana kadar kilitlenir. Fishermen, anlaşmazlıkları açan herhangi bir ağ katılımcısıdır. -Disputes have **three** possible outcomes, so does the deposit of the Fishermen. +Anlaşmazlıkların **üç** olası sonucu vardır, fishermen'lerin para yatırması da öyle. -- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. -- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. -- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. +- Anlaşmazlık reddedilirse, fishermen tarafından yatırılan GRT yakılacak ve tartışmalı indeksleyici cezalandırılmayacaktır. +- İhtilaf berabere biterse, fishermen'in depozitosu iade edilecek ve ihtilaflı indeksleyici cezalandırılmayacaktır. +- Anlaşmazlık kabul edilirse, fishermen tarafından yatırılan GRT iade edilecek, tartışmalı indeksleyici cezalandırılacak ve fishermen kesilen GRT'nin %50'sini kazanacaktır. -Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. +Anlaşmazlıklar, bir indeksleyicinin profil sayfasında `İtirazlar` sekmesi altındaki kullanıcı arayüzünde görüntülenebilir. -### What are query fee rebates and when are they distributed? +### Sorgu ücreti ödülleri nedir ve ne zaman dağıtılır? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Sorgu ücretleri ağ geçidi tarafından toplanır ve üstel indirim fonksiyonuna göre indeksleyicilere dağıtılır ([buradan](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) GIP'e bakınız). Üstel indirim fonksiyonu, indeksleyicilerin sorguları dürüstçe sunarak en iyi sonucu elde etmelerini sağlamanın bir yolu olarak önerilmiştir. İndeksleyicileri, toplayabilecekleri sorgu ücretlerine göre büyük miktarda pay (bir sorguya hizmet verirken hata yaptıklarında kesinti olabilir) ayırmaya teşvik ederek çalışmaktadır. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Bir tahsisat kapatıldıktan sonra iadeler İndeksleyici tarafından talep edilebilir. Talep edildikten sonra, sorgu ücreti iadeleri, sorgu ücreti kesintisi ve üstel indirim fonksiyonuna göre İndeksleyiciye ve Delegatörlerine dağıtılır. -### What is query fee cut and indexing reward cut? +### Sorgu ücreti kesintisi ve indeksleme ödülü kesintisi nedir? -The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. 
See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. +`queryFeeCut` ve `indexingRewardCut` değerleri, indeksleyicinin, GRT'nin indeksleyici ile delegatörleri arasındaki dağıtımını kontrol etmek için cooldownBlock'larla birlikte ayarlayabileceği yetkilendirme parametreleridir. Stake parametrelerini ayarlama talimatları için [Protokolde Staking](/network/indexing#stake-in-the-protocol) kısmındaki son adımlara göz atın. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - İndeksleyiciye dağıtılacak sorgu ücreti iadelerinin %'si. Bu %95 olarak ayarlanırsa, İndeksleyici bir tahsisat kapatıldığında kazanılan sorgu ücretlerinin %95'ini alır ve diğer %5'lik kısım Delegatörlere gider. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - İndeksleyiciye dağıtılacak indeksleme ödüllerinin %'si. Bu, %95 olarak ayarlanırsa, indeksleyici, bir tahsis kapatıldığında indeksleme ödül havuzunun %95'ini alacak ve delegatörler diğer %5'i bölüşecektir. -### How do Indexers know which subgraphs to index? +### İndeksleyiciler hangi subgraph'lerin indeksleneceğini nasıl bilir? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +İndeksleyiciler, subgraph indeksleme kararları vermek için gelişmiş teknikler uygulayarak kendilerini farklılaştırabilir, ancak genel bir fikir vermek için ağdaki subgraph'leri değerlendirmek için kullanılan birkaç temel ölçümü tartışacağız: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Kürasyon Sinyali** - Belirli bir subgraph'e uygulanan ağ kürasyon sinyalinin oranı, özellikle sorgu hacminin arttığı önyükleme aşamasında, o subgraph'e olan ilginin iyi bir göstergesidir. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Toplanan sorgu ücretleri** - Belirli bir subgraph için toplanan sorgu ücretlerinin hacmine ilişkin geçmiş veriler, gelecekteki talebin iyi bir göstergesidir. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Stake edilen miktar** - Diğer İndeksleyicilerin davranışlarını izlemek veya belirli subgraphlara tahsis edilen toplam stake oranlarına bakmak, bir İndeksleyicinin subgraph sorgularına yönelik arz tarafını izlemesine olanak tanır; böylece ağın güvendiği subgraphları veya daha fazla arz ihtiyacı olabilecek subgraphları belirlemesine yardımcı olur. 
-- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **İndeksleme ödülü olmayan subgraphlar** - Bazı subgraphlar, IPFS gibi desteklenmeyen özellikleri kullandıkları veya ana ağ dışında başka bir ağı sorguladıkları için indeksleme ödülü üretmezler. İndeksleme ödülleri üretmeyen bir subgraph üzerinde bir mesaj göreceksiniz. -### What are the hardware requirements? +### Donanım gereksinimleri nelerdir? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Düşük** - Birkaç subgraph'ı indekslemeye başlamak için yeterli, muhtemelen genişletilmesi gerekecek. +- **Standart** - Varsayılan kurulum, örnek k8s/terraform dağıtım manifestlerinde kullanılan budur. +- **Orta** - 100 subgraph ve saniyede 200-500 isteği destekleyen Üretim İndeksleyici. +- **Yüksek** - Şu anda kullanılan tüm subgraphları indekslemek ve ilgili trafik için istekleri sunmak için hazırlanmıştır. -| Setup | Postgres
(CPUs) | Postgres (memory in GBs) | Postgres (disk in TBs) | VMs (CPUs) | VMs (memory in GBs) |
+| Kurulum | Postgres (CPU'lar) | Postgres (GB cinsinden bellek) | Postgres (TB cinsinden disk) | VM'ler (CPU'lar) | VM'ler
    (GB cinsinden bellek) | | --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Düşük | 4 | 8 | 1 | 4 | 16 | +| Standart | 8 | 30 | 1 | 12 | 48 | +| Orta | 16 | 64 | 2 | 32 | 64 | +| Yüksek | 72 | 468 | 3.5 | 48 | 184 | -### What are some basic security precautions an Indexer should take? +### Bir İndeksleyicinin alması gereken bazı temel güvenlik önlemleri nelerdir? -- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. +- **Operatör cüzdanı** - Bir operatör cüzdanı oluşturmak önemli bir önlemdir, çünkü bir İndeksleyicinin stake'i kontrol eden anahtarları ile günlük işlemleri kontrol eden anahtarları arasında ayrım yapmasına olanak tanır. Talimatlar için [Protokolde Stake](/network/indexing#stake-in-the-protocol) bölümüne bakın. -- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. +- **Firewall** - Yalnızca İndeksleyici hizmetinin herkese açık olması gerekir ve yönetici bağlantı noktalarının ve veritabanı erişiminin kilitlenmesine özellikle dikkat edilmelidir: Graph Node JSON-RPC uç noktası (varsayılan bağlantı noktası: 8030), İndeksleyici yönetim API uç noktası (varsayılan bağlantı noktası: 18000) ve Postgres veritabanı uç noktası (varsayılan bağlantı noktası: 5432) herkese açık olmamalıdır. -## Infrastructure +## Altyapı -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +Bir İndeksleyicinin altyapısının merkezinde, indekslenen ağları izleyen, bir subgraph tanımına göre verileri ayıklayan, yükleyen ve [GraphQL API](/about/#how-the-graph-works) olarak sunan Graph Düğümü yer alır. Graph Düğümü'nün, her bir indekslenmiş ağdan gelen verileri açığa çıkaran bir uç noktaya; veri kaynağı için bir IPFS düğümüne; deposu için bir PostgreSQL veritabanına ve ağ ile etkileşimlerini kolaylaştıran İndeksleyici bileşenlerine bağlanması gerekir. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL veritabanı** - Graph Düğümü için ana depo, subgraph verilerinin depolandığı yerdir. İndeksleyici hizmeti ve aracı da durum kanalı verilerini, maliyet modellerini, indeksleme kurallarını ve tahsis eylemlerini depolamak için veritabanını kullanır. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. 
This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. +- **Veri uç noktası** - EVM uyumlu ağlar için, Graph Düğümü'nün EVM uyumlu bir JSON-RPC API'si sunan bir uç noktaya bağlanması gerekir. Bu, tek bir istemci şeklinde olabileceği gibi birden fazla istemci arasında yük dengelemesi yapan daha karmaşık bir kurulum da olabilir. Belirli subgraphlar'ın arşiv modu ve/veya parite izleme API'si gibi belirli istemci yetenekleri gerektireceğinin bilincinde olmak önemlidir. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **IPFS düğümü (sürüm 5'ten düşük)** - Subgraph dağıtım üst verisi IPFS ağında saklanır. Graph Düğümü, subgraph manifesti ve tüm bağlantılı dosyaları almak için subgraph dağıtımı sırasında öncelikle IPFS düğümüne erişir. Ağ İndeksleyicilerinin kendi IPFS düğümlerini barındırmalarına gerek yoktur, ağ için bir IPFS düğümü https://ipfs.network.thegraph.com adresinde barındırılır. -- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. +- **İndeksleyici hizmeti** - Ağ ile gerekli tüm harici iletişimleri gerçekleştirir. Maliyet modellerini ve indeksleme durumlarını paylaşır, ağ geçitlerinden gelen sorgu isteklerini bir Graph Düğümü'ne iletir ve ağ geçidi ile durum kanalları aracılığıyla sorgu ödemelerini yönetir. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **İndeksleyici aracı** - Ağa kaydolma, Graph Düğümlerine subgraph dağıtımlarını ve tahsisleri yönetme dahil olmak üzere İndeksleyicilerin zincir üzerindeki etkileşimlerini kolaylaştırır. -- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. +- **Prometheus metrik sunucusu** - Graph Düğümü ve İndeksleyici bileşenleri metriklerini metrik sunucusuna kaydeder. -Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. +Not: Çevik ölçeklendirmeyi desteklemek için, sorgulama ve indeksleme endişelerinin sorgu düğümleri ve indeks düğümleri olarak farklı düğüm kümeleri arasında ayrılması önerilir. -### Ports overview +### Portlara genel bakış -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. +> **Önemli**: Portları herkese açık hale getirme konusunda dikkatli olun - **yönetim portları** kilitli tutulmalıdır. Bu, aşağıda ayrıntıları verilen Graph Düğümü JSON-RPC ve İndeksleyici yönetim uç noktalarını içerir. 
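Of the ports listed below, the subgraph indexing status API (port 8030, `/graphql`) is often the first one worth checking when bringing a node up, since it exposes sync progress over GraphQL. The query below is a hedged sketch: the `indexingStatuses` field and its sub-fields are assumed from graph-node's status schema and may vary between versions.

```graphql
# Illustrative sketch against Graph Node's indexing status API (port 8030).
# Field names are assumed and may differ between graph-node versions.
{
  indexingStatuses {
    subgraph # deployment ID
    synced
    health
    chains {
      network
      latestBlock { number }
      chainHeadBlock { number }
    }
  }
}
```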
#### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
(for subgraph queries) | /subgraphs/id/... /subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS (for subgraph subscriptions) | /subgraphs/id/... /subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC (for managing deployments) | / | --admin-port | - |
-| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - |
-| 8040 | Prometheus metrics | /metrics | --metrics-port | - |
+| 8000 | GraphQL HTTP sunucusu (subgraph sorguları için) | /subgraphs/id/... /subgraphs/name/.../... | --http-port | - |
+| 8001 | GraphQL WS (subgraph abonelikleri için) | /subgraphs/id/... /subgraphs/name/.../... | --ws-port | - |
+| 8020 | JSON-RPC (dağıtımları yönetmek için) | / | --admin-port | - |
+| 8030 | Subgraph indeksleme durum API'si | /graphql | --index-node-port | - |
+| 8040 | Prometheus metrikleri | /metrics | --metrics-port | - |
-#### Indexer Service +#### İndeksleyici Hizmeti
-| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | | --- | --- | --- | --- | --- |
-| 7600 | GraphQL HTTP server (for paid subgraph queries) | /subgraphs/id/... /status /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | --metrics-port | - |
+| 7600 | GraphQL HTTP sunucusu (ücretli subgraph sorguları için) | /subgraphs/id/... /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Prometheus metrikleri | /metrics | --metrics-port | - | -#### Indexer Agent +#### İndeksleyici Aracı -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Port | Amaç | Rotalar | CLI Argümanı | Ortam Değişkeni | +| ---- | --------------------------- | ------- | ------------------------- | --------------------------------------- | +| 8000 | İndeksleyici yönetim API'si | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Setup server infrastructure using Terraform on Google Cloud +### Google Cloud'da Terraform kullanarak sunucu altyapısını kurun -> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. +> Not: İndeksleyiciler alternatif olarak AWS, Microsoft Azure veya Alibaba kullanabilir. -#### Install prerequisites +#### Önkoşulları yükleme - Google Cloud SDK -- Kubectl command line tool +- Kubectl komut satırı aracı - Terraform -#### Create a Google Cloud Project +#### Bir Google Cloud Projesi Oluşturun -- Clone or navigate to the Indexer repository. +- İndeksleyici deposunu klonlayın veya bu depoya gidin. -- Navigate to the ./terraform directory, this is where all commands should be executed. +- ./terraform dizinine gidin, tüm komutların yürütülmesi gereken yer burasıdır. ```sh cd terraform ``` -- Authenticate with Google Cloud and create a new project. +- Google Cloud ile kimlik doğrulaması yapın ve yeni bir proje oluşturun. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use the Google Cloud Console's billing page to enable billing for the new project. +- Yeni projenin faturalandırılmasını etkinleştirmek için Google Cloud Console'un faturalandırma sayfasını kullanın. -- Create a Google Cloud configuration. +- Bir Google Cloud yapılandırması oluşturun. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Enable required Google Cloud APIs. +- Gerekli Google Cloud API'lerini etkinleştirin. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Create a service account. +- Bir hizmet hesabı oluşturun. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Enable peering between database and Kubernetes cluster that will be created in the next step. +- Bir sonraki adımda oluşturulacak veritabanı ve Kubernetes kümesi arasında eşlemeyi etkinleştirin. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Create minimal terraform configuration file (update as needed). +- Minimal terraform yapılandırma dosyası oluşturun (gerektiğinde güncelleyin). 
```sh indexer= @@ -262,11 +262,11 @@ database_password = "" EOF ``` -#### Use Terraform to create infrastructure +#### Altyapı oluşturmak için Terraform'u kullanın -Before running any commands, read through [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) and create a file `terraform.tfvars` in this directory (or modify the one we created in the last step). For each variable where you want to override the default, or where you need to set a value, enter a setting into `terraform.tfvars`. +Herhangi bir komutu çalıştırmadan önce [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) dosyasını okuyun ve bu dizinde bir `terraform.tfvars` dosyası oluşturun (veya son adımda oluşturduğumuzu değiştirin). Varsayılanı geçersiz kılmak istediğiniz veya bir değer ayarlamanız gereken her değişken için `terraform.tfvars` dosyasına bir ayar girin. -- Run the following commands to create the infrastructure. +- Altyapıyı oluşturmak için aşağıdaki komutları çalıştırın. ```sh # Install required plugins @@ -279,7 +279,7 @@ terraform plan terraform apply ``` -Download credentials for the new cluster into `~/.kube/config` and set it as your default context. +Yeni kümenin kimlik bilgilerini `~/.kube/config` dosyasına indirin ve varsayılan bağlamınız olarak ayarlayın. ```sh gcloud container clusters get-credentials $indexer @@ -287,21 +287,21 @@ kubectl config use-context $(kubectl config get-contexts --output='name' | grep $indexer) ``` -#### Creating the Kubernetes components for the Indexer +#### İndeksleyici için Kubernetes komponentlerini oluşturma -- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. +- `k8s/overlays` dizinini yeni bir `$dir,` dizinine kopyalayın ve `$dir/kustomization.yaml` içindeki `bases` girişini `k8s/base` dizinini gösterecek şekilde ayarlayın. -- Read through all the files in `$dir` and adjust any values as indicated in the comments. +- `$dir` içindeki tüm dosyaları okuyun ve yorumlarda belirtilen değerleri ayarlayın. -Deploy all resources with `kubectl apply -k $dir`. +Tüm kaynakları `kubectl apply -k $dir` ile dağıtın. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Düğümü](https://github.com/graphprotocol/graph-node), GraphQL uç noktası aracılığıyla sorgulanabilen bir veri deposunu belirleyici bir şekilde güncellemek için Ethereum blok zincirine olay kaynağı sağlayan açık kaynaklı bir Rust uygulamasıdır. Geliştiriciler şemalarını tanımlamak için subgraph'lar ve blok zincirinden alınan verileri dönüştürmek için bir dizi eşleştirme kullanır ve Graph Node tüm zinciri senkronize eder, yeni blokları izler ve GraphQL uç noktası aracılığıyla sunar. -#### Getting started from source +#### Kaynaktan başlama -#### Install prerequisites +#### Önkoşulları yükleme - **Rust** @@ -309,15 +309,15 @@ Deploy all resources with `kubectl apply -k $dir`. 
- **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Ubuntu kullanıcıları için Ek Gereksinimler** - Ubuntu üzerinde bir Graph Düğümü çalıştırmak için birkaç ek paket gerekebilir. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Kurulum -1. Start a PostgreSQL database server +1. Bir PostgreSQL veritabanı sunucusu başlatma ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. [Graph Düğümü](https://github.com/graphprotocol/graph-node) github deposunu klonlayın ve `cargo build` çalıştırarak kaynağı derleyin -3. Now that all the dependencies are setup, start the Graph Node: +3. Artık tüm bağımlılıklar ayarlandığına göre Graph Düğümü'nü başlatın: ```sh cargo run -p graph-node --release -- \ @@ -336,48 +336,48 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Docker kullanmaya başlama -#### Prerequisites +#### Ön Koşullar -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. +- **Ethereum düğümü** - Varsayılan olarak, docker oluşturma kurulumu ana makinenizdeki Ethereum düğümüne bağlanmak için ana ağı kullanacaktır:[http://host.docker.internal:8545](http://host.docker.internal:8545). Bu ağ adını ve url'yi `docker-compose.yaml` dosyasını güncelleyerek değiştirebilirsiniz. -#### Setup +#### Kurulum -1. Clone Graph Node and navigate to the Docker directory: +1. Graph Düğümü'nü klonlayın ve Docker dizinine gidin: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml`using the included script: +2. Yalnızca linux kullanıcıları için - `docker-compose.yaml` dosyasında `host.docker.internal` yerine ana IP adresini kullanın: ```sh ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Ethereum uç noktanıza bağlanacak yerel bir Graph Düğümü başlatın: ```sh docker-compose up ``` -### Indexer components +### İndeksleyici komponentleri -To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: +Ağa başarılı bir şekilde katılmak hemen hemen sürekli izleme ve etkileşim gerektirir, bu nedenle İndeksleyicilerin ağa katılımını kolaylaştırmak için bir dizi Typescript uygulaması oluşturduk. Üç İndeksleyici bileşeni bulunmaktadır: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards on chain and how much is allocated towards each. +- **İndeksleyici aracısı** - Aracı, ağı ve İndeksleyicinin kendi altyapısını izler ve hangi subgraph dağıtımlarının indekslendiğini, zincir üzerinde tahsis edildiğini ve her birine ne miktarda tahsis edildiğini yönetir. 
-- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **İndeksleyici hizmeti** - Harici olarak gösterilmesi gereken tek bileşen olan hizmet, subgraph sorgularını graph düğümüne iletir, sorgu ödemeleri için durum kanallarını yönetir ve istemcilerle ağ geçitleri gibi önemli karar verme bilgilerini paylaşır. -- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. +- **İndeksleyici CLI'si** - İndeksleyici aracısını yönetmek için komut satırı arayüzüdür. İndeksleyicilerin maliyet modellerini, manuel tahsislerini, eylem kuyruğunu ve indeksleme kurallarını yönetmesini sağlar. -#### Getting started +#### Buradan başlayın -The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! +İndeksleyici aracısı ve İndeksleyici hizmeti, Graph Düğümü altyapınızla birlikte konumlandırılmalıdır. İndeksleyici bileşenleriniz için sanal yürütme ortamları kurmanın birçok yolu vardır; burada bunları NPM paketleri veya kaynak kullanarak baremetal üzerinde veya Google Cloud Kubernetes Motoru üzerinde kubernetes ve docker aracılığıyla nasıl çalıştıracağınızı açıklayacağız. Bu kurulum örnekleri altyapınıza iyi bir şekilde uyarlanamazsa, muhtemelen başvurabileceğiniz bir topluluk rehberi olacaktır, gelin [Discord](https://discord.gg/graphprotocol)'da merhaba deyin! İndeksleyici bileşenlerinizi başlatmadan önce [protokolde stake etmeyi](/network/indexing#stake-in-the-protocol) unutmayın! -#### From NPM packages +#### NPM paketlerinden ```sh npm install -g @graphprotocol/indexer-service @@ -400,7 +400,7 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### From source +#### Kaynaktan ```sh # From Repo root directory @@ -420,16 +420,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Using docker +#### Docker kullanma -- Pull images from the registry +- Kayıt defterinden görüntüleri çekin ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Or build images locally from source +Veya görüntüleri yerel olarak kaynaktan oluşturun ```sh # Indexer service @@ -444,24 +444,24 @@ docker build \ -t indexer-agent:latest \ ``` -- Run the components +- Komponentleri çalıştırın ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). 
+**NOT:** Konteynerleri(containers) başlattıktan sonra, İndeksleyici hizmetine [http://localhost:7600](http://localhost:7600) adresinden erişilebilmeli ve İndeksleyici aracısı [http://localhost:18000/](http://localhost:18000/) adresinden İndeksleyici yönetim API'sini sunabilmelidir. -#### Using K8s and Terraform +#### K8s ve Terraform kullanma -See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section +[Google Cloud'da Terraform Kullanarak Sunucu Altyapısı Kurma](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) bölümüne bakın -#### Usage +#### Kullanım -> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **NOT**: Tüm çalışma zamanı yapılandırma değişkenleri ya başlangıçta komuta parametre olarak ya da `COMPONENT_NAME_VARIABLE_NAME` (örn. `INDEXER_AGENT_ETHEREUM`) biçimindeki ortam değişkenleri kullanılarak uygulanabilir. -#### Indexer agent +#### İndeksleyici Aracı ```sh graph-indexer-agent start \ @@ -490,7 +490,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer service +#### İndeksleyici hizmeti ```sh SERVER_HOST=localhost \ @@ -516,58 +516,60 @@ graph-indexer-service start \ | pino-pretty ``` -#### Indexer CLI +#### İndeksleyici CLI -The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. +İndeksleyici CLI, `graph indexer` terminalinden erişilebilen [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) için bir eklentidir. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Indexer management using Indexer CLI +#### İndeksleyici CLI kullanarak indeksleyici yönetimi -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. +**İndeksleyici Yönetim API**'si ile etkileşim için önerilen araç, **Graph CLI**'nın bir uzantısı olan **İndeksleyici CLI**'dır. İndeksleyici aracısı, İndeksleyici adına ağ ile bağımsız olarak etkileşim kurmak için bir İndeksleyiciden gelen girdiye ihtiyaç duyar. İndeksleyici aracı davranışını tanımlama mekanizması **tahsis yönetim** modu ve **indeksleme kurallarıdır**. Otomatik modda, bir İndeksleyici, sorguları indekslemek ve sunmak üzere subgraph'ları seçmek için kendi özel stratejisini uygulamak üzere **indeksleme kurallarını** kullanabilir. Kurallar, aracı tarafından sunulan ve İndeksleyici Yönetim API'si olarak bilinen bir GraphQL API aracılığıyla yönetilir. 
Manuel modda, bir İndeksleyici **eylem kuyruğunu** kullanarak tahsis eylemleri oluşturabilir ve yürütülmeden önce bunları açıkça onaylayabilir. Gözetim modunda, **indeksleme kuralları** **eylem kuyruğunu** doldurmak için kullanılır ve ayrıca yürütme için açık onay gerektirir. -#### Usage +#### Kullanım -The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. +İndeksleyici CLI, tipik olarak bağlantı noktası yönlendirme yoluyla İndeksleyici aracısına +bağlanır, bu nedenle CLI'nın aynı sunucuda veya kümede çalışması gerekmez. Başlamanıza yardımcı olmak ve biraz bilgi +vermek için CLI burada kısaca açıklanacaktır. -- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` - İndeksleyici yönetim API'sine bağlanın. Tipik olarak sunucuya bağlantı port yönlendirme yoluyla açılır, böylece CLI uzaktan kolayca çalıştırılabilir. (Örnek: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. +- `graph indexer rules get [options] [ ...]` - Tüm kuralları almak için `` olarak `all` veya genel varsayılanları almak için `global` kullanarak bir veya daha fazla indeksleme kuralı alın. Dağıtıma özgü kuralların genel kuralla birleştirileceğini belirtmek için bir `--merged` ek bağımsız değişkeni kullanılabilir. Bu şekilde, indeksleyici aracısında uygulanırlar. -- `graph indexer rules set [options] ...` - Set one or more indexing rules. +- `graph indexer rules set [options] ...` - Bir veya daha fazla indeksleme kuralı ayarlayın. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` - Varsa bir subgraph dağıtımını indekslemeye başlayın ve `decisionBasis` değerini `always` olarak ayarlayın, böylece İndeksleyici aracı her zaman onu indekslemeyi seçecektir. Genel kural her zaman olarak ayarlanırsa, ağdaki mevcut tüm subgraphlar indekslenecektir. -- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. +- `graph indexer rules stop [options] ` - Bir dağıtımı indekslemeyi durdurun ve `decisionBasis` değerini never olarak ayarlayın, böylece indekslenecek dağıtımlara karar verirken bu dağıtımı atlayacaktır. -- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. +- `graph indexer rules maybe [options] ` — Bir dağıtım için `decisionBasis` öğesini `rules` olarak ayarlayın, böylece İndeksleyici aracısı bu dağıtımı indeksleyip indekslemeyeceğine karar vermek için indeksleme kurallarını kullanacaktır. 
-- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additonal argument `--status` can be used to print out all actions of a certain status. +- `graph indexer actions get [options] ` - `all` kullanarak bir veya daha fazla eylemi getirin veya tüm eylemleri almak için `action-id`'yi boş bırakın. Belirli bir durumdaki tüm eylemleri yazdırmak için `--status` ek argümanı kullanılabilir. -- `graph indexer action queue allocate ` - Queue allocation action +- `graph indexer action queue allocate ` - Kuyruk tahsis eylemi -- `graph indexer action queue reallocate ` - Queue reallocate action +- `graph indexer action queue reallocate ` - Kuyruk yeniden tahsis eylemi -- `graph indexer action queue unallocate ` - Queue unallocate action +- `graph indexer action queue unallocate ` - Kuyruk tahsis kaldırma eylemi -- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator +- `graph indexer actions cancel [ ...]` - id(kimlik) belirtilmemişse kuyruktaki tüm eylemleri iptal eder, aksi takdirde ayırıcı olarak boşluk içeren id dizisini iptal eder -- `graph indexer actions approve [ ...]` - Approve multiple actions for execution +- `graph indexer actions approve [ ...]` - Yürütme için birden fazla eylemi onaylama -- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately +- `graph indexer actions execute approve` - Çalışanı onaylanan eylemleri derhal gerçekleştirmeye zorlama -All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. +Çıktıda kuralları görüntüleyen tüm komutlar, `-output` argümanını kullanarak desteklenen çıktı formatları (`table`, `yaml`, and `json`) arasında seçim yapabilir. -#### Indexing rules +#### İndeksleme kuralları -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +İndeksleme kuralları genel varsayılanlar olarak ya da ID'leri kullanılarak belirli subgraph dağıtımları için uygulanabilir. Diğer tüm alanlar isteğe bağlı iken `deployment` ve `decisionBasis` alanları zorunludur. Bir indeksleme kuralı `decisionBasis` olarak `rules`'a sahipse, indeksleyici aracı bu kuraldaki boş olmayan eşik değerlerini ilgili dağıtım için ağdan alınan değerlerle karşılaştıracaktır. Subgraph dağıtımı, eşik değerlerden herhangi birinin üstünde (veya altında) değerlere sahipse, indeksleme için seçilecektir. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +Örneğin, genel kuralın `minStake` değeri **5** (GRT) ise, kendisine 5 (GRT)'den fazla pay tahsis edilen tüm subgraph dağıtımları indekslenecektir. Eşik kuralları arasında `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, ve `minAverageQueryFees` yer alır. 
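As a concrete illustration of the threshold behaviour described above, a global `minStake` rule could be applied with the `rules set` command summarized earlier. This is only a sketch: the key/value syntax follows the command description above, but the exact unit the CLI expects for `minStake` (whole GRT versus wei) should be confirmed against the indexer CLI's own help output before use.

```sh
# Hedged example: apply the global default described above so that any
# deployment with more than 5 GRT of allocated stake is picked up for indexing
graph indexer rules set global minStake 5

# Read the global rule back to verify the change
graph indexer rules get global
```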
-Data model: +Veri modeli: ```graphql type IndexingRule { @@ -601,7 +603,7 @@ IndexingDecisionBasis { } ``` -Example usage of indexing rule: +İndeksleme kuralı örnek kullanımı: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -613,20 +615,20 @@ graph indexer rules stop QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK ``` -#### Actions queue CLI +#### Eylemler kuyruğu CLI -The indexer-cli provides an `actions` module for manually working with the action queue. It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. +Indexer-cli, eylem kuyruğu ile manuel olarak çalışmak için bir `actions` modülü sağlar. Eylem kuyruğu ile etkileşim kurmak için indeksleyici yönetim sunucusu tarafından barındırılan **Graphql API**'sini kullanır. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +Eylem yürütme çalışanı, yalnızca `ActionStatus = approved` değerine sahipse yürütmek için kuyruktan öğeleri alır. Önerilen yolda eylemler ActionStatus = queued ile kuyruğa eklenir, bu nedenle zincir üzerinde yürütülmeleri için onaylanmaları gerekir. Genel işleyiş şu şekilde olacaktır: -- Action added to the queue by the 3rd party optimizer tool or indexer-cli user -- Indexer can use the `indexer-cli` to view all queued actions -- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. -- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. -- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. -- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. +- Kuyruğa üçüncü şahıs optimizasyon aracı veya indexer-cli kullanıcısı tarafından eklenen eylem +- İndeksleyici, sıraya alınan tüm eylemleri görüntülemek için `indexer-cli`'yi kullanabilir +- İndeksleyici (veya diğer yazılımlar) `indexer-cli` kullanarak kuyruktaki eylemleri onaylayabilir veya iptal edebilir. Onaylama ve iptal etme komutları girdi olarak bir dizi eylem kimliği alır. +- Yürütme çalışanı, onaylanan eylemler için kuyruğu düzenli olarak tarar. Kuyruktan`approved` eylemleri alır, bunları yürütmeye çalışır ve yürütme durumuna bağlı olarak db'deki değerleri `success` veya `failed` olarak günceller. +- Eğer bir eylem başarılı olursa, çalışan, aracı `auto` veya `oversight` modunda manuel eylemler gerçekleştirirken yararlı olacak şekilde, aracıya tahsisi ileriye dönük olarak nasıl yöneteceğini söyleyen bir indeksleme kuralının mevcut olmasını sağlayacaktır. +- İndeksleyici, eylem yürütme geçmişini görmek için eylem kuyruğunu izleyebilir ve gerekirse yürütmede başarısız olan eylem öğelerini yeniden onaylayabilir ve güncelleyebilir. 
Eylem kuyruğu, kuyruğa alınan ve gerçekleştirilen tüm eylemlerin bir geçmiş kaydını sağlar. -Data model: +Veri modeli: ```graphql Type ActionInput { @@ -659,147 +661,143 @@ ActionType { } ``` -Example usage from source: +Kaynaktan kullanım örneği: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` -Note that supported action types for allocation management have different input requirements: +Tahsis yönetimi için desteklenen eylem türlerinin farklı girdi gereksinimleri olduğunu unutmayın: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - stake'i belirli bir subgraph dağıtımına tahsis eder - - required action params: + - gerekli eylem parametreleri: - deploymentID - amount -- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere +- `Unallocate` - tahsisi kapatır, stake'i başka bir yere yeniden tahsis etmek için serbest bırakır - - required action params: + - gerekli eylem parametreleri: - allocationID - deploymentID - - optional action params: + - opsiyonel eylem parametreleri: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (graph-node'un sağladığıyla uyuşmasa bile sağlanan POI'yi kullanmaya zorlar) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - tahsisi atomik olarak kapatır ve aynı subgraph dağıtımı için yeni bir tahsis açar - - required action params: + - gerekli eylem parametreleri: - allocationID - deploymentID - amount - - optional action params: + - opsiyonel eylem parametreleri: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (graph-node'un sağladığıyla uyuşmasa bile sağlanan POI'yi kullanmaya zorlar) -#### Cost models +#### Maliyet modelleri -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Maliyet modelleri, pazar ve sorgu niteliklerine dayalı olarak sorgular için dinamik fiyatlandırma sağlar. İndeksleyici Hizmeti, sorgulara yanıt vermeyi amaçladıkları her bir subgraph için ağ geçitleriyle bir maliyet modeli paylaşır. 
Ağ geçitleri de sorgu başına İndeksleyici seçim kararları vermek ve seçilen İndeksleyicilerle ödeme pazarlığı yapmak için maliyet modelini kullanır. #### Agora -The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. +Agora dili, sorgular için maliyet modellerini bildirmek için esnek bir format sağlar. Agora fiyat modeli, bir GraphQL sorgusundaki her üst düzey sorgu için sırayla çalışan bir deyimler serisidir. Her üst düzey sorgu için, onunla eşleşen ilk ifade o sorgunun fiyatını belirler. -A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. +Bir ifade, GraphQL sorgularını eşleştirmek için kullanılan bir evet-hayır sorusundan ve değerlendirildiğinde GRT cinsinden ondalık maliyet çıktısı veren bir maliyet ifadesinden oluşur. Bir sorgunun adlandırılmış argüman konumundaki değerleri evet-hayır sorusunda yakalanabilir ve ifadede kullanılabilir. Ayrıca, genel değerler de ayarlanabilir ve bir ifadedeki yer tutucuların yerine kullanılabilir. -Example cost model: +Örnek maliyet modeli: ``` -# This statement captures the skip value, -# uses a boolean expression in the predicate to match specific queries that use `skip` -# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +# Bu ifade atlama değerini yakalar, +# `skip` kullanan belirli sorguları eşleştirmek için evet-hayır sorusunda bir boolean ifadesi kullanır +# `skip` değerine ve SYSTEM_LOAD genel değerine dayalı olarak maliyeti hesaplamak için bir maliyet ifadesi query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# This default will match any GraphQL expression. -# It uses a Global substituted into the expression to calculate cost +# Bu varsayılan, herhangi bir GraphQL ifadesiyle eşleşecektir. +# Maliyeti hesaplamak için ifadenin içine yerleştirilmiş bir Global (genel) kullanır default => 0.1 * $SYSTEM_LOAD; ``` -Example query costing using the above model: +Yukarıdaki modeli kullanarak örnek sorgu maliyetlendirmesi: -| Query | Price | +| Sorgu | Fiyat | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### Applying the cost model +#### Maliyet modelinin uygulanması -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. +Maliyet modelleri, onları veritabanında saklanmak üzere İndeksleyici aracısının İndeksleyici Yönetim API'sine aktaran İndeksleyici CLI aracılığıyla uygulanır. İndeksleyici Hizmeti daha sonra bunları alır ve maliyet modellerini istedikleri zaman ağ geçitlerine sunar. 
```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interacting with the network +## Ağ ile etkileşim kurma -### Stake in the protocol +### Protokolde stake -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ +Bir İndeksleyici olarak ağa katılmanın ilk adımları protokolü onaylamak, fon stake etmek ve ( opsiyonel olarak) günlük protokol etkileşimleri için bir operatör adresi oluşturmaktır. _ **Not**: Bu talimatların amaçları doğrultusunda sözleşme etkileşimi için Remix kullanılacaktır, ancak kendi tercih ettiğiniz aracı kullanmaktan çekinmeyin ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), ve [MyCrypto](https://www.mycrypto.com/account) bilinen diğer birkaç araçtır)._ -Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. +Bir İndeksleyici protokolde GRT'yi stake ettikten sonra, [İndeksleyici kompenetleri ](/network/indexing#indexer-components) başlatılabilir ve ağ ile etkileşimlerine başlayabilir. -#### Approve tokens +#### Tokenleri onaylama -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. [Remix uygulamasını](https://remix.ethereum.org/) bir tarayıcıda açın -2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. `File Explorer`'da [ABI token](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json) ile **GraphToken.abi** adında bir dosya oluşturun. -3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. +3. Editörde `GraphToken.abi` seçili ve açık durumdayken, Remix arayüzünde Deploy ve `Run Transactions` bölümüne gidin. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Ortam altında `Injected Web3`'ü seçin ve `Account` altında İndeksleyici adresinizi seçin. -5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. +5. GraphToken sözleşme adresini ayarlayın - GraphToken sözleşme adresini (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) `At Address` seçeneğinin yanına yapıştırın ve uygulamak için `At address` düğmesine tıklayın. -6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). +6. Staking sözleşmesini onaylamak için `approve(spender, amount)` fonksiyonunu çağırın. `spender`'ı Staking sözleşmesi adresiyle (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) ve `amount`'ı stake edilecek tokenlarla (wei cinsinden) doldurun. -#### Stake tokens +#### Tokenleri stake et -1. 
Open the [Remix app](https://remix.ethereum.org/) in a browser +1. [Remix uygulamasını](https://remix.ethereum.org/) bir tarayıcıda açın -2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. +2. `File Explorer`'da, Staking ABI ile **Staking.abi** adında bir dosya oluşturun. -3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. +3. Editörde `Staking.abi` seçili ve açık durumdayken, Remix arayüzünde `Deploy` ve `Run Transactions` bölümüne gidin. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. Ortam altında `Injected Web3`'ü seçin ve `Account` altında İndeksleyici adresinizi seçin. -5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. +5. Stake sözleşmesi adresini ayarlayın - Stake sözleşmesi adresini (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) `At address` seçeneğinin yanına yapıştırın ve uygulamak için `At Address` düğmesine tıklayın. -6. Call `stake()` to stake GRT in the protocol. +6. GRT'yi protokolde stake etmek için `stake()` fonksiyonunu çağırın. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Opsiyonel) İndeksleyiciler, fonları kontrol eden anahtarları subgraphlar'da tahsis etme ve (ücretli) sorgular sunma gibi günlük eylemleri gerçekleştiren anahtarlardan ayırmak için başka bir adresi kendi İndeksleyici altyapıları için operatör olarak onaylayabilirler. Operatörü ayarlamak için operatör adresi ile `setOperator()` fonksiyonunu çağırın. -8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. +8. ( Opsiyonel) Ödüllerin dağıtımını kontrol etmek ve Delegatörleri stratejik olarak cezbetmek için İndeksleyiciler, indexingRewardCut (milyon başına parça), queryFeeCut (milyon başına parça) ve cooldownBlocks (blok sayısı) değerlerini güncelleyerek delegasyon parametrelerini güncelleyebilirler. Bunu yapmak için `setDelegationParameters()` fonksiyonunu çağırın. Aşağıdaki örnek queryFeeCut değerini sorgu indirimlerinin %95'ini İndeksleyiciye ve %5'ini Delegatörlere dağıtacak şekilde ayarlar, indexingRewardCut değerini indeksleme ödüllerinin %60'ını İndeksleyiciye ve %40'ını Delegatörlere dağıtacak şekilde ayarlar ve `thecooldownBlocks` süresini 500 blok olarak ayarlar. ``` setDelegationParameters(950000, 600000, 500) ``` -### The life of an allocation +### Bir tahsisin ömrü -After being created by an Indexer a healthy allocation goes through four states. +Bir İndeksleyici tarafından oluşturulmasının ardından sağlıklı bir tahsis dört aşamadan geçer. 
-- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +- **Aktif** - Zincir üstünde bir tahsis oluşturulduğunda ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) **aktif** olarak kabul edilir. İndeksleyicinin kendi ve/veya delege edilmiş stake'inin bir kısmı indeksleme ödüllerini talep etmelerine ve bu subgraph dağıtımı için sorgular sunmalarına olanak tanıyan subgraph dağıtımına tahsis edilir. İndeksleyici aracı, İndeksleyici kurallarına ilişkin olarak tahsisat oluşturmayı düzenler. -- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). +- **Kapalı** - İndeksleyici 1 dönem geçtikten sonra ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) bir tahsisi kapatmakta serbesttir veya İndeksleyici aracısı **maxAllocationEpochs** (şu anda 28 gün) sonrasında tahsisi otomatik olarak kapatacaktır. Bir tahsis geçerli bir indeksleme kanıtı (POI) ile kapatıldığında, indeksleme ödülleri İndeksleyici ve bu indeksleyicinin Delegatörlerine dağıtılır (daha fazla bilgi için aşağıdaki "ödüller nasıl dağıtılır?" bölümüne bakın). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +İndeksleyicilerin, zincir üstünde tahsis oluşturmadan önce subgraph dağıtımlarını chainhead ile senkronize etmek için zincir dışı senkronizasyon fonksiyonunu kullanmaları önerilir. Bu özellik bilhassa senkronize edilmesi 28 dönemden daha uzun sürebilecek veya belirsiz bir şekilde başarısız olma ihtimali olan subgraphlar için kullanışlıdır. 
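Putting the offchain-syncing recommendation above into practice with the commands already documented in this guide might look like the following sketch; the deployment ID, allocation amount, and action ID are placeholders taken from the earlier examples.

```sh
# Sync the deployment offchain first, without an on-chain allocation
graph indexer rules offchain QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6

# Once it has caught up to chainhead, queue the allocation and approve it
graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000
graph indexer actions approve 1
```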
diff --git a/website/pages/tr/network/overview.mdx b/website/pages/tr/network/overview.mdx index bee546908372..9b8963f7056d 100644 --- a/website/pages/tr/network/overview.mdx +++ b/website/pages/tr/network/overview.mdx @@ -1,15 +1,15 @@ --- -title: Network Overview +title: Ağa Genel Bakış --- -The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. +Graph Ağı, blok zinciri verilerini düzenlemek için merkeziyetsiz bir indeksleme protokolüdür. Uygulamalar, ağda indekslenen verileri almak için subgraph'ler adı verilen açık API'leri GraphQL ile sorgular. Graph ile geliştiriciler, tamamen genel altyapı üzerinde çalışan sunucusuz uygulamalar oluşturabilir. -## Overview +## Genel Bakış -The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. +Graph Ağı, ağa hizmet sağlayan ve verileri Web3 uygulamalarına sunan indeksleyiciler, küratörler ve delegatörlerden oluşur. Tüketiciler uygulamaları kullanır ve verileri tüketir. -![Token Economics](/img/Network-roles@2x.png) +![Token Ekonomisi](/img/Network-roles@2x.png) -To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20 used to allocate resources in the network. +Graph Ağı'nın ekonomik güvenliğini ve sorgulanan verilerin bütünlüğünü sağlamak için katılımcılar Graph Token ([GRT](/tokenomics)) stake eder ve kullanır. GRT, ağdaki kaynakları tahsis etmek için kullanılan, ERC-20 standardında bir çalışma aracı belirtecidir. -Active Indexers, Curators and Delegators can provide services and earn income from the network, proportional to the amount of work they perform and their GRT stake. +Aktif indeksleyiciler, küratörler ve delegatörler, gerçekleştirdikleri iş miktarı ve stake ettikleri GRT ile orantılı olarak hizmet sağlayabilir ve ağdan gelir elde edebilir.
diff --git a/website/pages/tr/new-chain-integration.mdx b/website/pages/tr/new-chain-integration.mdx index c5934efa6f87..042b23c42b11 100644 --- a/website/pages/tr/new-chain-integration.mdx +++ b/website/pages/tr/new-chain-integration.mdx @@ -1,75 +1,75 @@ --- -title: Integrating New Networks +title: Yeni Ağların Entegrasyonu --- -Graph Node can currently index data from the following chain types: +Graph Düğümü şu anda aşağıdaki zincir türlerinden verileri indeksleyebilir: -- Ethereum, via EVM JSON-RPC and [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) -- NEAR, via a [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) -- Cosmos, via a [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) -- Arweave, via an [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) +- Ethereum, EVM JSON-RPC ve [Ethereum Firehose](https://github.com/streamingfast/firehose-ethereum) aracılığıyla +- NEAR, [NEAR Firehose](https://github.com/streamingfast/near-firehose-indexer) aracılığıyla +- Cosmos, [Cosmos Firehose](https://github.com/graphprotocol/firehose-cosmos) aracılığıyla +- Arweave, [Arweave Firehose](https://github.com/graphprotocol/firehose-arweave) aracılığıyla -If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. +Bu zincirlerden herhangi biriyle ilgileniyorsanız, entegrasyon Graph Düğümü yapılandırması ve testinden ibarettir. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +Farklı bir zincir türüyle ilgileniyorsanız, Graph Düğümü ile yeni bir entegrasyon oluşturmanız gerekir. Önerilen yaklaşımımız, söz konusu zincir için yeni bir Firehose geliştirmek ve ardından bu Firehose'u Graph Düğümü ile entegre etmektir. Daha fazla bilgi için aşağıya bakın. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** -If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). +Blok zinciri EVM eşdeğeri ise ve istemci/düğüm standart EVM JSON-RPC API'sini sunuyorsa, Graph Düğümü yeni zinciri indeksleyebilmelidir. Daha fazla bilgi için [EVM JSON-RPC'yi test etme](new-chain-integration#testing-an-evm-json-rpc) bölümüne bakın. **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +EVM tabanlı olmayan zincirler için, Graph Düğümü'nün blok zinciri verilerini gRPC ve bilinen tip tanımları aracılığıyla alması gerekir. Bu, [StreamingFast](https://www.streamingfast.io/) tarafından geliştirilen ve dosya tabanlı ve akış öncelikli bir yaklaşım kullanarak yüksek ölçeklenebilir bir indeksleme blok zinciri çözümü sağlayan yeni bir teknoloji olan [Firehose](firehose/) aracılığıyla yapılabilir. Geliştirme konusunda yardıma ihtiyacınız varsa [StreamingFast ekibine](mailto:integrations@streamingfast.io/) ulaşın.
-## Difference between EVM JSON-RPC & Firehose +## EVM JSON-RPC ve Firehose arasındaki fark -While the two are suitable for subgraphs, a Firehose is always required for developers wanting to build with [Substreams](substreams/), like building [Substreams-powered subgraphs](cookbook/substreams-powered-subgraphs/). In addition, Firehose allows for improved indexing speeds when compared to JSON-RPC. +Her ikisi de subgraphlar için uygun olsa da, [Substreams destekli subgraphlar](cookbook/substreams-powered-subgraphs/) oluşturmak gibi [Substreams](substreams/) ile oluşturmak isteyen geliştiriciler için her zaman bir Firehose gereklidir. Ayrıca Firehose, JSON-RPC ile karşılaştırıldığında daha iyi indeksleme hızları sağlar. -New EVM chain integrators may also consider the Firehose-based approach, given the benefits of substreams and its massive parallelized indexing capabilities. Supporting both allows developers to choose between building substreams or subgraphs for the new chain. +Yeni EVM zinciri entegre edicileri, substreams faydaları ve devasa paralelleştirilmiş indeksleme kabiliyetleri göz önüne alındığında Firehose tabanlı yaklaşımı da düşünebilirler. Her ikisinin de desteklenmesi, geliştiricilerin yeni zincir için substreams veya subgraphlar oluşturma arasında seçim yapmasına olanak tanır. -> **NOTE**: A Firehose-based integration for EVM chains will still require Indexers to run the chain's archive RPC node to properly index subgraphs. This is due to the Firehose's inability to provide smart contract state typically accessible by the `eth_call` RPC method. (It's worth reminding that eth_calls are [not a good practice for developers](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/)) +> **NOT**: EVM zincirleri için Firehose tabanlı bir entegrasyon, subgraphları düzgün bir şekilde indekslemek için İndeksleyicilerin zincirin arşiv RPC düğümünü çalıştırmasını gerektirecektir. Bunun nedeni, Firehose'un \`eth_call' RPC metodu tarafından erişilebilen akıllı sözleşme durumunu sağlayamamasıdır. (eth_calls'ların [geliştiriciler için iyi bir uygulama olmadığını](https://thegraph.com/blog/improve-subgraph-performance-reduce-eth-calls/) hatırlatmakta fayda var) --- -## Testing an EVM JSON-RPC +## EVM JSON-RPC'yi test etme -For Graph Node to be able to ingest data from an EVM chain, the RPC node must expose the following EVM JSON RPC methods: +Graph Düğümü'nün bir EVM zincirinden veri alabilmesi için RPC düğümünün aşağıdaki EVM JSON RPC yöntemlerini sunması gerekir: - `eth_getLogs` -- `eth_call` \_(for historical blocks, with EIP-1898 - requires archive node): +- `eth_call` \_(geçmiş bloklar için EIP-1898 ile - arşiv düğümü gerektirir): - `eth_getBlockByNumber` - `eth_getBlockByHash` - `net_version` -- `eth_getTransactionReceipt`, in a JSON-RPC batch request -- _`trace_filter`_ _(optionally required for Graph Node to support call handlers)_ +- `eth_getTransactionReceipt`, bir JSON-RPC toplu talebinde +- _`trace_filter`_ _(Graph Düğümü'nün çağrı işleyicilerini desteklemesi için opsiyonel olarak gereklidir)_ -### Graph Node Configuration +### Graph Düğümü Konfigürasyonu -**Start by preparing your local environment** +**Yerel ortamınızı hazırlayarak başlayın** -1. [Clone Graph Node](https://github.com/graphprotocol/graph-node) -2. Modify [this line](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) to include the new network name and the EVM JSON RPC compliant URL - > Do not change the env var name itself. 
It must remain `ethereum` even if the network name is different. -3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ +1. [Graph Düğümü'nü Klonlayın](https://github.com/graphprotocol/graph-node) +2. [Bu satırı](https://github.com/graphprotocol/graph-node/blob/master/docker/docker-compose.yml#L22) yeni ağ adını ve EVM JSON RPC uyumlu URL'yi içerecek şekilde değiştirin + > env var adının kendisini değiştirmeyin. Ağ adı farklı olsa bile `ethereum` olarak kalmalıdır. +3. Bir IPFS düğümü çalıştırın veya Graph tarafından kullanılanı kullanın: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Bir subgraph'ı yerel olarak dağıtarak entegrasyonu test edin** -1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) -2. Create a simple example subgraph. Some options are below: - 1. The pre-packed [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) smart contract and subgraph is a good starting point - 2. Bootstrap a local subgraph from any existing smart contract or solidity dev environment [using Hardhat with a Graph plugin](https://github.com/graphprotocol/hardhat-graph) -3. Adapt the resulting `subgraph.yaml` by changing [`dataSources.network`](http://dataSources.network) to the same name previously passed on to Graph Node. -4. Create your subgraph in Graph Node: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` -5. Publish your subgraph to Graph Node: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` +1. [graph-cli](https://github.com/graphprotocol/graph-cli)'ı yükleyin +2. Basit bir örnek subgraph oluşturun. Bazı seçenekler aşağıdadır: + 1. Önceden paketlenmiş [Gravitar](https://github.com/graphprotocol/example-subgraph/tree/f89bdd4628efa4badae7367d4919b3f648083323) akıllı sözleşmesi ve subgraph'ı iyi bir başlangıç noktasıdır + 2. [Bir Graph eklentisi ile Hardhat kullanarak](https://github.com/graphprotocol/hardhat-graph) mevcut herhangi bir akıllı sözleşmeden veya solidity geliştirme ortamından yerel bir subgraph'ı önyükleyin +3. Elde edilen `subgraph.yaml`'ı, [`dataSources.network`](http://dataSources.network)'ü daha önce Graph Node'a aktarılan adla değiştirerek uyarlayın. +4. Graph Düğümü'nde subgraph'ınızı oluşturun: `graph create $SUBGRAPH_NAME --node $GRAPH_NODE_ENDPOINT` +5. Subgraph'ınızı Graph Düğümü'nde yayınlayın: `graph deploy $SUBGRAPH_NAME --ipfs $IPFS_ENDPOINT --node $GRAPH_NODE_ENDPOINT` -Graph Node should be syncing the deployed subgraph if there are no errors. Give it time to sync, then send some GraphQL queries to the API endpoint printed in the logs. +Herhangi bir hata olmadığı takdirde Graph Düğümü dağıtılan subgraph'ı senkronize ediyor olmalıdır. Senkronizasyon için zaman tanıyın, ardından kayıtlarda yazdırılan API uç noktasına bazı GraphQL sorguları gönderin. --- -## Integrating a new Firehose-enabled chain +## Firehose özellikli yeni bir zincirin entegrasyonu -Integrating a new chain is also possible using the Firehose approach. This is currently the best option for non-EVM chains and a requirement for substreams support. Additional documentation focuses on how Firehose works, adding Firehose support for a new chain and integrating it with Graph Node. Recommended docs for integrators: +Yeni bir zincirin entegrasyonu, Firehose yaklaşımını kullanarak da mümkündür. Bu, şu anda EVM dışı zincirler için en iyi seçenektir ve substreams desteği için bir gerekliliktir.
Ek dokümantasyon, Firehose'un nasıl çalıştığını, yeni bir zincir için Firehose desteğinin nasıl ekleneceğini ve bunun Graph Düğümü ile nasıl entegre edileceğini ele alır. Entegre ediciler için önerilen dokümanlar: -1. [General docs on Firehose](firehose/) -2. [Adding Firehose support for a new chain](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) -3. [Integrating Graph Node with a new chain via Firehose](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) +1. [Firehose ile ilgili genel dokümanlar](firehose/) +2. [Yeni bir zincir için Firehose desteği ekleme](https://firehose.streamingfast.io/integrate-new-chains/integration-overview) +3. [Firehose aracılığıyla Graph Düğümü'nün yeni bir zincirle entegrasyonu](https://github.com/graphprotocol/graph-node/blob/master/docs/implementation/add-chain.md) diff --git a/website/pages/tr/operating-graph-node.mdx b/website/pages/tr/operating-graph-node.mdx index 832b6cccf347..2ce8c3b707eb 100644 --- a/website/pages/tr/operating-graph-node.mdx +++ b/website/pages/tr/operating-graph-node.mdx @@ -1,40 +1,40 @@ --- -title: Operating Graph Node +title: Graph Düğümü İşletme --- -Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. +Graph Düğümü, subgraphları indeksleyen ve sonuçta oluşan verileri GraphQL API aracılığıyla sorgulanabilir hale getiren bileşendir. Bu nedenle indeksleyici yığınının merkezi bir parçasıdır ve başarılı bir indeksleyici çalıştırmak için Graph Düğümü'nün doğru şekilde çalışması çok önemlidir. -This provides a contextual overview of Graph Node, and some of the more advanced options available to indexers. Detailed documentation and instructions can be found in the [Graph Node repository](https://github.com/graphprotocol/graph-node). +Bu, Graph Düğümü hakkında bağlamsal bir genel bakış ve indeksleyiciler için mevcut olan daha gelişmiş seçenekler hakkında bilgi sağlar. Ayrıntılı belgeler ve talimatlar [Graph Düğümü Github deposunda](https://github.com/graphprotocol/graph-node) bulunabilir. ## Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is the reference implementation for indexing Subgraphs on The Graph Network, connecting to blockchain clients, indexing subgraphs and making indexed data available to query. +[Graph Düğümü](https://github.com/graphprotocol/graph-node), Subgraph'ları Graph Ağı üzerinde indeksleme, blok zinciri istemcilerine bağlanma ve indekslenen verileri sorgulanabilir hale getirmek için referans uygulamasıdır. -Graph Node (and the whole indexer stack) can be run on bare metal, or in a cloud environment. This flexibility of the central indexing component is crucial to the robustness of The Graph Protocol. Similarly, Graph Node can be [built from source](https://github.com/graphprotocol/graph-node), or indexers can use one of the [provided Docker Images](https://hub.docker.com/r/graphprotocol/graph-node). +Graph Düğümü (ve tüm indeksleyici yığını), bare metal veya bir bulut ortamında çalıştırılabilir. Bu merkezi indeksleme bileşeninin esnekliği, Graph Protokolü'nün dayanıklılığı için önemlidir. Benzer şekilde, Graph Düğümü [kaynaktan oluşturulabilir](https://github.com/graphprotocol/graph-node) veya indeksleyiciler [sağlanan Docker Görüntülerinden](https://hub.docker.com/r/graphprotocol/graph-node) birini kullanabilir.
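For example, pulling the prebuilt image instead of compiling from source might look like this; a minimal sketch only — the `latest` tag is illustrative, and pinning a specific release is preferable in practice.

```sh
# Pull the published Graph Node image from Docker Hub
docker pull graphprotocol/graph-node:latest
```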
-### PostgreSQL database +### PostgreSQL veritabanı -The main store for the Graph Node, this is where subgraph data is stored, as well as metadata about subgraphs, and subgraph-agnostic network data such as the block cache, and eth_call cache. +Graph Düğümü'nün ana deposu, burada subgraph verileri yanı sıra subgraphlarla ilgili üst veriler ve blok önbelleği ve eth_call önbelleği gibi subgraphtan bağımsız ağ verileri saklanır. -### Network clients +### Ağ istemcileri -In order to index a network, Graph Node needs access to a network client via an EVM-compatible JSON-RPC API. This RPC may connect to a single client or it could be a more complex setup that load balances across multiple. +Bir ağı dizine eklemek için Graph Node'un EVM uyumlu bir JSON-RPC API aracılığıyla bir ağ istemcisine erişmesi gerekir. Bu RPC, tek bir istemciye bağlanabilir veya birden çok istemci arasında yük dengelemesi yapan daha karmaşık bir kurulum olabilir. -While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). +Bazı subgraph'ler yalnızca tam bir node gerektirebilirken, bazıları ek RPC işlevselliği gerektiren indeksleme özelliklerine sahip olabilir. Spesifik olarak, indekslemenin bir parçası olarak `eth_calls` yapan subgraph'ler, [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898)'yi destekleyen bir arşiv node'u gerektirir ve `callHandlers`'li subgraph'ler veya `call` filtreli `blockHandlers`, `trace_filter` desteği ([izleme modülü belgelerine buradan göz atın](https://openethereum.github.io/JSONRPC-trace-module)) gerektirir. -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). -### IPFS Nodes +### IPFS Düğümleri -Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network indexers do not need to host their own IPFS node. An IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +Subgraph dağıtım üst verilerini IPFS ağında depolanır. Graph düğümü, subgraph manifestini ve tüm bağlantılı dosyaları almak için subgraph dağıtımı sırasında öncelikle IPFS düğümüne erişir. Ağ indeksleyicilerinin kendi IPFS düğümlerini barındırmaları gerekmez. Ağ için bir IPFS düğümü https://ipfs.network.thegraph.com adresinde barındırılmaktadır. 
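Referring back to the RPC requirements above, a quick way to sanity-check a provider before pointing Graph Node at it is to issue the relevant JSON-RPC calls directly. The snippet below is only a sketch: the endpoint URL and block range are placeholders.

```sh
# Hypothetical capability check: request traces for a tiny block range.
# A "method not found" style error suggests that subgraphs with callHandlers or
# call-filtered blockHandlers (which rely on trace_filter) will not index here.
curl -s -X POST http://localhost:8545 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"trace_filter","params":[{"fromBlock":"0x100000","toBlock":"0x100001"}]}'
```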
-### Prometheus metrics server +### Prometheus metrik sunucusu -To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. +İzleme ve raporlama etkinleştirmek için Graph Düğümü, metrikleri bir Prometheus metrik sunucusuna opsiyonel olarak kaydedebilir. -### Getting started from source +### Kaynaktan başlama -#### Install prerequisites +#### Önkoşulları yükleyin - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Ubuntu kullanıcıları için Ek Gereksinimler** - Ubuntu üzerinde bir Graph Node'u çalıştırmak için birkaç ek paket gerekebilir. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Kurulum -1. Start a PostgreSQL database server +1. Bir PostgreSQL veritabanı sunucusu başlatın ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. [Graph Node](https://github.com/graphprotocol/graph-node) deposunu klonlayın ve `cargo build`'i çalıştırarak kaynağı oluşturun -3. Now that all the dependencies are setup, start the Graph Node: +3. Artık tüm herr şey hazır olduğuna göre, Graph node'unu başlatın: ```sh cargo run -p graph-node --release -- \ @@ -69,37 +69,37 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -### Getting started with Kubernetes +### Kubernetes'i kullanmaya başlarken -A complete Kubernetes example configuration can be found in the [indexer repository](https://github.com/graphprotocol/indexer/tree/main/k8s). +Tam Kubernetes örnek yapılandırması [indeksleyici Github deposunda](https://github.com/graphprotocol/indexer/tree/main/k8s) bulunabilir. -### Ports +### Portlar -When it is running Graph Node exposes the following ports: +Graph Düğümü çalışırken aşağıdaki portları açar: -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Port | Amaç | Routes | CLI Argümanı | Ortam Değişkeni | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | GraphQL HTTP sunucusu
    (subgraph sorguları için) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (subgraph abonelikleri için) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (dağıtımları yönetmek için) | / | --admin-port | - | +| 8030 | Subgraph indeksleme durumu API'ı | /graphql | --index-node-port | - | +| 8040 | Prometheus ölçümleri | /metrics | --metrics-port | - | -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. +> **Önemli**: Bağlantı noktalarını herkese açık olarak açarken dikkatli olun - **yönetim portları** kilitli tutulmalıdır. Bu, Graph Düğümü JSON-RPC uç noktasını içerir. -## Advanced Graph Node configuration +## Gelişmiş Graph Düğüm yapılandırması -At its simplest, Graph Node can be operated with a single instance of Graph Node, a single PostgreSQL database, an IPFS node, and the network clients as required by the subgraphs to be indexed. +En basit haliyle, Graph Düğümü tek bir Graph Düğüm örneği, bir PostgreSQL veritabanı, bir IPFS düğümü ve indekslenecek subgraphlar tarafından gerektirilen ağ istemcileri ile çalıştırılabilir. -This setup can be scaled horizontally, by adding multiple Graph Nodes, and multiple databases to support those Graph Nodes. Advanced users may want to take advantage of some of the horizontal scaling capabilities of Graph Node, as well as some of the more advanced configuration options, via the `config.toml` file and Graph Node's environment variables. +Bu yapı birden fazla Graph Düğümü ekleyerek ve bu Graph Düğümlerini desteklemek için birden fazla veritabanı ekleyerek yatay olarak ölçeklenebilir. Gelişmiş kullanıcılar,`config.toml` dosyası ve Graph Düğümü ortam değişkenleri aracılığıyla bazı yatay ölçekleme yeteneklerinden ve daha gelişmiş yapılandırma seçeneklerinden faydalanmak isteyebilirler. ### `config.toml` -A [TOML](https://toml.io/en/) configuration file can be used to set more complex configurations than those exposed in the CLI. The location of the file is passed with the --config command line switch. +[TOML](https://toml.io/en/) yapılandırma dosyası, CLI'de sunulanlardan daha karmaşık yapılandırmaları ayarlamak için kullanılabilir. Dosyanın konumu --config komut satırı anahtar kelimesiyle iletilir. -> When using a configuration file, it is not possible to use the options --postgres-url, --postgres-secondary-hosts, and --postgres-host-weights. +> Yapılandırma dosyası kullanırken --postgres-url, --postgres-secondary-hosts ve --postgres-host-weights seçeneklerinin kullanılması mümkün değildir. -A minimal `config.toml` file can be provided; the following file is equivalent to using the --postgres-url command line option: +Asgari bir `config.toml` dosyası sağlanabilir. Aşağıdaki dosya, --postgres-url komut satırı seçeneği kullanmakla eşdeğerdir: ```toml [store] @@ -110,19 +110,21 @@ connection="<.. postgres-url argument ..>" indexers = [ "<.. list of all indexing nodes ..>" ] ``` -Full documentation of `config.toml` can be found in the [Graph Node docs](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md). +`config.toml`'nin tam dökümantasyonu, [Graph Düğümü belgelerinde](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md) bulunabilir. -#### Multiple Graph Nodes +#### Birden Fazla Graph Düğümü -Graph Node indexing can scale horizontally, running multiple instances of Graph Node to split indexing and querying across different nodes. This can be done simply by running Graph Nodes configured with a different `node_id` on startup (e.g. 
in the Docker Compose file), which can then be used in the `config.toml` file to specify [dedicated query nodes](#dedicated-query-nodes), [block ingestors](#dedicated-block-ingestor), and splitting subgraphs across nodes with [deployment rules](#deployment-rules). +Graph Düğümü indeksleme farklı düğümler arasında indekslemeyi ve sorgulamayı ayırmak için birden fazla Graph Düğüm örneği çalıştırarak yatay olarak ölçeklenebilir. Bu, daha sonra `config.toml` dosyasında [özel sorgu düğümleri](#dedicated-query-nodes), -> Note that multiple Graph Nodes can all be configured to use the same database, which itself can be horizontally scaled via sharding. +blok alıcıları ve [dağıtım kuralları](#deployment-rules) ile düğümler arasında subgrapları bölmek için kullanılmak üzere, farklı bir `node_id` ile yapılandırılmış Graph Düğümü başlatarak yapılabilir (örneğin Docker Compose dosyasında). -#### Deployment rules +> Birden fazla Graph Düğümü, aynı veritabanını kullanacak şekilde yapılandırılabilir ve veritabanı sharding kullanılarak yatay olarak ölçeklenebilir. -Given multiple Graph Nodes, it is necessary to manage deployment of new subgraphs so that the same subgraph isn't being indexed by two different nodes, which would lead to collisions. This can be done by using deployment rules, which can also specify which `shard` a subgraph's data should be stored in, if database sharding is being used. Deployment rules can match on the subgraph name and the network that the deployment is indexing in order to make a decision. +#### Dağıtım kuralları -Example deployment rule configuration: +Birden fazla Graph Düğümü verildiğinde, aynı subgraph'ın çarpışmalara yol açacak şekilde iki farklı düğüm tarafından indekslenmesinin önüne geçmek için yeni subgraphlar'ın dağıtımını yönetmek gereklidir. Bu, veritabanı sharding kullanılıyorsa bir subgraph'ın verilerinin hangi `shard`'da saklanması gerektiğini de belirtebilen dağıtım kurallarını kullanılarak yapılabilir. Dağıtım kuralları, karar vermek için subgraph adı ve dağıtımın indekslediği ağ ile eşleşebilir. + +Örnek dağıtım kuralı yapılandırması: ```toml [deployment] @@ -150,51 +152,51 @@ indexers = [ ] ``` -Read more about deployment rules [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment). +Dağıtım kuralları hakkında daha fazlasını [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#controlling-deployment) okuyun. -#### Dedicated query nodes +#### Özelleştirilmiş sorgu düğümleri -Nodes can be configured to explicitly be query nodes by including the following in the configuration file: +Düğümler, yapılandırma dosyasına aşağıdakini dahil ederek açıkça sorgu düğümleri olarak yapılandırılabilir: ```toml [general] query = "" ``` -Any node whose --node-id matches the regular expression will be set up to only respond to queries. +--node-id'si düzenli ifade ile eşleşen herhangi bir düğüm, sadece sorgulara yanıt vermek üzere ayarlanacaktır. -#### Database scaling via sharding +#### Sharding ile veritabanı ölçeklendirme -For most use cases, a single Postgres database is sufficient to support a graph-node instance. When a graph-node instance outgrows a single Postgres database, it is possible to split the storage of graph-node's data across multiple Postgres databases. All databases together form the store of the graph-node instance. Each individual database is called a shard. +Çoğu kullanım durumu için, tek bir Postgres veritabanı bir graph-düğümü örneğini desteklemek için yeterlidir. 
Bir graph-düğümü örneği tek bir Postgres veritabanından daha büyük hale geldiğinde, bu graph düğümü verilerinin depolanmasını birden fazla Postgres veritabanına yaymak mümkündür. Tüm veritabanları birlikte, graph-düğümü örneğinin deposunu oluşturur. Her tekil veritabanına bir shard denir. -Shards can be used to split subgraph deployments across multiple databases, and can also be used to use replicas to spread query load across databases. This includes configuring the number of available database connections each `graph-node` should keep in its connection pool for each database, which becomes increasingly important as more subgraphs are being indexed. +Shard'lar, subgraph dağıtımlarını birden çok veritabanına bölmek için kullanılabilir ve sorgu yükünü veritabanları arasında yaymak için replikaların kullanılmasına da izin verilebilir. Bu, her `graph-düğümü`'nün her veritabanı için bağlantı havuzunda ne kadar mevcut veritabanı bağlantısı olduğunu yapılandırmayı içerir ve daha fazla subgraph'ın indekslendiği durumlarda önem kazanır. -Sharding becomes useful when your existing database can't keep up with the load that Graph Node puts on it, and when it's not possible to increase the database size anymore. +Sharding, Graph Düğümü'nün üzerine koyduğu yükü mevcut veritabanınıza koyamadığınızda ve veritabanı boyutunu artıramayacağınızda faydalı hale gelir. -> It is generally better make a single database as big as possible, before starting with shards. One exception is where query traffic is split very unevenly between subgraphs; in those situations it can help dramatically if the high-volume subgraphs are kept in one shard and everything else in another because that setup makes it more likely that the data for the high-volume subgraphs stays in the db-internal cache and doesn't get replaced by data that's not needed as much from low-volume subgraphs. +> Genellikle, shard'larla başlamadan önce tek bir veritabanını mümkün olduğunca büyük hale getirmek daha mantıklıdır. Tek bir istisna, sorgu trafiği subgraphlar arasında çokta eşit olmayan bir şekilde bölünmesidir. Bu durumda, yüksek-hacimli subgraphlar'ın bir shard'da tutulması ve geriye kalan her şeyin diğer bir shard'da tutulması, yüksek hacimli subgraphlar için verinin veritabanı dahili önbellekte kalması ve düşük hacimli subgraphlar'daki daha az ihtiyaç duyulan veriler tarafından değiştirilmemesi daha olası olduğu için çok yardımcı olabilir. -In terms of configuring connections, start with max_connections in postgresql.conf set to 400 (or maybe even 200) and look at the store_connection_wait_time_ms and store_connection_checkout_count Prometheus metrics. Noticeable wait times (anything above 5ms) is an indication that there are too few connections available; high wait times there will also be caused by the database being very busy (like high CPU load). However if the database seems otherwise stable, high wait times indicate a need to increase the number of connections. In the configuration, how many connections each graph-node instance can use is an upper limit, and Graph Node will not keep connections open if it doesn't need them. +Bağlantı yapılandırması açısından postgresql.conf'da max_connections değerinin 400 (veya belki de 200) olarak ayarlanması ve store_connection_wait_time_ms ve store_connection_checkout_count Prometheus metriklerine bakılması önerilir. 
Belirgin bekleme süreleri (5 milisaniye'nin üzerinde herhangi bir değer) yetersiz bağlantıların mevcut olduğunun bir işaretidir; yüksek bekleme süreleri veritabanının çok yoğun olması gibi sebeplerden de kaynaklanabilir. Ancak, veritabanı genel olarak stabil görünüyorsa, yüksek bekleme süreleri bağlantı sayısını arttırma ihtiyacını belirtir. Yapılandırmada her graph-düğümü örneğinin ne kadar bağlantı kullanabileceği bir üst sınırdır ve Graph Düğümü bunları gereksiz bulmadığı sürece açık tutmaz. -Read more about store configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases). +Depolama yapılandırması hakkında daha fazla bilgi için [burayı](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-multiple-databases) okuyabilirsiniz. -#### Dedicated block ingestion +#### Özelleştirilmiş blok alınması -If there are multiple nodes configured, it will be necessary to specify one node which is responsible for ingestion of new blocks, so that all configured index nodes aren't polling the chain head. This is done as part of the `chains` namespace, specifying the `node_id` to be used for block ingestion: +Birden fazla düğüm yapılandırılmışsa yeni blokları işleme sorumluluğu olan bir düğüm belirtmek gerekecektir, böylece yapılandırılmış tüm dizin düğümleri zincir başını sorgulamaz. Bu, zincir (`chains`) ad alanının bir parçası olarak yapılır ve blok yüklemek için kullanılacak düğüm kimliği(`node_id`) belirtilir: ```toml [chains] ingestor = "block_ingestor_node" ``` -#### Supporting multiple networks +#### Birden fazla ağın desteklenmesi -The Graph Protocol is increasing the number of networks supported for indexing rewards, and there exist many subgraphs indexing unsupported networks which an indexer would like to process. The `config.toml` file allows for expressive and flexible configuration of: +Graph Protokolü, indeksleme ödülleri için desteklenen ağların sayısını arttırıyor ve bir indekleyicinin işlemek isteyebileceği desteklenmeyen ağları indeksleyen birçok subgraph mevcut. c`config.toml` dosyası şunlar gibi anlamlı ve esnek yapılandırmaları destekler: -- Multiple networks -- Multiple providers per network (this can allow splitting of load across providers, and can also allow for configuration of full nodes as well as archive nodes, with Graph Node preferring cheaper providers if a given workload allows). -- Additional provider details, such as features, authentication and the type of provider (for experimental Firehose support) +- Birden fazla ağ +- Ağ başına birden fazla sağlayıcı (bu, yükü sağlayıcılar arasında bölme ve bir Graph Düğümü'nün deneyimsel Firehose desteği gibi daha ucuz sağlayıcıları tercih etmesi ile tam düğümlerin yanı sıra arşiv düğümlerinin yapılandırılmasına da izin verebilir). +- Özellikler, kimlik doğrulama ve sağlayıcı türü gibi ek sağlayıcı detayları (deneysel Firehose desteği için) -The `[chains]` section controls the ethereum providers that graph-node connects to, and where blocks and other metadata for each chain are stored. The following example configures two chains, mainnet and kovan, where blocks for mainnet are stored in the vip shard and blocks for kovan are stored in the primary shard. The mainnet chain can use two different providers, whereas kovan only has one provider. +`[chains]` bölümü, graph-düğümü'nün bağlandığı ethereum sağlayıcılarını ve her zincir için blokların ve diğer üst verilerin nerede depolandığını kontrol eder. 
Aşağıdaki örnek, mainnet için blokların vip shard'da depolandığı ve kovan için blokların primary shard'da depolandığı olmak üzere iki zinciri, mainnet ve kovan'ı yapılandırır. Mainnet zinciri iki farklı sağlayıcı kullanabilirken, kovan yalnızca bir sağlayıcıya sahiptir. ```toml [chains] @@ -210,136 +212,136 @@ shard = "primary" provider = [ { label = "kovan", url = "http://..", features = [] } ] ``` -Read more about provider configuration [here](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers). +Sağlayıcı yapılandırması hakkında daha fazla bilgi için [burayı](https://github.com/graphprotocol/graph-node/blob/master/docs/config.md#configuring-ethereum-providers) okuyabilirsiniz. -### Environment variables +### Ortam değişkenleri -Graph Node supports a range of environment variables which can enable features, or change Graph Node behaviour. These are documented [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md). +Graph Düğümü, özellikleri etkinleştirebilen veya Graph Düğümü davranışını değiştirebilen bir dizi çevre değişkeni destekler. Bunlar [burada](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md) belgelenmiştir. -### Continuous deployment +### Sürekli dağıtım -Users who are operating a scaled indexing setup with advanced configuration may benefit from managing their Graph Nodes with Kubernetes. +Gelişmiş yapılandırmaya sahip ölçeklendirilmiş bir dizinleme kurulumu işleten kullanıcılar, Graph Düğümler'ini Kubernetes ile yönetmekten faydalanabilirler. -- The indexer repository has an [example Kubernetes reference](https://github.com/graphprotocol/indexer/tree/main/k8s) -- [Launchpad](https://docs.graphops.xyz/launchpad/intro) is a toolkit for running a Graph Protocol Indexer on Kubernetes maintained by GraphOps. It provides a set of Helm charts and a CLI to manage a Graph Node deployment. +- İndeksleyici github deposunda bir [Kubernetes referansı örneği](https://github.com/graphprotocol/indexer/tree/main/k8s) bulunmaktadır +- [Launchpad](https://docs.graphops.xyz/launchpad/intro), GraphOps tarafından yönetilen Kubernetes üzerinde Graph Protokol indeksleyicisi çalıştırmak için kullanılan bir araç setidir. Graph Düğümü dağıtımını yönetmek için bir dizi Helm şeması ve bir CLI sağlar. -### Managing Graph Node +### Graph Düğümü Yönetimi -Given a running Graph Node (or Graph Nodes!), the challenge is then to manage deployed subgraphs across those nodes. Graph Node surfaces a range of tools to help with managing subgraphs. +Çalışan bir Graph Düğümüne (veya Graph Düğümlerine) sahip olunduktan sonra, dağıtılan subgraplar'ın bu düğümler üzerinde yönetilmesi zorluğu ortaya çıkar. Subgraphlar'ı yönetmeye yardımcı olmak için Graph Düğümü, bir dizi araç sunar. -#### Logging +#### Kayıt tutma -Graph Node's logs can provide useful information for debugging and optimisation of Graph Node and specific subgraphs. Graph Node supports different log levels via the `GRAPH_LOG` environment variable, with the following levels: error, warn, info, debug or trace. +Graph Düğümü'nün kayıtları, Graph Düğümü ve belirli subgraphlar'ın hata ayıklanması ve optimizasyonu için faydalı bilgiler sağlayabilir. Graph Düğümü, `GRAPH_LOG` ortam değişkeni aracılığıyla farklı kayıt seviyelerini destekler ve şu seviyeleri içerir: error, warn, info, debug veya trace. 
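As a small illustration (the exact invocation depends on how Graph Node is started; the flags below mirror the earlier setup example and the config path is a placeholder), the log level can be raised for a single run through the environment:

```sh
# Example only: run Graph Node with verbose logging enabled via GRAPH_LOG.
# GRAPH_LOG accepts the levels listed above: error, warn, info, debug or trace.
GRAPH_LOG=debug cargo run -p graph-node --release -- \
  --config config.toml \
  --ipfs https://ipfs.network.thegraph.com
```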
-In addition setting `GRAPH_LOG_QUERY_TIMING` to `gql` provides more details about how GraphQL queries are running (though this will generate a large volume of logs). +Ek olarak, `GRAPH_LOG_QUERY_TIMING` `gql` olarak ayarlanması GraphQL sorgularının nasıl çalıştığı hakkında daha fazla ayrıntı sağlar (ancak bu, büyük bir kayıt hacmi oluşturacaktır). -#### Monitoring & alerting +#### Görüntüleme & uyarma -Graph Node provides the metrics via Prometheus endpoint on 8040 port by default. Grafana can then be used to visualise these metrics. +Graph Düğümü, varsayılan olarak 8040 port'undaki Prometheus uç noktası aracılığıyla metrikleri sağlar. Ardından Grafana, bu metrikleri görselleştirmek için kullanılabilir. -The indexer repository provides an [example Grafana configuration](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml). +İndeksleyici github deposu [Grafana yapılandırmasına bir örnek](https://github.com/graphprotocol/indexer/blob/main/k8s/base/grafana.yaml) sağlar. #### Graphman -`graphman` is a maintenance tool for Graph Node, helping with diagnosis and resolution of different day-to-day and exceptional tasks. +`graphman`, Graph Düğümü'nün bakım aracıdır ve farklı günlük görevlerinin teşhis ve çözümüne yardımcı olur. -The graphman command is included in the official containers, and you can docker exec into your graph-node container to run it. It requires a `config.toml` file. +Graphman komutu, resmi konteynerlara dahil edilmiştir ve graph-düğümü konteynerınıza docker exec ile girerek çalıştırabilirsiniz. Bu bir `config.toml` dosyasına ihtiyaç duyar. -Full documentation of `graphman` commands is available in the Graph Node repository. See \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) in the Graph Node `/docs` +`graphman` komutlarının tam belgeleri Graph Düğümü github deposunda mevcuttur. Graph Düğümü `/docs`'da bulunan \[/docs/graphman.md\] (https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md) bağlantısına bakın -### Working with subgraphs +### Subgraphlarla çalışma -#### Indexing status API +#### İndeksleme durum API'si -Available on port 8030/graphql by default, the indexing status API exposes a range of methods for checking indexing status for different subgraphs, checking proofs of indexing, inspecting subgraph features and more. +Varsayılan olarak 8030/graphql port'unda mevcut olan indeksleme durumu API'si, farklı subgraphlar için indeksleme durumunu ve ispatlarını kontrol etmek, subgraph özelliklerini incelemek ve daha fazlasını yapmak için çeşitli yöntemler sunar. -The full schema is available [here](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql). +Tam şema [burada](https://github.com/graphprotocol/graph-node/blob/master/server/index-node/src/schema.graphql) mevcut. -#### Indexing performance +#### Endeksleme performansı -There are three separate parts of the indexing process: +İndeksleme sürecinin üç ayrı parçası bulunmaktadır: -- Fetching events of interest from the provider -- Processing events in order with the appropriate handlers (this can involve calling the chain for state, and fetching data from the store) -- Writing the resulting data to the store +- Sağlayıcıdan ilgili olayları getirme +- Uygun işleyicilerle sırayla olayları işleme (bu, durumu sormak için zincire çağrı yapmayı ve depodan veri getirmeyi içerebilir) +- Elde edilen verileri depoya yazma -These stages are pipelined (i.e. 
they can be executed in parallel), but they are dependent on one another. Where subgraphs are slow to index, the underlying cause will depend on the specific subgraph. +Bu aşamalar boru hattında (yani eşzamanlı olarak yürütülebilir), ancak birbirlerine bağımlıdırlar. Subgraphlar'ın indekslenmesi yavaş olduğunda, bunun altındaki neden spesifik subgraphlar'a bağlı olacaktır. -Common causes of indexing slowness: +İndeksleme yavaşlığının yaygın nedenleri: -- Time taken to find relevant events from the chain (call handlers in particular can be slow, given the reliance on `trace_filter`) -- Making large numbers of `eth_calls` as part of handlers -- A large amount of store interaction during execution -- A large amount of data to save to the store -- A large number of events to process -- Slow database connection time, for crowded nodes -- The provider itself falling behind the chain head -- Slowness in fetching new receipts at the chain head from the provider +- Zincirdeki ilgili olayları bulmak için geçen süre (özellikle çağrı yönlendiricileri, `trace_filter`'a bağımlı oldukları için yavaş olabilir) +- İşleyicilerin bir parçası olarak çok fazla `eth_calls` yapmak +- Yürütme sırasında büyük miktarda depolama etkileşimi +- Depoya kaydedilecek büyük miktarda veri +- İşlenecek büyük miktarda olay +- Kalabalık düğümler için yavaş veritabanı bağlantı süresi +- Sağlayıcının zincir başından geriye düşmesi +- Sağlayıcıdan zincir başındaki yeni makbuzların alınmasındaki yavaşlık -Subgraph indexing metrics can help diagnose the root cause of indexing slowness. In some cases, the problem lies with the subgraph itself, but in others, improved network providers, reduced database contention and other configuration improvements can markedly improve indexing performance. +Subgraph indeksleme metrikleri, indeksleme yavaşlığının temel nedenini teşhis etmede yardımcı olabilir. Bazı durumlarda, sorun subgraph'ın kendisiyle ilgilidir, ancak diğer durumlarda, geliştirilmiş ağ sağlayıcıları, azaltılmış veritabanı çekişmesi ve diğer yapılandırma iyileştirmeleri indeksleme performansını belirgin şekilde artırabilir. -#### Failed subgraphs +#### Başarısıız subgraphlar -During indexing subgraphs might fail, if they encounter data that is unexpected, some component not working as expected, or if there is some bug in the event handlers or configuration. There are two general types of failure: +İndekslemesi sırasında subgraphlar beklenmedik veri, beklendiği gibi çalışmayan bir bileşen veya olay işleyicilerinde veya yapılandırmada bir hata olması durumunda başarısız olabilir. İki genel başarısızlık türü mevcuttur: -- Deterministic failures: these are failures which will not be resolved with retries -- Non-deterministic failures: these might be down to issues with the provider, or some unexpected Graph Node error. When a non-deterministic failure occurs, Graph Node will retry the failing handlers, backing off over time. +- Deterministik başarısızlıklar: Bu, yeniden denemelerle çözülmeyecek hatalardır +- Deterministik olmayan başarısızlıklar: Bunlar, sağlayıcının sorunları veya beklenmedik bir Graph Düğüm hatası gibi nedenlere bağlı olabilir. Deterministik olmayan bir başarısızlık meydana geldiğinde Graph Düğümü, başarısız olan işleyicileri yeniden deneyecek ve zamanla geri çekilecektir. -In some cases a failure might be resolvable by the indexer (for example if the error is a result of not having the right kind of provider, adding the required provider will allow indexing to continue). 
However in others, a change in the subgraph code is required. +Bazı durumlarda, başarısızlık indeksleyici tarafından çözülebilir (örneğin, hatanın doğru türde sağlayıcıya sahip olmamasından kaynaklanması durumunda, gerekli sağlayıcı eklenirse indeksleme devam ettirilebilir). Ancak diğer durumlarda, subgraph kodunda bir değişiklik gereklidir. -> Deterministic failures are considered "final", with a Proof of Indexing generated for the failing block, while non-determinstic failures are not, as the subgraph may manage to "unfail" and continue indexing. In some cases, the non-deterministic label is incorrect, and the subgraph will never overcome the error; such failures should be reported as issues on the Graph Node repository. +> Belirleyici başarısızlıklar, başarısız blok için oluşturulan İndeksleme Kanıtı ile "final" olarak kabul edilirken, deterministik olmayan başarısızlıklar subgraph'ın "unfail"i idare edip indekslememye devam edebileceğinden "final" olarak kabul edilmez. Bazı durumlarda, deterministik olmayan etiketi yanlış olabilir ve subgraph hatayı asla aşamayabilir. Bu tür başarısızlıklar, Graph Düğümü github deposunda bir sorun olarak bildirilmelidir. -#### Block and call cache +#### Blok ve çağrı önbelleği -Graph Node caches certain data in the store in order to save refetching from the provider. Blocks are cached, as are the results of `eth_calls` (the latter being cached as of a specific block). This caching can dramatically increase indexing speed during "resyncing" of a slightly altered subgraph. +Graph Düğümü, sağlayıcıdan tekrar alma işlemini kaydetmek için depoda belirli verileri önbelleğe alır. Bloklar ve `eth_calls` sonuçları önbelleğe alınır (bu sonuncusu belirli bir bloktan itibaren önbelleğe alınır). Bu önbellekleme, azca değiştirilmiş bir subgraph'ın "yeniden senkronizasyonu" sırasında indeksleme hızını büyük ölçüde artırabilir. -However, in some instances, if an Ethereum node has provided incorrect data for some period, that can make its way into the cache, leading to incorrect data or failed subgraphs. In this case indexers can use `graphman` to clear the poisoned cache, and then rewind the affected subgraphs, which will then fetch fresh data from the (hopefully) healthy provider. +Ancak bazı örneklerde, Ethereum düğümü bir süre boyunca yanlış veri sağlamışsa, bu önbelleğe girebilir ve yanlış verilere veya subgraphlar'ın başarısız olmasına neden olabilir. Bu durumda, indeksleyiciler zehirlenmiş önbelleği temizlemek için `graphman` kullanabilir ve etkilenen subgraphlar'ı geri sarabilir, böylece (umarız) sağlıklı sağlayıcıdan temiz verileri alabilirler. -If a block cache inconsistency is suspected, such as a tx receipt missing event: +Örneğin tx makbuzu etkinlik eksikliği gibi bir blok önbellek tutarsızlığı şüphesi varsa: -1. `graphman chain list` to find the chain name. -2. `graphman chain check-blocks by-number ` will check if the cached block matches the provider, and deletes the block from the cache if it doesn’t. - 1. If there is a difference, it may be safer to truncate the whole cache with `graphman chain truncate `. - 2. If the block matches the provider, then the issue can be debugged directly against the provider. +1. zincir ismini bulmak için `graphman chain list`. +2. `graphman chain check-blocks by-number `, önbelleğe alınan bloğun sağlayıcıyla eşleşip eşleşmediğini kontrol edecek ve eşleşmiyorsa bloğu önbellekten silecek. + 1. Bir fark varsa, tüm önbelleği `graphman chain truncate ` ile kesmek daha güvenli olabilir. + 2. 
Blok sağlayıcıyla eşleşirse, sorun doğrudan sağlayıcıya karşı hata ayıklanabilir. -#### Querying issues and errors +#### Sorgulama sorunları ve hataları -Once a subgraph has been indexed, indexers can expect to serve queries via the subgraph's dedicated query endpoint. If the indexer is hoping to serve significant query volume, a dedicated query node is recommended, and in case of very high query volumes, indexers may want to configure replica shards so that queries don't impact the indexing process. +Bir subgraph indekslendikten sonra, indeksleyiciler subgraph'ın ayrılmış sorgu son noktası aracılığıyla sorguları sunmayı bekleyebilirler. İndeksleyiciler önemli sorgu hacmi sunmayı umuyorlarsa, bunun için ayrılmış bir sorgu düğümü önerilir ve çok yüksek sorgu hacimleri durumunda indeksleyiciler sorguların indeksleme sürecini etkilememesi için replika shardlar yapılandırmak isteyebilirler. -However, even with a dedicated query node and replicas, certain queries can take a long time to execute, and in some cases increase memory usage and negatively impact the query time for other users. +Bununla birlikte, özel bir sorgu düğümü ve replikalarda bile, belirli sorguların yürütülmesi uzun zaman alabilir, bazı durumlarda bellek kullanımını artırabilir ve diğer kullanıcılar için sorgu süresini olumsuz etkileyebilir. -There is not one "silver bullet", but a range of tools for preventing, diagnosing and dealing with slow queries. +Tek bir "sihirli çözüm" yoktur, ancak yavaş sorguların önlenmesi, teşhisi ve işlenmesi için bir dizi araç bulunmaktadır. -##### Query caching +##### Sorgu önbellekleme -Graph Node caches GraphQL queries by default, which can significantly reduce database load. This can be further configured with the `GRAPH_QUERY_CACHE_BLOCKS` and `GRAPH_QUERY_CACHE_MAX_MEM` settings - read more [here](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching). +Graph Düğümü, varsayılan olarak GraphQL sorgularını önbelleğe alarak veritabanı yükünü önemli ölçüde azaltabilir. Bu, `GRAPH_QUERY_CACHE_BLOCKS` ve `GRAPH_QUERY_CACHE_MAX_MEM` ayarları ile daha da yapılandırılabilir - [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/environment-variables.md#graphql-caching) daha fazla bilgi edinin. -##### Analysing queries +##### Sorguların analizi -Problematic queries most often surface in one of two ways. In some cases, users themselves report that a given query is slow. In that case the challenge is to diagnose the reason for the slowness - whether it is a general issue, or specific to that subgraph or query. And then of course to resolve it, if possible. +Sorunlu sorgular genellikle iki şekilde ortaya çıkar. Bazı durumlarda, kullanıcılar kendileri belirli bir sorgunun yavaş olduğunu bildirirler. Bu durumda zorluk, yavaşlığın nedenini teşhis etmektir - genel bir sorun mu, yoksa subgraph'a veya sorguya özgü mü olduğunu belirlemek ve tabii ki mümkünse sonra çözmek olacaktır. -In other cases, the trigger might be high memory usage on a query node, in which case the challenge is first to identify the query causing the issue. +Diğer durumlarda, tetikleyici sorgu düğümündee yüksek bellek kullanımı olabilir, bu durumda zorluk ilk olarak soruna neden olan sorguyu belirlemektir. -Indexers can use [qlog](https://github.com/graphprotocol/qlog/) to process and summarize Graph Node's query logs. `GRAPH_LOG_QUERY_TIMING` can also be enabled to help identify and debug slow queries. 
+İndeksleyiciler [qlog](https://github.com/graphprotocol/qlog/) kullanarak Graph Düğümü'nün sorgu kayıtlarını işleyebilir ve özetleyebilir. Ayrıca `GRAPH_LOG_QUERY_TIMING` yavaş sorguların tanımlamak ve ayıklamak için etkinleştirilebilir. -Given a slow query, indexers have a few options. Of course they can alter their cost model, to significantly increase the cost of sending the problematic query. This may result in a reduction in the frequency of that query. However this often doesn't resolve the root cause of the issue. +Yavaş bir sorgu verildiğinde, indeksleyicilerin birkaç seçeneği vardır. Tabii ki, sorunlu sorgunun gönderilme maliyetini önemli ölçüde artırmak için maliyet modelini değiştirebilirler. Bu, o sorgunun sıklığında azalmaya neden olabilir. Ancak, genellikle sorunun temek nedenini çözmez. -##### Account-like optimisation +##### Hesabımsı optimizasyon -Database tables that store entities seem to generally come in two varieties: 'transaction-like', where entities, once created, are never updated, i.e., they store something akin to a list of financial transactions, and 'account-like' where entities are updated very often, i.e., they store something like financial accounts that get modified every time a transaction is recorded. Account-like tables are characterized by the fact that they contain a large number of entity versions, but relatively few distinct entities. Often, in such tables the number of distinct entities is 1% of the total number of rows (entity versions) +Varlıkları depolayan veritabanı tablolarının genellikle iki çeşit olduğu görünmektedir: oluşturulduktan sonra hiçbir zaman güncellenmeyen mesela finansal işlemler listesine benzer şeyler saklayan olan 'işlemimsi' ve varlıkların çok sık güncellendiği, mesela her işlem kaydedildiğinde değiştirilen finansal hesaplar gibi şeyler saklayan 'hesabımsı'. Hesabımsı tablolar, birçok varlık sürümünü içermelerine rağmen, nispeten az sayıda farklı varlığa sahip olmasıyla bilinir. Çoğu durumda, böyle bir tabloda farklı varlık sayısı, toplam satır (varlık sürümleri) sayısının %1'ine eşittir -For account-like tables, `graph-node` can generate queries that take advantage of details of how Postgres ends up storing data with such a high rate of change, namely that all of the versions for recent blocks are in a small subsection of the overall storage for such a table. +Hesabımsı tablolar için, `graph-node`, Postgres'in verileri nasıl bu kadar yüksek bir değişim oranıyla depolamaya başladığına dair ayrıntılardan yararlanan sorgular oluşturabilir, yani böyle bir tablo için son blokların tüm sürümleri genel depolamanın küçük bir alt bölümünde yer alır. -The command `graphman stats show shows, for each entity type/table in a deployment, how many distinct entities, and how many entity versions each table contains. That data is based on Postgres-internal estimates, and is therefore necessarily imprecise, and can be off by an order of magnitude. A `-1` in the `entities` column means that Postgres believes that all rows contain a distinct entity. +`graphman stats show komutu, bir dağıtımdaki her varlık türü/tablosu için kaç farklı varlık ve her tablonun kaç varlık sürümü içerdiğini gösterir. Bu veriler Postgres dahili tahminlerine dayanır ve bu nedenle doğruluğu kesin olmayabilir ve bir büyüklük sırasına nazaran yanıltıcı olabilir. `entities` sütununda `-1`, Postgres'un tüm satırların farklı bir varlık içerdiğine inandığını gösterir. 
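As a concrete illustration of the command just described (a sketch only: the container name, config path, and the `sgd1` deployment namespace are placeholders, and the way the config file is passed may differ in your setup):

```sh
# Hypothetical invocation: print entity / entity-version statistics for one deployment.
# graphman needs the same config.toml the node uses, passed here via --config.
docker exec -it graph-node graphman --config /etc/graph-node/config.toml stats show sgd1
```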
-In general, tables where the number of distinct entities are less than 1% of the total number of rows/entity versions are good candidates for the account-like optimization. When the output of `graphman stats show` indicates that a table might benefit from this optimization, running `graphman stats show
    ` will perform a full count of the table - that can be slow, but gives a precise measure of the ratio of distinct entities to overall entity versions. +Genel olarak, farklı varlıkların sayısı toplam satır/varlık sürümü sayısının %1'inden az olan tablolar, hesabımsı optimizasyon için iyi adaylardır. `graphman stats show` çıktısı, bir tablonun bu optimizasyondan faydalanabileceğini gösteriyorsa, `graphman stats show
    `'ı çalıştırmak, tablonun tam sayımını gerçekleştirir - bu yavaş olabilir, ancak farklı varlıkların toplam varlık sürümlerine oranının kesin bir ölçüsünü verir. -Once a table has been determined to be account-like, running `graphman stats account-like .
    ` will turn on the account-like optimization for queries against that table. The optimization can be turned off again with `graphman stats account-like --clear .
    ` It takes up to 5 minutes for query nodes to notice that the optimization has been turned on or off. After turning the optimization on, it is necessary to verify that the change does not in fact make queries slower for that table. If you have configured Grafana to monitor Postgres, slow queries would show up in `pg_stat_activity`in large numbers, taking several seconds. In that case, the optimization needs to be turned off again. +Bir tablonun hesabımsı olduğu belirlendikten sonra `graphman stats account-like .
    `'ı çalıştırmak, bu tabloya karşı sorgular için hesabımsı optimizasyonu etkinleştirecektir. Optimizasyon, `graphman stats account-like --clear .
    ` ile tekrar kapatılabilir. Optimizasyonun etkinleştirildiğinin veya kapatıldığının sorgu düğümleri tarafından fark edilmesi için en fazla 5 dakika beklemek gereklidir. Optimizasyonu açtıktan sonra, değişikliğin söz konusu tablo için sorguları daha yavaş hale getirmediğinden emin olmak için doğrulama yapılması gerekir. Postgres'i izlemek için Grafana'yı yapılandırdıysanız, yavaş sorgular `pg_stat_activity` bölümünde büyük sayılarda ve birkaç saniye süren işlemler şeklinde görünecektir. Bu durumda, optimizasyon tekrar kapatılmalıdır. -For Uniswap-like subgraphs, the `pair` and `token` tables are prime candidates for this optimization, and can have a dramatic effect on database load. +Uniswapımsı subgraplar için, çift (`pair`) ve `token` tabloları bu optimizasyon için en uygun adaylardır ve veritabanı yükü üzerinde etkili bir etkiye sahip olabilirler. -#### Removing subgraphs +#### Subgraphları kaldırma -> This is new functionality, which will be available in Graph Node 0.29.x +> Bu, Graph Node 0.29.x sürümünde kullanılabilir olan yeni bir fonksiyonelliktir -At some point an indexer might want to remove a given subgraph. This can be easily done via `graphman drop`, which deletes a deployment and all it's indexed data. The deployment can be specified as either a subgraph name, an IPFS hash `Qm..`, or the database namespace `sgdNNN`. Further documentation is available [here](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop). +Bir noktada indeksleyici, belirli bir subgraph'ı kaldırmak isteyebilir. Bu, tüm indekslenmiş verileri ve bir dağıtımı silen `graphman drop` komutuyla kolayca gerçekleştirilebilir. Dağıtım, subgraph adı, bir IPFS hash `Qm..` veya veritabanı ad alanı `sgdNNN` olarak belirtilebilir. Daha fazla belgeye [buradan](https://github.com/graphprotocol/graph-node/blob/master/docs/graphman.md#-drop) erişilebilir. diff --git a/website/pages/tr/publishing/publishing-a-subgraph.mdx b/website/pages/tr/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..b217d55b13bd 100644 --- a/website/pages/tr/publishing/publishing-a-subgraph.mdx +++ b/website/pages/tr/publishing/publishing-a-subgraph.mdx @@ -1,33 +1,33 @@ --- -title: Publishing a Subgraph to the Decentralized Network +title: Merkeziyetsiz Ağa Bir Subgraph Yayınlamak --- -Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deploying-a-subgraph-to-studio), you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. +Subgraph'iniz [Subgraph Studio'ya deploy edildikten](/deploying/deploying-a-subgraph-to-studio) sonra, onu test edip üretime sokmaya hazırsınız, ardından merkeziyetsiz ağda yayınlayabilirsiniz. -Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. +Merkeziyetsiz ağda bir subgraph yayınlamak, [küratörlerin](/network/curating) ona sinyal göndermeye ve [indeksleyicilerin](/network/indexing) onu indekslemeye başlamasını sağlar. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + -You can find the list of the supported networks [Here](/developing/supported-networks). +Desteklenen ağların listesini [burada](/developing/supported-networks) bulabilirsiniz. 
-## Publishing a subgraph +## Bir subgraph yayınlamak -Subgraphs can be published to the decentralized network directly from the Subgraph Studio dashboard by clicking on the **Publish** button. Once a subgraph is published, it will be available to view in the [Graph Explorer](https://thegraph.com/explorer/). +Subgraph'ler, **yayınla** düğmesi tıklanarak doğrudan Subgraph Studio panosundan merkeziyetsiz ağda yayınlanabilir. Bir subgraph yayınlandıktan sonra, [Graph Gezgini](https://thegraph.com/explorer/)'nde görüntülenebilecektir. -- Subgraphs can be published to Goerli, Arbitrum goerli, Arbitrum One, or Ethereum mainnet. +- Subgraph'ler Goerli, Arbitrum Goerli, Arbitrum One veya Ethereum mainnet'te yayınlanabilir. -- Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). +- Subgraph'in yayınlandığı ağdan bağımsız olarak, herhangi bir [desteklenen ağlar](/developing/supported-networks) üzerindeki verileri indeksleyebilir. -- When publishing a new version for an existing subgraph the same rules apply as above. +- Mevcut bir subgraph için yeni bir sürüm yayınlarken, yukarıdakiyle aynı kurallar geçerlidir. -## Curating your subgraph +## Subgraph'ınızın küratörlüğünü yapma -> It is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. +> 10.000 GRT ile kendi subgraph'inize sinyal göndermeniz, mümkün olan en kısa sürede indekslenmesini ve sorgulanmaya hazır olmasını sağlamanız önerilir. -Subgraph Studio enables you to be the first to curate your subgraph by adding GRT to your subgraph's curation pool in the same transaction. When publishing your subgraph, make sure to check the button that says, "Be the first to signal on this subgraph." +Subgraph Studio, aynı işlemde subgraph kürasyon havuzuna GRT ekleyerek subgraph'inize ilk sinyal gönderen kişi olmanızı sağlar. Subgraph'inizi yayınlarken, "Bu subgraph'te ilk sinyal veren siz olun" yazan düğmeyi kontrol ettiğinizden emin olun. -![Curation Pool](/img/curate-own-subgraph-tx.png) +![Kürasyon Havuzu](/img/curate-own-subgraph-tx.png) -## Updating metadata for a published subgraph +## Yayınlanan bir subgraph için üst veri güncelleme -Once your subgraph has been published to the decentralized network, you can modify the metadata at any time by making the update in the Subgraph Studio dashboard of the subgraph. After saving the changes and publishing your updates to the network, they will be reflected in The Graph Explorer. This won’t create a new version, as your deployment hasn’t changed. +Subgraph'iniz merkeziyetsiz ağda yayınlandıktan sonra, Subgraph Studio kontrol panelinde güncelleme yaparak istediğiniz zaman meta verileri değiştirebilirsiniz. Değişiklikleri kaydettikten ve güncellemelerinizi ağda yayınladıktan sonra, bunlar Graph Gezgini'ne yansıtılacaktır. Konuşlandırmanız değişmediğinden dolayı, bu yeni bir sürüm oluşturmaz. diff --git a/website/pages/tr/querying/distributed-systems.mdx b/website/pages/tr/querying/distributed-systems.mdx index 85337206bfd3..2116698d38ed 100644 --- a/website/pages/tr/querying/distributed-systems.mdx +++ b/website/pages/tr/querying/distributed-systems.mdx @@ -1,37 +1,37 @@ --- -title: Distributed Systems +title: Dağıtık Sistemler --- -The Graph is a protocol implemented as a distributed system. +Graph, dağıtık bir sistem olarak uygulanan bir protokoldür. -Connections fail. Requests arrive out of order. 
Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests. These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. +Bağlantılar başarısız. İstekler sıra dışı geliyor. Saatleri ve durumları senkronize olmayan farklı bilgisayarlar, ilgili istekleri işler. Yeniden başlatılan sunucular. İstekler arasında gerçekleşen yeniden düzenlemeler. Bu sorunlar, tüm dağıtık sistemlerin doğasında vardır, ancak küresel ölçekte çalışan sistemlerde daha da kötüleşir. -Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. +Bir müşteri, bir yeniden düzenleme sırasında en son veriler için bir indeksleyiciyi yoklarsa neler olabileceğine dair bu örneği düşünün. -1. Indexer ingests block 8 -2. Request served to the client for block 8 -3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +1. İndeksleyici blok 8'i alır +2. İstemciye blok 8 için sunulan istek +3. İndeksleyici blok 9'u alır +4. İndeksleyici blok 10A'yı alır +5. İstemciye blok 10A için sunulan istek +6. Indexer, 10B'ye yeniden düzenlemeyi algılar ve 10A'yı geri alır +7. İstemciye blok 9 için sunulan istek +8. İndeksleyici blok 10B'yi alır +9. İndeksleyici blok 11'i alır +10. İstemciye blok 11 için sunulan istek -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. Along the way, the Indexer serves requests using the latest state it knows about at that time. +İndeksleyici açısından bakıldığında, işler mantıksal olarak ileriye doğru ilerliyor. Zaman ilerliyor, ancak bir amca bloğunu geri almamız ve konsensüs altındaki bloğu bunun üzerine oynamamız gerekti. Yol boyunca indeksleyici, o sırada bildiği en son durumu kullanarak isteklere hizmet eder. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +Bununla birlikte, müşterinin bakış açısından, işler kaotik görünüyor. Müşteri, yanıtların sırasıyla 8, 10, 9 ve 11. bloklar için olduğunu gözlemler. Buna "blok yalpalama" sorunu diyoruz. Bir müşteri blok yalpalaması yaşadığında, veriler zaman içinde kendisiyle çelişiyor gibi görünebilir. İndeksleyicilerin hepsinin en son blokları aynı anda almadığını ve isteklerinizin birden çok indeksleyiciye yönlendirilebileceğini düşündüğümüzde durum daha da kötüleşir. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +Kullanıcıya tutarlı veriler sağlamak için birlikte çalışmak istemci ve sunucunun sorumluluğundadır. Her problem için doğru program olmadığı için istenilen tutarlılığa bağlı olarak farklı yaklaşımlar kullanılmalıdır. 
-Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Dağıtık sistemlerin sonuçları üzerinden akıl yürütmek zordur, ancak düzeltme olmayabilir! Bazı yaygın kullanım durumlarında gezinmenize yardımcı olacak API'ler ve modeller oluşturduk. Aşağıdaki örnekler, bu kalıpları göstermektedir, ancak ana fikirleri karıştırmamak için üretim kodunun gerektirdiği ayrıntıları (hata işleme ve iptal etme gibi) yine de atlamaktadır. -## Polling for updated data +## Güncellenmiş veriler için yoklama -The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. +Graph, yanıtın `$minBlock`'a eşit veya daha yüksek tek bir blok için olmasını sağlayan `block: { number_gte: $minBlock }` API'ını sağlar. İstek bir `graph-node` örneğine yapılırsa ve minimum blok henüz senkronize edilmemişse, `graph-node` bir hata döndürür. `graph-node`, minimum bloğunu senkronize ettiyse, yanıtı en son blok için çalıştıracaktır. Talep bir Edge & Node ağ geçicidine yapılırsa, ağ geçidi minimum bloğunu henüz senkronize etmemiş tüm indeksleyicileri filtreleyecek ve indeksleyicinin senkronize ettiği en son blok için talepte bulunacaktır. -We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: +Bir döngüdeki verileri yoklarken zamanın asla geriye gitmemesini sağlamak için `number_gte`'i kullanabiliriz. İşte bir örnek: ```javascript /// Updates the protocol.paused variable to the latest @@ -74,11 +74,11 @@ async function updateProtocolPaused() { } ``` -## Fetching a set of related items +## Bir dizi ilgili öğe getiriliyor -Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. +Başka bir kullanım durumu, büyük bir kümeyi almak veya daha genel olarak birden çok istekte ilgili öğeleri almaktır. Yoklama durumundan farklı olarak (istenen tutarlılığın zamanda ilerlemek olduğu), istenen tutarlılık tek bir zaman noktası içindir. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Burada, tüm sonuçlarımızı aynı bloğa sabitlemek için `block: { hash: $blockHash }` argümanını kullanacağız. ```javascript /// Gets a list of domain names from a single block using pagination @@ -131,4 +131,4 @@ async function getDomainNames() { } ``` -Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. +Yeniden düzenleme durumunda, müşterinin blok hash'ini non-uncle bir bloğa güncellemek için ilk istekten yeniden denemesi gerekeceğini unutmayın. 
diff --git a/website/pages/tr/querying/graphql-api.mdx b/website/pages/tr/querying/graphql-api.mdx index 89cda460d58f..4e817f0e579d 100644 --- a/website/pages/tr/querying/graphql-api.mdx +++ b/website/pages/tr/querying/graphql-api.mdx @@ -2,15 +2,15 @@ title: GraphQL API --- -This guide explains the GraphQL Query API that is used for the Graph Protocol. +Bu kılavuz, Graph Protokolü için kullanılan GraphQL sorgu API'ını açıklar. -## Queries +## Sorgular -In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph. +Subgraph şemanızda `Entities` adlı türleri tanımlarsınız. Her `Entity` türü için, üst düzey `Query` türünde bir `entity` ve `entities` alanı oluşturulur. Graph kullanılırken `query` öğesinin `graphql` sorgusunun en üstüne eklenmesi gerekmediğini unutmayın. -### Examples +### Örnekler -Query for a single `Token` entity defined in your schema: +Şemanızda tanımlanan tek bir `token` varlığını sorgulayın: ```graphql { @@ -21,9 +21,9 @@ Query for a single `Token` entity defined in your schema: } ``` -> **Note:** When querying for a single entity, the `id` field is required, and it must be a string. +> **Not:** Tek bir varlık için sorgulama yaparken `id` alanı zorunludur ve bir dize olmalıdır. -Query all `Token` entities: +Tüm `Token` varlıklarını sorgulayın: ```graphql { @@ -34,11 +34,11 @@ Query all `Token` entities: } ``` -### Sorting +### Sıralama -When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. +Bir koleksiyonu sorgularken, belirli bir niteliğe göre sıralamak için `orderBy` parametresi kullanılabilir. Ayrıca, sıralama yönünü belirtmek için `orderDirection` kullanılabilir; artan için `asc` veya azalan için `desc`. -#### Example +#### Örnek ```graphql { @@ -49,11 +49,11 @@ When querying a collection, the `orderBy` parameter may be used to sort by a spe } ``` -#### Example for nested entity sorting +#### İç içe varlık sıralaması için örnek -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) entities can be sorted on the basis of nested entities. +Graph Düğümü [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0)'dan itibaren varlıklar iç içe geçmiş varlıklar bazında sıralanabilir. -In the following example, we sort the tokens by the name of their owner: +Aşağıdaki örnekte, tokenleri sahiplerinin adına göre sıralıyoruz: ```graphql { @@ -66,19 +66,19 @@ In the following example, we sort the tokens by the name of their owner: } ``` -> Currently, you can sort by one-level deep `String` or `ID` types on `@entity` and `@derivedFrom` fields. Unfortunately, [sorting by interfaces on one level-deep entities](https://github.com/graphprotocol/graph-node/pull/4058), sorting by fields which are arrays and nested entities is not yet supported. +> Şu anda, `@entity` ve `@derivedFrom` alanlarında tek seviye derinliğindeki `String` veya `ID` tiplerine göre sıralama yapabilirsiniz. Ne yazık ki, [tek seviye derinliğindeki varlıklarda arayüzlere göre sıralama](https://github.com/graphprotocol/graph-node/pull/4058), diziler ve iç içe geçmiş varlıklar olan alanlara göre sıralama henüz desteklenmemektedir. 
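The query bodies for the sorting examples are elided by the hunks above; purely as an illustration of the `orderBy` / `orderDirection` arguments this section describes (the `tokens` entity and its `price` field are assumed names, not taken from the page):

```javascript
// Illustration only: sort a collection by one attribute, descending.
// `tokens` and `price` are assumed example names.
const sortedTokensQuery = `
  {
    tokens(orderBy: price, orderDirection: desc) {
      id
      price
    }
  }
`
// Nested sorting (Graph Node >= v0.30.0) additionally allows ordering by a
// field of a one-level-deep child entity, e.g. a token's owner, as the note
// above explains.
```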
-### Pagination +### Sayfalandırma -When querying a collection, the `first` parameter can be used to paginate from the beginning of the collection. It is worth noting that the default sort order is by ID in ascending alphanumeric order, not by creation time. +Bir koleksiyonu sorgularken, koleksiyonun başından itibaren sayfalama yapmak için `first` parametresi kullanılabilir. Varsayılan sıralama düzeninin oluşturma zamanına göre değil, artan alfanümerik düzende ID'ye göre olduğunu belirtmekte fayda var. -Further, the `skip` parameter can be used to skip entities and paginate. e.g. `first:100` shows the first 100 entities and `first:100, skip:100` shows the next 100 entities. +Ayrıca, `skip` parametresi varlıkları atlamak ve sayfalandırmak için kullanılabilir. örn. `first:100` ilk 100 varlığı gösterir ve `first:100, skip:100` sonraki 100 varlığı gösterir. -Queries should avoid using very large `skip` values since they generally perform poorly. For retrieving a large number of items, it is much better to page through entities based on an attribute as shown in the last example. +Sorgular genellikle kötü performans gösterdiğinden çok büyük `skip` değerleri kullanmaktan kaçınmalıdır. Çok sayıda öğeyi almak için, son örnekte gösterildiği gibi bir özniteliğe dayalı olarak varlıklar arasında sayfa açmak çok daha idealdir. -#### Example using `first` +#### `first`'ün kullanımına örnek -Query the first 10 tokens: +İlk 10 tokeni sorgulayın: ```graphql { @@ -89,11 +89,11 @@ Query the first 10 tokens: } ``` -To query for groups of entities in the middle of a collection, the `skip` parameter may be used in conjunction with the `first` parameter to skip a specified number of entities starting at the beginning of the collection. +Bir koleksiyonun ortasındaki varlık gruplarını sorgulamak için `skip` parametresi, koleksiyonun başından başlayarak belirli sayıda varlığı atlamak üzere `first` parametresi ile birlikte kullanılabilir. -#### Example using `first` and `skip` +#### `first` ve `skip`'in kullanımına örnek -Query 10 `Token` entities, offset by 10 places from the beginning of the collection: +10 `Token` varlığını sorgulayın, bunları koleksiyonun başlangıcından itibaren 10 sıra kaydırın: ```graphql { @@ -104,9 +104,9 @@ Query 10 `Token` entities, offset by 10 places from the beginning of the collect } ``` -#### Example using `first` and `id_ge` +#### `first` ve `id_ge`'nin kullanımına örnek -If a client needs to retrieve a large number of entities, it is much more performant to base queries on an attribute and filter by that attribute. For example, a client would retrieve a large number of tokens using this query: +Bir istemcinin çok sayıda varlığı alması gerekiyorsa, sorguları bir niteliğe dayandırmak ve bu niteliğe göre filtrelemek çok daha performanslıdır. Örneğin, bir istemci bu sorguyu kullanarak çok sayıda token alabilir: ```graphql query manyTokens($lastID: String) { @@ -117,15 +117,15 @@ query manyTokens($lastID: String) { } ``` -The first time, it would send the query with `lastID = ""`, and for subsequent requests would set `lastID` to the `id` attribute of the last entity in the previous request. This approach will perform significantly better than using increasing `skip` values. +İlk seferinde, sorguyu `lastID = ""` ile gönderecek ve sonraki istekler için `lastID`'yi önceki istekteki son varlığın `id` niteliğine ayarlayacaktır. Bu yaklaşım, artan `skip` değerleri kullanmaktan önemli ölçüde daha iyi performans gösterecektir. 
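Only the header of the `manyTokens` query survives in the hunk above; here is a hedged sketch of the cursor-style pagination loop the paragraph describes, assuming a generic `execute(query, variables)` helper. The heading says `id_ge`, but the sketch filters with `id_gt` (strictly greater than the last seen `id`) so the boundary entity is not fetched twice; the `owner` field is likewise just an assumed example.

```javascript
// Sketch of id-based cursor pagination: instead of ever-growing `skip` values,
// each page filters on the id of the last entity from the previous page.
// `execute(query, variables)` stands in for whatever GraphQL client is used.
const manyTokensQuery = `
  query manyTokens($lastID: String) {
    tokens(first: 1000, where: { id_gt: $lastID }) {
      id
      owner
    }
  }
`

async function fetchAllTokens(execute) {
  const all = []
  let lastID = ''
  for (;;) {
    const { data } = await execute(manyTokensQuery, { lastID })
    const page = data.tokens
    all.push(...page)
    if (page.length < 1000) break // fewer than a full page means we are done
    lastID = page[page.length - 1].id
  }
  return all
}
```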
-### Filtering +### Filtreleme -You can use the `where` parameter in your queries to filter for different properties. You can filter on mulltiple values within the `where` parameter. +Sorgularınızda `where` parametresini kullanarak farklı özellikler için filtreleme yapabilirsiniz. `where` parametresi içerisinde birden fazla değer için filtreleme yapabilirsiniz. -#### Example using `where` +#### `where`'in kullanımına örnek -Query challenges with `failed` outcome: +`failed` ile sonuçlanan sorgu zorlukları: ```graphql { @@ -139,9 +139,9 @@ Query challenges with `failed` outcome: } ``` -You can use suffixes like `_gt`, `_lte` for value comparison: +Değer karşılaştırması için `_gt`, `_lte` gibi son ekler kullanabilirsiniz: -#### Example for range filtering +#### Aralık filtreleme için örnek ```graphql { @@ -153,11 +153,11 @@ You can use suffixes like `_gt`, `_lte` for value comparison: } ``` -#### Example for block filtering +#### Blok filtreleme için örnek -You can also filter entities by the `_change_block(number_gte: Int)` - this filters entities which were updated in or after the specified block. +Varlıkları `_change_block(number_gte: Int)` ile de filtreleyebilirsiniz. Bu, belirtilen blok içinde veya sonrasında güncellenen varlıkları filtreler. -This can be useful if you are looking to fetch only entities which have changed, for example since the last time you polled. Or alternatively it can be useful to investigate or debug how entities are changing in your subgraph (if combined with a block filter, you can isolate only entities that changed in a specific block). +Örneğin bu, son yoklamanızdan bu yana yalnızca değişen varlıkları almak istiyorsanız yararlı olabilir. Ya da alternatif olarak, subgraph'ınızda varlıkların nasıl değiştiğini araştırmak veya hata ayıklamak için yararlı olabilir (bir blok filtresiyle birleştirilirse, yalnızca belirli bir blokta değişen varlıkları izole edebilirsiniz). ```graphql { @@ -169,11 +169,11 @@ This can be useful if you are looking to fetch only entities which have changed, } ``` -#### Example for nested entity filtering +#### İç içe varlık filtreleme örneği -Filtering on the basis of nested entities is possible in the fields with the `_` suffix. +İç içe geçmiş varlıklar temelinde filtreleme, `_` son ekine sahip alanlarda mümkündür. -This can be useful if you are looking to fetch only entities whose child-level entities meet the provided conditions. +Bu, yalnızca alt düzey varlıkları sağlanan koşulları karşılayan varlıkları getirmek istiyorsanız yararlı olabilir. ```graphql { @@ -187,13 +187,13 @@ This can be useful if you are looking to fetch only entities whose child-level e } ``` -#### Logical operators +#### Mantıksal operatörler -As of Graph Node [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) you can group multiple parameters in the same `where` argument using the `and` or the `or` operators to filter results based on more than one criteria. +Graph Düğümü'nün [`v0.30.0`](https://github.com/graphprotocol/graph-node/releases/tag/v0.30.0) sürümüne göre, birden fazla parametreyi aynı `where` argümanında gruplayabilirsiniz. Bu, sonuçları birden fazla kritere göre filtrelemek için `and` veya `or` operatörlerini kullanmanıza olanak tanır. -##### `AND` Operator +##### `AND` Operatörü -In the following example, we are filtering for challenges with `outcome` `succeeded` and `number` greater than or equal to `100`. 
+Aşağıdaki örnekte, `outcome` değeri `succeeded` olan ve `number` değeri `100`'den büyük veya buna eşit olan zorlukları filtreliyoruz. ```graphql { @@ -207,7 +207,7 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Syntactic sugar:** You can simplify the above query by removing the `and` operator by passing a sub-expression separated by commas. +> **Syntactic sugar:** Yukarıdaki sorguyu, virgülle ayrılmış bir alt ifade geçirerek, `and` operatörünü kaldırarak basitleştirebilirsiniz. > > ```graphql > { @@ -221,9 +221,9 @@ In the following example, we are filtering for challenges with `outcome` `succee > } > ``` -##### `OR` Operator +##### `OR` Operatörü -In the following example, we are filtering for challenges with `outcome` `succeeded` or `number` greater than or equal to `100`. +Aşağıdaki örnekte, `outcome` değeri `succeeded` olan veya `number` değeri `100` yada daha büyük olan zorlukları filtreliyoruz. ```graphql { @@ -237,11 +237,11 @@ In the following example, we are filtering for challenges with `outcome` `succee } ``` -> **Note**: When constructing queries, it is important to consider the performance impact of using the `or` operator. While `or` can be a useful tool for broadening search results, it can also have significant costs. One of the main issues with `or` is that it can cause queries to slow down. This is because `or` requires the database to scan through multiple indexes, which can be a time-consuming process. To avoid these issues, it is recommended that developers use and operators instead of or whenever possible. This allows for more precise filtering and can lead to faster, more accurate queries. +> **Not**: Sorguları oluştururken, `or` operatörünü kullanmanın performans üzerindeki etkisini göz önünde bulundurmak önemlidir. `or` arama sonuçlarını genişletmek için yararlı bir araç olsa da, önemli maliyetleri de olabilir. `or` ile ilgili temel sorunlardan biri, sorguların yavaşlamasına neden olabilmesidir. Bunun nedeni, `or` operatörünün veritabanının birden fazla dizini taramasını gerektirmesidir ve bu da zaman alıcı bir işlem olabilir. Bu sorunlardan kaçınmak için, geliştiricilerin mümkün olduğunda or yerine and operatörlerini kullanmaları önerilir. Bu, daha hassas filtreleme sağlar ve daha hızlı, daha doğru sorgulara yol açabilir. -#### All Filters +#### Tüm Filtreler -Full list of parameter suffixes: +Parametre eklerinin tam listesi: ``` _ @@ -266,23 +266,23 @@ _not_ends_with _not_ends_with_nocase ``` -> Please note that some suffixes are only supported for specific types. For example, `Boolean` only supports `_not`, `_in`, and `_not_in`, but `_` is available only for object and interface types. +> Lütfen bazı eklentilerin yalnızca belirli tipler için desteklendiğini unutmayın. Örneğin, `Boolean` yalnızca `_not`, `_in` ve `not_in` desteği sağlar, ancak `_` yalnızca nesne ve arayüz tipleri için kullanılabilir. -In addition, the following global filters are available as part of `where` argument: +Ayrıca, aşağıdaki global filtreler `where` argümanının bir parçası olarak kullanılabilir: ```gr _change_block(number_gte: Int) ``` -### Time-travel queries +### Zaman yolculuğu sorguları -You can query the state of your entities not just for the latest block, which is the default, but also for an arbitrary block in the past. The block at which a query should happen can be specified either by its block number or its block hash by including a `block` argument in the toplevel fields of queries. 
+Varlıklarınızın durumunu yalnızca varsayılan olan en son blok için değil, aynı zamanda geçmişteki rastgele bir blok için de sorgulayabilirsiniz. Bir sorgunun gerçekleşmesi gereken blok, sorguların üst düzey alanlarına bir `block` bağımsız değişkeni eklenerek blok numarası veya blok karması ile belirtilebilir. -The result of such a query will not change over time, i.e., querying at a certain past block will return the same result no matter when it is executed, with the exception that if you query at a block very close to the head of the chain, the result might change if that block turns out to not be on the main chain and the chain gets reorganized. Once a block can be considered final, the result of the query will not change. +Böyle bir sorgunun sonucu zaman içinde değişmeyecektir, yani belirli bir geçmiş blokta sorgu yapmak, ne zaman yürütülürse yürütülsün aynı sonucu verecektir, ancak zincirin başına çok yakın bir blokta sorgu yaptığınız zaman, bu bloğun ana zincirde olmadığı ortaya çıkarsa ve zincir yeniden düzenlenirse sonuç değişebilir. Bir blok nihai olarak kabul edildiğinde takdirde, sorgunun sonucu değişmeyecektir. -Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. +Lütfen şunu göz önünde bulundurun ki mevcut uygulama hala belirli sınırlamalara tabidir ve bu garantiyi ihlal edebilir. Uygulama her zaman verilen bir blok hash değerinin ana zincirde olup olmadığını ya da henüz kesinleştirilmemiş bir blok için blok hash değeri ile yapılan sorgunun, sorgu ile eş zamanlı olarak gerçekleşen bir blok yeniden düzenlemesi tarafından etkilenebileceğini bilemeyebilir. Ancak bu durum, blok kesinleştirildiğinde ve ana zincirde bulunduğu biliniyorsa, blok hash değeri ile yapılan sorguların sonuçlarını etkilemez. [Bu sorun](https://github.com/graphprotocol/graph-node/issues/1405), bu sınırlamaların ayrıntılarını detaylı bir şekilde açıklamaktadır. -#### Example +#### Örnek ```graphql { @@ -296,9 +296,9 @@ Note that the current implementation is still subject to certain limitations tha } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. +Bu sorgu, 8.000.000 numaralı bloğun işlenmesinden hemen sonra var oldukları şekliyle `Challenge` varlıklarını ve bunlarla ilişkili `Application` varlıklarını döndürür. -#### Example +#### Örnek ```graphql { @@ -312,26 +312,26 @@ This query will return `Challenge` entities, and their associated `Application` } ``` -This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing the block with the given hash. +Bu sorgu, verilen hash ile bloğun işlenmesinden hemen sonra var olan şekliyle `Challenge` varlıklarını ve bunlarla ilişkili `Application` varlıklarını döndürür. 
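Both time-travel example bodies are elided above; as a small sketch of pinning a query to a single historical block, using the `challenges` / `application` entities the surrounding prose mentions (the field names are assumptions):

```javascript
// Illustration: pin the whole query to one past block. A block hash can be
// passed instead, e.g. block: { hash: "0x..." }, as described above.
const challengesAtBlockQuery = `
  query ChallengesAtBlock($blockNumber: Int!) {
    challenges(block: { number: $blockNumber }) {
      challenger
      outcome
      application {
        id
      }
    }
  }
`
// e.g. execute(challengesAtBlockQuery, { blockNumber: 8000000 })
```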
-### Fulltext Search Queries +### Tam Metin Arama Sorguları -Fulltext search query fields provide an expressive text search API that can be added to the subgraph schema and customized. Refer to [Defining Fulltext Search Fields](/developing/creating-a-subgraph#defining-fulltext-search-fields) to add fulltext search to your subgraph. +Tam metin arama sorgu alanları, subgraph şemasına eklenebilen ve özelleştirilebilen etkileyici bir metin arama API'si sağlar. Subgraph'ınıza tam metin araması eklemek için [Tam Metin Arama Alanlarını Tanımlama](/developing/creating-a-subgraph#defining-fulltext-search-fields) bölümüne göz atın. -Fulltext search queries have one required field, `text`, for supplying search terms. Several special fulltext operators are available to be used in this `text` search field. +Tam metin arama sorgularının kullanması gereken bir zorunlu alanı vardır, bu alan `text` adını taşır ve arama terimlerini sağlamak için kullanılır. Bu `text` arama alanında kullanılmak üzere birkaç özel tam metin operatörü mevcuttur. -Fulltext search operators: +Tam metin arama operatörleri: -| Symbol | Operator | Description | +| Sembol | Operatör | Tanımlama | | --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| `&` | `And` | Birden fazla arama terimini, sağlanan tüm terimleri içeren varlıklar için bir filtrede birleştirmek için kullanılır | +| | | `Or` | Or, işleciyle ayrılmış birden çok arama terimi içeren sorgular, sağlanan terimlerden herhangi biriyle eşleşen tüm varlıkları döndürür | +| `<->` | `Follow by` | İki kelime arasındaki mesafeyi belirtir. | +| `:*` | `Prefix` | Ön eki eşleşen kelimeleri bulmak için önek arama terimini kullanın (2 karakter gereklidir.) | -#### Examples +#### Örnekler -Using the `or` operator, this query will filter to blog entities with variations of either "anarchism" or "crumpet" in their fulltext fields. +`or` operatörünü kullanan bu sorgu, tam metin alanlarında "anarchism" veya "crumpet" varyasyonları bulunan blog varlıklarını filtreleyecektir. ```graphql { @@ -344,7 +344,7 @@ Using the `or` operator, this query will filter to blog entities with variations } ``` -The `follow by` operator specifies a words a specific distance apart in the fulltext documents. The following query will return all blogs with variations of "decentralize" followed by "philosophy" +`follow by` operatörü, tam metin belgelerinde belirli bir mesafe uzaklıktaki kelimeleri belirtir. Aşağıdaki sorgu "decentralize" ve ardından "philosophy" kelimelerinin geçtiği tüm blogları döndürür ```graphql { @@ -357,7 +357,7 @@ The `follow by` operator specifies a words a specific distance apart in the full } ``` -Combine fulltext operators to make more complex filters. With a pretext search operator combined with a follow by this example query will match all blog entities with words that start with "lou" followed by "music". +Daha karmaşık filtreler oluşturmak için tam metin operatörlerini birleştirin. Bu örnek sorgu, bir pretext arama işleci ile bir follow by işlecini birleştirerek "lou" ile başlayan ve ardından "music" ile devam eden sözcükleri içeren tüm blog varlıklarıyla eşleşecektir. 
```graphql { @@ -370,27 +370,27 @@ Combine fulltext operators to make more complex filters. With a pretext search o } ``` -### Validation +### Validasyon -Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. +Graph Düğümü, [graphql-js referans uygulamasını](https://github.com/graphql/graphql-js/tree/main/src/validation) temel alan [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules)'yi kullanarak aldığı GraphQL sorgularının [spesifikasyon tabanlı](https://spec.graphql.org/October2021/#sec-Validation) doğrulamasını gerçekleştirir. Bir doğrulama kuralını geçemeyen sorgular standart bir hata ile sonuçlanır. Daha fazla bilgi için [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation)'i ziyaret edin. -## Schema +## Şema -The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). +Veri kaynağınızın şeması, sorgulamak için kullanılabilen varlık tipleri, değerler ve ilişkiler [GraphQL Arayüz Tanımlama Dili (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System) aracılığıyla tanımlanır. -GraphQL schemas generally define root types for `queries`, `subscriptions` and `mutations`. The Graph only supports `queries`. The root `Query` type for your subgraph is automatically generated from the GraphQL schema that's included in your subgraph manifest. +GraphQL şemaları genellikle `queries`, `subscriptions` ve `mutations` için root tipleri tanımlar. Graph yalnızca `queries` destekler. Subgraph'ınız için root `Query` tipi, subgraph bildiriminize dahil edilen GraphQL şemasından otomatik olarak oluşturulur. -> **Note:** Our API does not expose mutations because developers are expected to issue transactions directly against the underlying blockchain from their applications. +> **Not:** API'miz mutations tipini açığa çıkarmaz çünkü geliştiricilerin uygulamalarından doğrudan temelindeki blok zincire karşı işlemleri gerçekleştirmeleri beklenir. -### Entities +### Varlıklar -All GraphQL types with `@entity` directives in your schema will be treated as entities and must have an `ID` field. +Şemanızda `@entity` yönergeleri bulunan tüm GraphQL türleri varlık olarak değerlendirilir ve bir `ID` alanına sahip olmalıdır. -> **Note:** Currently, all types in your schema must have an `@entity` directive. In the future, we will treat types without an `@entity` directive as value objects, but this is not yet supported. +> **Not:** Şu anda, şemanızdaki tüm tiplerin bir `@entity` yönergesine sahip olması gerekmektedir. İlerleyen zamanlarda, `@entity` yönergesi olmadan tanımlanan tipleri değer nesneleri olarak ele alacağız, ancak bu henüz desteklenmemektedir. -### Subgraph Metadata +### Subgraph Üst Verisi -All subgraphs have an auto-generated `_Meta_` object, which provides access to subgraph metadata. 
This can be queried as follows: +Tüm subgraphlar, subgraph üst verisine erişim sağlayan otomatik olarak oluşturulmuş bir `_Meta_` nesnesine sahiptir. Bu aşağıdaki gibi sorgulanabilir: ```graphQL { @@ -406,14 +406,14 @@ All subgraphs have an auto-generated `_Meta_` object, which provides access to s } ``` -If a block is provided, the metadata is as of that block, if not the latest indexed block is used. If provided, the block must be after the subgraph's start block, and less than or equal to the most recently indexed block. +Eğer bir blok belirtilirse, üst veri o blokla ilgilidir; belirtilmezse en son dizinlenen blok dikkate alınır. Eğer belirtilirse, blok subgraph başlangıç bloğundan sonra olmalıdır ve en son indekslenen bloğa eşit veya daha küçük olmalıdır. -`deployment` is a unique ID, corresponding to the IPFS CID of the `subgraph.yaml` file. +`deployment` eşsiz bir kimliktir ve `subgraph.yaml` dosyasının IPFS CID'sine karşılık gelir. -`block` provides information about the latest block (taking into account any block constraints passed to `_meta`): +`block` en son blok hakkında bilgi sağlar (`_meta`'ya aktarılan blok kısıtlamalarını dikkate alarak): -- hash: the hash of the block -- number: the block number -- timestamp: the timestamp of the block, if available (this is currently only available for subgraphs indexing EVM networks) +- hash: bloğun hash'i +- number: blok numarası +- timestamp: varsa, bloğa ait timestamp (bu şu anda yalnızca EVM ağlarını indeksleyen subgraph'ler için kullanılabilir) -`hasIndexingErrors` is a boolean identifying whether the subgraph encountered indexing errors at some past block +`hasIndexingErrors` ifadesi, subgraph'ın önceki bazı bloklarda indeksleme hatalarıyla karşılaşıp karşılaşmadığını belirleyen bir boolean değeridir diff --git a/website/pages/tr/querying/managing-api-keys.mdx b/website/pages/tr/querying/managing-api-keys.mdx index ee7c274bca10..561000f522e5 100644 --- a/website/pages/tr/querying/managing-api-keys.mdx +++ b/website/pages/tr/querying/managing-api-keys.mdx @@ -1,26 +1,26 @@ --- -title: Managing your API keys +title: API Anahtarlarınızı Yönetme --- -Regardless of whether you’re a dapp developer or a subgraph developer, you’ll need to manage your API keys. This is important for you to be able to query subgraphs because API keys make sure the connections between application services are valid and authorized. This includes authenticating the end user and the device using the application. +Merkeziyetsiz uygulama veya subgraph geliştiricisi olmanızdan bağımsız olarak, API anahtarlarınızı yönetmeniz gerekir. Bu, subgraph'leri sorgulayabilmeniz için önemlidir çünkü API anahtarları, uygulama servisleri arasındaki bağlantıların geçerli ve yetkili olmasını sağlar. Bu, uygulamayı kullanarak son kullanıcının ve cihazın kimliğinin doğrulanmasını içerir. -The Studio will list out existing API keys, which will give you the ability to manage or delete them. +Stüdyo, mevcut API anahtarlarını listeleyerek size bunları yönetme veya silme olanağı sağlar. -1. The **Overview** section will allow you to: - - Edit your key name - - Regenerate API keys - - View the current usage of the API key with stats: - - Number of queries - - Amount of GRT spent -2. Under **Security**, you’ll be able to opt into security settings depending on the level of control you’d like to have over your API keys. In this section, you can: - - View and manage the domain names authorized to use your API key - - Assign subgraphs that can be queried with your API key -3. 
Under **Indexer Preference**, you’ll be able to set different preferences for Indexers who are indexing subgraphs that your API key is used for. You can assign up to 5 points for each of these: - - **Fastest Speed**: Time between the query and the response from an indexer. If you mark this as important we will optimize for fast indexers. - - **Lowest Price**: The amount paid per query. If you mark this as important we will optimize for the less expensive indexers. - - **Data Freshness**: How recent the latest block an indexer has processed for the subgraph you are querying. If you mark this as important we will optimize to find the indexers with the freshest data. - - **Economic Security**: The amount of GRT an indexer can lose if they respond incorrectly to your query. If you mark this as important we will optimize for indexers with a large stake. -4. Under **Budget**, you’ll be able to update the maximum price per query. Note that we have a dynamic setting for that that's based on a volume discounting algorithm. **We strongly recommend using the default settings unless you are experiencing a specific problem.** Otherwise, you can update it under "Set a custom maximum budget". On this page you can also view different KPIs (in GRT and USD): - - Average cost per query - - Failed queries over max price - - Most expensive query +1. **Genel Bakış** bölümü şunları yapmanızı sağlar: + - Anahtar adınızı düzenleyin + - API anahtarlarını yeniden oluştur + - API anahtarının mevcut kullanımını istatistiklerle görüntüleyin: + - Sorgu sayısı + - Harcanan GRT miktarı +2. **Güvenlik** bölümünün altında, API anahtarlarınız üzerinde sahip olmak istediğiniz kontrol düzeyine bağlı olarak güvenlik ayarlarını etkinleştirebileceksiniz. Bu bölümde şunları yapabilirsiniz: + - API anahtarınızı kullanmaya yetkili domainleri görüntüleyin ve yönetin + - API anahtarınızla sorgulanabilecek subgraph'ler atayın +3. **İndeksleyici Tercihi** bölümünün altında, API anahtarınızın kullanıldığı subgraph'leri indeksleyen indeksleyiciler için farklı tercihler belirleyebilirsiniz. Bunların her biri için en fazla 5 puan atayabilirsiniz: + - **En Yüksek Hız**: Sorgu ile indeksleyicinin yanıtı arasındaki süredir. Bunu önemli olarak işaretlerseniz, hızlı indeksleyiciler için optimize edeceğiz. + - **En Düşük Fiyat**: Sorgu başına ödenen tutarı ifade eder. Bunu önemli olarak işaretlerseniz, daha ucuz indeksleyiciler için optimizasyon yaparız. + - **Veri Yeniliği**: Bir indeksleyicinin sorguladığınız subgraph için işlediği en son bloğun ne kadar yeni olduğunu ifade eder. Bunu önemli olarak işaretlerseniz, en yeni verilere sahip indeksleyicileri bulmak için optimize edeceğiz. + - **Ekonomik Güvenlik**: Bir indeksleyicinin sorgunuza yanlış yanıt vermesi durumunda kaybedebileceği GRT miktarıdır. Bunu önemli olarak işaretlerseniz, büyük paya sahip indeksleyiciler için optimizasyon yaparız. +4. **Bütçe** altında, sorgu başına maksimum fiyatı güncelleyebilirsiniz. Bunun için bir hacim indirimi algoritmasına dayalı dinamik bir ayarımız olduğunu unutmayın. **Belirli bir sorun yaşamıyorsanız varsayılan ayarları kullanmanızı kesinlikle öneririz.** Aksi takdirde, "Özel bir maksimum bütçe belirleyin" altında güncelleyebilirsiniz. 
Bu sayfada ayrıca farklı KPI'ları da görüntüleyebilirsiniz (GRT ve USD cinsinden): + - Sorgu başına ortalama maliyet + - Maksimum fiyat üzerinden başarısız sorgular + - En pahalı sorgu diff --git a/website/pages/tr/querying/querying-best-practices.mdx b/website/pages/tr/querying/querying-best-practices.mdx index 98c0ffb72c61..d0a3f50ed7de 100644 --- a/website/pages/tr/querying/querying-best-practices.mdx +++ b/website/pages/tr/querying/querying-best-practices.mdx @@ -1,22 +1,22 @@ --- -title: Querying Best Practices +title: En İyi Uygulamaları Sorgulama --- -The Graph provides a decentralized way to query data from blockchains. +Graph, blok zincirlerinden veri sorgulamak için merkeziyetsiz bir yol sağlar. -The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. +Graph ağının verileri, GraphQL diliyle verilerin sorgulanmasını kolaylaştıran bir GraphQL API aracılığıyla sunulur. -This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. +Bu sayfa, temel GraphQL dil kuralları ve GraphQL sorguları en iyi uygulamaları konusunda size rehberlik edecektir. --- -## Querying a GraphQL API +## GraphQL API Sorgulama -### The anatomy of a GraphQL query +### Bir GraphQL sorgusunun anatomisi -Unlike REST API, a GraphQL API is built upon a Schema that defines which queries can be performed. +REST API'den farklı olarak GraphQL API, hangi sorguların gerçekleştirilebileceğini tanımlayan bir şema üzerine kuruludur. -For example, a query to get a token using the `token` query will look as follows: +Örneğin, `token` sorgusunu kullanarak token almak için bir sorgu aşağıdaki gibi görünecektir: ```graphql query GetToken($id: ID!) { @@ -27,7 +27,7 @@ query GetToken($id: ID!) { } ``` -which will return the following predictable JSON response (_when passing the proper `$id` variable value_): +bu, aşağıdaki öngörülebilir JSON yanıtını döndürür (_uygun `$id` değişken değerini geçerken_): ```json { @@ -38,9 +38,9 @@ which will return the following predictable JSON response (_when passing the pro } ``` -GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). +GraphQL sorguları, [bir şartname](https://spec.graphql.org/) üzerinde tanımlanan GraphQL dilini kullanır. -The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): +Yukarıdaki `GetToken` sorgusu birden çok dil bölümünden oluşur (aşağıda `[...]` yer tutucularla değiştirilmiştir): ```graphql query [operationName]([variableName]: [variableType]) { @@ -52,33 +52,33 @@ query [operationName]([variableName]: [variableType]) { } ``` -While the list of syntactic do's and don'ts is long, here are the essential rules to keep in mind when it comes to writing GraphQL queries: +Sözdizimsel olarak yapılacaklar ve yapılmayacaklar listesi uzun olsa da, iş GraphQL sorguları yazmaya geldiğinde akılda tutulması gereken temel kurallar şunlardır: -- Each `queryName` must only be used once per operation. -- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) -- Some `field`s or queries (like `tokens`) return complex types that require a selection of sub-field. Not providing a selection when expected (or providing one when not expected - for example, on `id`) will raise an error. To know a field type, please refer to [The Graph Explorer](/network/explorer). -- Any variable assigned to an argument must match its type. 
-- In a given list of variables, each of them must be unique. -- All defined variables must be used. +- Her `queryName`, işlem başına yalnızca bir kez kullanılmalıdır. +- Her `field` bir seçimde yalnızca bir kez kullanılmalıdır (`token` altında `id` öğesini iki kez sorgulayamayız) +- Bazı `field`'lar veya sorgular (`tokens` gibi), bir alt alan seçimi gerektiren karmaşık türler döndürür. Beklendiği zaman bir seçimin sağlanmaması (veya beklenmediğinde - örneğin, `id` üzerinde) bir seçimin sağlanmaması bir hataya yol açacaktır. Bir alan türünü bilmek için lütfen [Graph Explorer](/network/explorer)'a bakın. +- Bir bağımsız değişkene atanan herhangi bir değişken, türüyle eşleşmelidir. +- Belirli bir değişken listesinde, her birinin benzersiz olması gerekir. +- Tanımlanan tüm değişkenler kullanılmalıdır. -Failing to follow the above rules will end with an error from the Graph API. +Yukarıdaki kurallara uyulmaması, Graph API'sinden bir hata ile sonuçlanacaktır. -For a complete list of rules with code examples, please look at our GraphQL Validations guide. +Kod örnekleriyle birlikte kuralların tam listesi için lütfen GraphQL validasyonları kılavuzumuza bakın. -### Sending a query to a GraphQL API +### Bir GraphQL API'ına Sorgu Gönderme -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL, HTTP aracılığıyla aktarım yapan bir dil ve kurallar bütünüdür. -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +Bu, bir GraphQL API'sini standart `fetch` kullanarak (yerel olarak yada `@whatwg-node/fetch` veya `isomorphic-fetch`) sorgulayabileceğiniz anlamına gelir. -However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +Ancak, ["Bir Uygulamadan Sorgulama"](/querying/querying-from-an-application) bölümünde belirtildiği gibi, aşağıdaki gibi benzersiz özellikleri destekleyen `graph-client`'ımızı kullanmanızı öneririz: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Zincirler Arası Subgraph İşleme: Tek bir sorguda birden çok subgraph'ten sorgulama +- [Otomatik Blok Takibi](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Otomatik Sayfalandırma](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Tam olarak yazılan sonuç -Here's how to query The Graph with `graph-client`: +Graph'ı `graph-client` ile nasıl sorgulayacağınız aşağıda açıklanmıştır: ```tsx import { execute } from '../.graphclient' @@ -102,17 +102,17 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +Daha fazla GraphQL istemci alternatifi ["Bir Uygulamadan Sorgulama"](/querying/querying-from-an-application) bölümünde ele alınmıştır. -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +GraphQL sorguları sözdiziminin temel kurallarını ele aldığımıza göre, şimdi GraphQL sorgusu yazmanın en iyi uygulamalarına bakalım. 
--- -## Writing GraphQL queries +## GraphQL Sorguları Yazma -### Always write static queries +### Her zaman statik sorgular yazın -A common (bad) practice is to dynamically build query strings as follows: +Yaygın (hatalı) bir yöntem, sorgu dizelerini aşağıdaki gibi dinamik olarak oluşturmaktır: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... ``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +Yukarıdaki kod parçacığı geçerli bir GraphQL sorgusu oluştursa da, **birçok dezavantajı vardır**: -- it makes it **harder to understand** the query as a whole -- developers are **responsible for safely sanitizing the string interpolation** -- not sending the values of the variables as part of the request parameters **prevent possible caching on server-side** -- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) +- sorguyu bir bütün olarak **anlamayı** zorlaştırır +- geliştiriciler **dize enterpolasyonunu güvenli bir şekilde temizlemekten sorumludur** +- değişkenlerin değerlerini istek parametrelerinin bir parçası olarak göndermemek **sunucu tarafında olası önbelleğe almayı önler** +- **araçların sorguyu statik olarak analiz etmesini engeller** (ör. Linter veya tür oluşturma araçları) -For this reason, it is recommended to always write queries as static strings: +Bu nedenle, sorguların her zaman statik dizeler olarak yazılması önerilir: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +Bunu yapmak **birçok avantajı** beraberinde getirir: -- **Easy to read and maintain** queries -- The GraphQL **server handles variables sanitization** -- **Variables can be cached** at server-level -- **Queries can be statically analyzed by tools** (more on this in the following sections) +- **Okunması ve bakımı kolay** sorgular +- GraphQL **sunucu, değişken temizleme işlemlerini gerçekleştirir** +- **Değişkenler sunucu düzeyinde önbelleğe alınabilir** +- **Sorgular, araçlar tarafından statik olarak analiz edilebilir** (bununla ilgili daha fazla bilgiyi sonraki bölümlerde bulabilirsiniz) -**Note: How to include fields conditionally in static queries** +**Not: Alanları statik sorgulara koşullu olarak dahil etme** -We might want to include the `owner` field only on a particular condition. +`owner` alanını yalnızca belirli bir koşulda dahil etmek isteyebiliriz. -For this, we can leverage the `@include(if:...)` directive as follows: +Bunun için `@include(if:...)` direktifinden aşağıdaki şekilde yararlanabiliriz: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,21 +191,21 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +Not: Zıt direktif `@skip(if: ...)` şeklindedir. -### Performance tips +### Performans ipuçları -**"Ask for what you want"** +**"Ne istiyorsanız isteyin"** -GraphQL became famous for its "Ask for what you want" tagline. +GraphQL, "Ne istiyorsanız isteyin" sloganıyla ünlenmiştir. -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +Bu nedenle, GraphQL'de mevcut tüm alanları tek tek listelemeden almanın bir yolu yoktur. -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +GraphQL API'leri sorgularken, her zaman sadece gerçekten kullanılacak alanları sorgulamayı düşünmelisiniz. 
-A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. +Aşırı alma'nın(over-fetching) yaygın bir nedeni varlık koleksiyonlarıdır. Varsayılan olarak, sorgular bir koleksiyondaki 100 varlığı getirecektir, bu da genellikle kullanıcıya göstermek için gerçekte kullanılacak olandan çok daha fazladır. Bu nedenle sorgular neredeyse her zaman ilk olarak açıkça ayarlanmalı ve yalnızca gerçekten ihtiyaç duydukları kadar varlık getirdiklerinden emin olmalıdırlar. Bu sadece bir sorgudaki üst düzey koleksiyonlar için değil, aynı zamanda iç içe geçmiş varlık koleksiyonları için de geçerlidir. -For example, in the following query: +Örneğin, aşağıdaki sorguda: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +Yanıt, 100 tokenin her biri için 100 işlem içerebilir. -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +Uygulama yalnızca 10 işleme ihtiyaç duyuyorsa, sorgu işlemler alanında `first: 10`'u açıkça ayarlamalıdır. -**Combining multiple queries** +**Birden fazla sorguyu birleştirme** -Your application might require querying multiple types of data as follows: +Uygulamanız aşağıdaki gibi birden fazla veri türünü sorgulamayı gerektirebilir: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +Bu uygulama tamamen geçerli olsa da, GraphQL API ile iki git-gel gerektirecektir. -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +Neyse ki, aşağıdaki gibi aynı GraphQL isteğinde birden fazla sorgu göndermek de geçerlidir: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +Bu yaklaşım, ağ üzerinde harcanan zamanı azaltarak **genel performansı artıracak** (API'ye gidiş gelişten tasarruf etmenizi sağlar) ve daha **kısa bir uygulama** sağlayacaktır. -### Leverage GraphQL Fragments +### GraphQL Parçalarından Yararlanın -A helpful feature to write GraphQL queries is GraphQL Fragment. +GraphQL sorguları yazmak için yararlı bir özellik GraphQL Fragment'tir. -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +Aşağıdaki sorguya baktığınızda, bazı alanların birden fazla Seçim Setinde tekrarlandığını fark edeceksiniz (`{ ... 
}`): ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +Bu tür tekrarlanan alanlar (`id`, `active`, `status`) birçok sorunu beraberinde getirir: -- harder to read for more extensive queries -- when using tools that generate TypeScript types based on queries (_more on that in the last section_), `newDelegate` and `oldDelegate` will result in two distinct inline interfaces. +- daha kapsamlı sorgular için okunması daha zordur +- sorgulara dayalı olarak TypeScript türleri oluşturan araçları kullanırken (_son bölümde bundan daha fazlası mevcut_), `newDelegate` ve `oldDelegate`, iki farklı satır içi arabirimle sonuçlanacaktır. -A refactored version of the query would be the following: +Sorgunun yeniden yapılandırılmış bir versiyonu aşağıdaki gibi olacaktır: ```graphql query { @@ -334,15 +334,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. +GraphQL `fragment` kullanımı okunabilirliği artıracak (özellikle ölçeklendirmede) ve aynı zamanda daha iyi TypeScript tipleri üretilmesini sağlayacaktır. -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +Tip oluşturma aracı kullanıldığında, yukarıdaki sorgu uygun bir `DelegateItemFragment` tipi oluşturacaktır (_son "Tools" bölümüne göz atın_). -### GraphQL Fragment do's and don'ts +### GraphQL Fragment'te yapılması ve yapılmaması gerekenler -**Fragment base must be a type** +**Fragment tabanı bir tip olmalıdır** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +Bir Fragment uygulanabilir olmayan bir tipe, kısacası **alanları olmayan bir tipe** dayandırılamaz: ```graphql fragment MyFragment on BigInt { @@ -350,13 +350,13 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt` bir **skalerdir** (yerel "plain" tip) ve bir parçanın tabanı olarak kullanılamaz. -**How to spread a Fragment** +**Fragment Nasıl Yayılır** -Fragments are defined on specific types and should be used accordingly in queries. +Fragmentler belirli tiplerde tanımlanmıştır ve sorgularda buna göre kullanılmalıdır. -Example: +Örnek: ```graphql query { @@ -379,18 +379,18 @@ fragment VoteItem on Vote { `newDelegate` and `oldDelegate` are of type `Transcoder`. -It is not possible to spread a fragment of type `Vote` here. +`Vote` tipi bir parçayı buraya yaymak mümkün değildir. -**Define Fragment as an atomic business unit of data** +**Fragment'ı atomik bir veri iş birimi olarak tanımlayın** -GraphQL Fragment must be defined based on their usage. +GraphQL Fragment kullanımlarına göre tanımlanmalıdır. -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +Çoğu kullanım durumu için, tip başına bir parça tanımlamak (tekrarlanan alan kullanımı veya tip üretimi durumunda) yeterlidir. 
-Here is a rule of thumb for using Fragment: +İşte Fragment kullanımı için temel bir kural: -- when fields of the same type are repeated in a query, group them in a Fragment -- when similar but not the same fields are repeated, create multiple fragments, ex: +- aynı türdeki alanlar bir sorguda tekrarlandığında, bunları bir Fragment içinde gruplayın +- benzer ancak aynı olmayan alanlar tekrarlandığında, birden çok parça oluşturun, örneğin: ```graphql # base fragment (mostly used in listing) @@ -413,51 +413,51 @@ fragment VoteWithPoll on Vote { --- -## The essential tools +## Temel Araçlar -### GraphQL web-based explorers +### GraphQL web tabanlı explorer'lar -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +Sorguları uygulamanızda çalıştırarak tekrarlamak zahmetli olabilir. Bu nedenle, sorgularınızı uygulamanıza eklemeden önce test etmek için [Graph Gezgini](https://thegraph.com/explorer)'ni kullanmaktan çekinmeyin. Graph Gezgini, sorgularınızı test etmeniz için size önceden yapılandırılmış bir GraphQL test alanı(playground) sağlayacaktır. -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +Sorgularınızda hata ayıklamak/test etmek için daha esnek bir yol arıyorsanız, [Altair](https://altair.sirmuel.design/) ve [GraphiQL](https://graphiql-online.com/graphiql) gibi diğer benzer web tabanlı araçlar da kullanılabilir. ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +Yukarıda belirtilen en iyi uygulamalara ve sözdizimsel kurallara ayak uydurmak için aşağıdaki iş akışı ve IDE araçlarının kullanılması şiddetle tavsiye edilir. **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint), zahmetsiz bir şekilde GraphQL'in en iyi uygulamalarını takip etmenize yardımcı olur. -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +["Operations-recommended" yapılandırmasını kurmak](https://github.com/dotansimha/graphql-eslint#available-configs), aşağıdaki gibi temel kuralları uygulayacaktır: -- `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type? -- `@graphql-eslint/no-unused variables`: should a given variable stay unused? -- and more! +- `@graphql-eslint/fields-on-correct-type`: alan uygun bir türde mi kullanılıyor? +- `@graphql-eslint/no-unused variables`: belirli bir değişken kullanılmadan kalmalı mı? +- ve daha fazlası! -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +Bu, sorguları öğrenme ortamında **test etmeden** veya üretimde çalıştırmadan bile hataları yakalamanızı sağlayacaktır! 
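The page links out for the actual linter setup rather than showing one; below is a rough sketch of what an `.eslintrc.js` using the "operations-recommended" config tends to look like. None of it comes from the page itself, and the exact keys and schema wiring should be checked against the graphql-eslint documentation linked above.

```javascript
// Rough sketch only - confirm the exact options against the graphql-eslint docs.
// Lints standalone .graphql operation files with the "operations-recommended" rules;
// the plugin also needs to know where your schema lives (see its docs) so that
// rules like fields-on-correct-type can check fields against it.
module.exports = {
  overrides: [
    {
      files: ['*.graphql'],
      parser: '@graphql-eslint/eslint-plugin',
      plugins: ['@graphql-eslint'],
      extends: ['plugin:@graphql-eslint/operations-recommended'],
    },
  ],
}
```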
-### IDE plugins +### IDE Eklentileri -**VSCode and GraphQL** +**VSCode ve GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +GraphQL [VSCode uzantısı](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql), geliştirme iş akışınız için mükemmel bir eklentidir: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets -- go to definition for fragments and input types +- sözdizimi vurgulama +- otomatik tamamlama önerileri +- şemaya karşı validasyon +- parçacıklar +- fragmanlar ve giriş türleri için tanıma gidin -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +Eğer `graphql-eslint` kullanıyorsanız, [ESLint VSCode uzantısı](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) kodunuzdaki hataları ve uyarıları doğru bir şekilde görselleştirmek için olmazsa olmazdır. -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij ve GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQL eklentisi](https://plugins.jetbrains.com/plugin/8097-graphql/), GraphQL ile çalışırken deneyiminizi önemli ölçüde arttırcaktır: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets +- sözdizimi vurgulama +- otomatik tamamlama önerileri +- şemaya karşı validasyon +- parçacıklar -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +Eklentinin tüm ana özelliklerini gösteren bu [WebStorm makalesinde](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) daha fazla bilgi bulabilirsiniz. diff --git a/website/pages/tr/querying/querying-from-an-application.mdx b/website/pages/tr/querying/querying-from-an-application.mdx index 30b6c2264d64..8e4d012f63b0 100644 --- a/website/pages/tr/querying/querying-from-an-application.mdx +++ b/website/pages/tr/querying/querying-from-an-application.mdx @@ -1,43 +1,43 @@ --- -title: Querying from an Application +title: Bir Uygulamadan Sorgulama --- -Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: +Subgraph Stüdyo'ya veya Graph Gezgini'ne bir subgraph deploy edildiğinde, size GraphQL API'ınız için şuna benzer bir uç nokta verilecektir: -**Subgraph Studio (testing endpoint)** +**Subgraph Stüdyo (test bitiş noktası)** ```sh Queries (HTTP) https://api.studio.thegraph.com/query/// ``` -**Graph Explorer** +**Graph Gezgini** ```sh Queries (HTTP) https://gateway.thegraph.com/api//subgraphs/id/ ``` -Using the GraphQL endpoint, you can use various GraphQL Client libraries to query the subgraph and populate your app with the data indexed by the subgraph. +GraphQL uç noktasını kullanarak, subgraph'i sorgulamak ve uygulamanızı subgraph tarafından indekslenen verilerle doldurmak için çeşitli GraphQL istemci kitaplıklarını kullanabilirsiniz. 
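Before the client libraries covered below, it may help to note that any of these endpoints can also be queried with nothing more than a standard `fetch` POST; a minimal, generic sketch follows (the URL and the `tokens` entity are placeholders, not values from the page):

```javascript
// Baseline: query a subgraph endpoint with plain fetch, no client library.
// Replace SUBGRAPH_URL with the query URL shown for your subgraph above.
const SUBGRAPH_URL = '<your subgraph query URL>'

async function querySubgraph(query, variables = {}) {
  const res = await fetch(SUBGRAPH_URL, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ query, variables }),
  })
  const { data, errors } = await res.json()
  if (errors) throw new Error(JSON.stringify(errors))
  return data
}

// Example usage with a placeholder `tokens` entity:
// querySubgraph(`{ tokens(first: 5) { id } }`).then(console.log)
```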
-Here are a couple of the more popular GraphQL clients in the ecosystem and how to use them: +İşte ekosistemdeki daha popüler birkaç GraphQL istemcisi ve bunların nasıl kullanılacağı: -## GraphQL clients +## GraphQL istemcileri -### Graph client +### Graph istemcisi -The Graph is providing it own GraphQL client, `graph-client` that supports unique features such as: +Graph, aşağıdakiler gibi benzersiz özellikleri destekleyen kendi GraphQL istemcisi `graph-client`'i sağlamaktadır: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Zincirler Arası Subgraph İşleme: Tek bir sorguda birden çok subgraph'ten sorgulama +- [Otomatik Blok Takibi](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Otomatik Sayfalandırma](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Tam olarak yazılan sonuç -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +Ayrıca Apollo ve URQL gibi popüler GraphQL istemcileriyle entegre edilmiş ve React, Angular, Node.js, React Native gibi tüm ortamlarla uyumlu olan `graph-client`'ı kullanmak, Graph ile etkileşimde bulunmanız için en iyi deneyimi sunacaktır. -Let's look at how to fetch data from a subgraph with `graphql-client`. +Şimdi `graphql-client` ile bir subgraph'dan nasıl veri alınacağına bakalım. 
-To get started, make sure to install The Graph Client CLI in your project: +Başlamak için, projenize The Graph Client CLI'yi yüklediğinizden emin olun: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +Sorgunuzu bir `.graphql` dosyasında (veya `.js` yada `.ts` dosyanızda satır içi olarak) tanımlayın: ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +Ardından, bir yapılandırma dosyası oluşturun (`.graphclientrc.yml` adında) ve Graph tarafından sağlanan GraphQL uç noktalarına işaret edin, örneğin: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +Aşağıdaki The Graph Client CLI komutunu çalıştırmak, yazılmış ve kullanıma hazır JavaScript kodunu oluşturacaktır: ```sh graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +Son olarak, oluşturulan GraphQL belgelerini kullanmak için `.ts` dosyanızı güncelleyin: ```tsx import React, { useEffect } from 'react' @@ -134,33 +134,33 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ Önemli uyarı** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client`, Apollo istemcisi, URQL veya React Query gibi diğer GraphQL istemcileriyle mükemmel bir şekilde entegre edilmiştir; [örnekleri resmi Github deposunda bulacaksınız](https://github.com/graphprotocol/graph-client/tree/main/examples). -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +Ancak, başka bir istemci kullanmayı tercih ederseniz, **Graph'ı sorgulamak için temel özellikler olan Zincirler Arası Subgraph İşleme veya Otomatik Sayfalandırma özelliklerini kullanamayacağınızı** unutmayın. -### Apollo client +### Apollo istemcisi -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[Apollo istemcisi](https://www.apollographql.com/docs/), önyüz ekosisteminde her yerde bulunan GraphQL istemcisidir. -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +React, Angular, Vue, Ember, iOS ve Android için kullanılabilir olan Apollo İstemcisi, en ağır istemci olmasına rağmen, GraphQL üzerine gelişmiş arayüzler geliştirmek için birçok özellik sunar: -- advanced error handling -- pagination -- data prefetching -- optimistic UI -- local state management +- gelişmiş hata işleme +- sayfalandırma +- veri ön getirme +- iyimser kullanıcı arayüzü +- yerel mevki yönetimi -Let's look at how to fetch data from a subgraph with Apollo client in a web project. +Bir web projesinde Apollo istemcisi ile bir subgraph'tan nasıl veri çekileceğine bakalım. 
-First, install `@apollo/client` and `graphql`: +İlk olarak, `@apollo/client` ve `graphql`'i yükleyin: ```sh npm install @apollo/client graphql ``` -Then you can query the API with the following code: +Ardından API'ı aşağıdaki kodla sorgulayabilirsiniz: ```javascript import { ApolloClient, InMemoryCache, gql } from '@apollo/client' @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +Değişkenleri kullanmak için sorguya bir `variables` bağımsız değişkeni aktarabilirsiniz: ```javascript const tokensQuery = ` @@ -226,22 +226,22 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +Diğer bir seçenek ise Node.js, React/Preact, Vue ve Svelte ortamlarında kullanılabilen ve daha gelişmiş özelliklere sahip [URQL](https://formidable.com/open-source/urql/)'dir: -- Flexible cache system -- Extensible design (easing adding new capabilities on top of it) -- Lightweight bundle (~5x lighter than Apollo Client) -- Support for file uploads and offline mode +- Esnek önbellek sistemi +- Genişletilebilir tasarım (üzerine yeni yetenekler eklemeyi kolaylaştırır) +- Hafif paket (Apollo İstemcisinden ~5 kat daha hafif) +- Dosya yükleme ve çevrimdışı mod desteği -Let's look at how to fetch data from a subgraph with URQL in a web project. +Bir web projesinde URQL ile bir subgraph'tan nasıl veri çekileceğne bakalım. -First, install `urql` and `graphql`: +İlk olarak, `urql` ve `graphql`'i yükleyin: ```sh npm install urql graphql ``` -Then you can query the API with the following code: +Ardından API'ı aşağıdaki kodla sorgulayabilirsiniz: ```javascript import { createClient } from 'urql' diff --git a/website/pages/tr/querying/querying-the-graph.mdx b/website/pages/tr/querying/querying-the-graph.mdx index af9dcaaf2477..d6f413ae396f 100644 --- a/website/pages/tr/querying/querying-the-graph.mdx +++ b/website/pages/tr/querying/querying-the-graph.mdx @@ -1,14 +1,14 @@ --- -title: Querying The Graph +title: Graph'te Sorgulama --- -With the subgraph deployed, visit the [Graph Explorer](https://thegraph.com/explorer) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +Subgraph deploy edildiğinde, sorgular yayınlayarak ve şemayı görüntüleyerek subgraph için deploy edilen GraphQL API'ını keşfedebileceğiniz bir [GraphiQL](https://github.com/graphql/graphiql) arabirimi açmak için [Graph Gezgini](https://thegraph.com/explorer)'ni ziyaret edin. -An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. +Aşağıda bir örnek verilmiştir, ancak subgraph'in varlıklarının nasıl sorgulanacağına ilişkin eksiksiz bir referans için lütfen [Query API](/querying/graphql-api)'a bakın. -## Example +## Örnek -This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: +Bu sorgu, eşlememizin oluşturduğu tüm sayaçları listeler. Yalnızca bir tane oluşturduğumuz için, sonuç yalnızca bir `default-counter`'ımızı içerecektir: ```graphql { @@ -19,14 +19,14 @@ This query lists all the counters our mapping has created. 
Since we only create } ``` -## Using The Graph Explorer +## Graph Gezgini'ni Kullanma -Each subgraph published to the decentralized Graph Explorer has a unique query URL that you can find by navigating to the subgraph details page and clicking on the "Query" button on the top right corner. This will open a side pane that will give you the unique query URL of the subgraph as well as some instructions about how to query it. +Merkeziyetsiz Graph Gezgini'nde yayınlanan her subgraph'in, subgraph ayrıntıları sayfasına gidip sağ üst köşedeki "Sorgu" düğmesine tıklayarak bulabileceğiniz benzersiz bir sorgu URL'si vardır. Bu, size subgraph'in benzersiz sorgu URL'sinin yanı sıra onu nasıl sorgulayacağınızla ilgili bazı talimatlar verecek bir yan bölme açacaktır. -![Query Subgraph Pane](/img/query-subgraph-pane.png) +![Sorgu Subgraph'i Bölmesi](/img/query-subgraph-pane.png) -As you can notice, this query URL must use a unique API key. You can create and manage your API keys in the [Subgraph Studio](https://thegraph.com/studio) in the "API Keys" section. Learn more about how to use Subgraph Studio [here](/deploying/subgraph-studio). +Fark edebileceğiniz gibi, bu sorgu URL'si benzersiz bir API anahtarı kullanmalıdır. API anahtarlarınızı "API Anahtarları" bölümündeki [Subgraph Stüdyo](https://thegraph.com/studio)'da oluşturabilir ve yönetebilirsiniz. Subgraph Stüdyo'nun nasıl kullanılacağı hakkında daha fazla bilgiyi [buradan](/deploying/subgraph-studio) edinebilirsiniz. -Querying subgraphs using your API keys will generate query fees that will be paid in GRT. You can learn more about billing [here](/billing). +API anahtarlarınızı kullanarak subgraph'leri sorgulamak, GRT şeklinde ödenecek sorgu ücretleri oluşturur. Faturalandırma hakkında daha fazla bilgiyi [buradan](/billing) edinebilirsiniz. -You can also use the GraphQL playground in the "Playground" tab to query a subgraph within The Graph Explorer. +Graph Gezgini içindeki bir subgraph'i sorgulamak için "Playground" sekmesindeki GraphQL oyun alanını da kullanabilirsiniz. diff --git a/website/pages/tr/querying/querying-the-hosted-service.mdx b/website/pages/tr/querying/querying-the-hosted-service.mdx index 14777da41247..639aa66ed5e7 100644 --- a/website/pages/tr/querying/querying-the-hosted-service.mdx +++ b/website/pages/tr/querying/querying-the-hosted-service.mdx @@ -1,14 +1,14 @@ --- -title: Querying the Hosted Service +title: Barındırılan Hizmeti Sorgulama --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. -An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. +Aşağıda bir örnek verilmiştir, ancak subgraph'in varlıklarının nasıl sorgulanacağına ilişkin eksiksiz bir referans için lütfen [Query API](/querying/graphql-api)'a bakın. -## Example +## Örnek -This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: +Bu sorgu, eşlememizin oluşturduğu tüm sayaçları listeler. 
Yalnızca bir tane oluşturduğumuz için, sonuç yalnızca bir `varsayılan sayacımızı` içerecektir: ```graphql { @@ -19,10 +19,10 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. -Some of the main features are detailed below: +Ana özelliklerden bazıları aşağıda ayrıntılı olarak açıklanmıştır: ![Explorer Playground](/img/explorer-playground.png) diff --git a/website/pages/tr/querying/querying-with-python.mdx b/website/pages/tr/querying/querying-with-python.mdx new file mode 100644 index 000000000000..2a0b0e524f8d --- /dev/null +++ b/website/pages/tr/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds, [Playgrounds](https://playgrounds.network/) tarafından oluşturulmuş, subgraph sorgulamak için kullanılan sezgisel bir Python kütüphanesidir. Bu kütüphane, subgraph verilerini doğrudan bir Python veri ortamına bağlamanıza olanak tanır ve [pandas](https://pandas.pydata.org/) gibi kütüphaneleri kullanarak veri analizi yapmanıza imkan sağlar! + +Subgrounds, GraphQL sorguları oluşturmak için sayfalandırma gibi sıkıcı iş akışlarını otomatikleştiren ve kontrollü şema dönüşümleri aracılığıyla ileri düzey kullanıcıları güçlendiren basit bir Pythonic API sunar. + +## Buradan Başlayın + +Subgrounds, Python 3.10 veya daha yüksek bir sürümünü gerektirir ve [pypi](https://pypi.org/project/subgrounds/) üzerinden erişilebilir. + +```bash +pip install --upgrade subgrounds +# yada +python -m pip install --upgrade subgrounds +``` + +Kurulum tamamlandıktan sonra, aşağıdaki sorgu ile subgrounds'ı test edebilirsiniz. Aşağıdaki örnek, Aave v2 protokolü için bir subgraph çeker ve TVL'ye (Toplam Kilitli Varlık) göre sıralanan en üst 5 pazarı sorgular, adlarını ve TVL'lerini (USD cinsinden) seçer ve verileri bir pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame) olarak döndürür. + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Subgraph'ı yükleme +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Sorguyu oluşturma +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Sorguyu bir veri çerçevesine döndürme +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Dökümantasyon + +Subgrounds, [Playgrounds](https://playgrounds.network/) ekibi tarafından oluşturulmuş ve sürdürülmektedir ve [Playgrounds dokümantasyonu](https://docs.playgrounds.network/subgrounds) üzerinden erişilebilir. + +Subgrounds'un keşfedilecek geniş bir özellik seti bulunduğundan, işe bazı yararlı başlangıç noktaları: + +- [Sorgulamaya Başlarken](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - Subgrounds ile sorguların nasıl oluşturulacağına dair iyi bir başlangıç. +- [Sentetik Alanlar Oluşturma](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - Veri şemasından tanımlanan verileri dönüştüren sentetik alanları tanımlamaya yönelik yumuşak bir giriş. 
+- [Eşzamanlı Sorgular](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Sorgularınızı paralelleştirerek nasıl geliştireceğinizi öğrenin. +- [Veriyi CSV dosyalarına aktarma](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - Başka bir analiz için verilerinizi sorunsuz bir şekilde CSV olarak kaydetme hakkında hızlı bir makale. diff --git a/website/pages/tr/quick-start.mdx b/website/pages/tr/quick-start.mdx new file mode 100644 index 000000000000..1a661f59ac69 --- /dev/null +++ b/website/pages/tr/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Hızlı Başlangıç +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +Bu rehber, aşağıdakilere sahip olduğunuzu varsayar: + +- Seçtiğiniz ağ üzerinde bir akıllı sözleşme adresi +- Subgraph'ınızın kürasyonu için GRT +- Bir kripto cüzdanı + +## 1. Subgraph Stüdyo'da bir subgraph oluşturun + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Bağlandıktan sonra, "bir subgraph oluştur" seçeneğine tıklayarak başlayabilirsiniz. Tercih ettiğiniz ağı seçin ve devam et'e tıklayın. + +## 2. Graph CLI'yi yükleyin + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +Yerel makinenizde aşağıdaki komutlardan birini çalıştırın: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Subgraph'ınızı başlatın + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +Subgraph'ınızı başlattığınızda, CLI aracı sizden aşağıdaki bilgileri isteyecektir: + +- Protokol: Subgraph'ınızın veri indeksleyeceği protokolü seçin +- Subgraph slug: Subgraph'ınız için bir ad oluşturun. Subgraph slug'ınız subgraph'ınız için bir tanımlayıcıdır. +- Subgraph'ınızın oluşturulacağı dizin: yerel dizininizi seçin +- Ethereum ağı (opsiyonel): Subgraph'ınızın hangi EVM uyumlu ağdan veri indeksleyeceğini belirtmeniz gerekebilir +- Sözleşme adresi: Veri sorgulamak istediğiniz akıllı sözleşme adresini bulun +- ABI: ABI otomatik olarak doldurulmuyorsa, JSON dosyası haline manuel olarak girmeniz gerekecektir +- Başlangıç Bloğu: Subgraph'ınız blok zinciri verilerini indekslerken zaman kazanmak için başlangıç bloğunu girmeniz önerilir. Başlangıç bloğunu, sözleşmenizin dağıtıldığı bloğu bularak bulabilirsiniz. +- Sözleşme Adı: Sözleşmenizin adını girin +- Sözleşme olaylarını varlıklar olarak indeksleyin: Yayılan her olay için subgraph'ınıza otomatik olarak eşlemeler ekleyeceğinden bunu true olarak ayarlamanız önerilir +- Başka bir sözleşme ekle (opsiyonel): Başka bir sözleşme ekleyebilirsiniz + +Aşağıdaki komutu çalıştırarak subgraph'ınızı mevcut bir sözleşmeden başlatın: + +```sh +graph init --studio +``` + +Subgraph'ınızı başlatırken neyle karşılaşacağınıza dair bir örnek için aşağıdaki ekran görüntüsüne bakın: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Subgraph'ınızı Yazın + +Önceki komutlar, subgraph'ınızı oluşturmak için bir başlangıç noktası olarak kullanabileceğiniz bir subgraph iskeletini oluşturur. 
Subgraph'ta değişiklik yaparken, temel olarak üç dosya ile çalışacaksınız: + +- Manifest (subgraph.yaml) - Manifest, subgraph'ınızın hangi veri kaynaklarını indeksleyeceğini tanımlar. +- Şema (schema.graphql) - GraphQL şeması, subgraph'tan hangi verileri almak istediğinizi tanımlar. +- AssemblyScript Eşleştirmeleri (mapping.ts) - Bu, veri kaynaklarınızdaki verileri şemada tanımlanan varlıklara çeviren koddur. + +Subgraph'ınızı nasıl yazacağınıza dair daha fazla bilgi için, [Subgraph Oluşturma](/developing/creating-a-subgraph). + +## 5. Subgraph Stüdyo'ya Dağıtın + +Subgraph'ınız yazıldıktan sonra aşağıdaki komutları çalıştırın: + +```sh +$ graph codegen +$ graph build +``` + +- Subgraph'ınızı doğrulayın ve dağıtın. Dağıtım anahtarı Subgraph Stüdyo'daki Subgraph sayfasında bulunabilir. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Subgraph'ınızı Test Edin + +Test alanı(playground) bölümünde örnek bir sorgu yaparak subgraph'ınızı test edebilirsiniz. + +Kayıtlar, subgraph'ınızla ilgili herhangi bir hata olup olmadığını size söyleyecektir. Çalışan bir subgraph'ın kayıtları aşağıdaki gibi görünecektir: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Subgraph'ınızı Graph'ın Merkeziyetsiz Ağında Yayınlayın + +Subgraph'ınız Subgraph Stüdyo'ya dağıtıldıktan, test ettikten ve kullanıma hazır hale geldikten sonra, bunu merkeziyetsiz ağda yayınlayabilirsiniz. + +Subgraph Stüdyo'da subgraph'ınıza tıklayın. Subgraph'ın sayfasında, sağ üstteki yayınla düğmesine tıklayabileceksiniz. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Subgraph'ınızı sorgulayabilmeniz için önce İndeksleyicilerin bu subgraph üzerinde sorgu sunmaya başlaması gerekir. Bu süreci kolaylaştırmak için GRT kullanarak kendi subgraph'ınızı oluşturabilirsiniz. + +Bu rehberin yazıldığı sırada, kendi subgraph'ınızın mümkün olan en kısa sürede indekslenmesini ve sorgulanabilir olmasını sağlamanız için 10.000 GRT ile kürate etmeniz önerilir. + +Gas maliyetlerinden tasarruf etmek için, subgraph'ınızı Graph'ın merkeziyetsiz ağında yayınlarken bu düğmeyi seçerek subgraph'ınızı yayınladığınız işlemle aynı işlemde kürate edebilirsiniz: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. 
Subgraph'ınızı Sorgulayın + +Şimdi, subgraph'ınızın Sorgu URL'sine GraphQL sorguları göndererek onu sorgulayabilirsiniz; bu URL'yi sorgu düğmesine tıklayarak bulabilirsiniz. + +API anahtarınız yoksa, geliştirme ve hazırlama için kullanılabilen ücretsiz, rate limit'li geçici sorgu URL'si aracılığıyla merkeziyetsiz uygulamanızdan sorgulama yapabilirsiniz. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/tr/release-notes/assemblyscript-migration-guide.mdx b/website/pages/tr/release-notes/assemblyscript-migration-guide.mdx index 85f6903a6c69..338c85951884 100644 --- a/website/pages/tr/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/tr/release-notes/assemblyscript-migration-guide.mdx @@ -1,50 +1,50 @@ --- -title: AssemblyScript Migration Guide +title: AssemblyScript Geçiş Kılavuzu --- -Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 +Şimdiye kadar subgraph'ler, [AssemblyScript'in ilk sürümlerinden](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) birini (v0.6) kullanıyordu. Sonunda [mevcut olan en yeni sürüm](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10) için destek ekledik! 🎉 -That will enable subgraph developers to use newer features of the AS language and standard library. +Bu, subgraph geliştiricilerin AS dilinin ve standart kitaplığın daha yeni özelliklerini kullanmasını sağlayacaktır. -This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 +Bu kılavuz, `0.22.0` sürümünün altındaki `graph-cli`/`graph-ts` kullanan herkes için geçerlidir. Halihazırda bundan daha yüksek (veya eşit) bir sürümdeyseniz, zaten AssemblyScript'in `0.19.10` sürümünü kullanıyorsunuz demektir 🙂 -> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. +> Not: `0.24.0` itibarıyla `graph-node`, subgraph bildiriminde belirtilen `apiVersion`'a bağlı olarak her iki sürümü de destekleyebilir.
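Yeni derleyicinin eşlemelerde neyi mümkün kıldığına dair fikir vermesi için küçük ve tamamen varsayımsal bir taslak (yalnızca aşağıdaki özellik listesinde adı geçen `Array#flat` ve `String#toUpperCase` kullanılmıştır):

```typescript
// Varsayımsal taslak: apiVersion 0.0.6 ile kullanılabilen yeni standart kitaplık yardımcıları
let nested: string[][] = [['graph'], ['protocol']]

// Array#flat (v0.10.0 ile eklendi) iç içe dizileri düzleştirir
let flat = nested.flat() // ['graph', 'protocol']

// String#toUpperCase (v0.9.0 ile eklendi) artık standart kitaplıkta mevcuttur
let upper = flat[0].toUpperCase() // 'GRAPH'
```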
-## Features +## Özellikler -### New functionality +### Yeni işlevsellik -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) -- Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) -- Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) +- `TypedArray`'ler artık [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) kullanılarak `ArrayBuffer`'lerden oluşturulabilir ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- Yeni standart kitaplık işlevleri: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- X instanceof GenericClass desteği eklendi ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Daha verimli bir dizi varyantı olan `StaticArray` eklendi ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- `Array#flat` eklendi ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- `Number#toString` üzerinde `radix` bağımsız değişkeni uygulandı ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Floating point değişmezlerinde ayırıcılar için destek eklendi ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Birinci sınıf işlevler için destek eklendi ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- Yerleşik öğeler ekleyin: `i32/i64/f32/f64.add/sub/mul` 
([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) +- `Array/TypedArray/String#at` öğesini uygulayın. ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Şablon hazır bilgi dizeleri için destek eklendi ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- `encodeURI(Component)` ve `decodeURI(Component)` ekleyin. ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- `toString`, `toDateString` ve `toTimeString`'i `Date`'e ekleyin. ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- `Date` için `toUTCString` ekleyin. ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- `nonnull/NonNullable` yerleşik türü ekleyin. ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### Optimizations +### Optimizasyonlar -- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- `exp`, `exp2`, `log`, `log2` ve `pow` gibi `Math` fonksiyonlarının yerini daha hızlı değişkenler almıştır. ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- `Math.mod`'u biraz optimize edildi. ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Std Map ve Set'te daha fazla alan erişimi önbelleğe alındı. ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- `ipow32/64`'te ikinin kuvvetleri için optimize edildi. ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### Other +### Diğerleri -- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Bir dizi hazır bilgisinin türü artık içeriğinden çıkarılabilir. ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- stdlib Unicode 13.0.0 olarak güncellendi. ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## How to upgrade? +## Nasıl Yükseltilir? -1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: +1. `subgraph.yaml` içindeki `apiVersion` eşlemelerinizi `0.0.6` olarak değiştirin: ```yaml ... @@ -56,30 +56,30 @@ dataSources: ... ``` -2. Update the `graph-cli` you're using to the `latest` version by running: +2. Şunları çalıştırarak, kullandığınız `graph-cli`'yi `en son` sürüme güncelleyin: ```bash -# if you have it globally installed +# global olarak yüklediyseniz npm install --global @graphprotocol/graph-cli@latest -# or in your subgraph if you have it as a dev dependency +# veya subgraph'inizde geliştirici bağımlılığı olarak varsa: npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: +3. 
`graph-ts` için de aynısını yapın, ancak global olarak yüklemek yerine ana bağımlılıklarınıza kaydedin: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. Follow the rest of the guide to fix the language breaking changes. -5. Run `codegen` and `deploy` again. +4. Dili bozan değişiklikleri düzeltmek için kılavuzun geri kalanını takip edin. +5. `codegen`'i çalıştırın ve yeniden `deploy` edin. -## Breaking changes +## Uyumluluğu Bozan Değişiklikler ### Nullability -On the older version of AssemblyScript, you could create code like this: +AssemblyScript'in eski sürümünde şuna benzer bir kod oluşturabilirsiniz: ```typescript function load(): Value | null { ... } let maybeValue = load(); maybeValue.aMethod(); ``` -However on the newer version, because the value is nullable, it requires you to check, like this: +Ancak daha yeni sürümde, değer null olabileceğinden, şu şekilde kontrol etmenizi gerektirir: ```typescript let maybeValue = load() @@ -98,7 +98,7 @@ if (maybeValue) { } ``` -Or force it like this: +Veya şu şekilde zorlayın: ```typescript let maybeValue = load()! // breaks in runtime if value is null @@ -106,11 +106,11 @@ let maybeValue = load()! // breaks in runtime if value is null maybeValue.aMethod() ``` -If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. +Hangisini seçeceğinizden emin değilseniz, her zaman güvenli sürümü kullanmanızı öneririz. Değer mevcut değilse, subgraph işleyicinizde erken bir if ifadesiyle geri dönüş yapmak isteyebilirsiniz. -### Variable Shadowing +### Değişken Gölgeleme -Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: +Önceden [değişken gölgeleme](https://en.wikipedia.org/wiki/Variable_shadowing) yapabiliyordunuz ve bunun gibi bir kod çalışırdı: ```typescript let a = 10 @@ -118,7 +118,7 @@ let b = 20 let a = a + b ``` -However now this isn't possible anymore, and the compiler returns this error: +Ancak artık bu mümkün değil ve derleyici şu hatayı veriyor: ```typescript ERROR TS2451: Cannot redeclare block-scoped variable 'a' @@ -128,11 +128,11 @@ ERROR TS2451: Cannot redeclare block-scoped variable 'a' in assembly/index.ts(4,3) ``` -You'll need to rename your duplicate variables if you had variable shadowing. +Değişken gölgelemeniz varsa, yinelenen değişkenlerinizi yeniden adlandırmanız gerekir. -### Null Comparisons +### Null Karşılaştırmaları -By doing the upgrade on your subgraph, sometimes you might get errors like these: +Subgraph'inizde yükseltmeyi yaptığınızda bazen aşağıdaki gibi hatalar alabilirsiniz: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -To solve you can simply change the `if` statement to something like this: +Çözmek için `if` ifadesini aşağıdaki gibi değiştirebilirsiniz: ```typescript if (!decimals) { @@ -151,23 +151,23 @@ To solve you can simply change the `if` statement to something like this: if (decimals === null) { ``` -The same applies if you're doing != instead of ==. +Aynısı, == yerine != yapıyorsanız da geçerlidir.
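Bu düzeltmenin bir işleyici içinde bütün hâlde nasıl görünebileceğine dair küçük ve tamamen varsayımsal bir taslak (`Token` varlığı, `Transfer` olayı, `handleTransfer` işleyicisi ve null olabilen `decimals` alanı yalnızca örnek amaçlı varsayımlardır):

```typescript
import { BigInt } from '@graphprotocol/graph-ts'
// Varsayımsal olarak oluşturulmuş varlık ve olay türleri
import { Token } from '../generated/schema'
import { Transfer } from '../generated/ERC20/ERC20'

export function handleTransfer(event: Transfer): void {
  let token = Token.load(event.address.toHex())
  if (token === null) {
    // Varlık yoksa erken dönüş yapın
    return
  }

  let decimals = token.decimals // tip: BigInt | null
  if (decimals === null) {
    // `!decimals` yerine null ile açıkça karşılaştırın
    decimals = BigInt.fromI32(18)
  }

  // Bu noktadan sonra `decimals` artık null olarak değerlendirilmez
  let scale = decimals.toI32()
  // ...
}
```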
-### Casting +### Tür Dönüşümü (Casting) -The common way to do casting before was to just use the `as` keyword, like this: +Daha önce tür dönüşümü (casting) yapmanın yaygın yolu, `as` anahtar kelimesini şu şekilde kullanmaktı: ```typescript let byteArray = new ByteArray(10) let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` -However this only works in two scenarios: +Ancak bu yalnızca iki senaryoda çalışır: -- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting on class inheritance (subclass → superclass) +- Temel tür dönüşümü (`u8`, `i32`, `bool` gibi tipler arasında; örneğin: `let b: isize = 10; b as usize`); +- Sınıf kalıtımında yukarı yönlü dönüşüm (upcasting) (alt sınıf → üst sınıf) -Examples: +Örnekler: ```typescript // primitive casting @@ -184,10 +184,10 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: +Tür dönüşümü yapmak isteyebileceğiniz ancak `as`/`var` kullanmanın **güvenli olmadığı** iki senaryo vardır: -- Downcasting on class inheritance (superclass → subclass) -- Between two types that share a superclass +- Sınıf kalıtımında aşağı yönlü dönüşüm (downcasting) (üst sınıf → alt sınıf) +- Bir üst sınıfı paylaşan iki tür arasında ```typescript // downcasting on class inheritance @@ -206,7 +206,7 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -For those cases, you can use the `changetype` function: +Bu gibi durumlar için `changetype` işlevini kullanabilirsiniz: ```typescript // downcasting on class inheritance @@ -225,7 +225,7 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. +Yalnızca null olabilirliği kaldırmak istiyorsanız `as` operatörünü (ya da `variable`'ı) kullanmaya devam edebilirsiniz, ancak bu değerin null olamayacağını bildiğinizden emin olun, aksi takdirde bozulur.
```typescript // remove nullability @@ -238,18 +238,18 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 +Nullability durumu için [Nullability kontrol özelliğine](https://www.assemblyscript.org/basics.html#nullability-checks) bir göz atmanızı öneririz, kodunuzu daha temiz hale getirecektir 🙂 -Also we've added a few more static methods in some types to ease casting, they are: +Ayrıca, dökümü kolaylaştırmak için bazı türlerde birkaç statik yöntem daha ekledik, bunlar: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### Nullability check with property access +### Özellik Erişimi ile Geçersiz Kılınabilirlik (Nullability) Kontrolü -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +[Geçersiz kılınabilirlik kontrol özelliğini](https://www.assemblyscript.org/basics.html#nullability-checks) kullanmak için `if` deyimlerini veya (`?` ve `:`) üçlü operatörünü şu şekilde kullanabilirsiniz: ```typescript let something: string | null = 'data' @@ -267,7 +267,7 @@ if (something) { } ``` -However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: +Bununla birlikte, bu yalnızca, bir mülk erişiminde değil, bir değişken üzerinde `if` / üçlü yaptığınızda çalışır, şu şekilde: ```typescript class Container { @@ -280,7 +280,7 @@ container.data = 'data' let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile ``` -Which outputs this error: +Bu hata hangi çıktıyı verir: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. @@ -289,7 +289,7 @@ ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/s ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``` -To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: +Bu sorunu çözmek için, bu özellik erişimi için bir değişken oluşturabilirsiniz, böylece derleyici geçersizlik denetimi sihrini yapabilir: ```typescript class Container { @@ -304,9 +304,9 @@ let data = container.data let somethingOrElse: string = data ? data : 'else' // compiles just fine :) ``` -### Operator overloading with property access +### Özellik Erişimi ile Operatör Aşırı Yüklemesi -If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. +Null yapılabilen bir türü (bir özellik erişiminden) null olmayan bir türle toplamaya çalışırsanız (örnek olarak), AssemblyScript derleyicisi, değerlerden birinin null olabileceğine dair bir derleme zamanı hatası uyarısı vermek yerine, sadece sessizce derler ve kodun çalışma zamanında kırılması için bir şans verir. 
```typescript class BigInt extends Uint8Array { @@ -330,7 +330,7 @@ let wrapper = new Wrapper(y) wrapper.n = wrapper.n + x // doesn't give compile time errors as it should ``` -We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. +Bunun için AssemblyScript derleyicisinde bir sorun açtık, ancak şimdilik subgraph eşlemelerinizde bu tür işlemler yapıyorsanız, ondan önce boş bir kontrol yapmak için değiştirmelisiniz. ```typescript let wrapper = new Wrapper(y) @@ -342,9 +342,9 @@ if (!wrapper.n) { wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt ``` -### Value initialization +### Değer Başlatma -If you have any code like this: +Bunun gibi herhangi bir kodunuz varsa: ```typescript var value: Type // null @@ -352,7 +352,7 @@ value.x = 10 value.y = 'content' ``` -It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: +Derlenir ancak çalışma zamanında bozulur, bunun nedeni değerin başlatılmamasıdır, bu nedenle subgraph'inizin değerlerini şu şekilde başlattığından emin olun: ```typescript var value = new Type() // initialized @@ -360,7 +360,7 @@ value.x = 10 value.y = 'content' ``` -Also if you have nullable properties in a GraphQL entity, like this: +Ayrıca, bir GraphQL varlığında şu şekilde null yapılabilir özellikleriniz varsa: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -And you have code similar to this: +Ve buna benzer bir kodunuz varsa: ```typescript let total = Total.load('latest') @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. So you either initialize it first: +`total.amount` değerini başlattığınızdan emin olmanız gerekir, çünkü toplam için son satırdaki gibi erişmeye çalışırsanız çökecektir. 
Yani ya önce onu başlatırsınız: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 +Veya GraphQL şemanızı bu özellik için null yapılabilir bir tür kullanmayacak şekilde değiştirebilirsiniz, ardından `codegen` adımında onu sıfır olarak başlatacağız 😉 ```graphql type Total @entity { @@ -413,9 +413,9 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -### Class property initialization +### Sınıf Özelliği Başlatma -If you export any classes with properties that are other classes (declared by you or by the standard library) like this: +Diğer sınıflar olan (sizin tarafınızdan veya standart kitaplık tarafından bildirilen) özelliklere sahip herhangi bir sınıfı şu şekilde dışa aktarırsanız: ```typescript class Thing {} @@ -425,7 +425,7 @@ export class Something { } ``` -The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: +Derleyici hata verecektir, çünkü ya sınıf olan özellikler için bir başlatıcı eklemeniz ya da `!` operatörünü eklemeniz gerekir: ```typescript export class Something { @@ -449,9 +449,9 @@ export class Something { } ``` -### Array initialization +### Dizi Başlatma -The `Array` class still accepts a number to initialize the length of the list, however you should take care because operations like `.push` will actually increase the size instead of adding to the beginning, for example: +`Dizi` sınıfı, listenin uzunluğunu başlatmak için yine de bir sayı kabul eder, ancak dikkatli olmalısınız çünkü `.push` gibi işlemler aslında başa eklemek yerine boyutu artıracaktır, örneğin: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -459,13 +459,13 @@ let arr = new Array(5) // ["", "", "", "", ""] arr.push('something') // ["", "", "", "", "", "something"] // size 6 :( ``` -Depending on the types you're using, eg nullable ones, and how you're accessing them, you might encounter a runtime error like this one: +Kullanmakta olduğunuz türlere, örneğin null yapılabilir türlere ve bunlara nasıl eriştiğinize bağlı olarak, bunun gibi bir çalışma zamanı hatasıyla karşılaşabilirsiniz: ``` ERRO Handler skipped due to execution failure, error: Mapping aborted at ~lib/array.ts, line 110, column 40, with message: Element type must be nullable if array is holey wasm backtrace: 0: 0x19c4 - !~lib/@graphprotocol/graph-ts/index/format 1: 0x1e75 - !~lib/@graphprotocol/graph-ts/common/collections/Entity#constructor 2: 0x30b9 - !node_modules/@graphprotocol/graph-ts/global/global/id_of_type ``` -To actually push at the beginning you should either, initialize the `Array` with size zero, like this: +Aslında başlangıçta itmek için, `diziyi`'i sıfır boyutunda başlatmalısınız, bunun gibi: ```typescript let arr = new Array(0) // [] @@ -473,7 +473,7 @@ let arr = new Array(0) // [] arr.push('something') // ["something"] ``` -Or you should mutate it via index: +Veya onu indeks yoluyla değiştirmelisiniz: ```typescript let arr = new Array(5) // ["", "", "", "", ""] @@ -481,11 +481,11 @@ let arr = new Array(5) // ["", "", "", "", ""] arr[0] = 'something' // ["something", "", "", "", ""] ``` -### GraphQL schema +### GraphQL Şeması -This is not a direct AssemblyScript change, but you may have to update your `schema.graphql` file. 
+Bu doğrudan bir AssemblyScript değişikliği değildir, ancak `schema.graphql` dosyanızı güncellemeniz gerekebilir. -Now you no longer can define fields in your types that are Non-Nullable Lists. If you have a schema like this: +Artık türlerinizde Non-Nullable Lists olan alanları tanımlayamazsınız. Bunun gibi bir şemanız varsa: ```graphql type Something @entity { @@ -498,7 +498,7 @@ type MyEntity @entity { } ``` -You'll have to add an `!` to the member of the List type, like this: +Liste türünün üyesine şu şekilde bir `!` eklemeniz gerekecek: ```graphql type Something @entity { @@ -511,14 +511,14 @@ type MyEntity @entity { } ``` -This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). +Bu, AssemblyScript sürümleri arasındaki geçersiz kılınabilirlik farklılıkları nedeniyle değişti ve bu `src/generated/schema.ts` dosyasıyla ilgilidir (varsayılan yol, bunu değiştirmiş olabilirsiniz). -### Other +### Diğerleri -- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- `Map#set` ve `Set#add` spesifikasyonla hizalandı ve `bu` döndürüldü. ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Diziler artık ArrayBufferView'dan miras almaz, ancak artık farklıdır. ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Nesne hazır bilgilerinden başlatılan sınıflar artık bir oluşturucu tanımlayamaz. ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Her iki işlenen de tamsayıysa, `**` ikili işleminin sonucu artık ortak payda tamsayısıdır. Daha önce, sonuç `Math/f.pow` çağırıyormuş gibi bir kayan noktaydı. ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- `bool`'a yayın yaparken `NaN`'yi `false`'a zorlayın. 
([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- `i8`/`u8` or `i16`/`u16` tipi küçük bir tamsayı değerini kaydırırken, RHS değerinin yalnızca sırasıyla 3 en önemsiz 4 biti sonucu etkiler; bu, bir `i32.shl`'nin RHS değerinin yalnızca en önemsiz 5 bitinden etkilenmesine benzer. Örnek: `someI8 << 8` daha önce `0` değerini üretti, ancak şimdi RHS'yi `8 & 7 = 0` (3 bits) olarak maskelediği için `someI8` üretiyor. ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Boyutlar farklı olduğunda ilişkisel dizi karşılaştırmalarının hata düzeltmesi. ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/pages/tr/release-notes/graphql-validations-migration-guide.mdx b/website/pages/tr/release-notes/graphql-validations-migration-guide.mdx index f8cf8a3c2ed3..a8d1725ec834 100644 --- a/website/pages/tr/release-notes/graphql-validations-migration-guide.mdx +++ b/website/pages/tr/release-notes/graphql-validations-migration-guide.mdx @@ -1,86 +1,86 @@ --- -title: GraphQL Validations migration guide +title: GraphQL Validasyon Geçiş Kılavuzu --- -Soon `graph-node` will support 100% coverage of the [GraphQL Validations specification](https://spec.graphql.org/June2018/#sec-Validation). +Yakında "graph-node", [GraphQL Validasyon Özelliklerinin](https://spec.graphql.org/June2018/#sec-Validation)'in %100'ünü destekleyecektir. -Previous versions of `graph-node` did not support all validations and provided more graceful responses - so, in cases of ambiguity, `graph-node` was ignoring invalid GraphQL operations components. +"graph-node"un önceki sürümleri tüm doğrulamaları desteklemiyordu ve daha zarif yanıtlar veriyordu - bu nedenle, belirsizlik durumlarında "graph-node" geçersiz GraphQL işlem bileşenlerini görmezden geliyordu. -GraphQL Validations support is the pillar for the upcoming new features and the performance at scale of The Graph Network. +GraphQL validasyon desteği, yaklaşan yeni özelliklerin ve Graph Ağı ölçeğindeki performansın temel direğidir. -It will also ensure determinism of query responses, a key requirement on The Graph Network. +Ayrıca, Graph ağında önemli bir gereklilik olan sorgu yanıtlarının belirleyiciliğini de sağlayacaktır. -**Enabling the GraphQL Validations will break some existing queries** sent to The Graph API. +**GraphQL validasyonlarını etkinleştirmek, Graph API'ye gönderilen bazı mevcut sorguları bozacaktır**. -To be compliant with those validations, please follow the migration guide. +Bu doğrulamalarla uyumlu olmak için lütfen taşıma kılavuzunu takip edin. -> ⚠️ If you do not migrate your queries before the validations are rolled out, they will return errors and possibly break your frontends/clients. +> ⚠️ Doğrulamalar kullanıma sunulmadan önce sorgularınızı taşımazsanız, bunlar hata döndürecek ve muhtemelen ön uçlarınızı/istemcilerinizi bozacaktır. -## Migration guide +## Taşıma Kılavuzu -You can use the CLI migration tool to find any issues in your GraphQL operations and fix them. Alternatively you can update the endpoint of your GraphQL client to use the `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` endpoint. Testing your queries against this endpoint will help you find the issues in your queries. +GraphQL işlemlerinizdeki sorunları bulmak ve düzeltmek için CLI taşıma aracını kullanabilirsiniz. 
Alternatif olarak, `https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME` uç noktasını kullanmak için GraphQL istemcinizin uç noktasını güncelleyebilirsiniz. Sorgularınızı bu uç noktaya göre test etmek, sorgularınızdaki sorunları bulmanıza yardımcı olacaktır. -> Not all subgraphs will need to be migrated, if you are using [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) or [GraphQL Code Generator](https://the-guild.dev/graphql/codegen), they already ensure that your queries are valid. +> Tüm subgraph'lerin taşınması gerekmez, [GraphQL ESlint](https://the-guild.dev/graphql/eslint/docs) veya [GraphQL Code Generator](https://the-guild.dev/graphql/codegen) kullanıyorsanız zaten sorgularınızın geçerli olmasını sağlarlar. -## Migration CLI tool +## Geçiş CLI Aracı -**Most of the GraphQL operations errors can be found in your codebase ahead of time.** +**GraphQL işlem hatalarının çoğu kod tabanınızda önceden bulunabilir.** -For this reason, we provide a smooth experience for validating your GraphQL operations during development or in CI. +Bu nedenle, geliştirme sırasında veya CI'de GraphQL işlemlerinizi doğrulamak için sorunsuz bir deneyim sağlıyoruz. -[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate) is a simple CLI tool that helps validate GraphQL operations against a given schema. +[`@graphql-validate/cli`](https://github.com/saihaj/graphql-validate), GraphQL işlemlerini belirli bir şemaya göre doğrulamaya yardımcı olan basit bir CLI aracıdır. -### **Getting started** +### **Başlarken** -You can run the tool as follows: +Aracı aşağıdaki gibi çalıştırabilirsiniz: ```bash npx @graphql-validate/cli -s https://api-next.thegraph.com/subgraphs/name/$GITHUB_USER/$SUBGRAPH_NAME -o *.graphql ``` -**Notes:** +**Notlar:** -- Set or replace $GITHUB_USER, $SUBGRAPH_NAME with the appropriate values. Like: [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) -- The preview schema URL (https://api-next.thegraph.com/) provided is heavily rate-limited and will be sunset once all users have migrated to the new version. **Do not use it in production.** -- Operations are identified in files with the following extensions [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` option). +- $GITHUB_USER, $SUBGRAPH_NAME değerini uygun değerlerle ayarlayın veya değiştirin. [`artblocks/art-blocks`](https://api.thegraph.com/subgraphs/name/artblocks/art-blocks) gibi. +- Sağlanan önizleme şeması URL'si (https://api-next.thegraph.com/) büyük oranda hız sınırlamasına sahiptir ve tüm kullanıcılar yeni sürüme geçtikten sonra kullanımdan kaldırılacaktır. **Üretim ortamında kullanmayın.** +- İşlemler, aşağıdaki [`.graphql`,](https://www.graphql-tools.com/docs/schema-loading#graphql-file-loader)[`.ts`, `.tsx`, `.js`, `jsx`](https://www.graphql-tools.com/docs/schema-loading#code-file-loader) (`-o` seçeneği) uzantılarına sahip dosyalarda tanımlanır.
-### CLI output +### CLI Çıktısı -The `[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI tool will output any GraphQL operations errors as follows: +`[@graphql-validate/cli](https://github.com/saihaj/graphql-validate)` CLI aracı, tüm GraphQL işlemleri hatalarını aşağıdaki gibi verir: ![Error output from CLI](https://i.imgur.com/x1cBdhq.png) -For each error, you will find a description, file path and position, and a link to a solution example (see the following section). +Her hata için bir açıklama, dosya yolu ve konumu ve bir çözüm örneğine bağlantı bulacaksınız (aşağıdaki bölüme göz atın). -## Run your local queries against the preview schema +## Yerel sorgularınızı önizleme şemasına göre çalıştırın -We provide an endpoint `https://api-next.thegraph.com/` that runs a `graph-node` version that has validations turned on. +Doğrulamaların açık olduğu bir "Graph Node" sürümünü çalıştıran bir uç nokta `https://api-next.thegraph.com/` sağlıyoruz. -You can try out queries by sending them to: +Sorguları şu adrese göndererek deneyebilirsiniz: - `https://api-next.thegraph.com/subgraphs/id/` -or +yada - `https://api-next.thegraph.com/subgraphs/name//` -To work on queries that have been flagged as having validation errors, you can use your favorite GraphQL query tool, like Altair or [GraphiQL](https://cloud.hasura.io/public/graphiql), and try your query out. Those tools will also mark those errors in their UI, even before you run it. +Doğrulama hataları içerdiği işaretlenen sorgular üzerinde çalışmak için Altair veya [GraphiQL](https://cloud.hasura.io/public/graphiql) gibi en sevdiğiniz GraphQL sorgulama aracını kullanabilir ve sorgunuzu deneyebilirsiniz. Bu araçlar, siz çalıştırmadan önce bile kullanıcı arayüzlerinde bu hataları işaretleyecektir. -## How to solve issues +## Sorunları nasıl çözeceğiz? -Below, you will find all the GraphQL validations errors that could occur on your existing GraphQL operations. +Aşağıda, mevcut GraphQL işlemlerinizde meydana gelebilecek tüm GraphQL doğrulama hatalarını bulacaksınız. -### GraphQL variables, operations, fragments, or arguments must be unique +### GraphQL değişkenleri, işlemleri, parçaları veya bağımsız değişkenleri benzersiz olmalıdır -We applied rules for ensuring that an operation includes a unique set of GraphQL variables, operations, fragments, and arguments. +Bir işlemin benzersiz bir GraphQL değişkenleri, işlemler, parçalar ve bağımsız değişkenler kümesi içermesini sağlamak için kurallar uyguladık. -A GraphQL operation is only valid if it does not contain any ambiguity. +Bir GraphQL işlemi, yalnızca herhangi bir belirsizlik içermiyorsa geçerlidir. -To achieve that, we need to ensure that some components in your GraphQL operation must be unique. +Bunu başarmak için, GraphQL işleminizdeki bazı bileşenlerin benzersiz olmasını sağlamamız gerekiyor. -Here's an example of a few invalid operations that violates these rules: +Aşağıda, bu kuralları ihlal eden birkaç geçersiz işleme örnek verilmiştir: -**Duplicate Query name (#UniqueOperationNamesRule)** +**Yinelenen Sorgu Adı (#UniqueOperationNamesRule)** ```graphql # The following operation violated the UniqueOperationName @@ -188,7 +188,7 @@ query myData($id: ID!) 
{ **Duplicate anonymous query (#LoneAnonymousOperationRule)** -Also, using two anonymous operations will violate the `LoneAnonymousOperation` rule due to conflict in the response structure: +Ayrıca, iki anonim işlemin kullanılması, yanıt yapısındaki çakışma nedeniyle `LoneAnonymousOperation` kuralını ihlal edecektir: ```graphql # This will fail if executed together in @@ -211,7 +211,7 @@ query { } ``` -Or name the two queries: +Veya iki sorguyu adlandırın: ```graphql query FirstQuery { @@ -223,13 +223,13 @@ query SecondQuery { } ``` -### Overlapping Fields +### Çakışan Alanlar -A GraphQL selection set is considered valid only if it correctly resolves the eventual result set. +Bir GraphQL seçim seti, yalnızca nihai sonuç setini doğru bir şekilde çözerse geçerli kabul edilir. -If a specific selection set, or a field, creates ambiguity either by the selected field or by the arguments used, the GraphQL service will fail to validate the operation. +Belirli bir seçim kümesi veya bir alan, seçilen alan veya kullanılan bağımsız değişkenler nedeniyle belirsizlik yaratırsa, GraphQL hizmeti işlemi doğrulamada başarısız olur. -Here are a few examples of invalid operations that violate this rule: +Bu kuralı ihlal eden geçersiz işlemlere birkaç örnek: **Conflicting fields aliases (#OverlappingFieldsCanBeMergedRule)** @@ -280,7 +280,7 @@ query { } ``` -Also, in more complex use-cases, you might violate this rule by using two fragments that might cause a conflict in the eventually expected set: +Ayrıca, daha karmaşık kullanım durumlarında, sonunda beklenen kümede bir çakışmaya neden olabilecek iki parça kullanarak bu kuralı ihlal edebilirsiniz: ```graphql query { @@ -299,7 +299,7 @@ fragment B on Type { } ``` -In addition to that, client-side GraphQL directives like `@skip` and `@include` might lead to ambiguity, for example: +Buna ek olarak, "@skip" ve "@include" gibi müşteri tarafı GraphQL yönergeleri belirsizliğe yol açabilir, örneğin: ```graphql fragment mergeSameFieldsWithSameDirectives on Dog { @@ -308,15 +308,15 @@ fragment mergeSameFieldsWithSameDirectives on Dog { } ``` -[You can read more about the algorithm here.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) +[Algoritma hakkında daha fazla bilgiyi buradan edinebilirsiniz.](https://spec.graphql.org/June2018/#sec-Field-Selection-Merging) -### Unused Variables or Fragments +### Kullanılmayan Değişkenler veya Parçalar -A GraphQL operation is also considered valid only if all operation-defined components (variables, fragments) are used. +Bir GraphQL işlemi, yalnızca tüm işlem tanımlı bileşenler (değişkenler, parçalar) kullanılıyorsa geçerli kabul edilir. -Here are a few examples for GraphQL operations that violates these rules: +İşte bu kuralları ihlal eden GraphQL işlemleri için birkaç örnek: -**Unused variable** (#NoUnusedVariablesRule) +**Kullanılmayan Değişken** (#NoUnusedVariablesRule) ```graphql # Invalid, because $someVar is never used. @@ -358,14 +358,14 @@ query something { # remove the `AllFields` fragment ``` -### Invalid or missing Selection-Set (#ScalarLeafsRule) +### Geçersiz veya Eksik Seçim Kümesi (#ScalarLeafsRule) -Also, a GraphQL field selection is only valid if the following is validated: +Ayrıca, bir GraphQL alan seçimi yalnızca aşağıdakiler doğrulanırsa geçerlidir: -- An object field must-have selection set specified. -- An edge field (scalar, enum) must not have a selection set specified. +- Bir nesne alanında olması gereken seçim kümesi belirtildi. 
+- Bir kenar alanı (scalar, enum) belirtilen bir seçim kümesine sahip olmamalıdır. -Here are a few examples of violations of these rules with the following Schema: +Aşağıda, aşağıdaki şema ile bu kuralların ihlaline ilişkin birkaç örnek verilmiştir: ```graphql schema { @@ -430,11 +430,11 @@ query { } ``` -### Incorrect Arguments values (#VariablesInAllowedPositionRule) +### Yanlış Bağımsız Değişken Değerleri (#VariablesInAllowedPositionRule) -GraphQL operations that pass hard-coded values to arguments must be valid, based on the value defined in the schema. +Sabit kodlu değerleri bağımsız değişkenlere ileten GraphQL işlemleri, şemada tanımlanan değere göre geçerli olmalıdır. -Here are a few examples of invalid operations that violate these rules: +Aşağıda, bu kuralları ihlal eden geçersiz işlemlere ilişkin birkaç örnek verilmiştir: ```graphql query purposes { @@ -445,7 +445,7 @@ query purposes { } } -# This might also happen when an incorrect variable is defined: +# Bu, yanlış bir değişken tanımlandığında da olabilir: query purposes($name: Int!) { # If "name" is defined as `String` in the schema, @@ -457,22 +457,22 @@ query purposes($name: Int!) { } ``` -### Unknown Type, Variable, Fragment, or Directive (#UnknownX) +### Bilinmeyen Tür, Değişken, Parça veya Yönerge (#UnknownX) -The GraphQL API will raise an error if any unknown type, variable, fragment, or directive is used. +Herhangi bir bilinmeyen tür, değişken, parça veya yönerge kullanılırsa GraphQL API bir hata verir. -Those unknown references must be fixed: +Bu bilinmeyen referanslar düzeltilmelidir: -- rename if it was a typo -- otherwise, remove +- bir yazım hatasıysa yeniden adlandır +- aksi halde kaldır -### Fragment: invalid spread or definition +### Parça: Geçersiz Yayılma Veya Tanım -**Invalid Fragment spread (#PossibleFragmentSpreadsRule)** +**Geçersiz Parça Yayılması (#PossibleFragmentSpreadsRule)** -A Fragment cannot be spread on a non-applicable type. +Bir parça, geçerli olmayan bir türe yayılamaz. -Example, we cannot apply a `Cat` fragment to the `Dog` type: +Örnek olarak, "Dog" türüne bir "Cat" parçası uygulayamayız: ```graphql query { @@ -486,11 +486,11 @@ fragment CatSimple on Cat { } ``` -**Invalid Fragment definition (#FragmentsOnCompositeTypesRule)** +**Geçersiz Fragment Tanımı (#FragmentsOnCompositeTypesRule)** -All Fragment must be defined upon (using `on ...`) a composite type, in short: object, interface, or union. +Tüm parçalar ('on ...' kullanılarak) bir bileşik tipte tanımlanmalıdır, kısacası: nesne, arayüz veya birleşim. -The following examples are invalid, since defining fragments on scalars is invalid. +Aşağıdaki örnekler geçersizdir, çünkü skalerler üzerinde parça tanımlama geçersizdir. ```graphql fragment fragOnScalar on Int { @@ -506,13 +506,13 @@ fragment inlineFragOnScalar on Dog { } ``` -### Directives usage +### Direktif kullanımı -**Directive cannot be used at this location (#KnownDirectivesRule)** +**Yönerge bu konumda kullanılamaz (#KnownDirectivesRule)** -Only GraphQL directives (`@...`) supported by The Graph API can be used. +Yalnızca Graph API tarafından desteklenen GraphQL yönergeleri ("@...") kullanılabilir. 
-Here is an example with The GraphQL supported directives: +İşte GraphQL tarafından desteklenen direktiflere bir örnek: ```graphql query { @@ -523,13 +523,13 @@ query { } ``` -_Note: `@stream`, `@live`, `@defer` are not supported._ +_Not: `@stream`, `@live`, `@defer` desteklenmez._ -**Directive can only be used once at this location (#UniqueDirectivesPerLocationRule)** +**Yönerge bu konumda yalnızca bir kez kullanılabilir (#UniqueDirectivesPerLocationRule)** -The directives supported by The Graph can only be used once per location. +Graph tarafından desteklenen direktifler, lokasyon başına sadece bir kez kullanılabilir. -The following is invalid (and redundant): +Aşağıdakiler geçersiz (ve gereksiz): ```graphql query { diff --git a/website/pages/tr/substreams.mdx b/website/pages/tr/substreams.mdx index d0354f06bab1..62b743e1d684 100644 --- a/website/pages/tr/substreams.mdx +++ b/website/pages/tr/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams, Graph Ağı için geliştirilmiş güçlü bir blok zinciri indeksleme teknolojisidir. Substreams, geliştiricilerin Rust modülleri yazmasına, toplulukla birlikte veri akışları oluşturmasına olanak tanır ve akış öncelikli bir şekilde paralelleştirme sayesinde son derece yüksek performanslı indeksleme sunar. - +Substreams ile farklı blok zincirlerinden (Ethereum, BNB, Solana...) ultra hızlı veri çekebilirsiniz! Daha sonra bu verileri çeşitli yerlere (bir Postgres veritabanı, bir Mongo veritabanı veya bir Subgraph) gönderebilirsiniz. + +## Substreams Nasıl Çalışır - 4 Adımda + +1. **Blok zinciri verilerine uygulamak istediğiniz dönüşümleri tanımlayan bir Rust programı yazarsınız.** Örneğin, aşağıdaki Rust fonksiyonu bir Ethereum bloğundan ilgili bilgileri çıkarır (numara, hash ve üst hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **Sadece tek bir CLI komutu çalıştırarak Rust programınızı bir WASM modülüne çevirirsiniz.** + +3. **WASM konteyneri, yürütme için bir Substreams uç noktasına gönderilir.** Substreams sağlayıcısı, WASM konteynerini blok zincir verileriyle besler ve dönüşümler uygulanır. + +4. **Dönüştürülmüş verileri göndermek istediğiniz bir yer olan [hedef(sink)](https://substreams.streamingfast.io/developers-guide/sink-targets) seçersiniz** (örneğin bir Postgres veritabanı veya bir Subgraph). + +## Substreams Dökümantasyonu + +Resmi Substreams dökümantasyonu şu anda StreamingFast ekibi tarafından [StreamingFast web sitesinde](https://substreams.streamingfast.io/) tutulmaktadır. + +### Buradan Başlayın + +- Bir Substreams geliştirmek ve dağıtmak için [Substreams CLI'ını yüklemeniz gerekmektedir](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Ardından, [Hızlı Başlangıç Öğreticisini](https://substreams.streamingfast.io/getting-started/quickstart) takip ederek ilk Substreams'ınızı çalıştırın. 
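The Rust example in the four-step Substreams overview above is shown with a bare `Result` return type; a complete map module also carries the handler attribute, an error type, and a protobuf output message. The following is only a minimal sketch of what such a module might look like, assuming the `substreams` and `substreams_ethereum` crates and a hypothetical `MyBlock` message generated from the package's `.proto` files — the real names come from the package's manifest and protobuf definitions.

```rust
use substreams::errors::Error;
use substreams::Hex;
use substreams_ethereum::pb::eth::v2::Block;

// Hypothetical protobuf-generated output type (number, hash, parent_hash),
// assumed to be defined in this package's .proto files.
use crate::pb::example::MyBlock;

// A map module: Substreams feeds it each Ethereum block and collects the
// returned MyBlock message for downstream modules or sinks.
#[substreams::handlers::map]
fn map_my_block(blk: Block) -> Result<MyBlock, Error> {
    let header = blk.header.as_ref().unwrap();

    Ok(MyBlock {
        number: blk.number,
        hash: Hex::encode(&blk.hash),
        parent_hash: Hex::encode(&header.parent_hash),
    })
}
```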
+ +### Bilgi Dağarcığınızı Genişletin + +- Substreams ile oluşturabileceğiniz temel dönüşümler hakkında bilgi edinmek için [Ethereum Gezgini Öğreticisine](https://substreams.streamingfast.io/tutorials/overview/) göz atın. diff --git a/website/pages/tr/sunrise.mdx b/website/pages/tr/sunrise.mdx new file mode 100644 index 000000000000..3dd507efeccf --- /dev/null +++ b/website/pages/tr/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Merkeziyetsiz Verinin Doğuşu SSS +--- + +> Not: Bu belge, en doğru ve yararlı bilgilerin sunulmasını sağlamak amacıyla sürekli olarak güncellenmektedir. Yeni sorular ve cevaplar düzenli olarak eklenmektedir. Aradığınız bilgiyi bulamazsanız veya acil yardıma ihtiyacınız olursa [Discord] \(https://discord.gg/vtvv7FP) üzerinden bize ulaşın. + +## Merkeziyetsiz verinin doğuşu nedir? + +Merkeziyetsiz verinin doğuşu, Graph üzerinde çalışan Edge & Node'un öncülük ettiği bir girişimdir. Hedef, subgraph geliştiricilerinin ve veri tüketicilerinin Graph'ın merkeziyetsiz ağına sorunsuz bir şekilde geçebilmelerini sağlamaktır. + +Bu plan, yeni yayınlanan subgraphlar üzerinde sorgular sunmak için bir yükseltme İndeksleyicisi ve yeni blok zinciri ağlarını Graph'a entegre etme yeteneği de dahil olmak üzere Graph ekosistemindeki önceki birçok önceki gelişmeyi içermektedir. + +### Merkeziyetsiz verinin doğuşunun aşamaları nelerdir? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Kendi altyapımı çalıştırmam gerekiyor mu? + +Hayır, yükseltme İndeksleyicisi de ([aşağıda daha fazlasını okuyun](#what-is-an-upgrade-indexer)) dahil olmak üzere tüm altyapı Graph Ağı'ndaki bağımsız İndeksleyiciler tarafından işletilmektedir. + +[Subgraph Stüdyo](https://thegraph.com/studio/) kullanarak subgraph oluşturabilir, test edebilir ve yayınlayabilirsiniz. Tüm barındırılan hizmet kullanıcılarının subgraphlar'ını Graph Ağı'na yükseltmeleri tavsiye edilmektedir. Yükseltme Endeksleyicisi, kürasyon sinyali olmadan bile subgraph'ınızı sorgulayabilmenizi sağlar. + +Subgraph'ınız yeterli kürasyon sinyaline ulaştığında ve diğer İndeksleyiciler tarafından desteklemeye başladığında, yükseltme İndeksleyicisi kademeli olarak azalacak ve diğer İndeksleyicilerin indeksleme ödüllerini ve sorgu ücretlerini toplama fırsatı tanıyacaktır. + +### Kendi indeksleme altyapımı barındırmalı mıyım? + +Kendi projeniz için altyapıyı çalıştırmak, Graph Ağı'nı kullanmaya kıyasla [önemli ölçüde daha fazla kaynak](/network/benefits/) gerektirir. + +Ayrıca, Graph Ağı, tek bir organizasyon veya ekip tarafından sağlanan herhangi bir şeyden önemli ölçüde daha güçlü, güvenilir ve uygun maliyetli bir yapıya sahiptir. Dünya genelinde yüzlerce bağımsız İndeksleyici, Graph Ağı'nı destekleyerek güvenlik, güvenilirlik ve yedeklilik sağlar. + +Bununla birlikte, hala bir [Graph Düğümü](https://github.com/graphprotocol/graph-node) çalıştırmakla ilgileniyorsanız, subgraph'ınızda ve diğerlerinde veri sunarak indeksleme ödülleri ve sorgu ücretleri kazanmak için Graph Ağı'na [İndeksleyici olarak](https://thegraph.com/blog/how-to-become-indexer/) katılmayı düşünün. + +### Merkezi bir indeksleme sağlayıcısı kullanmalı mıyım? 
+ +Web3'te bir şeyler geliştiriyorsanız, merkezi bir indeksleme sağlayıcısı kullandığınızda, merkeziyetsiz uygulamanız ve verileriniz üzerindeki kontrolü onlara veriyorsunuz demektir. Graph'ın merkeziyetsiz ağı, düğüm yedekliliği sayesinde kusursuz çalışma süresi, önemli ölçüde [daha düşük maliyetler](/network/benefits/) ve veri katmanında rehine alınmama gibi [üstün hizmet kalitesi](https://thegraph.com/blog/qos-the-graph-network/) ve güvenilirlik sunar. + +Graph Ağı ile subgraph'ınız herkese açıktır ve herkes tarafından açıkça sorgulanabilir, bu da merkeziyetsiz uygulamanızın kullanımını ve ağ etkilerini artırır. Merkezi bir indeksleme çözümüyle, subgraph merkezi sağlayıcıya özeldir. + +İşte Graph'ın merkezi barındırmaya göre avantajlarının ayrıntılı bir açıklaması: + +- **Dayanıklılık ve Yedeklilik**: Merkeziyetsiz sistemler, dağıtık yapıları nedeniyle doğal olarak daha dayanıklı ve esnektir. Veriler tek bir sunucuda veya konumda depolanmaz. Bunun yerine, dünyanın dört bir yanındaki yüzlerce bağımsız İndeksleyici tarafından sunulur. Bu, bir düğümün arızalanması durumunda veri kaybı veya hizmet kesintisi riskini azaltır ve olağanüstü çalışma süreleri (%99,99) sağlar. + +- **Hizmet Kalitesi**: Etkileyici çalışma süresine ek olarak, Graph Ağı yaklaşık 106 ms medyan sorgu hızı (gecikme) ve barındırılan alternatiflere kıyasla daha yüksek sorgu başarı oranlarına sahiptir. Daha fazla bilgi için [bu bloğa göz atın] \(https://thegraph.com/blog/qos-the-graph-network/). + +- **Sansüre Direnç**: Merkezi sistemler, düzenleyici baskılar veya ağ saldırıları aracılığıyla sansürün hedefi haline gelebilir. Buna karşın, merkeziyetsiz sistemlerin, dağıtık mimarileri nedeniyle sansürlenmeleri çok daha zordur ve sürekli veri kullanılabilirliği sağlarlar. + +- **Şeffaflık ve Güven**: Merkeziyetsiz sistemler açık bir şekilde işler, bu da herkesin veriyi bağımsız olarak doğrulayabilmesine olanak tanır. Bu şeffaflık, ağ katılımcıları arasında güven oluşturur, çünkü sistemın bütünlüğünü merkezi bir otoriteye güvenmeden doğrulayabilirler. + +Merkezi olmayan yapısı, güvenliği ve şeffaflığı nedeniyle blok zincir ağını seçtiğiniz gibi, aynı şekilde Graph Ağı'nı tercih etmek bu ilkelerin bir devamı niteliğindedir. Veri altyapınızı bu değerlerle uyumlu hale getirerek bütünlük, dayanıklılık ve güvene dayalı bir geliştirme ortamını sağlarsınız. + +### Barındırılan hizmet subgraph'ım yükseltme İndeksleyicisi tarafından desteklenecek mi? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +Ancak, bazı subgraphlar indeksleme ödülleri için uygun olmayabilir ve bu nedenle daha fazla İndeksleyici çekmekte zorlanabilir. Örneğin, belirli zincirlerdeki subgraphlar için indeksleme ödülleri mevcut olmayabilir. Bu blok zinciri toplulukları üyelerinin, [Zincir Entegrasyon Süreci](/chain-integration-overview/) aracılığıyla zincirlerini entegre etmeleri tavsiye edilir. + +## Yükseltme İndeksleyicisi nedir? + +### "Yükseltme İndeksleyicisi" ne anlama geliyor? + +Bu, subgraphlar'ın barındırılan hizmetten Graph Ağı'na yükseltilme deneyimini iyileştirmek ve henüz indekslenmemiş mevcut subgraphlar'ın yeni sürümlerini desteklemek amacıyla tasarlanmıştır. + +Yükseltme İndeksleyicisi, ağda henüz indeksleme ödülleri olmayan zincirleri önyüklemeyi ve yeni subgraph sürümleri için bir geri dönüş noktası sağlamayı amaçlamaktadır. Hedef, bir subgraph yayınlandıktan sonra mümkün olan en kısa sürede sorguları sunmak için bir İndeksleyicinin hazır olmasını sağlamaktır. 
+ +### Yükseltme İndeksleyicisi hangi zincirleri destekleyecek? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Desteklenen zincirlerin kapsamlı bir listesini inceleyin [here](/developing/supported-networks/). + +### Yükseltme İndeksleyicisini neden Edge & Node çalıştırıyor? + +Edge and Node geçmişte barındırılan hizmeti sürdürmüş ve dolayısıyla zaten barındırılan hizmet subgraphlar'ı için veri senkronizasyonu sağlamıştır. + +Tüm İndeksleyicilerin, aynı zamanda yükseltme İndeksleyicisi olmaları teşvik edilir. Ancak, unutulmamalıdır ki Graph Konseyi tarafından onaylanmadan indeksleme ödüllerinin olmaması nedeniyle yükseltme İndeksleyicisi çalıştırmak büyük ölçüde yeni subgraphları ve ek zincirleri desteklemek için bir kamu hizmeti olarak sağlanmaktadır. + +### Bu mevcut İndeksleyiciler için ne anlama gelmektedir? + +Şu an için yalnızca barındırılan hizmette desteklenen zincirler, ilk etapta indeksleme ödülleri olmaksızın Graph üzerindeki geliştiricilere sunulacak. Ancak bu durum ilgilenen tüm İndeksleyiciler için sorgu ücretlerinin önünü açacaktır. Bu durumun, ağda yayınlanan subgraph sayısında bir artışa yol açması ve İndeksleyicilerin, indeksleme ödülleri bir zincir için etkinleştirilmeden önce bile, sorgu ücretleri karşılığında bu subgraphlar'ı indekslemesi ve sunması için daha fazla fırsat sağlaması beklenmektedir. + +Yükseltme İndeksleyicisi ayrıca İndeksleyici topluluğuna Graph Ağı'ndaki subgraphlar ve yeni zincirler konusunda potansiyel talep hakkında bilgi sunmaktadır. + +### Bu Delegatörler için ne anlama gelmektedir? + +Yükseltme İndeksleyicisi, Delegatörler için büyük bir fırsat sunmaktadır. Daha fazla subgraph barındırılan hizmetten Graph Ağı'na yükseltildikçe, Delegatörler artan ağ etkinliğinden faydalanmaya devam edecektir. + +### Yükseltme İndeksleyicisi, mevcut İndeksleyicilerle ödüller için rekabet edecek mi? + +Hayır, yükseltme İndeksleyicisi yalnızca subgraph başına minimum miktarı tahsis edecek ve indeksleme ödüllerini toplamayacaktır. + +Yükseltme İndeksleyicisi, "gerektiğinde" çalışır ve ilgili zincirler ve subgraphlar için ağda en az 3 diğer İndeksleyici yeterli hizmet kalitesine ulaşana kadar bir geri dönüş noktası olarak hizmet eder. + +### Bu durum subgraph geliştiricilerini nasıl etkileyecek? + +Subgraph geliştiricileri, indeksleme için herhangi bir hazırlık süresi gerekmeyeceğinden dolayı subgraphlar'ını barındırılan hizmetten yükselttikten veya Subgraph Stüdyo'dan yayınladıktan hemen sonra ağ üzerinde sorgulayabilecekler. + +### Bu, veri tüketicilerine nasıl fayda sağlar? + +Yükseltme İndeksleyicisi, ağ üzerinde şu anda yalnızca barındırılan hizmette kullanılan zincirleri etkinleştirir. Bu nedenle, ağda sorgulanabilir verilerin kapsamını ve erişilebilirliğini genişletir. + +### Yükseltme İndeksleyicisi sorguları nasıl fiyatlandıracak? + +Yükseltme İndeksleyicisi, sorgu ücreti pazarını etkilememek adına sorguları piyasa fiyatına göre fiyatlandıracaktır. + +### Yükseltme İndeksleyicisi'nin bir subgraph'ı desteklemeyi durdurması için kriterler nelerdir? + +Yükseltme İndeksleyicisi, bir subgraph'a, en az 3 diğer İndeksleyici tarafından sağlanan tutarlı sorgularla yeterli ve başarılı bir şekilde hizmet verilene kadar hizmet verecektir. + +Ayrıca, yükseltme İndeksleyicisi, bir subhraph son 30 günde sorgulanmamış ise desteğini durduracaktır. 
+ +Diğer İndeksleyiciler, süregiden sorgu hacmi olan subgraphlar'a destek sağlamaya teşvik edildiği için, yükseltme İndeksleyicisine yönelik sorgu hacmi sıfıra doğru eğilim göstermelidir. Çünkü İndeksleyici, küçük bir tahsis kapasitesine sahip olacak ve diğer İndeksleyiciler sorgular için, yükseltme İndeksleyicisinden önce seçilecektir. diff --git a/website/pages/tr/tokenomics.mdx b/website/pages/tr/tokenomics.mdx index 949796a99983..24759493565e 100644 --- a/website/pages/tr/tokenomics.mdx +++ b/website/pages/tr/tokenomics.mdx @@ -1,110 +1,110 @@ --- -title: Tokenomics of The Graph Network -description: The Graph Network is incentivized by powerful tokenomics. Here’s how GRT, The Graph’s native work utility token works. +title: Graph Network'ün Token Ekonomisi +description: Graph Ağı, güçlü bir token ekonomisi tarafından teşvik edilmektedir. Graph'ın yerel çalışma yardımcı programı belirteci olan GRT şu şekilde çalışır. --- -- GRT Token Address: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) +- GRT Token Adresi: [0xc944e90c64b2c07662a292be6244bdf05cda44a7](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) -- GRT Token Address on Arbitrum One: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) +- Arbitrum One Üzerinde GRT Token Adresi: [0x9623063377AD1B27544C965cCd7342f7EA7e88C7](https://arbiscan.io/token/0x9623063377ad1b27544c965ccd7342f7ea7e88c7) -The Graph is a decentralized protocol that enables easy access to blockchain data. +Graph, blockchain verilerine kolayca erişim sağlayan merkeziyetsiz bir protokoldür. -It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. +Merkeziyetsiz bir katılımcı ağı tarafından desteklenmesi dışında B2B2C modeline benzer. Ağ katılımcıları, GRT ödülleri karşılığında son kullanıcılara veri sağlamak için birlikte çalışır. GRT, veri sağlayıcıları ve tüketicileri koordine eden iş aracı simgesidir. GRT, ağ içindeki veri sağlayıcıları ve tüketicileri koordine etmek için bir yardımcı program olarak hizmet ederken, protokol katılımcılarını verileri etkili bir şekilde düzenlemeye de teşvik eder. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. -The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! +Graph, blockchain verilerini, Google'ın web'i indekslemesine benzer şekilde indeksler. Aslında, Graph'i farkında olmadan hali hazırda kullanıyor olabilirsiniz. 
Verilerini bir subgraph'ten alan merkeziyetsiz uygulamanın ön ucunu görüntülediyseniz, bu demek oluyor ki bir subgraph'ten veri sorgulamışsınız bile! -The Graph plays a crucial role in making blockchain data more accessible and enabling a marketplace for its exchange. +Graph, blok zinciri verilerini daha erişilebilir hale getirmede ve değiş tokuş için bir pazar yeri sağlamada çok önemli bir rol oynar. -## The Roles of Network Participants +## Ağ Katılımcılarının Rolleri -There are four primary network participants: +Dört birincil ağ katılımcısı vardır: -1. Delegators - Delegate GRT to Indexers & secure the network +1. Delegatörler - GRT'yi indeksleyicilere stake eder & ağı güvenliğini sağlarlar -2. Curators - Find the best subgraphs for Indexers +2. Küratörler - İndeksleyiciler için en iyi subgraph'leri bulurlar -3. Developers - Build & query subgraphs +3. Geliştiriciler - Subgraph'leri oluşturur & sorgular -4. Indexers - Backbone of blockchain data +4. İndeksleyiciler - Blockchain verilerinin omurgasıdır -Fishermen and Arbitrators are also integral to the network’s success through other contributions, supporting the work of the other primary participant roles. For more information about network roles, [read this article](https://thegraph.com/blog/the-graph-grt-token-economics/). +Fishermen ve Arbitrator'ler, diğer birincil katılımcı rollerinin çalışmalarını destekleyen diğer katkılar yoluyla da ağın başarısının ayrılmaz bir parçasıdır. Ağ rolleri hakkında daha fazla bilgi için, [bu makaleye bir göz atın](https://thegraph.com/blog/the-graph-grt-token-economics/). -![Tokenomics diagram](/img/updated-tokenomics-image.png) +![Tokenizasyon Diyagramı](/img/updated-tokenomics-image.png) -## Delegators (Passively earn GRT) +## Delegeler (Pasif olarak GRT kazanırlar) -Indexers are delegated GRT by Delegators increasing the Indexer’s stake in subgraphs on the network. In return, Delegators earn a percentage of all query fees and indexing rewards from the Indexer. Each Indexer sets the cut that will be rewarded to Delegators independently, creating competition among Indexers to attract Delegators. Most Indexers offer between 9-12% annually. +İndeksleyicilere, delegatörler tarafından GRT token'lar stake edilir ve bu, indeksleyicinin ağda bulunan subgraph'lerdeki payını artırır. Buna karşılık, delegatörler, indeksleyici'den tüm sorgu ücretlerinin ve indeksleme ödüllerinin bir yüzdesini kazanır. Her indeksleyici, delegatörlere ödüllendirilecek kesintiyi bağımsız olarak belirleyerek, indeksleyiciler arasında delegatörleri çekmek için rekabet yaratır. Çoğu indeksleyici yılda %9-12 arasında teklif verir. -For example, if a Delegator were to delegate 15k GRT to an Indexer offering 10%, the Delegator would receive ~1500 GRT in rewards annually. +Örneğin, bir delegatör, %10 teklif veren bir indeksleyiciye 15.000 GRT stake ederse, delegatör yılda ~1500 GRT ödül alacaktır. -There is a 0.5% delegation tax which is burned whenever a Delegator delegates GRT on the network. If a Delegator chooses to withdraw their delegated GRT, the Delegator must wait for the 28-epoch unbonding period. Each epoch is 6,646 blocks, which means 28 epochs ends up being approximately 26 days. +Bir delegatör ağ üzerinde GRT stake ettiğinde, yakılan %0,5'lik bir delegasyon vergisi bulunmakta. Bir delegatör, stake edilen GRT'sini geri çekmek isterse, delegatör 28 dönemlik cooldown süresini beklemelidir. Her dönem 6.646 bloktur, bu da 28 dönemin yaklaşık 26 gün olduğu anlamına gelir. 
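A minimal worked example of the delegation figures quoted above. The 10% annual rate and the 0.5% delegation tax are the values given in this section; the ~12-second Ethereum block time is an assumption implied by the 28-epoch ≈ 26-day estimate.

```rust
fn main() {
    // Example from this section: 15,000 GRT delegated to an Indexer
    // that passes on 10% annually to its Delegators.
    let delegated_grt = 15_000.0_f64;
    let annual_rate = 0.10;
    println!("yearly rewards: ~{} GRT", delegated_grt * annual_rate); // ~1500 GRT

    // A 0.5% delegation tax is burned when the GRT is delegated.
    println!("delegation tax burned: {} GRT", delegated_grt * 0.005); // 75 GRT

    // Unbonding period: 28 epochs of 6,646 blocks each,
    // assuming ~12-second Ethereum blocks.
    let blocks = 28_u64 * 6_646;
    let days = blocks as f64 * 12.0 / 86_400.0;
    println!("{blocks} blocks ≈ {days:.1} days"); // 186,088 blocks ≈ 25.8 days
}
```

The tax applies to the delegated amount at delegation time, and the ~26-day figure is only as precise as the assumed block time.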
-If you're reading this, you're capable of becoming a Delegator right now by heading to the [network participants page](https://thegraph.com/explorer/participants/indexers), and delegating GRT to an Indexer of your choice. +Bunu okuyorsanız, şu anda [network participants page](https://thegraph.com/explorer/participants/indexers)'e giderek ve GRT'yi seçtiğiniz bir indeksleyiciye stake ederek bir delegatör olabilirsiniz. -## Curators (Earn GRT) +## Küratörler (GRT Kazanırlar) -Curators identify high-quality subgraphs, and "curate" them (i.e., signal GRT on them) to earn curation shares, which guarantee a percentage of all future query fees generated by the subgraph. While any independent network participant can be a Curator, typically subgraph developers are among the first Curators for their own subgraphs because they want to ensure their subgraph is indexed. +Küratörler, yüksek kaliteli subgraph'leri belirler ve subgraph tarafından oluşturulan gelecekteki tüm sorgu ücretlerinin bir yüzdesini garanti eden kürasyon payları kazanmak için bunları "düzenler" (yani üzerlerinde GRT sinyali verir). Herhangi bir bağımsız ağ katılımcısı bir küratör olabilirken, genellikle subgraph geliştiricileri kendi subgraph'leri için ilk küratörler arasındadır çünkü subgraph'lerinin indekslendiğinden emin olmak isterler. -As of December 2022, subgraph developers are encouraged to curate their subgraph with at least 10,000 GRT. However, this number may be impacted by network activity and community participation. +Aralık 2022 itibariyle, subgraph geliştiricilerin subgraph'lerini en az 10.000 GRT ile kürasyon yapmaları teşvik edilmektedir. Ancak, bu sayı ağ etkinliğinden ve topluluk katılımından etkilenebilir. -Curators pay a 1% curation tax when they curate a new subgraph. This curation tax is burned, decreasing the supply of GRT. +Küratörler, yeni bir subgraph'in küratörlüğünü yaptıklarında %1 kürasyon vergisi öderler. Bu kürasyon vergisi yakılır ve GRT arzından düşer. -## Developers +## Geliştiriciler -Developers build and query subgraphs to retrieve blockchain data. Since subgraphs are open source, developers can query existing subgraphs to load blockchain data into their dapps. Developers pay for queries they make in GRT, which is distributed to network participants. +Geliştiriciler, blockchain verilerini almak için subgrpah'ler inşa eder ve sorgular. Subgraph'ler açık kaynak olduğundan, geliştiriciler blok zinciri verilerini kendi veri uygulamalarına yüklemek için mevcut subgraph'leri sorgulayabilir. Geliştiriciler, ağ katılımcılarına dağıtılan GRT'de yaptıkları sorgular için ödeme yaparlar. -### Creating a subgraph +### Subgraph oluşturma -Developers can [create a subgraph](/developing/creating-a-subgraph/) to index data on the blockchain. Subgraphs are instructions for Indexers about which data should be served to consumers. +Geliştiriciler, blok zincirindeki verileri indekslemek için [buraya](/developing/creating-a-subgraph/) göz atabilir. Subgraph'ler, indeksleyiciler için tüketicilere hangi verilerin sunulması gerektiğine ilişkin talimatlardır. -Once developers have built and tested their subgraph, they can [publish their subgraph](/publishing/publishing-a-subgraph/) on The Graph's decentralized network. +Geliştiriciler subgraph'lerini inşa edip test ettikten sonra, Graph'in merkeziyetsiz ağında [subgraph'lerini yayınlayabilir](/publishing/publishing-a-subgraph/). 
-### Querying an existing subgraph +### Mevcut bir subgraph'ı sorgulama -Once a subgraph is [published](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/) to The Graph's decentralized network, anyone can create an API key, add GRT to their billing balance, and query the subgraph. +Bir subgraph, Graph'in merkeziyetsiz ağında [yayınlandığında](https://thegraph.com/docs/en/publishing/publishing-a-subgraph/), herkes bir API anahtarı oluşturabilir, GRT'yi fatura bakiyelerine ekleyebilir ve subgraph'i sorgulayabilir. -Subgraphs are [queried using GraphQL](/querying/querying-the-graph/), and the query fees are paid for with GRT in [Subgraph Studio](https://thegraph.com/studio/). Query fees are distributed to network participants based on their contributions to the protocol. +Subgraph'ler [GraphQL kullanılarak](/querying/querying-the-graph/) sorgulanır ve sorgu ücretleri [Subgraph Studio](https://thegraph.com/studio/)'da GRT ile ödenir. Sorgu ücretleri, ağ katılımcılarına katkılarına göre dağıtılır. protokol. -1% of the query fees paid to the network are burned. +Ağa ödenen sorgu ücretlerinin %1'i yakılır. -## Indexers (Earn GRT) +## İndeksleyiciler (GRT Kazanırlar) -Indexers are the backbone of The Graph. They operate independent hardware and software powering The Graph’s decentralized network. Indexers serve data to consumers based on instructions from subgraphs. +İndeksleyiciler Graph'in omurgasıdır. Graph'in merkeziyetsiz ağına güç sağlayan bağımsız donanım ve yazılım kullanırlar. İndeksleyiciler, subgraph'lerden gelen talimatlara dayalı olarak tüketicilere veri sunar. -Indexers can earn GRT rewards in two ways: +İndeksleyiciler, GRT ödüllerini iki şekilde kazanabilir: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Sorgu ücretleri: Subgraph veri sorguları için geliştiriciler veya kullanıcılar tarafından ödenen GRT'dir. Sorgu ücretleri, üstel indirim fonksiyonuna göre doğrudan İndeksleyicilere dağıtılır ([buradan](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) GIP'e ulaşın). -2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. +2. İndeksleme ödülleri: %3'lük yıllık ihraç, indeksleyicilere indeksledikleri subgraph sayısına göre dağıtılır. Bu ödüller, indeksleyicileri, verileri doğru bir şekilde dizine eklediklerini doğrulayan İndeksleme Kanıtlarını (POI'ler) tahakkuk ettirmek ve sunmak için ara sıra sorgu ücretleri başlamadan önce subgraphleri indekslemeye teşvik eder. -Each subgraph is allotted a portion of the total network token issuance, based on the amount of the subgraph’s curation signal. That amount is then rewarded to Indexers based on their allocated stake on the subgraph. +Her bir subgraph'e, subgraph'in kürasyon sinyalinin miktarına bağlı olarak toplam ağ belirteci düzenlemesinin bir kısmı tahsis edilir. Bu miktar daha sonra subgraph'te tahsis edilen paylarına göre indeksleyicilere ödül olarak gönderilir. -In order to run an indexing node, Indexers must stake 100,000 GRT or more with the network. Indexers are incentivized to stake GRT in proportion to the amount of queries they serve. 
+Bir indeksleme node'u çalıştırmak için, indeksleyicilerin ağ üzerinde 100.000 GRT veya daha fazlasını stake etmesi gerekir. İndeksleyiciler, hizmet ettikleri sorgu miktarıyla orantılı olarak GRT'yi paylaştırmaya teşvik edilir. -Indexers can increase their GRT allocations on subgraphs by accepting GRT delegation from Delegators, and they can accept up to 16 times their initial stake. If an Indexer becomes "over-delegated" (i.e., more than 16 times their initial stake), they will not be able to use the additional GRT from Delegators until they increase their stake in the network. +İndeksleyiciler, delegatörlerden GRT delegasyonunu kabul ederek subgraph'lerdeki GRT tahsislerini artırabilir ve ilk paylarının 16 katına kadar kabul edebilirler. Bir indeksleyici "aşırı yetkilendirilmiş" hale gelirse (yani, ilk paylarının 16 katından fazla), ağdaki paylarını artırana kadar delegatörlerden gelen ek GRT'yi kullanamazlar. -The amount of rewards an Indexer receives can vary based on the initial stake, accepted delegation, quality of service, and many more factors. The following chart is publicly available data from an active Indexer on The Graph's decentralized network. +Bir indeksleyicinin aldığı ödüllerin miktarı, ilk stake'e, kabul edilen delegasyona, hizmet kalitesine ve daha birçok faktöre göre değişebilir. Aşağıdaki tablo, Graph'in merkeziyetsiz ağındaki etkin bir indeksleyiciden alınan, halka açık verilerdir. -### The Indexer stake & reward of allnodes-com.eth +### allnodes-com.eth'nin İndeksleyici stake ve ödülü -![Indexing stake and rewards](/img/indexing-stake-and-income.png) +![İndeksleme stake ve ödülleri](/img/indexing-stake-and-income.png) -This data is from February 2021 to September 2022. +Bu veriler Şubat 2021'den Eylül 2022'ye kadardır. -> Please note, this will improve when the [Arbitrum migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) is complete, making gas costs a significantly lower burden for participating on the network. +> Lütfen unutmayın, [Arbitrum migration](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551) tamamlandığında bu durum düzelecek ve gaz maliyetlerini ağa katılım için önemli ölçüde daha düşük bir yük haline getirecektir. -## Token Supply: Burning & Issuance +## Token Arzı: Yakma & İhraç -The initial token supply is 10 billion GRT, with a target of 3% new issuance annually to reward Indexers for allocating stake on subgraphs. This means that the total supply of GRT tokens will increase by 3% each year as new tokens are issued to Indexers for their contribution to the network. +İlk token arzı 10 milyar GRT'dir ve indeksleyiciler subgraph'lere pay ayırdıkları için ödüllendirmek adına yıllık %3 yeni ihraç hedefi vardır. Bu, ağa katkılarından dolayı indeksleyicilere yeni tokenlar verildikçe, GRT tokenlarının toplam arzının her yıl %3 artacağı anlamına gelir. -The Graph is designed with multiple burning mechanisms to offset new token issuance. Approximately 1% of the GRT supply is burned annually through various activities on the network, and this number has been increasing as network activity continues to grow. These burning activities include a 0.5% delegation tax whenever a Delegator delegates GRT to an Indexer, a 1% curation tax when Curators signal on a subgraph, and a 1% of query fees for blockchain data. +Graph, yeni token ihracını dengelemek için çoklu yazma mekanizmalarıyla tasarlanmıştır. 
GRT arzının yaklaşık %1'i her yıl ağdaki çeşitli faaliyetler yoluyla yakılmaktadır ve bu sayı, ağ etkinliği artmaya devam ettikçe artmaktadır. Bu yakma faaliyetleri, bir delegatör GRT'yi bir indeksleyiciye stake ettiğinde %0,5'lik bir delegasyon vergisi, küratörler bir subgraph'te sinyal verdiğinde %1'lik bir kürasyon vergisi ve blok zinciri verileri için %1'lik bir sorgulama ücreti içerir. -![Total burned GRT](/img/total-burned-grt.jpeg) +![Toplam Yakılmış GRT](/img/total-burned-grt.jpeg) -In addition to these regularly occurring burning activities, the GRT token also has a slashing mechanism in place to penalize malicious or irresponsible behavior by Indexers. If an Indexer is slashed, 50% of their indexing rewards for the epoch are burned (while the other half goes to the fisherman), and their self-stake is slashed by 2.5%, with half of this amount being burned. This helps to ensure that Indexers have a strong incentive to act in the best interests of the network and to contribute to its security and stability. +Düzenli olarak meydana gelen bu yakma faaliyetlerine ek olarak, GRT tokenı ayrıca indeksleyicilerin kötü niyetli veya sorumsuz davranışlarını cezalandırmak için yürürlükte olan bir kesme mekanizmasına sahiptir. Bir indeksleyici kesilirse, dönem için indeksleme ödüllerinin %50'si yakılır (diğer yarısı fisherman'e gider) ve öz payı %2,5 oranında azaltılır ve bu miktarın da yarısı yakılır. Bu, indeksleyicilerin ağın çıkarları doğrultusunda hareket etme ve güvenlik ve istikrarına katkıda bulunma yönünde güçlü bir teşvike sahip olmasını sağlamaya yardımcı olur. -## Improving the Protocol +## Protokolün Geliştirilmesi -The Graph Network is ever-evolving and improvements to the economic design of the protocol are constantly being made to provide the best experience for all network participants. The Graph Council oversees protocol changes and community members are encouraged to participate. Get involved with protocol improvements in [The Graph Forum](https://forum.thegraph.com/). +Graph ağı sürekli gelişmektedir ve tüm ağ katılımcılarına en iyi deneyimi sağlamak için protokolün ekonomik tasarımında sürekli iyileştirmeler yapılmaktadır. Graph Konseyi, protokol değişikliklerini denetler ve topluluk üyelerinin katılmaya teşvik edilir. [Graph Forum](https://forum.thegraph.com/)'daki protokol iyileştirmelerine dahil olmayı unutmayın. diff --git a/website/pages/uk/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/uk/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..43f96152931a 100644 --- a/website/pages/uk/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/uk/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. 
The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). + +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. 
If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## Subgraph Transfer -## How do I transfer my subgraph? +### How do I transfer my subgraph? + + To transfer your subgraph, you will need to complete the following steps: @@ -30,55 +54,147 @@ To transfer your subgraph, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Where should I initiate my transfer from? You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. -## How long do I need to wait until my subgraph is transferred +### How long do I need to wait until my subgraph is transferred The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. -## Will my subgraph still be discoverable after I transfer it to L2? +### Will my subgraph still be discoverable after I transfer it to L2? Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. -## Does my subgraph need to be published to transfer it? +### Does my subgraph need to be published to transfer it? To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. 
If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. -## After I transfer, do I also need to re-publish on Arbitrum? +### After I transfer, do I also need to re-publish on Arbitrum? After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Will my subgraph's curation move with my subgraph? If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my subgraph back to Ethereum mainnet after I transfer? Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Why do I need bridged ETH to complete my transfer? Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). 
However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. + +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. 
If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? + +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. 
+ +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curation Signal -## How do I transfer my curation? +### How do I transfer my curation? To transfer your curation, you will need to complete the following steps: @@ -90,25 +206,29 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -## How will I know if the subgraph I curated has moved to L2? +### How will I know if the subgraph I curated has moved to L2? When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. -## What if I do not wish to move my curation to L2? +### What if I do not wish to move my curation to L2? When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. -## How do I know my curation successfully transferred? +### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -## Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one subgraph at a time? There is no bulk transfer option at this time. ## Indexer Stake -## How do I transfer my stake to Arbitrum? +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + To transfer your stake, you will need to complete the following steps: @@ -120,7 +240,7 @@ To transfer your stake, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Will all of my stake transfer? You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. 
@@ -128,91 +248,45 @@ If you plan on transferring parts of your stake over multiple transactions, you Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. -## How much time do I have to confirm my stake transfer to Arbitrum? +### How much time do I have to confirm my stake transfer to Arbitrum? \*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. -## What if I have open allocations? +### What if I have open allocations? If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. -## How long will it take to transfer my stake? +### How long will it take to transfer my stake? It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. -## Do I have to index on Arbitrum before I transfer my stake? +### Do I have to index on Arbitrum before I transfer my stake? You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. -## Can Delegators move their delegation before I move my indexing stake? +### Can Delegators move their delegation before I move my indexing stake? No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -## Delegation - -## How do I transfer my delegation? - -To transfer your delegation, you will need to complete the following steps: - -1. Initiate delegation transfer on Ethereum mainnet - -2. Wait 20 minutes for confirmation - -3. Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? - -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. - -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -## What if I can't find the Indexer I'm delegating to on L2? +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -The L2 transfer tool will automatically detect the Indexer you previously delegated to. - -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? - -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. - -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? - -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? - -It is anticipated that all network participation will move to Arbitrum One in the future. - -## How long does it take to complete the transfer of my delegation to L2? - -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? 
- -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -## Is there any delegation tax? - -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## Vesting Contract Transfer -## How do I transfer my vesting contract? +### How do I transfer my vesting contract? To transfer your vesting, you will need to complete the following steps: @@ -222,7 +296,9 @@ To transfer your vesting, you will need to complete the following steps: 3. Confirm vesting transfer on Arbitrum -## How do I transfer my vesting contract if I am only partially vested? +### How do I transfer my vesting contract if I am only partially vested? + + 1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) @@ -232,7 +308,9 @@ To transfer your vesting, you will need to complete the following steps: 4. Withdraw any remaining ETH from the transfer tool contract -## How do I transfer my vesting contract if I am fully vested? +### How do I transfer my vesting contract if I am fully vested? + + For those that are fully vested, the process is similar: @@ -244,7 +322,7 @@ For those that are fully vested, the process is similar: 4. Withdraw any remaining ETH from the transfer tool contract -## Can I transfer my vesting contract to Arbitrum? +### Can I transfer my vesting contract to Arbitrum? You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). @@ -256,27 +334,27 @@ Please note that you will not be able to release/withdraw GRT from the L2 vestin If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? 
Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. @@ -298,18 +376,36 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. 
This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/uk/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/uk/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..11b9ba5a10ef 100644 --- a/website/pages/uk/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/uk/arbitrum/l2-transfer-tools-guide.mdx @@ -2,14 +2,14 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. 
## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. diff --git a/website/pages/uk/billing.mdx b/website/pages/uk/billing.mdx index e7d70af8b427..5e8797b90d55 100644 --- a/website/pages/uk/billing.mdx +++ b/website/pages/uk/billing.mdx @@ -37,8 +37,12 @@ Banxa дозволяє вам обійти необхідність обміну ### Поповнення GRT за допомогою криптогаманця + + > Цей розділ написаний для тих, хто вже має GRT на балансі криптогаманця в основній мережі Ethereum. Якщо у вас немає GRT, ви можете дізнатися як поповнити гаманець [тут](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Перейдіть на [ білінгову сторінку Subgraph Studio](https://thegraph.com/studio/billing/). 2. Натисніть на кнопку "Connect Wallet" у правому верхньому куті сторінки. Ви будете перенаправлені на сторінку вибору гаманця. Виберіть той, який вам підходить, і натисніть кнопку "Connect". @@ -71,6 +75,8 @@ Banxa дозволяє вам обійти необхідність обміну ### Поповнення GRT за допомогою гаманця з мультипідписами + + 1. Перейдіть на [ білінгову сторінку Subgraph Studio](https://thegraph.com/studio/billing/). 2. Натисніть на кнопку "Connect Wallet" у верхньому правому куті сторінки. Оберіть свій гаманець і натисніть на "Connect". Якщо ви використовуєте [Gnosis-Safe](https://gnosis-safe.io/), у вас буде змога під'єднати ваш гаманець з мультипідписами та гаманець для підпису. Після цього підпишіть відповідну транзакцію. Це не потребуватиме оплати за газ. @@ -97,11 +103,11 @@ Banxa дозволяє вам обійти необхідність обміну ## Купівля GRT -У цьому розділі ви дізнаєтеся як купити GRT для того, щоб сплачувати комісію за запити. +This section will show you how to get GRT to pay for query fees. ### Coinbase -Це покрокова інструкція, яка допоможе вам придбати GRT на біржі Coinbase. +This will be a step by step guide for purchasing GRT on Coinbase. 1. Перейдіть на [Coinbase](https://www.coinbase.com/) та створіть обліковий запис. 2. Як тільки ви створите обліковий запис, вам потрібно буде верифікувати свою особу за допомогою процесу, відомого як KYC (Знай свого клієнта). Це стандартна процедура для всіх централізованих або кастодіальних криптобірж. @@ -117,11 +123,11 @@ Banxa дозволяє вам обійти необхідність обміну - Введіть кількість токенів GRT, яку ви хочете відправити та адресу гаманця, на який відправляєте токени. - Натисніть "Continue" та підтвердіть вашу транзакцію. Зверніть увагу на те, що коли ви виводите велику суму, Coinbase може затримати цей переказ на 7-10 днів, перш ніж перевести всю суму на ваш криптогаманець. -За [цим](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) посиланням ви можете дізнатися більше про покупку GRT на біржі Coinbase. +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). ### Binance -Це покрокова інструкція, яка допоможе вам придбати GRT на Binance. +This will be a step by step guide for purchasing GRT on Binance. 1. Перейдіть на [Binance](https://www.binance.com/en) та створіть обліковий запис. 
2. Як тільки ви створите обліковий запис, вам потрібно буде верифікувати свою особу за допомогою процесу, відомого як KYC (Знай свого клієнта). Це стандартна процедура для всіх централізованих або кастодіальних криптобірж. @@ -137,11 +143,11 @@ Banxa дозволяє вам обійти необхідність обміну - Введіть кількість токенів GRT, яку ви хочете надіслати, та адресу гаманця, на яку ви хочете їх надіслати. - Натисніть "Continue" та підтвердіть транзакцію. -За [цим](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582) посиланням ви можете дізнатися більше про покупку GRT на біржі Binance. +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -Це інструкція, за допомогою якої ви можете придбати GRT на Uniswap. +This is how you can purchase GRT on Uniswap. 1. Перейдіть на [Uniswap](https://app.uniswap.org/#/swap) та приєднайте ваш криптогаманець. 2. Виберіть монету, яку ви хочете обміняти. Наприклад оберіть ETH. @@ -151,8 +157,52 @@ Banxa дозволяє вам обійти необхідність обміну 5. Натисніть кнопку "Swap". 6. Підтвердіть транзакцію у своєму гаманці та очікуйте, поки транзакція буде оброблена. -За [цим](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) посиланням ви можете дізнатися більше про покупку GRT на Uniswap. +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Перейдіть на [Coinbase](https://www.coinbase.com/) та створіть обліковий запис. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Натисніть "Continue" та підтвердіть транзакцію. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Перейдіть на [Binance](https://www.binance.com/en) та створіть обліковий запис. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). 
This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Натисніть "Continue" та підтвердіть транзакцію. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Міст Arbitrum -Платіжний контракт призначений лише для того, щоб переводити токени GRT з основної мережі Ethereum в мережу Arbitrum. Якщо вам потрібно перевести GRT з мережі Arbitrum назад в мережу Ethereum, тоді вам потрібно використовувати [міст Arbitrum ](https://bridge.arbitrum.io/?l2ChainId=42161). +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/uk/chain-integration-overview.mdx b/website/pages/uk/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/uk/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. 
Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. 
diff --git a/website/pages/uk/cookbook/arweave.mdx b/website/pages/uk/cookbook/arweave.mdx index 15aaf1a38831..332c1f61edeb 100644 --- a/website/pages/uk/cookbook/arweave.mdx +++ b/website/pages/uk/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -53,7 +53,7 @@ $ graph codegen # generates types from the schema file identified in the manifes $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -## Subgraph Manifest Definition +## Визначення маніфесту підграфів The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for an Arweave subgraph: @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -97,13 +97,13 @@ Arweave data sources support two types of handlers: > Note: [Bundlr](https://bundlr.network/) transactions are not supported yet. -## Schema Definition +## Визначення схеми Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on the subgraph schema definition [here](/developing/creating-a-subgraph/#the-graphql-schema). ## AssemblyScript Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). Arweave indexing introduces Arweave-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -162,7 +162,7 @@ graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph The GraphQL endpoint for Arweave subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. -## Example Subgraphs +## Приклади підграфів Here is an example subgraph for reference: diff --git a/website/pages/uk/cookbook/base-testnet.mdx b/website/pages/uk/cookbook/base-testnet.mdx index 50ddaf0c0b7a..b97efa5a9076 100644 --- a/website/pages/uk/cookbook/base-testnet.mdx +++ b/website/pages/uk/cookbook/base-testnet.mdx @@ -65,7 +65,7 @@ The previous command creates a scaffold subgraph that you can use as a starting - Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in manifest file to deploy your subgraph on Base testnet. - Schema (schema.graphql) - The GraphQL schema defines what data you wish to retreive from the subgraph. -- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. +- AssemblyScript Mappings (mapping.ts) - Це код, який транслює дані з ваших джерел даних до елементів, визначених у схемі. If you want to index additional data, you will need extend the manifest, schema and mappings. diff --git a/website/pages/uk/cookbook/cosmos.mdx b/website/pages/uk/cookbook/cosmos.mdx index d1c171814941..04a60bc959db 100644 --- a/website/pages/uk/cookbook/cosmos.mdx +++ b/website/pages/uk/cookbook/cosmos.mdx @@ -1,51 +1,51 @@ --- -title: Building Subgraphs on Cosmos +title: Розробка підграфів на Cosmos --- -This guide is an introduction on building subgraphs indexing [Cosmos](https://docs.cosmos.network/) based blockchains. +Цей гайд є вступом до побудови підграфів, що індексують блокчейни на основі [Cosmos](https://docs.cosmos.network/). -## What are Cosmos subgraphs? +## Що таке Cosmos підграфи? -The Graph allows developers to process blockchain events and make the resulting data easily available via an open GraphQL API, known as a subgraph. [Graph Node](https://github.com/graphprotocol/graph-node) is now able to process Cosmos events, which means Cosmos developers can now build subgraphs to easily index on-chain events. +The Graph дозволяє розробникам проводити обробку подій блокчейну і робити отримані дані легко доступними за допомогою відкритого API GraphQL, відомого як підграф. [Graph Node](https://github.com/graphprotocol/graph-node) тепер може обробляти події на Cosmos, що означає, що розробники Cosmos тепер можуть створювати підграфи для легкого індексування подій у блокчейні. -There are four types of handlers supported in Cosmos subgraphs: +У підграфах на Cosmos підтримується чотири типи обробників: -- **Block handlers** run whenever a new block is appended to the chain. -- **Event handlers** run when a specific event is emitted. -- **Transaction handlers** run when a transaction occurs. -- **Message handlers** run when a specific message occurs. +- **Обробники блоків** запускаються щоразу, коли до мережі додається новий блок. +- **Обробники подій** запускаються, коли відбувається певна подія. +- **Обробники транзакцій** запускаються, коли виконується транзакція. +- **Обробники повідомлень** запускаються, коли надходить конкретне повідомлення. 
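To make the four handler types above concrete, here is a minimal AssemblyScript sketch of the first kind, a block handler. This example is added for illustration only and is not part of the upstream page: it assumes a hypothetical `BlockRecord` entity with `height: BigInt!` and `timestamp: BigInt!` fields defined in `schema.graphql` and generated via `graph codegen`, and a manifest that registers a block handler named `handleBlock`; the `cosmos.Block` type comes from the Cosmos support in `graph-ts` described further down.

```tsx
import { BigInt, cosmos } from "@graphprotocol/graph-ts"
// `BlockRecord` is a hypothetical entity generated from schema.graphql for this sketch
import { BlockRecord } from "../generated/schema"

// Block handler: runs once for every block appended to the chain.
export function handleBlock(block: cosmos.Block): void {
  // Use the block hash (as a hex string) as the entity ID
  let id = block.header.hash.toHexString()
  let record = new BlockRecord(id)

  // header.height is a u64 and header.time.seconds an i64, so convert via
  // string in order to store them in the entity's BigInt fields
  record.height = BigInt.fromString(block.header.height.toString())
  record.timestamp = BigInt.fromString(block.header.time.seconds.toString())

  record.save()
}
```

Event, transaction, and message handlers follow the same pattern but receive `cosmos.EventData`, `cosmos.TransactionData`, and `cosmos.MessageData` respectively, as shown in the type listing below.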
-Based on the [official Cosmos documentation](https://docs.cosmos.network/): +Згідно з [офіційною документацією Cosmos](https://docs.cosmos.network/): -> [Events](https://docs.cosmos.network/main/core/events) are objects that contain information about the execution of the application. They are mainly used by service providers like block explorers and wallets to track the execution of various messages and index transactions. +> [Події (events)](https://docs.cosmos.network/main/core/events) - це об'єкти, які містять інформацію про роботу програми. Вони в основному використовуються провайдерами послуг, такими як block explorers та гаманці, для відстеження ходу виконання різних повідомлень та індексації транзакцій. -> [Transactions](https://docs.cosmos.network/main/core/transactions) are objects created by end-users to trigger state changes in the application. +> [Транзакції](https://docs.cosmos.network/main/core/transactions) - це об'єкти, створені кінцевими користувачами для ініціювання змін стану програми. -> [Messages](https://docs.cosmos.network/main/core/transactions#messages) are module-specific objects that trigger state transitions within the scope of the module they belong to. +> [Повідомлення](https://docs.cosmos.network/main/core/transactions#messages) - це специфічні для модуля об'єкти, які викликають переходи стану в межах області видимості модуля, до якого вони належать. -Even though all data can be accessed with a block handler, other handlers enable subgraph developers to process data in a much more granular way. +Хоча до всіх даних можна отримати доступ за допомогою блок-обробника, інші обробники дозволяють розробникам підграфів обробляти дані у значно детальніший спосіб. -## Building a Cosmos subgraph +## Розробка підграфів на Cosmos ### Subgraph Dependencies -[graph-cli](https://github.com/graphprotocol/graph-cli) is a CLI tool to build and deploy subgraphs, version `>=0.30.0` is required in order to work with Cosmos subgraphs. +[graph-cli](https://github.com/graphprotocol/graph-cli) - інструмент CLI для побудови та розгортання підграфів, для роботи з підграфами на Cosmos потрібна версія `>=0.30.0`. -[graph-ts](https://github.com/graphprotocol/graph-ts) is a library of subgraph-specific types, version `>=0.27.0` is required in order to work with Cosmos subgraphs. +[graph-ts](https://github.com/graphprotocol/graph-ts) - бібліотека специфічних для підграфів типів, для роботи з підграфами на Cosmos потрібна версія `>=0.27.0`. -### Subgraph Main Components +### Основні компоненти підграфа -There are three key parts when it comes to defining a subgraph: +Визначення підграфа складається з трьох ключових компонентів: -**subgraph.yaml**: a YAML file containing the subgraph manifest, which identifies which events to track and how to process them. +**subgraph.yaml**: YAML-файл, що містить маніфест підграфів, який визначає, які події відстежувати і яким чином їх обробляти. -**schema.graphql**: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL. +**schema.graphql**: схема GraphQL, яка визначає, які дані зберігаються для вашого підграфа і як їх запитувати через GraphQL. -**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from blockchain data to the entities defined in your schema. +**AssemblyScript Mappings**: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) переводить дані блокчейну в елементи, визначені у вашій схемі. 
-### Subgraph Manifest Definition +### Визначення маніфесту підграфів -The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions (`handlers`) that should be run in response to those triggers. See below for an example subgraph manifest for a Cosmos subgraph: +Маніфест підграфа (`subgraph.yaml`) визначає джерела даних для підграфа, тригери, що нас цікавлять, та функції (`handlers`), які слід запускати у відповідь на ці тригери. Нижче наведено приклад маніфесту підграфів для підграфа на Cosmos: ```yaml specVersion: 0.0.5 @@ -74,18 +74,18 @@ dataSources: file: ./src/mapping.ts # link to the file with the Assemblyscript mappings ``` -- Cosmos subgraphs introduce a new `kind` of data source (`cosmos`). -- The `network` should correspond to a chain in the Cosmos ecosystem. In the example, the Cosmos Hub mainnet is used. +- Підграфи на Cosmos вводять новий `kind` джерела даних (`cosmos`). +- `Мережа блокчейну` повинна відповідати мережі в екосистемі Cosmos. У прикладі використовується основна мережа Cosmos Hub. -### Schema Definition +### Визначення схеми -Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph/#the-graph-ql-schema). +Визначення схеми описує структуру отриманої бази даних підграфів та зв'язки між елементами. Це не залежить від вихідного джерела даних. Детальніше з визначенням схеми підграфів ви можете ознайомитись [тут](/developing/creating-a-subgraph/#the-graph-ql-schema). ### AssemblyScript Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). -Cosmos indexing introduces Cosmos-specific data types to the [AssemblyScript API](/developing/assemblyscript-api/). +Індексація Cosmos вводить специфічні для Cosmos типи даних до [AssemblyScript API](/developing/assemblyscript-api/). ```tsx class Block { @@ -163,40 +163,40 @@ class Any { } ``` -Each handler type comes with its own data structure that is passed as an argument to a mapping function. +Кожен тип обробника має власну структуру даних, яка передається як аргумент функції маппінгу. -- Block handlers receive the `Block` type. -- Event handlers receive the `EventData` type. -- Transaction handlers receive the `TransactionData` type. -- Message handlers receive the `MessageData` type. +- Обробники блоків отримують код типу `Block`. +- Обробники подій отримують код типу `EventData`. +- Обробники транзакцій отримують код типу `TransactionData`. +- Обробники повідомлень отримують код типу `MessageData`. -As a part of `MessageData` the message handler receives a transaction context, which contains the most important information about a transaction that encompasses a message. The transaction context is also available in the `EventData` type, but only when the corresponding event is associated with a transaction. Additionally, all handlers receive a reference to a block (`HeaderOnlyBlock`). +Як частина `MessageData` обробник повідомлення отримує контекст транзакції, який містить найважливішу інформацію про транзакцію, що охоплює повідомлення. Контекст транзакції також доступний у коді типу `EventData`, але лише тоді, коли відповідна подія пов'язана з транзакцією. 
Додатково всі обробники отримують посилання на блок (`HeaderOnlyBlock`). -You can find the full list of types for the Cosmos integration [here](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). +Ви можете знайти повний список типів коду для інтеграції в Cosmos [тут](https://github.com/graphprotocol/graph-ts/blob/4c064a8118dff43b110de22c7756e5d47fcbc8df/chain/cosmos.ts). -### Message decoding +### Розшифровка повідомлень -It's important to note that Cosmos messages are chain-specific and they are passed to a subgraph in the form of a serialized [Protocol Buffers](https://developers.google.com/protocol-buffers/) payload. As a result, the message data needs to be decoded in a mapping function before it can be processed. +Важливо зазначити, що повідомлення Cosmos є специфічними та передаються до підграфа у вигляді послідовного [Protocol Buffers](https://developers.google.com/protocol-buffers/) навантаження. Як наслідок, дані повідомлення повинні бути розшифровані у функції схеми, перш ніж їх можна буде обробити. -An example of how to decode message data in a subgraph can be found [here](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). +Приклад розшифровки даних повідомлення в підграфі можна знайти [тут](https://github.com/graphprotocol/graph-tooling/blob/main/examples/cosmos-validator-delegations/src/decoding.ts). -## Creating and building a Cosmos subgraph +## Створення та побудова підграфа на Cosmos -The first step before starting to write the subgraph mappings is to generate the type bindings based on the entities that have been defined in the subgraph schema file (`schema.graphql`). This will allow the mapping functions to create new objects of those types and save them to the store. This is done by using the `codegen` CLI command: +Першим кроком перед початком написання схем підграфів є генерація приналежності типів на основі елементів, які були визначені у файлі схеми підграфів (`schema.graphql`). Це дозволить функціям схем створювати нові об'єкти цих типів і зберігати їх у сховищі. Це робиться за допомогою використання CLI команди `codegen`: ```bash $ graph codegen ``` -Once the mappings are ready, the subgraph needs to be built. This step will highlight any errors the manifest or the mappings might have. A subgraph needs to build successfully in order to be deployed to the Graph Node. It can be done using the `build` CLI command: +Після того, як схеми готові, потрібно побудувати підграф. На цьому кроці буде показано всі помилки, які можуть бути у маніфесті або схемах. Підграф має бути успішно побудований для того, щоб його можна було розгорнути у Graph Node. 
Це можна зробити, використовуючи CLI команду `build`: ```bash $ graph build ``` -## Deploying a Cosmos subgraph +## Розгортання підграфа на Cosmos -Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: +Після створення підграфа ви можете розгорнути його за допомогою CLI команди `graph deploy` після запуску команди CLI `graph create`: **Хостинговий сервіс** @@ -208,7 +208,7 @@ graph create account/subgraph-name --product hosted-service graph deploy account/subgraph-name --product hosted-service ``` -**Local Graph Node (based on default configuration):** +**Локальна Graph Node (на основі конфігурації за замовчуванням):** ```bash graph create subgraph-name --node http://localhost:8020 @@ -218,42 +218,42 @@ graph create subgraph-name --node http://localhost:8020 graph deploy subgraph-name --node http://localhost:8020/ --ipfs http://localhost:5001 ``` -## Querying a Cosmos subgraph +## Запит до Cosmos підграфа -The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api/) for more information. +Кінцева точка GraphQL для підграфів Cosmos визначається визначенням схеми за допомогою наявного інтерфейсу API. Відвідайте [GraphQL API документацію](/querying/graphql-api/), щоб дізнатись більше. -## Supported Cosmos Blockchains +## Блокчейни, що підтримуються Cosmos ### Cosmos Hub -#### What is Cosmos Hub? +#### Що таке Cosmos Hub? -The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. +[Cosmos Hub блокчейн](https://hub.cosmos.network/) це перша мережа в екосистемі [Cosmos](https://cosmos.network/). Ви можете відвідати [офіційну документацію](https://docs.cosmos.network/), щоб дізнатись більше. -#### Networks +#### Мережі -Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
    Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. +Основна мережа Cosmos Hub - `cosmoshub-4`. Актуальна тестова мережа Cosmos Hub - `theta-testnet-001`.
    Інші Cosmos Hub мережі, зокрема `cosmoshub-3`, зупинені, тому дані для них не надаються. ### Osmosis -> Osmosis support in Graph Node and on the Hosted Service is in beta: please contact the graph team with any questions about building Osmosis subgraphs! +> Підтримка Osmosis у Graph Node та в хостинговому сервісі знаходиться на стадії бета-версії: будь ласка, звертайтеся до команди розробників The Graph з будь-якими питаннями щодо побудови підграфів на Osmosis! -#### What is Osmosis? +#### Що таке Osmosis? -[Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. +[Osmosis](https://osmosis.zone/) - це децентралізований, крос-чейн протокол автоматизованого маркет-мейкера (AMM), побудований на основі Cosmos SDK. Він дозволяє користувачам створювати власні пули ліквідності та торгувати токенами з підтримкою IBC. Ви можете відвідати [офіційну документацію](https://docs.osmosis.zone/), щоб дізнатися більше. -#### Networks +#### Мережі -Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. +Основна мережа Osmosis - `osmosis-1`. Актуальна тестова мережа Osmosis - `osmo-test-4`. -## Example Subgraphs +## Приклади підграфів -Here are some example subgraphs for reference: +Ось кілька прикладів підграфів для ознайомлення: -[Block Filtering Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) +[Приклад фільтрації блоків](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-block-filtering) -[Validator Rewards Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) +[Приклад винагороди для валідатора](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-rewards) -[Validator Delegations Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) +[Приклад делегування для валідаторів](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-validator-delegations) -[Osmosis Token Swaps Example](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) +[Приклад обміну токенів Osmosis](https://github.com/graphprotocol/graph-tooling/tree/main/examples/cosmos-osmosis-token-swaps) diff --git a/website/pages/uk/cookbook/grafting.mdx b/website/pages/uk/cookbook/grafting.mdx index 54ad7a0eaff8..a61eabe8cb28 100644 --- a/website/pages/uk/cookbook/grafting.mdx +++ b/website/pages/uk/cookbook/grafting.mdx @@ -1,40 +1,56 @@ --- -title: Replace a Contract and Keep its History With Grafting +title: Замініть контракт та збережіть його історію за допомогою графтингу --- -In this guide, you will learn how to build and deploy new subgraphs by grafting existing subgraphs. +У цьому гайді ви дізнаєтеся, як створювати та розгортати нові підграфи шляхом поєднання наявних підграфів. -## What is Grafting? +## Що таке Grafting? -Grafting reuses the data from an existing subgraph and starts indexing it at a later block. This is useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. Also, it can be used when adding a feature to a subgraph that takes long to index from scratch. 
+При цьому процесі повторно використовуються дані з наявного підграфа і починається їх індексування з наступного блоку. Це корисно під час розробки для швидкого усунення простих помилок у схемах або для тимчасового відновлення працездатності наявного підграфа після його збою. Також його можна використовувати при додаванні об'єкта до підграфа, індексація якого з нуля займає багато часу. -The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: +Підграф, утворений в результаті може використовувати схему GraphQL, яка не є ідентичною схемі базового підграфа, а лише сумісною з нею. Вона повинна бути валідною схемою підграфа сама по собі, але може відхилятися від схеми базового підграфа у такому випадку: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Додає або видаляє типи елементів +- Видаляє атрибути з типів елементів +- Додає до типів об'єктів атрибути, які можна скасувати +- Перетворює атрибути, які не можна скасувати, на атрибути, які можна скасувати +- Додає значення до переліків +- Додає або видаляє інтерфейси +- Визначає, для яких типів елементів реалізовано інтерфейс -For more information, you can check: +Для отримання додаткової інформації ви можете ознайомитися: - [Grafting](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) -In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +У цьому матеріалі ми розглянемо базовий випадок використання. Ми замінимо наявний контракт на ідентичний (з новою адресою, але тим самим кодом). -## Building an Existing Subgraph +## Важливе зауваження щодо графтингу при оновленні в мережі -Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: +> **Увага**: якщо ви оновлюєте свій підграф за допомогою Subgraph Studio або хостингового сервісу для децентралізованої мережі, настійно рекомендується уникати використання графтингу під час процесу оновлення. -- [Subgraph example repo](https://github.com/t-proctor/grafting-tutorial) +### Чому це так важливо? -> Note: The contract used in the subgraph was taken from the following [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). +Grafting - це потужна функція, яка дозволяє " накладати" один підграф на інший, ефективно переносячи історичні дані з наявного підграфа в нову версію. Хоча це ефективний спосіб зберегти дані та заощадити час на індексацію, але при перенесенні з хостингу в децентралізовану мережу можуть виникнути складнощі та потенційні проблеми. Неможливо трансплантувати підграф з The Graph Network назад до хостингового сервісу або Subgraph Studio. 
-## Subgraph Manifest Definition +### Найкращі практики -The subgraph manifest `subgraph.yaml` identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest that you will use: +**Початкова міграція**: коли ви вперше розгортаєте підграф у децентралізованій мережі, робіть це без графтингу. Переконайтеся, що підграф стабільний і функціонує належним чином. + +**Подальші оновлення**: після того, як ваш підграф буде запущено і стабільно працюватиме у децентралізованій мережі, ви можете використовувати графтинг для майбутніх версій, щоб зробити перехід більш плавним і зберегти історичні дані. + +Дотримуючись цих рекомендацій, ви мінімізуєте ризики та забезпечите безперешкодний процес міграції. + +## Побудова наявного підграфа + +Побудова підграфів є важливою частиною The Graph, про яку ми розповімо докладніше [тут](http://localhost:3000/en/cookbook/quick-start/). Для того, щоб мати змогу побудувати та розгорнути наявний підграф, який використовується у цьому матеріалі, надається наступний репозиторій: + +- [Репозиторій з прикладом та відповідним підграфом](https://github.com/t-proctor/grafting-tutorial) + +> Примітка: Контракт, що використаний у підграфі, був взятий з [Hackathon Starterkit](https://github.com/schmidsi/hackathon-starterkit). + +## Визначення маніфесту підграфів + +Маніфест підграфів `subgraph.yaml` визначає джерела даних для підграфа, тригери, що нас цікавлять, та функції, які слід запускати у відповідь на ці тригери. Нижче наведено приклад маніфесту підграфів, який ви будете використовувати: ```yaml specVersion: 0.0.4 @@ -63,13 +79,13 @@ dataSources: file: ./src/lock.ts ``` -- The `Lock` data source is the abi and contract address we will get when we compile and deploy the contract -- The network should correspond to a indexed network being queried. Since we're running on Goerli testnet, the network is `goerli` -- The `mapping` section defines the triggers of interest and the functions that should be run in response to those triggers. In this case, we are listening for the `Withdrawal` event and calling the `handleWithdrawal` function when it is emitted. +- Джерелом даних `Lock` є адреса abi та адреса контракту, яку ми отримаємо під час компіляції та розгортання контракту +- Дана мережа повинна відповідати індексованій мережі, яку ми запитуємо. Оскільки ми працюємо в тестовій мережі Goerli, то ми використовуємо `goerli` +- Розділ `mapping` визначає тригери, що нас цікавлять, і функції, які мають бути запущені у відповідь на ці тригери. У цьому випадку ми очікуємо на `Withdrawal` і після цього викликаємо функцію `handleWithdrawal` коли вона з'являється. -## Grafting Manifest Definition +## Визначення Grafting Manifest -Grafting requires adding two new items to the original subgraph manifest: +Графтинг вимагає додавання двох нових елементів до оригінального маніфесту підграфів: ```yaml --- @@ -80,16 +96,16 @@ graft: block: 1502122 # block number ``` -- `features:` is a list of all used [feature names](developing/creating-a-subgraph/#experimental-features). -- `graft:` is a map of the `base` subgraph and the block to graft on to. The `block` is the block number to start indexing from. The Graph will copy the data of the base subgraph up to and including the given block and then continue indexing the new subgraph from that block on. +- `features:` це список всіх використаних [feature names](developing/creating-a-subgraph/#experimental-features). 
+- `graft:` це схема підграфа `base` і блоку, з яким відбудеться графтинг. `block` - це номер блоку, з якого починається індексування. The Graph скопіює дані базового підграфа до заданого блоку включно, а потім продовжить індексування нового підграфа, починаючи з цього блоку. -The `base` and `block` values can be found by deploying two subgraphs: one for the base indexing and one with grafting +`base` і `block` можна знайти, розгорнувши два підграфа: один для базової індексації та один для графтингу -## Deploying the Base Subgraph +## Розгортання базового підграфа -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-example` -2. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-example` folder from the repo -3. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Перейдіть на [The Graph Studio UI](https://thegraph.com/studio/) і створіть підграф в тестовій мережі Goerli під назвою `graft-example` +2. Дотримуйтесь інструкцій у розділах `AUTH & DEPLOY` на сторінці вашого підграфа в папці `graft-example` з репозиторію +3. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground ```graphql { @@ -101,7 +117,7 @@ The `base` and `block` values can be found by deploying two subgraphs: one for t } ``` -It returns something like this: +Це повертає щось на зразок цього: ``` { @@ -122,16 +138,16 @@ It returns something like this: } ``` -Once you have verified the subgraph is indexing properly, you can quickly update the subgraph with grafting. +Після того, як ви переконалися, що підграф індексується належним чином, ви можете швидко оновити його за допомогою графтингу. -## Deploying the Grafting Subgraph +## Розгортання підграфів для графтингу -The graft replacement subgraph.yaml will have a new contract address. This could happen when you update your dapp, redeploy a contract, etc. +При цьому процесі підрозділ subgraph.yaml матиме нову адресу контракту. Це може статися, коли ви оновлюєте децентралізований додаток, перерозподіляєте контракт тощо. -1. Go to [The Graph Studio UI](https://thegraph.com/studio/) and create a subgraph on Goerli testnet called `graft-replacement` -2. Create a new manifest. The `subgraph.yaml` for `graph-replacement` contains a different contract address and new information about how it should graft. These are the `block` of the [last event emitted](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) you care about by the old contract and the `base` of the old subgraph. The `base` subgraph ID is the `Deployment ID` of your original `graph-example` subgraph. You can find this in The Graph Studio UI. -3. Follow the directions in the `AUTH & DEPLOY` section on your subgraph page in the `graft-replacement` folder from the repo -4. Once finished, verify the subgraph is indexing properly. If you run the following command in The Graph Playground +1. Перейдіть на [The Graph Studio UI](https://thegraph.com/studio/) і створіть підграф в тестовій мережі Goerli під назвою `graft-replacement` +2. Створіть новий маніфест. `subgraph.yaml` для `graph-replacement` який містить іншу контрактну адресу та нову інформацію про те, як має відбуватися графтинг. 
Це `block` [останньої події, що відбулася](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498), старого контракту і `base` старого підграфа `base` Subgraph ID це `Deployment ID` вашого оригінального `graph-example` підграфа. Ви можете побачити це в The Graph Studio UI. +3. Дотримуйтесь інструкцій у розділах `AUTH & DEPLOY` на вашій сторінці підграфа в папці `graft-replacement` з репозиторію +4. Закінчивши, перевірте, чи правильно індексується підграф. Ви можете зробити це запустивши наступну команду у вікні The Graph Playground ```graphql { @@ -143,7 +159,7 @@ The graft replacement subgraph.yaml will have a new contract address. This could } ``` -It should return the following: +Це має повернути наступне: ``` { @@ -169,18 +185,18 @@ It should return the following: } ``` -You can see that the `graft-replacement` subgraph is indexing from older `graph-example` data and newer data from the new contract address. The original contract emitted two `Withdrawal` events, [Event 1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) and [Event 2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). The new contract emitted one `Withdrawal` after, [Event 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). The two previously indexed transactions (Event 1 and 2) and the new transaction (Event 3) were combined together in the `graft-replacement` subgraph. +Ви можете помітити, що `graft-replacement` та підграф індексується за допомогою старих даних `graph-example` і нових даних з нової адреси контракту. Оригінальний контракт містив дві `Withdrawal` події [1](https://goerli.etherscan.io/tx/0x800c92fcc0edbd26f74e19ad058c62008a47c7789f2064023b987028343dd498) and [2](https://goerli.etherscan.io/address/0x4ed995e775d3629b0566d2279f058729ae6ea493). Новий контракт, в свою чергу, містив одну `Withdrawal` подію [ 3](https://goerli.etherscan.io/tx/0xb4010e4c76f86762beb997a13cf020231778eaf7c64fa3b7794971a5e6b343d3). Дві раніше проіндексовані транзакції (Подія 1 і 2) і нова транзакція (Подія 3) були об'єднані в один підграф `graft-replacement`. -Congrats! You have succesfully grafted a subgraph onto another subgraph. +Вітаємо! Ви успішно здійснили графтинг одного підграфа з іншим. -## Additional Resources +## Додаткові матеріали -If you want more experience with grafting, here's a few examples for popular contracts: +Якщо ви хочете отримати більше досвіду роботи зі процесом графтингу, ось кілька прикладів популярних контрактів: - [Curve](https://github.com/messari/subgraphs/blob/master/subgraphs/curve-finance/protocols/curve-finance/templates/curve.template.yaml) - [ERC-721](https://github.com/messari/subgraphs/blob/master/subgraphs/erc721-metadata/subgraph.yaml) - [Uniswap](https://github.com/messari/subgraphs/blob/master/subgraphs/uniswap-v3/protocols/uniswap-v3/config/templates/uniswap.v3.template.yaml), -To become even more of a Graph expert, consider learning about other ways to handle changes in underlying datasources. Alternatives like [Data Source Templates](developing/creating-a-subgraph/#data-source-templates) can achieve similar results +Щоб стати ще кращим експертом в The Graph, розгляньте інші способи обробки змін в основних джерелах даних. 
За допомогою альтернатив по типу [Data Source Templates](developing/creating-a-subgraph/#data-source-templates) можна досягти подібних результатів -> Note: A lot of material from this article was taken from the previously published [Arweave article](/cookbook/arweave/) +> Примітка: Багато матеріалів для цієї статті було взято з раніше опублікованої [ статті від Arweave](/cookbook/arweave/) diff --git a/website/pages/uk/cookbook/near.mdx b/website/pages/uk/cookbook/near.mdx index 93ae5b8ef268..71137c7ce838 100644 --- a/website/pages/uk/cookbook/near.mdx +++ b/website/pages/uk/cookbook/near.mdx @@ -48,7 +48,7 @@ $ graph codegen # generates types from the schema file identified in the manifes $ graph build # generates Web Assembly from the AssemblyScript files, and prepares all the subgraph files in a /build folder ``` -### Subgraph Manifest Definition +### Визначення маніфесту підграфів The subgraph manifest (`subgraph.yaml`) identifies the data sources for the subgraph, the triggers of interest, and the functions that should be run in response to those triggers. See below for an example subgraph manifest for a NEAR subgraph: @@ -92,13 +92,13 @@ NEAR data sources support two types of handlers: - `blockHandlers`: run on every new NEAR block. No `source.account` is required. - `receiptHandlers`: run on every receipt where the data source's `source.account` is the recipient. Note that only exact matches are processed ([subaccounts](https://docs.near.org/docs/concepts/account#subaccounts) must be added as independent data sources). -### Schema Definition +### Визначення схеми Schema definition describes the structure of the resulting subgraph database and the relationships between entities. This is agnostic of the original data source. There are more details on subgraph schema definition [here](/developing/creating-a-subgraph#the-graphql-schema). ### AssemblyScript Mappings -The handlers for processing events are written in [AssemblyScript](https://www.assemblyscript.org/). +Обробники для виконання подій написані на мові [AssemblyScript](https://www.assemblyscript.org/). NEAR indexing introduces NEAR-specific data types to the [AssemblyScript API](/developing/assemblyscript-api). @@ -231,9 +231,9 @@ We will provide more information on running the above components soon. The GraphQL endpoint for NEAR subgraphs is determined by the schema definition, with the existing API interface. Please visit the [GraphQL API documentation](/querying/graphql-api) for more information. -## Example Subgraphs +## Приклади підграфів -Here are some example subgraphs for reference: +Ось кілька прикладів підграфів для ознайомлення: [NEAR Blocks](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-blocks) @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
## References diff --git a/website/pages/uk/cookbook/upgrading-a-subgraph.mdx b/website/pages/uk/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/uk/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/uk/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/uk/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/uk/deploying/hosted-service.mdx b/website/pages/uk/deploying/hosted-service.mdx index ce00e43a8da9..0d5df683852a 100644 --- a/website/pages/uk/deploying/hosted-service.mdx +++ b/website/pages/uk/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/uk/deploying/subgraph-studio.mdx b/website/pages/uk/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/uk/deploying/subgraph-studio.mdx +++ b/website/pages/uk/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. 
This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/uk/developing/creating-a-subgraph.mdx b/website/pages/uk/developing/creating-a-subgraph.mdx index 1fc288833c35..68f00c22f6e2 100644 --- a/website/pages/uk/developing/creating-a-subgraph.mdx +++ b/website/pages/uk/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. 
| | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,11 +1019,11 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. -### Overview +### Короткий огляд Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. 
These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/uk/developing/developer-faqs.mdx b/website/pages/uk/developing/developer-faqs.mdx index 0f524d80c237..598387ca119f 100644 --- a/website/pages/uk/developing/developer-faqs.mdx +++ b/website/pages/uk/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? 
Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/uk/developing/graph-ts/api.mdx b/website/pages/uk/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/uk/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. 
+
+| Version | Release notes |
+| :-: | --- |
+| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types<br />Added `receipt` field to the Ethereum Event object |
+| 0.0.6 | Added `nonce` field to the Ethereum Transaction object<br />Added `baseFeePerGas` to the Ethereum Block object |
+| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))<br />`ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` |
+| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object |
+| 0.0.3 | Added `from` field to the Ethereum Call object<br />`ethereum.call.address` renamed to `ethereum.call.to` |
+| 0.0.2 | Added `input` field to the Ethereum Transaction object |
+
+### Built-in Types
+
+Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types).
+
+The following additional types are provided by `@graphprotocol/graph-ts`.
+
+#### ByteArray
+
+```typescript
+import { ByteArray } from '@graphprotocol/graph-ts'
+```
+
+`ByteArray` represents an array of `u8`.
+
+_Construction_
+
+- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes.
+- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional.
+
+_Type conversions_
+
+- `toHexString(): string` - Converts to a hex string prefixed with `0x`.
+- `toString(): string` - Interprets the bytes as a UTF-8 string.
+- `toBase58(): string` - Encodes the bytes into a base58 string.
+- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow.
+- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow.
+
+_Operators_
+
+- `equals(y: ByteArray): bool` – can be written as `x == y`.
+- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other`
+- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other`
+
+#### BigDecimal
+
+```typescript
+import { BigDecimal } from '@graphprotocol/graph-ts'
+```
+
+`BigDecimal` is used to represent arbitrary precision decimals.
+
+> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent.
+
+_Construction_
+
+- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from a `BigInt`.
+- `static fromString(s: string): BigDecimal` – parses from a decimal string.
+
+_Type conversions_
+
+- `toString(): string` – prints to a decimal string.
+
+_Math_
+
+- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`.
+- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`.
+- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`.
+- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`.
+- `equals(y: BigDecimal): bool` – can be written as `x == y`.
+- `notEqual(y: BigDecimal): bool` – can be written as `x != y`.
+- `lt(y: BigDecimal): bool` – can be written as `x < y`.
+- `le(y: BigDecimal): bool` – can be written as `x <= y`.
+- `gt(y: BigDecimal): bool` – can be written as `x > y`.
+- `ge(y: BigDecimal): bool` – can be written as `x >= y`.
+- `neg(): BigDecimal` - can be written as `-x`.
+
+#### BigInt
+
+```typescript
+import { BigInt } from '@graphprotocol/graph-ts'
+```
+
+`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`.
+
+The `BigInt` class has the following API:
+
+_Construction_
+
+- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`.
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
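+As a quick illustration of how the numeric and byte types above can be combined in a mapping, here is a minimal sketch (the literal values are arbitrary and chosen only for demonstration):
+
+```typescript
+import { BigInt, Bytes } from '@graphprotocol/graph-ts'
+
+// Build a BigInt from a decimal string and do some arithmetic on it
+let base = BigInt.fromString('1000000000000000000') // 10^18, e.g. 1 ETH in wei
+let doubled = base.times(BigInt.fromI32(2)) // equivalent to base * 2
+
+// Convert to strings for logging or for string-typed entity fields
+let asDecimal = doubled.toString() // "2000000000000000000"
+let asHex = doubled.toHex() // hexadecimal representation, e.g. "0x1bc16d674ec80000"
+
+// Round-trip a hex string through a byte array
+let raw = Bytes.fromHexString('0x1bc16d674ec80000')
+let backToHex = raw.toHexString() // hex string prefixed with "0x"
+```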
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
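+// ...set any remaining fields the same way, then persist the changes with transfer.save()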
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
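+A common variant of this pattern is to fall back to a default value when the call reverts. Below is a minimal sketch, assuming the generated `ERC20Contract` binding from the previous example also exposes a `try_symbol` method (generated when `symbol` is part of the contract ABI):
+
+```typescript
+import { Address } from '@graphprotocol/graph-ts'
+import { ERC20Contract } from '../generated/ERC20Contract/ERC20Contract'
+
+// Read a token symbol, falling back to a placeholder if the call reverts
+function fetchSymbol(tokenAddress: Address): string {
+  let contract = ERC20Contract.bind(tokenAddress)
+  let symbolResult = contract.try_symbol()
+  if (symbolResult.reverted) {
+    return 'unknown'
+  }
+  return symbolResult.value
+}
+```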
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
+
+### Crypto API
+
+```typescript
+import { crypto } from '@graphprotocol/graph-ts'
+```
+
+The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one:
+
+- `crypto.keccak256(input: ByteArray): ByteArray`
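+
+As an illustration (not from the original reference), a mapping could hash arbitrary data as follows; the input string and the use of the digest as an entity ID are assumptions made for this sketch:
+
+```typescript
+import { ByteArray, crypto } from '@graphprotocol/graph-ts'
+
+// Put this inside an event handler in the mapping
+let input = ByteArray.fromUTF8('hello world')
+let digest = crypto.keccak256(input)
+
+// The 32-byte digest can be hex-encoded, e.g. to derive a deterministic entity ID
+let id = digest.toHexString()
+```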
+
+### JSON API
+
+```typescript
+import { json, JSONValueKind } from '@graphprotocol/graph-ts'
+```
+
+JSON data can be parsed using the `json` API:
+
+- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence
+- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed
+- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String`
+- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`, it returns an error variant if the parsing failed
+
+The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value:
+
+```typescript
+let value = json.fromBytes(...)
+if (value.kind == JSONValueKind.BOOL) {
+  ...
+}
+```
+
+In addition, there is a method to check if the value is `null`:
+
+- `value.isNull(): boolean`
+
+When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods:
+
+- `value.toBool(): boolean`
+- `value.toI64(): i64`
+- `value.toF64(): f64`
+- `value.toBigInt(): BigInt`
+- `value.toString(): string`
+- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above)
+
+### Type Conversions Reference
+
+| Source(s) | Destination | Conversion function |
+| -------------------- | -------------------- | ---------------------------- |
+| Address | Bytes | none |
+| Address | String | s.toHexString() |
+| BigDecimal | String | s.toString() |
+| BigInt | BigDecimal | s.toBigDecimal() |
+| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() |
+| BigInt | String (unicode) | s.toString() |
+| BigInt | i32 | s.toI32() |
+| Boolean | Boolean | none |
+| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) |
+| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) |
+| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() |
+| Bytes | String (unicode) | s.toString() |
+| Bytes | String (base58) | s.toBase58() |
+| Bytes | i32 | s.toI32() |
+| Bytes | u32 | s.toU32() |
+| Bytes | JSON | json.fromBytes(s) |
+| int8 | i32 | none |
+| int32 | i32 | none |
+| int32 | BigInt | BigInt.fromI32(s) |
+| uint24 | i32 | none |
+| int64 - int256 | BigInt | none |
+| uint32 - uint256 | BigInt | none |
+| JSON | boolean | s.toBool() |
+| JSON | i64 | s.toI64() |
+| JSON | u64 | s.toU64() |
+| JSON | f64 | s.toF64() |
+| JSON | BigInt | s.toBigInt() |
+| JSON | string | s.toString() |
+| JSON | Array | s.toArray() |
+| JSON | Object | s.toObject() |
+| String | Address | Address.fromString(s) |
+| Bytes | Address | Address.fromBytes(s) |
+| String | BigInt | BigInt.fromString(s) |
+| String | BigDecimal | BigDecimal.fromString(s) |
+| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) |
+| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) |
+
+### Data Source Metadata
+
+You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace:
+
+- `dataSource.address(): Address`
+- `dataSource.network(): string`
+- `dataSource.context(): DataSourceContext`
+
+### Entity and DataSourceContext
+
+The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields:
+
+- `setString(key: string, value: string): void`
+- `setI32(key: string, value: i32): void`
+- `setBigInt(key: string, value: BigInt): void`
+- `setBytes(key: string, value: Bytes): void`
+- `setBoolean(key: string, value: bool): void`
+- `setBigDecimal(key: string, value: BigDecimal): void`
+- `getString(key: string): string`
+- `getI32(key: string): i32`
+- `getBigInt(key: string): BigInt`
+- `getBytes(key: string): Bytes`
+- `getBoolean(key: string): boolean`
+- `getBigDecimal(key: string): BigDecimal`
+
+### DataSourceContext in Manifest
+
+The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`.
+
+Here is a YAML example illustrating the usage of various types in the `context` section:
+
+```yaml
+dataSources:
+  - kind: ethereum/contract
+    name: ContractName
+    network: mainnet
+    context:
+      bool_example:
+        type: Bool
+        data: true
+      string_example:
+        type: String
+        data: 'hello'
+      int_example:
+        type: Int
+        data: 42
+      int8_example:
+        type: Int8
+        data: 127
+      big_decimal_example:
+        type: BigDecimal
+        data: '10.99'
+      bytes_example:
+        type: Bytes
+        data: '0x68656c6c6f'
+      list_example:
+        type: List
+        data:
+          - type: Int
+            data: 1
+          - type: Int
+            data: 2
+          - type: Int
+            data: 3
+      big_int_example:
+        type: BigInt
+        data: '1000000000000000000000000'
+```
+
+- `Bool`: Specifies a Boolean value (`true` or `false`).
+- `String`: Specifies a String value.
+- `Int`: Specifies a 32-bit integer.
+- `Int8`: Specifies an 8-bit integer.
+- `BigDecimal`: Specifies a decimal number. Must be quoted.
+- `Bytes`: Specifies a hexadecimal string.
+- `List`: Specifies a list of items. Each item needs to specify its type and data.
+- `BigInt`: Specifies a large integer value. Must be quoted due to its large size.
+
+This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs.
diff --git a/website/pages/uk/developing/graph-ts/common-issues.mdx b/website/pages/uk/developing/graph-ts/common-issues.mdx
new file mode 100644
index 000000000000..5b99efa8f493
--- /dev/null
+++ b/website/pages/uk/developing/graph-ts/common-issues.mdx
@@ -0,0 +1,8 @@
+---
+title: Common AssemblyScript Issues
+---
+
+There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debugging difficulty, but being aware of them may help. The following is a non-exhaustive list of these issues:
+
+- `Private` class variables are not enforced in [AssemblyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object.
+- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used.
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/uk/developing/supported-networks.mdx b/website/pages/uk/developing/supported-networks.mdx index f84ea192a0c6..398b3257db7a 100644 --- a/website/pages/uk/developing/supported-networks.mdx +++ b/website/pages/uk/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/uk/firehose.mdx b/website/pages/uk/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/uk/firehose.mdx +++ b/website/pages/uk/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. 
Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/uk/glossary.mdx b/website/pages/uk/glossary.mdx index 695e447136d7..2b202bd836aa 100644 --- a/website/pages/uk/glossary.mdx +++ b/website/pages/uk/glossary.mdx @@ -12,7 +12,7 @@ title: Глосарій - **Підграф**: Спеціальний API, побудований на даних блокчейну, які можна запитувати за допомогою [GraphQL](https://graphql.org/). Розробники можуть створювати, розгортати та публікувати підграфи в децентралізованій мережі The Graph. Після цього індексатори можуть почати індексувати підграфи, щоб зробити їх доступними для запитів зі сторони користувачів підграфів. -- **Hosted Service**: Тимчасовий сервіс для розробки та запитів до підграфів, оскільки децентралізована мережа The Graph розвивається, підвищуючи вартість послуг, якість обслуговування та накопиченому досвіду розробників. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Індексатори**: Користувачі мережі, які запускають ноди індексації для індексування даних з блокчейнів та обслуговування запитів до GraphQL. @@ -24,6 +24,8 @@ title: Глосарій - **Indexer's Self Stake**: Сума токенів GRT, яку Індексатори стейкають, щоб брати участь у децентралізації мережі. Мінімальна сума становить 100 000 GRT, без верхнього ліміту. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Делегати**: Користувачі мережі, які володіють токеном GRT та делегують його Індексаторам. Це дозволяє індексаторам збільшити кількість застейканих токенів на власних підграфах всередині мережі. Натомість делегати отримують частину винагороди за індексування, яку індексатори отримують за свою роботу. - **Delegation Tax**: Комісія у розмірі 0.5% сплачується делегатами, коли вони делегують власні GRT індексаторам. GRT, який використовувався для сплати цієї комісії, спалюється. 
@@ -38,27 +40,21 @@ title: Глосарій - **Маніфест підграфів**: JSON-файл, який описує схему GraphQL підграфа, джерела даних та інші метадані. [Тут](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) ви можете побачити приклад. -- **Rebate Pool**: Засіб економічної безпеки, який утримує плату за запити, сплачену користувачами підграфів, доти, доки індексатори не зможуть заклеймити її як надходження від користувачів підграфів за обслуговування запитів. Токени GRT, які залишилися, спалюються. - -- **Епоха**: Одиниця часу в мережі. Наразі одна епоха становить 6 646 блоків або приблизно 1 день. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Розподіл**: Індексатор може розподілити свою загальну частку застейканих GRT токенів (включаючи токени від делегатів) на підграфи, які були розміщені в децентралізованій мережі The Graph. Розподіл відбувається на одному з чотирьох етапів. 1. **Активний**: Розподіл вважається активним, коли він створюється всередині мережі. Це називається відкриттям розподілу і вказує мережі на те, що індексатор активно індексує та обслуговує запити для конкретного підграфа. При активному розподілі нараховується винагорода за індексацію пропорційно до кількості сигналів на підграфі та суми розподілених GRT токенів. - 2. **Закриття**: Індексатор може отримати нараховану винагороду за індексацію певного підграфа, надавши нещодавнє та дійсне підтвердження індексації (POI). Це називається закриттям розподілу. Перед тим, як розподіл буде закрито, він має бути відкритим щонайменше одну епоху. Максимальний період розподілу - 28 епох. Якщо індексатор залишає розподіл відкритим більш ніж на 28 епох, він називається "застарілим". Коли розподіл знаходиться на етапі **Закриття**, риболов все ще може відкрити диспут, щоб оскаржити дії індексатора, який подав неправдиві дані. - - 3. **Завершення**: Період оскарження закінчився, і індексатори можуть отримати відповідну комісію за опрацювання запитів. - - 4. **Отримання**: На завершальному етапі розподілу всі винагороди, що відповідають критеріям, були розподілені, а відшкодування за запити були отримані. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: Потужний додаток для створення, розгортання та публікації підграфів. -- **Риболов (Fishermen)**: Учасники мережі можуть оскаржувати результати індексації та POI. Саме таких людей називають риболовами. Спір, вирішений на його користь, призводить до фінансового штрафу для Індексатора, а також до винагороди для самого риболова, таким чином стимулюючи добросовісність індексації та виконання запитів, що здійснюються Індексаторами в мережі. Штраф або слешинг, наразі становить 2,5% від власного стейка Індексатора, при цьому 50% від штрафних GRT токенів переходять до риболова, а інші 50% спалюються. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. 
When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. + -- **Арбітри**: Арбітри - це учасники мережі, призначені шляхом голосування. Роль арбітра полягає у вирішенні спорів щодо індексації та запитів. Їх мета - максимальне збільшення корисності та надійності The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. + -- **Слешинг**: Індексатори можуть бути позбавлені застейканих GRT за надання неправильного підтвердження індексації (POI) або за надання неточних даних. Відсоток слешингу - це параметр протоколу, який наразі встановлено на рівні 2,5% від власного стейка індексатора. 50% від кількості позбавлених GRT переходять до риболова, який поставив під сумнів неточні дані або неправильний POI. Інші 50% - спалюються. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. + - **Нагороди за індексацію**: Винагорода, яку отримують Індексатори за індексування підграфів. Винагороди за індексацію розподіляються в токенах GRT. + @@ -66,7 +62,7 @@ title: Глосарій - **GRT**: Функціональний токен екосистеми The Graph. GRT надає економічні заохочення учасникам мережі за їх внесок у її розвиток. + -- **POI або підтвердження індексації**: Коли індексатор закриває свій розподіл і хоче отримати нараховану винагороду за певний підграф, він повинен надати дійсне та актуальне підтвердження індексації (Proof of Indexing, POI). Риболови можуть оскаржувати POI, що надані Індексатором. Суперечка, вирішена на користь риболова, призведе до слешингу Індексатора. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. + - **Graph Node**: Graph Node - це компонент, який індексує підграфи та робить отримані дані доступними для запитів через GraphQL API. Загалом, він є центральним елементом стека індексатора, і правильна робота Graph Node має вирішальне значення для успішної роботи індексатора. + @@ -80,10 +76,10 @@ title: Глосарій + - **Період очікування**: Час, що залишився до того моменту, як індексатор, який змінив параметри делегування, зможе зробити це знову. + -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One.
Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/uk/graphcast.mdx b/website/pages/uk/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/uk/graphcast.mdx +++ b/website/pages/uk/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. diff --git a/website/pages/uk/index.json b/website/pages/uk/index.json index 2445296aadf6..8de6d91ebafd 100644 --- a/website/pages/uk/index.json +++ b/website/pages/uk/index.json @@ -23,8 +23,8 @@ "description": "Використання студії для створення субграфів" }, "migrateFromHostedService": { - "title": "Міграція з хостингового сервісу", - "description": "Міграція субграфів до мережі Graph" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Хостинговий сервіс", - "description": "Створюйте та досліджуйте субграфи на хостинговому сервісі" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Мережі, які підтримуються", - "description": "Graph підтримує наступні мережі в загальній мережі Graph та хостинговий сервіс.", - "graphNetworkAndHostedService": "Graph мережа та хостинговий сервіс", - "hostedService": "Хостинговий сервіс", - "betaWarning": "У бета-версії." 
+ "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/uk/mips-faqs.mdx b/website/pages/uk/mips-faqs.mdx index 73efe82662cb..0388446d07d1 100644 --- a/website/pages/uk/mips-faqs.mdx +++ b/website/pages/uk/mips-faqs.mdx @@ -2,17 +2,19 @@ title: MIPs FAQs --- -## Introduction +## Введення -It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. +> Примітка: програма MIPs закрита з травня 2023 року. Дякуємо всім індексаторам, які взяли участь! -To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). +Це чудовий час для того, щоб взяти участь в екосистемі The graph. Протягом [Graph Day 2022] \(https://thegraph.com/graph-day/2022/) Yaniv Tal анонсував [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), момент, для якого екосистема The Graph працювала протягом багатьох років. -The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. +Щоб підтримати завершення роботи хостингового сервісу та перенесення всієї активності в децентралізовану мережу, The Graph Foundation оголосив про [Migration Infrastructure Providers (MIPs) program] \(https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. +Програма MIPs - це оплачувана програма для Індексаторів, яка надає їм необхідні ресурси для індексації різних мереж, за межами мережі Ethereum і допомагає протоколу The Graph розширити децентралізовану мережу до рівня мультичейн інфраструктури. -### Useful Resources +На програму MIPs виділено 0.75% від загальної кількості токенів GRT (75 мільйонів GRT), з яких 0.5% буде використано для нагороди Індексаторів, які роблять свій вклад на бутстрап мережі та 0.25% зарезервовані під Network Grants для розробників підграфів, які використовують мультичейн підграфи. + +### Корисні посилання - [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) - [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) @@ -20,106 +22,106 @@ The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to r - [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) - [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? +### 1. Чи можна згенерувати дійсний доказ індексації (POI), навіть у тому випадку, якщо підграф виявився невдалим? -Yes, it is indeed. 
+Так, це правда. -For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. +Для довідки, arbitration charter \[дізнатися більше про charter можете тут (https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract) визначає методологію генерації POI для невдалого підграфа. -A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). +Учасник спільноти [SunTzu] \(https://github.com/suntzu93) створив скрипт для автоматизації цього процесу відповідно до методології arbitration charter. Ознайомтеся з ним [here](https://github.com/suntzu93/get_valid_poi_subgraph). -### 2. Which chain will the MIPs program incentivise first? +### 2. Яка мережа буде стимулюватись програмою MIPs в першу чергу? -The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. +Перша мережа, яка буде підтримуватись в децентралізованій мережі - це Gnosis Chain! Раніше відома як xDAI, Gnosis Chain - це мережа на основі EVM. Gnosis Chain була обрана першою з огляду на зручність запуску нод, готовність до роботи з індексатором, узгодженість з The Graph та web3 адаптацію. -### 3. How will new chains be added to the MIPs program? +### 3. Як нові мережі будуть додаватись до програми MIPs? -New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. +Нові мережі будуть анонсовані протягом програми MIPs, відповідно до готовності Індексаторів, попиту та загального настрою спільноти. Мережі спочатку будуть працювати в тестнеті, а після цього GIP буде передано для підтримки цієї мережі в мейннеті. Індексатори, які беруть участь в програмі MIPs, будуть обирати, в підтримці якої мережі вони зацікавлені та отримуватимуть нагороди за кожну мережу, додатково до винагород, які вони вже отримують за запити та винагород за індексацію в мережі за обслуговування підграфів. Учасники програми MIPs будуть оцінюватися на основі їх ефективності, здатності обслуговувати потреби мережі та підтримки з боку спільноти. -### 4. How will we know when the network is ready for a new chain? +### 4. Як ми дізнаємось, коли мережа буде готова до додання нових блокчейнів? -The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. +The Graph Foundation відстежуватиме показники якості обслуговування, продуктивність мережі та різні канали спільноти, щоб найкращим чином оцінити готовність. 
Пріоритетом є забезпечення того, щоб мережа відповідала вимогам продуктивності для тих мультичейн додатків, які зможуть перенести свої підграфи. -### 5. How are rewards divided per chain? +### 5. Як розподіляються винагороди на кожну мережу? -Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. +Враховуючи, що мережі відрізняються за своїми вимогами до синхронізаційних нод, а також за обсягом запитів і прийняттям, винагорода для кожної мережі буде визначатися в кінці її циклу, щоб гарантувати, що весь зворотний зв'язок і навчання будуть враховані. Однак індексатори завжди зможуть заробляти плату за запити та винагороду за індексацію, якщо ланцюжок буде підтримуватися в мережі. -### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? +### 6. Чи потрібно індексувати всі мережі в програмі MIPs, чи можна вибрати лише одну і проіндексувати її? -You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. +Ви можете індексувати будь-яку мережу, яка вам подобається! Мета програми MIPs - надати індексаторам інструменти та знання для індексування мереж, які вони бажають, і підтримки тих екосистем web3, які їх цікавлять. Однак, для кожної мережі є фази, починаючи від тестової мережі до мейннету. Переконайтеся, що ви пройшли всі ці фази для мереж, які ви індексуєте. Дивіться [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059), щоб дізнатись більше про ці фази. -### 7. When will rewards be distributed? +### 7. Коли будуть розподілені нагороди? -MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. +Винагороди MIPs будуть розподілені між мережами, як тільки будуть досягнуті показники ефективності та перенесені підграфи будуть підтримуватися цими індексаторами. Шукайте інформацію про загальну кількість винагород для кожної мережі в середині її циклу. -### 8. How does scoring work? +### 8. Як працює система підрахунку? -Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: +Індексатори змагатимуться за нагороди на основі набраних балів в таблиці лідерів протягом програми. Підрахунок балів за програму базуватиметься на: -**Subgraph Coverage** +**Покриття підграфів** -- Are you providing maximal support for subgraphs per chain? +- Чи забезпечується максимальна підтримка підграфів у мережі? -- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. +- Очікується, що під час MIP великі Індексатори стейкатимуть 50%+ підграфів у кожній мережі, яку вони підтримують. 
-**Quality Of Service** +**Якість обслуговування** -- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? +- Чи забезпечує індексатор хорошу якість обслуговування мережі (затримки, свіжість даних, час безвідмовної роботи тощо)? -- Is the Indexer supporting dapp developers being reactive to their needs? +- Чи реагує індексатор, що підтримує розробників додатків, на їхні потреби? -Is Indexer allocating efficiently, contributing to the overall health of the network? +Чи ефективно розподіляє ресурси Індексатор, сприяючи загальному стану мережі? -**Community Support** +**Підтримка спільноти** -- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? +- Чи співпрацює Індексатор з іншими Індексаторами, щоб допомогти їм налаштуватися на мультичейн роботу? -- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? +- Чи надає Індексатор зворотний зв'язок основним розробникам протягом програми або ділиться інформацією з іншими Індексаторами на Форумі? -### 9. How will the Discord role be assigned? +### 9. Як будуть розподілятись ролі в Discord? -Moderators will assign the roles in the next few days. +Модератори розподілять ролі протягом наступних кількох днів. -### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? +### 10. Чи можна почати програму на тестовій мережі, а потім перейти в мейннет? Чи зможете ви ідентифікувати мою ноду і врахувати її при розподілі винагород? -Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. +Так, саме це вам і потрібно зробити. Кілька фаз знаходяться на Görli та одна - в мейннеті. -### 11. At what point do you expect participants to add a mainnet deployment? +### 11. На якому етапі учасники розпочнуть розгортання в мейннеті, відповідно до ваших очікувань? -There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +Під час третьої фази буде вимагатися наявність індексатора в основній. Більше інформації про це ви можете отримати тут [скоро буде опубліковано на сторінці у Notion.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -### 12. Will rewards be subject to vesting? +### 12. Чи будуть винагороди розблоковуватись поступово? -The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. +Відсоток, який буде розподілений наприкінці програми, підлягатиме поступовому розблокуванню. Більше інформації про це буде надано в Indexer Agreement. -### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? +### 13. Для команд з більш ніж одним учасником, чи всі члени команди отримають роль MIPs Discord? -Yes +Так -### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? +### 14. Чи можна використовувати заблоковані токени з програми кураторів The Graph для участі в тестнеті MIPs? -Yes +Так -### 15. During the MIPs program, will there be a period to dispute invalid POI? +### 15. Чи буде наданий період для оскарження недійсних POI під час програми MIPs? -To be decided. 
Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation +Це буде вирішено пізніше. Будь ласка, періодично повертайтеся на цю сторінку для отримання більш детальної інформації або, якщо ваш запит є терміновим, напишіть на пошту info@thegraph.foundation -### 17. Can we combine two vesting contracts? +### 17. Чи можна об'єднати два вестинг контракти? -No. The options are: you can delegate one to the other one or run two separate indexers. +Ні. Варіанти такі: ви можете делегувати один індексатор іншому або запустити два окремих індексатори. -### 18. KYC Questions? +### 18. Питання по KYC? -Please email info@thegraph.foundation +Будь ласка напишіть на пошту info@thegraph.foundation -### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? +### 19. Я не готовий індексувати в мережі Gnosis, чи можу я пропустити цей етап і почати індексування з іншої мережі, коли буду готовий? -Yes +Так -### 20. Are there recommended regions to run the servers? +### 20. Чи є рекомендовані регіони для запуску серверів? -We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. +Ми не даємо рекомендацій щодо регіонів. Обираючи місце розташування, ви можете подумати про те, де знаходяться основні ринки криптовалют. -### 21. What is “handler gas cost”? +### 21. Що таке "вартість газу для обслуговування"? -It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. +Це детермінована міра вартості виконання обслуговування. Всупереч тому, що може здатися з назви, це ніяк не пов'язано з вартістю газу в блокчейні. diff --git a/website/pages/uk/network/benefits.mdx b/website/pages/uk/network/benefits.mdx index 26b1c42f8b3e..b12002a1b0ec 100644 --- a/website/pages/uk/network/benefits.mdx +++ b/website/pages/uk/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - на 60-98% нижчі щомісячні витрати - $0 витрат на налаштування інфраструктури - Високий час безвідмовної роботи -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - Технічна підтримка 24/7 від глобальної спільноти ## The Benefits Explained @@ -90,7 +90,7 @@ Note that gas fees on [Arbitrum](/arbitrum/arbitrum-faq) are substantially lower ## Надійність & Стійкість -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. 
diff --git a/website/pages/uk/network/indexing.mdx b/website/pages/uk/network/indexing.mdx index c40fd87a22fe..2169804aabe3 100644 --- a/website/pages/uk/network/indexing.mdx +++ b/website/pages/uk/network/indexing.mdx @@ -1,55 +1,55 @@ --- -title: Indexing +title: Індексація --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Індексатори - це оператори нод у The Graph Network, які стейкають токени Graph (GRT), щоб надавати послуги з індексування та обробки запитів. За свої послуги індексатори отримують плату за запити та винагороду за індексацію. Вони також заробляють комісію за запити, яка повертається відповідно до експоненціальної функції компенсації. -GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. +GRT, які застейкані в протоколі, підлягають періоду "розблокування" і можуть бути порізані (slashing), якщо індексатори є шкідливими та надають некоректні дані додаткам або якщо вони неправильно індексують. Індексатори також отримують винагороду за стейк, який вони отримують від делегатів, щоб зробити свій внесок у розвиток мережі. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Індексатори вибирають підграфи для індексування на основі сигналу від кураторів, де куратори стейкають GRT, щоб вказати, які підграфи є якісними та мають бути пріоритетними. Споживачі (наприклад, додатки) також можуть задавати параметри, за якими індексатори обробляють запити до їхніх підграфів, і встановлювати налаштування щодо оплати за запити. -## FAQ +## Поширені запитання -### What is the minimum stake required to be an Indexer on the network? +### Яка мінімальна кількість GRT необхідна для того, щоб стати індексатором в мережі? -The minimum stake for an Indexer is currently set to 100K GRT. +Мінімальна кількість для індексатора наразі встановлена на рівні 100 тис. GRT. -### What are the revenue streams for an Indexer? +### Які джерела доходу для індексатора? -**Query fee rebates** - Payments for serving queries on the network. These payments are mediated via state channels between an Indexer and a gateway. Each query request from a gateway contains a payment and the corresponding response a proof of query result validity. +**Отримання комісії за опрацювання запитів** - Платежі за обслуговування запитів у мережі. Ці платежі здійснюються через відповідні канали між індексатором та сіткою. Кожен запит містить платіж, а відповідна відповідь - доказ правдивості результату запиту. -**Indexing rewards** - Generated via a 3% annual protocol wide inflation, the indexing rewards are distributed to Indexers who are indexing subgraph deployments for the network. 
+**Винагорода за індексацію** - Винагорода за індексацію, що генерується шляхом 3% річної інфляції в масштабі всього протоколу, розподіляється серед індексаторів, які індексують розгортання підграфів для мережі. -### How are indexing rewards distributed? +### Як розподіляються винагороди за індексацію? -Indexing rewards come from protocol inflation which is set to 3% annual issuance. They are distributed across subgraphs based on the proportion of all curation signal on each, then distributed proportionally to Indexers based on their allocated stake on that subgraph. **An allocation must be closed with a valid proof of indexing (POI) that meets the standards set by the arbitration charter in order to be eligible for rewards.** +Винагорода за індексацію надходить від інфляції протоколу, яка встановлена на рівні 3% річної емісії. Вони розподіляються між підграфами на основі частки всіх кураторських сигналів на кожному, а потім розподіляються пропорційно між індексаторами на основі їхнього виділеного стейку на відповідному підграфі. **Щоб мати право на винагороду, розподіл має бути закрито за допомогою доказу індексації (proof of indexing - POI), яке відповідає стандартам, установленим арбітражним регламентом.** Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. -### What is a proof of indexing (POI)? +### Що за доказ індексації (POI)? -POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. +POI використовуються в мережі для перевірки того, що Індексатор індексує підграфи, на які вони були розподілені. POI для першого блоку поточної епохи має бути надісланий при закритті розподілу, щоб цей розподіл мав право на винагороду за індексацію. POI для блоку - це дайджест усіх транзакцій сховища об'єктів для певного розгортання підграфів до цього блоку включно. -### When are indexing rewards distributed? +### Коли розподіляються винагороди за індексацію? -Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). +Винагороди безперервно накопичуються, поки алокації активні та розподілені протягом 28 періодів. Винагороди збираються Індексаторами та розподіляються щоразу, коли їхні розподіли закриваються. Це відбувається або вручну, коли Індексатор хоче примусово закрити їх, або після 28 епох, делегат може закрити розподіл для Індексатора, але це не призводить до отримання винагороди. 
28 епох - це максимальний час роботи розподілів (зараз одна епоха триває ~24 години). -### Can pending indexing rewards be monitored? +### Чи можна відстежувати винагороди за індексацію, що очікують на розгляд? -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +Контракт RewardsManager має функцію [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317), доступну тільки для читання, яку можна використовувати для перевірки очікуваних винагород для конкретного розподілу. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +Багато інформаційних панелей, створених спільнотою, містять очікувані значення винагород, і їх можна легко перевірити вручну, виконавши ці кроки: -1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: +1. Надішліть запит на [підграф в основній мережі](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet), щоб отримати ідентифікатори всіх активних розподілів: ```graphql -query indexerAllocations { - indexer(id: "") { +запит indexerAllocations { + indexer(id: "") { { } }) { indexer(id: "") allocations { activeForIndexer { - allocations { + allocations { id id } } @@ -58,139 +58,139 @@ query indexerAllocations { } ``` -Use Etherscan to call `getRewards()`: +Використовуйте Etherscan для виклику `getRewards()`: -- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Перейдіть до [EtherScan інтерфейсу, потім до контракту Rewards](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) -* To call `getRewards()`: - - Expand the **10. getRewards** dropdown. - - Enter the **allocationID** in the input. - - Click the **Query** button. +* Оберіть `getRewards()`: + - Відкрийте список **10. getRewards**. + - Введіть **allocationID** у вхідних даних. + - Натисніть кнопку **Query**. -### What are disputes and where can I view them? +### Що таке спори (disputes) та де я можу їх переглянути? -Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. +Запити індексатора та розподіли можуть бути оскаржені на Graph протягом відповідного періоду оскарження. Цей період варіюється в залежності від типу спору. Для запитів/атестацій вікно спору триває 7 епох, тоді як для розподілів - 56. Після закінчення цих періодів спори не можуть бути відкриті ні проти розподілів, ні проти запитів. При відкритті спору учасник повинен внести депозит у розмірі не менше 10 000 GRT, який буде заблокований до завершення спору і винесення рішення по ній. Fisherman (рибалка) - це будь-який учасник мережі, який відкриває спори. 
-Disputes have **three** possible outcomes, so does the deposit of the Fishermen. +Спори мають **три** можливих результати, так само як і депозит учасників. -- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed. -- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. -- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. +- Якщо спір буде відхилено, GRT, внесений в якості депозиту, буде спалено, а Індексатор не буде порізаний. +- Якщо суперечка буде вирішена внічию, депозит користувача буде повернуто, а індексатора не буде порізано. +- Якщо спір буде задоволено, GRT, внесений учасником, буде повернуто, Індексатора буде порізано, а рибалка отримає 50% від GRT, які були порізані. -Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. +Спори можна переглянути в інтерфейсі на сторінці профілю індексатора у вкладці `Disputes`. -### What are query fee rebates and when are they distributed? +### Що за комісії за опрацювання запитів і коли вони розподіляються? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Після закриття розподілу виплати можуть бути отримані індексатором. Після клейму, комісії за запити розподіляються між індексатором та його делегатами на основі зниження цін за запити та експоненціальної функції повернень. -### What is query fee cut and indexing reward cut? +### Що таке query fee cut і indexing reward cut? -The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. +Значення `queryFeeCut` і `indexingRewardCut` є параметрами делегування, які Індексатор може встановлювати разом із cooldownBlocks, щоб контролювати розподіл GRT між Індексатором і його Делегатами. 
Перегляньте останні кроки в розділі [Staking in the Protocol](/network/indexing#stake-in-the-protocol), щоб отримати інструкції щодо встановлення параметрів делегування. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - відсоток від комісій за опрацювання запитів, який буде розподілено між Індексатором та Делегатами. Якщо цей параметр встановлено на рівні в 95%, індексатор отримає 95% від комісій за запити, зароблених при закритті розподілу, а решта 5% підуть Делегатам. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - відсоток від винагород за індексацію, який буде розподілено між Індексатором та Делегатами. Якщо цей параметр встановлено на рівні в 95%, індексатор отримає 95% винагороди за індексацію, коли розподіл буде закрито, а делегати розділять між собою решту 5%. -### How do Indexers know which subgraphs to index? +### Як індексатори знають, які підграфи індексувати? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Індексатори можуть відрізнятися один від одного, застосовуючи передові методи для прийняття рішень щодо індексування підграфів, але для того, щоб дати загальне уявлення, ми обговоримо кілька ключових метрик, які використовуються для оцінки підграфів у мережі: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Сигнали від кураторів ** - якщо велика частка від загальної кількості сигналів у мережі припадає на певний підграф, то це є хорошим показником інтересу до цього підграфа, особливо під час фази бутстрапу, коли обсяг запитів зростає. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Збори за запити** - Історичні дані про обсяг зборів за запити, зібрані для певного підграфа, є хорошим індикатором майбутнього попиту. -- **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. +- **Кількість застейканих токенів** - Спостереження за поведінкою інших індексаторів або аналіз пропорцій від загального стейку токенів, виділених на конкретні підграфи, може дозволити індексатору відстежувати попит на запити до підграфів, щоб виявити підграфи, яким мережа довіряє, або підграфи, які можуть показати потребу в більшій кількості токенів. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. 
You will see a message on a subgraph if it is not generating indexing rewards. +- **Підграфи без винагороди за індексування** - Деякі підграфи не отримують винагороди за індексування переважно через те, що вони використовують непідтримувані можливості, такі як IPFS, або тому, що вони запитують іншу мережу за межами основної мережі. Ви побачите повідомлення про те, що підграф не генерує винагороду за індексацію. -### What are the hardware requirements? +### Які вимоги до апаратного обладнання? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. -- **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. -- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic. +- **Small** - достатній для початку індексування декількох підграфів, ймовірно, потрібно буде розширити. +- **Standard** - налаштування за замовчуванням, це те, що використовується у прикладі маніфестів розгортання k8s/terraform. +- **Medium** - продуктивний індексатор, що підтримує 100 підграфів і 200-500 запитів на секунду. +- **Large** - підготовлений для індексації всіх підграфів, що використовуються наразі, і обслуговування запитів на відповідний трафік. -| Setup | Postgres
    (CPUs) | Postgres
    (memory in GBs) | Postgres
    (disk in TBs) | VMs
    (CPUs) | VMs
    (memory in GBs) | +| Налаштування | Postgres
    (CPU) | Postgres
    (пам'ять у ГБ) | Postgres
    (диск у ТБ) | VMs
    (CPU) | VMs
    (пам'ять у ГБ) | | --- | :-: | :-: | :-: | :-: | :-: | | Small | 4 | 8 | 1 | 4 | 16 | | Standard | 8 | 30 | 1 | 12 | 48 | | Medium | 16 | 64 | 2 | 32 | 64 | | Large | 72 | 468 | 3.5 | 48 | 184 | -### What are some basic security precautions an Indexer should take? +### Яких основних заходів безпеки повинен дотримуватися індексатор? -- **Operator wallet** - Setting up an operator wallet is an important precaution because it allows an Indexer to maintain separation between their keys that control stake and those that are in control of day-to-day operations. See [Stake in Protocol](/network/indexing#stake-in-the-protocol) for instructions. +- **Operator wallet**. Налаштування гаманця оператора є важливим запобіжним заходом, оскільки він дозволяє Індексатору підтримувати відокремлення між своїми ключами, які контролюють застейкані токени, і тими, які використовуються для щоденних операцій. Інструкції див. у розділі [Stake in Protocol](/network/indexing#stake-in-the-protocol). -- **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. +- **Firewall** - Публічно доступною має бути лише сервіс-індексатор, і особливу увагу слід приділити блокуванню портів адміністратора і доступу до бази даних: не слід відкривати кінцеву точку JSON-RPC Graph Node (порт за замовчуванням: 8030), кінцеву точку API управління індексатором (порт за замовчуванням: 18000) і кінцеву точку бази даних Postgres (порт за замовчуванням: 5432). -## Infrastructure +## Інфраструктура -At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. +Центром інфраструктури індексатора є Graph Node, яка відстежує індексовані мережі, вибирає і завантажує дані відповідно до визначення підграфів і слугує як [GraphQL API](/about/#how-the-graph-works). Graph Node має бути підключена до кінцевої точки, яка надає дані з кожної проіндексованої мережі; ноди IPFS для отримання даних; бази даних PostgreSQL для їх зберігання; і компонентів індексатора, які полегшують його взаємодію з мережею. -- **PostgreSQL database** - The main store for the Graph Node, this is where subgraph data is stored. The Indexer service and agent also use the database to store state channel data, cost models, indexing rules, and allocation actions. +- **PostgreSQL database** - основне сховище для Graph Node, саме тут зберігаються дані підграфів. Сервіс-індексатор та агент також використовують базу даних для зберігання даних каналів стану, моделей витрат, правил індексації та дій з розподілу. -- **Data endpoint** - For EVM-compatible networks, Graph Node needs to be connected to an endpoint that exposes an EVM-compatible JSON-RPC API. This may take the form of a single client or it could be a more complex setup that load balances across multiple. It's important to be aware that certain subgraphs will require particular client capabilities such as archive mode and/or the parity tracing API. 
+- **Data endpoint** - Для EVM-сумісних мереж Graph Node має бути підключена до кінцевої точки, який надає EVM-сумісний JSON-RPC API. Це може бути як один клієнт, так і більш складне налаштування, яке розподіляє навантаження між кількома. Важливо знати, що певні підграфи потребують особливих можливостей клієнта, таких як режим архівування та/або API відстеження парності. -- **IPFS node (version less than 5)** - Subgraph deployment metadata is stored on the IPFS network. The Graph Node primarily accesses the IPFS node during subgraph deployment to fetch the subgraph manifest and all linked files. Network Indexers do not need to host their own IPFS node, an IPFS node for the network is hosted at https://ipfs.network.thegraph.com. +- **Нода IPFS (версія менше ніж 5)** - метадані розгортання підграфів зберігаються у мережі IPFS. Graph Node звертається до вузла IPFS під час розгортання підграфів, щоб отримати маніфест підграфів і всі пов'язані файли. Індексаторам в мережі не потрібно запускати власну ноду IPFS, ноду IPFS для мережі розміщено на https://ipfs.network.thegraph.com. -- **Indexer service** - Handles all required external communications with the network. Shares cost models and indexing statuses, passes query requests from gateways on to a Graph Node, and manages the query payments via state channels with the gateway. +- **Indexer service** - виконує всі необхідні зовнішні комунікації з мережею. Обмінюється моделями витрат і статусами індексації, передає запити від шлюзів до Graph Node, а також керує оплатою запитів через відповідні канали зі шлюзом. -- **Indexer agent** - Facilitates the Indexers interactions on chain including registering on the network, managing subgraph deployments to its Graph Node/s, and managing allocations. +- **Indexer agent** - полегшує взаємодію індексаторів в мережі, включаючи реєстрацію у мережі, керування розгортанням підграфів у Graph Node/-ах та керуванням розподілами. -- **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. +- **Сервер метрик Prometheus** - Компоненти Graph Node та індексаторів реєструють свої метрики на відповідному на сервері. -Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. +Примітка: Для підтримки гнучкого масштабування рекомендується розділити завдання запитів та індексації між різними наборами нод: нодами запитів та нодами індексації. -### Ports overview +### Огляд портів -> **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. +> **Важливо**: Будьте обережні з публічним відкриттям портів - **адміністративні порти** слід тримати закритими. Це стосується і JSON-RPC Graph Node та кінцевих точок керування індексатором, описаних нижче. #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | HTTP-сервер GraphQL
    (для запитів до підграфів) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (для підписок на підграфи) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (для керування розгортаннями) | / | --admin-port | - | +| 8030 | API стану індексації підграфів | /graphql | --index-node-port | - | +| 8040 | Метрики Prometheus | /metrics | --metrics-port | - | -#### Indexer Service +#### Служба індексації -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | | --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
    (for paid subgraph queries) | /subgraphs/id/...
    /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | -| 7300 | Prometheus metrics | /metrics | --metrics-port | - | +| 7600 | HTTP-сервер GraphQL
    (для платних запитів до підграфів) | /subgraphs/id/...
    /status
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Метрики Prometheus | /metrics | --metrics-port | - | -#### Indexer Agent +#### Агент індексації -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | +| --- | --- | --- | --- | --- | +| 8000 | API для керування індексатором | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Setup server infrastructure using Terraform on Google Cloud +### Налаштування серверної інфраструктури з використанням Terraform на Google Cloud -> Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. +> Примітка: Індексатори можуть альтернативно використовувати AWS, Microsoft Azure або Alibaba. -#### Install prerequisites +#### Встановіть необхідні умови - Google Cloud SDK -- Kubectl command line tool +- Kubectl - інструмент командного рядка - Terraform -#### Create a Google Cloud Project +#### Створіть проєкт на Google Cloud -- Clone or navigate to the Indexer repository. +- Клонуйте або перейдіть до репозиторію індексатора. -- Navigate to the ./terraform directory, this is where all commands should be executed. +- Перейдіть до каталогу ./terraform, саме тут слід виконати всі команди. ```sh cd terraform ``` -- Authenticate with Google Cloud and create a new project. +- Авторизуйтесь у Google Cloud і створіть новий проєкт. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use the Google Cloud Console's billing page to enable billing for the new project. +- Використовуйте сторінку виставлення рахунків у Google Cloud Console, щоб увімкнути виставлення рахунків для нового проєкту. -- Create a Google Cloud configuration. +- Створіть конфігурацію Google Cloud. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Enable required Google Cloud APIs. +- Увімкніть необхідні Google Cloud API. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Create a service account. +- Створіть обліковий запис сервісу. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Enable peering between database and Kubernetes cluster that will be created in the next step. +- Увімкніть взаємодію між базою даних і кластером Kubernetes, який буде створено на наступному кроці. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Create minimal terraform configuration file (update as needed). +- Створіть мінімальний конфігураційний файл terraform (оновлюйте за потреби). 
```sh indexer= @@ -262,24 +262,24 @@ database_password = "" EOF ``` -#### Use Terraform to create infrastructure +#### Використовуйте Terraform для створення інфраструктури -Before running any commands, read through [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) and create a file `terraform.tfvars` in this directory (or modify the one we created in the last step). For each variable where you want to override the default, or where you need to set a value, enter a setting into `terraform.tfvars`. +Перед виконанням будь-яких команд прочитайте [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) і створіть файл `terraform.tfvars` у цьому каталозі (або змініть той, який ми створили на останньому кроці). Для кожної змінної, де ви бажаєте замінити значення за замовчуванням або де вам потрібно встановити значення, введіть параметр у `terraform.tfvars`. -- Run the following commands to create the infrastructure. +- Запустіть наступні команди, щоб створити інфраструктуру. ```sh -# Install required plugins +# Встановіть необхідні плагіни terraform init -# View plan for resources to be created +# Переглянути план для ресурсів, які будуть створені terraform plan -# Create the resources (expect it to take up to 30 minutes) +# Створіть ресурси (очікується, що це займе до 30 хвилин) terraform apply ``` -Download credentials for the new cluster into `~/.kube/config` and set it as your default context. +Завантажте облікові дані нового кластера до `~/.kube/config` і встановіть його як контекст за замовчуванням. ```sh gcloud container clusters get-credentials $indexer @@ -287,21 +287,21 @@ kubectl config use-context $(kubectl config get-contexts --output='name' | grep $indexer) ``` -#### Creating the Kubernetes components for the Indexer +#### Створення компонентів Kubernetes для індексатора -- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. +- Скопіюйте каталог `k8s/overlays` до нового каталогу `$dir,` і налаштуйте запис `bases` у `$dir/kustomization.yaml`, щоб він вказував на каталог `k8s/base`. -- Read through all the files in `$dir` and adjust any values as indicated in the comments. +- Прочитайте всі файли в `$dir` і змініть будь-які значення, як зазначено в коментарях. -Deploy all resources with `kubectl apply -k $dir`. +Установіть усі ресурси за допомогою `kubectl apply -k $dir`. ### Graph Node -[Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. +[Graph Node](https://github.com/graphprotocol/graph-node) - це реалізація Rust з відкритим вихідним кодом, яка використовує події блокчейну Ethereum для детермінованого оновлення сховища даних, які можна запитувати через кінцеву точку GraphQL. Розробники використовують підграфи для визначення своєї схеми та набір відображень для перетворення даних, отриманих з блоків мережі, а Graph Node займається синхронізацією всієї мережі, моніторингом нових блоків і обслуговуванням їх через кінцеву точку GraphQL.
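To make the GraphQL endpoint mentioned above concrete, here is a minimal smoke-test sketch against a locally running Graph Node on its default query port (8000); the subgraph name `org/example-subgraph` and the `_meta` query are illustrative placeholders, not part of the original instructions.

```sh
# Hypothetical check of a local Graph Node query endpoint (port 8000).
# Replace org/example-subgraph with a subgraph actually deployed to this node.
curl -X POST http://localhost:8000/subgraphs/name/org/example-subgraph \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ _meta { block { number } } }"}'
```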
-#### Getting started from source +#### Початок роботи з базового коду -#### Install prerequisites +#### Встановіть необхідні умови - **Rust** @@ -309,15 +309,15 @@ Deploy all resources with `kubectl apply -k $dir`. - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Додаткові вимоги для користувачів Ubuntu** - Для запуску Graph Node на Ubuntu може знадобитися декілька додаткових програм. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Налаштування -1. Start a PostgreSQL database server +1. Запуск сервера бази даних PostgreSQL ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Клонуйте [Graph Node](https://github.com/graphprotocol/graph-node) репозиторій і створіть базовий код, запустивши `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Тепер, коли всі необхідні складові налаштовано, запустіть Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -336,48 +336,48 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Початок роботи з Docker -#### Prerequisites +#### Передумови -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. +- **Нода Ethereum**. За замовчуванням, docker compose використовуватиме основну мережу: [http:// host.docker.internal:8545](http://host.docker.internal:8545) для підключення до ноди Ethereum на вашій основній машині. Ви можете замінити це ім’я та Url-адресу мережі, оновивши `docker-compose.yaml`. -#### Setup +#### Налаштування -1. Clone Graph Node and navigate to the Docker directory: +1. Клонуйте Graph Node і перейдіть до каталогу Docker: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml`using the included script: +2. Лише для користувачів Linux – використовуйте IP-адресу хоста замість `host.docker.internal` у `docker-compose.yaml` за допомогою доданого скрипта: ```sh ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Запустіть локальну Graph Node, яка буде підключена до вашої кінцевої точки Ethereum: ```sh docker-compose up ``` -### Indexer components +### Компоненти індексатора -To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: +Для успішної участі в мережі потрібен майже постійний моніторинг і взаємодію, тому ми створили набір додатків Typescript для полегшення участі в мережі індексаторів. Існує три компоненти Індексатора: -- **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards on chain and how much is allocated towards each. 
+- **Indexer agent** - Агент відстежує мережу та власну інфраструктуру індексатора і керує розгортаннями підграфів, які індексуються та розподіляються в мережі, а також визначає, скільки ресурсів виділяється для кожного з них. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Indexer service** - Єдиний компонент, який потрібно виставляти назовні, сервіс передає запити підграфів до graph node, керує каналами стану для оплати запитів, ділиться важливою інформацією для прийняття рішень з клієнтами, такими як шлюзи. -- **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. +- **Indexer CLI** - інтерфейс командного рядка для керування агентом індексатора. Він дозволяє індексаторам керувати моделями витрат, ручним розподілом, чергою дій та правилами індексування. -#### Getting started +#### Початок роботи The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! -#### From NPM packages +#### З NPM-пакетів ```sh npm install -g @graphprotocol/indexer-service @@ -400,7 +400,7 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### From source +#### З базового коду ```sh # From Repo root directory @@ -420,16 +420,16 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Using docker +#### Використання docker -- Pull images from the registry +- Витягнути images з реєстру ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Or build images locally from source +Або створіть images локально з базового коду ```sh # Indexer service @@ -444,22 +444,22 @@ docker build \ -t indexer-agent:latest \ ``` -- Run the components +- Запустіть компоненти ```sh docker run -p 7600:7600 -it indexer-service:latest ... docker run -p 18000:8000 -it indexer-agent:latest ... ``` -**NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). +**ПРИМІТКА**. Після запуску контейнерів сервіс-індексатора повинен бути доступний за адресою [http://localhost:7600](http://localhost:7600), а агент індексатора повинен виставляти API управління індексатором за адресою [http://localhost:18000/](http://localhost:18000/). -#### Using K8s and Terraform +#### Використання K8 та Terraform -See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section +Див. 
розділ [Налаштування серверної інфраструктури з використанням Terraform на Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) -#### Usage +#### Використання -> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **ПРИМІТКА**. Усі змінні конфігурації середовища виконання можна застосовувати як параметри до команди під час запуску або за допомогою змінних середовища у форматі `COMPONENT_NAME_VARIABLE_NAME`(наприклад, `INDEXER_AGENT_ETHEREUM`). #### Indexer agent @@ -518,56 +518,56 @@ graph-indexer-service start \ #### Indexer CLI -The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. +Indexer CLI — це плагін для [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) доступного у терміналі в `graph indexer`. ```sh graph indexer connect http://localhost:18000 graph indexer status ``` -#### Indexer management using Indexer CLI +#### Керування індексатором за допомогою Indexer CLI -The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. +Пропонованим інструментом для взаємодії з **API керування індексатором** є **Indexer CLI**, розширення **Graph CLI**. Агенту потрібні дані від індексатора, щоб автономно взаємодіяти з мережею від імені індексатора. Механізмом визначення поведінки агента індексатора є режим **керування розподілом** і **правила індексування**. У автоматичному режимі індексатор може використовувати **правила індексування**, щоб застосувати свою конкретну стратегію вибору підграфів для індексування та обслуговування запитів. Правила керуються через GraphQL API, який обслуговує агент і відомий як Indexer Management API. У ручному режимі індексатор може створювати дії розподілу за допомогою **черги дій** і явно затверджувати їх перед виконанням. У режимі нагляду **правила індексування** використовуються для заповнення **черги дій** і також вимагають явного схвалення для виконання. -#### Usage +#### Використання -The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. +**Indexer CLI** з'єднується з агентом індексатора, як правило, за допомогою переадресації портів, тому CLI не потрібно запускати на тому ж сервері або кластері. Щоб допомогти вам розпочати роботу і надати деякий контекст, тут буде коротко описано CLI. 
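Before the command reference below, a minimal connection sketch for the port-forwarding workflow just described; the pod name `indexer-agent-0` is a hypothetical example, and the ports mirror the defaults used elsewhere in this guide.

```sh
# In one terminal: forward the Indexer management API (container port 8000) to the local machine.
# The pod name indexer-agent-0 is illustrative only.
kubectl port-forward pod/indexer-agent-0 18000:8000

# In a second terminal: point the Indexer CLI at the forwarded port.
graph indexer connect http://localhost:18000
```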
-- `graph indexer connect ` - Connect to the Indexer management API. Typically the connection to the server is opened via port forwarding, so the CLI can be easily operated remotely. (Example: `kubectl port-forward pod/ 8000:8000`) +- `graph indexer connect ` – приєднатися до API керування індексатором. Зазвичай підключення до сервера відкривається через перенаправлення портів, тому CLI можна легко керувати віддалено. (Приклад: `kubectl port-forward pod/ 8000:8000`) -- `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. +- `graph indexer rules get [options] [ ...]` - отримайте одне або кілька правил індексування, використовуючи `all` як ``, щоб отримати всі правила, або `global`, щоб отримати глобальні значення за замовчуванням. Додатковий аргумент `--merged` можна використовувати, щоб вказати, що правила розгортання об’єднуються з глобальним правилом. Ось як вони застосовуються в агенті індексатора. -- `graph indexer rules set [options] ...` - Set one or more indexing rules. +- `graph indexer rules set [options] ...` - задати одне або декілька правил індексування. -- `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. +- `graph indexer rules start [options] ` – почніть індексувати розгортання підграфа, якщо доступно, і встановіть для його `decisionBasis` значення `always`, тому агент індексатора завжди вирішить індексувати його. Якщо для глобального правила встановлено значення завжди, усі доступні підграфи в мережі будуть проіндексовані. -- `graph indexer rules stop [options] ` - Stop indexing a deployment and set its `decisionBasis` to never, so it will skip this deployment when deciding on deployments to index. +- `graph indexer rules stop [options] ` – зупиніть індексацію розгортання та встановіть для параметра `decisionBasis` значення «never», тому він пропустить це розгортання при прийнятті рішення про те, які розгортання індексувати. -- `graph indexer rules maybe [options] ` — Set the `decisionBasis` for a deployment to `rules`, so that the Indexer agent will use indexing rules to decide whether to index this deployment. +- `graph indexer rules maybe [options] ` — установіть `decisionBasis` для розгортання на `rules`, щоб агент індексатора використовував правила індексування, щоб вирішити, чи індексувати це розгортання. -- `graph indexer actions get [options] ` - Fetch one or more actions using `all` or leave `action-id` empty to get all actions. An additonal argument `--status` can be used to print out all actions of a certain status. +- `graph indexer actions get [options] ` - отримання однієї або декількох дій за допомогою `all` або можливість залишити `action-id` пустим, щоб отримати всі дії. Додатковий аргумент `--status` можна використовувати для виведення всіх дій певного статусу. 
-- `graph indexer action queue allocate ` - Queue allocation action +- `graph indexer action queue allocate ` – розподіл черги -- `graph indexer action queue reallocate ` - Queue reallocate action +- `graph indexer action queue reallocate ` – перерозподіл черги -- `graph indexer action queue unallocate ` - Queue unallocate action +- `graph indexer action queue unallocate ` – скасування розподілу черги -- `graph indexer actions cancel [ ...]` - Cancel all action in the queue if id is unspecified, otherwise cancel array of id with space as separator +- `graph indexer actions cancel [ ...]` - скасувати всі дії в черзі, якщо ідентифікатор не вказано, інакше скасувати масив ідентифікаторів із пробілом у якості розмежувача -- `graph indexer actions approve [ ...]` - Approve multiple actions for execution +- `graph indexer actions approve [ ...]` - затвердити кілька дій для виконання -- `graph indexer actions execute approve` - Force the worker to execute approved actions immediately +- `graph indexer actions execute approve` - змусити виконавця негайно виконати затверджені дії -All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. +Усі команди, які відображають правила у виводі, можуть вибирати між підтримуваними форматами виводу (`table`, `yaml` та `json`) за допомогою `- output` аргументу. -#### Indexing rules +#### Правила індексації -Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. +Правила індексування можуть бути застосовані як глобальні за замовчуванням або для конкретних розгортань підграфів, використовуючи їхні ідентифікатори. Поля `deployment` і `decisionBasis` є обов’язковими, тоді як усі інші поля необов’язкові. Якщо правило індексування має `rules` як `decisionBasis`, тоді агент індексатора порівнює ненульові порогові значення цього правила зі значеннями, отриманими з мережі для відповідного розгортання. Якщо розгортання підграфа має значення вище (або нижче) будь-якого з порогів, його буде вибрано для індексування. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +Наприклад, якщо глобальне правило має значення `minStake` в **5** (GRT), будь-яке розгортання підграфа, що має 5 (GRT) в стейкінгу, буде проіндексоване. Порогові правила включають `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, та `minAverageQueryFees`. -Data model: +Модель даних: ```graphql type IndexingRule { @@ -601,7 +601,7 @@ IndexingDecisionBasis { } ``` -Example usage of indexing rule: +Приклад використання правила індексації: ``` graph indexer rules offchain QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK @@ -615,18 +615,18 @@ graph indexer rules delete QmZfeJYR86UARzp9HiXbURWunYgC9ywvPvoePNbuaATrEK #### Actions queue CLI -The indexer-cli provides an `actions` module for manually working with the action queue. 
It uses the **Graphql API** hosted by the indexer management server to interact with the actions queue. +indexer-cli надає модуль `actions` для ручної роботи з чергою дій. Для взаємодії з чергою дій він використовує **Graphql API**, розміщений на сервері керування індексатором. -The action execution worker will only grab items from the queue to execute if they have `ActionStatus = approved`. In the recommended path actions are added to the queue with ActionStatus = queued, so they must then be approved in order to be executed on-chain. The general flow will look like: +Виконавець дії буде брати елементи з черги на виконання, тільки якщо вони мають `ActionStatus = approved`. У рекомендованому шляху дії додаються до черги зі статусом ActionStatus = queued, тому вони повинні бути схвалені, щоб бути виконаними в мережі. Загальний потік буде виглядати так: -- Action added to the queue by the 3rd party optimizer tool or indexer-cli user -- Indexer can use the `indexer-cli` to view all queued actions -- Indexer (or other software) can approve or cancel actions in the queue using the `indexer-cli`. The approve and cancel commands take an array of action ids as input. -- The execution worker regularly polls the queue for approved actions. It will grab the `approved` actions from the queue, attempt to execute them, and update the values in the db depending on the status of execution to `success` or `failed`. -- If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. -- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. +- Дія, додана до черги стороннім оптимізатором або користувачем indexer-cli +- Індексатор може використовувати `indexer-cli` для перегляду всіх дій у черзі +- Індексатор (або інша програма) може затверджувати або скасовувати дії у черзі за допомогою `indexer-cli`. Команди затвердження та скасування приймають на вхід масив ідентифікаторів дій. +- Виконавець регулярно проводить опитування черги на предмет схвалення дій. Він бере `approved` дії з черги, пробує виконати їх і потім оновлює значення в db в залежності від статусу виконання до `success` або `failed`. +- Якщо дія успішна, виконавець забезпечить наявність правила індексації, яке підкаже агенту, як керувати розподілом далі, що корисно при виконанні ручних дій під час перебування агента в режимі `auto` або `oversight`. +- Індексатор може стежити за чергою дій, щоб бачити історію виконання дій і при необхідності повторно затверджувати та оновлювати елементи дій, якщо вони не були виконані. У черзі дій відображається історія всіх дій, поставлених у чергу і виконаних.
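As a sketch of the queued → approved → executed flow described above, the commands below reuse the CLI calls documented later in this section; the action IDs are illustrative.

```sh
# List actions that are still waiting for approval.
graph indexer actions get --status queued

# Approve a batch of queued actions by ID (IDs are illustrative).
graph indexer actions approve 1 3 5

# Ask the execution worker to run the approved actions immediately.
graph indexer actions execute approve
```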
-Data model: +Модель даних: ```graphql Type ActionInput { @@ -659,64 +659,64 @@ ActionType { } ``` -Example usage from source: +Приклад використання: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` -Note that supported action types for allocation management have different input requirements: +Зверніть увагу, що підтримувані типи дій для управління розподілами мають різні вимоги до вхідних даних: -- `Allocate` - allocate stake to a specific subgraph deployment +- `Allocate` - розподілити стейк на конкретне розгортання підграфа - - required action params: + - необхідні параметри: - deploymentID - amount -- `Unallocate` - close allocation, freeing up the stake to reallocate elsewhere +- `Unallocate` - закритий розподіл, що звільняє стейк для перерозподілу в інше місце - - required action params: + - необхідні параметри: - allocationID - deploymentID - - optional action params: + - необов'язкові параметри: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (змушує використовувати наданий POI, навіть якщо він не збігається з тим, що надає graph-node) -- `Reallocate` - atomically close allocation and open a fresh allocation for the same subgraph deployment +- `Reallocate` - автоматично закриваємо розподіл і відкриває новий для того ж розгортання підграфа - - required action params: + - необхідні параметри: - allocationID - deploymentID - amount - - optional action params: + - необов'язкові параметри: - poi - - force (forces using the provided POI even if it doesn’t match what the graph-node provides) + - force (змушує використовувати наданий POI, навіть якщо він не збігається з тим, що надає graph-node) -#### Cost models +#### Моделі витрат -Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. +Моделі витрат забезпечують динамічне ціноутворення для запитів на основі атрибутів ринку і запиту. Сервіс-індексатора ділиться моделлю вартості зі шлюзами для кожного підграфа, для якого вони мають намір відповідати на запити. Шлюзи, і собі, використовують модель вартості для прийняття рішень про вибір індексатора для кожного запиту і для обговорень про оплату з обраними індексаторами. 
#### Agora -The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. +Мова Agora надає гнучкий формат для оголошення цінових моделей для запитів. Цінова модель Agora - це послідовність операторів, які виконуються по черзі для кожного запиту верхнього рівня в запиті GraphQL. Для кожного запиту верхнього рівня перший оператор, який йому відповідає, визначає ціну для цього запиту. -A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. +Оператор складається з параметра, який використовується для зіставлення запитів GraphQL, і виразу вартості, який при обчисленні виводить вартість у десяткових GRT. Значення в позиції іменованого аргументу запиту можуть бути перехоплені в пропозицію і використані у виразі. Globals також можна встановлювати та підставляти замість символів-замінників у виразі. -Example cost model: +Приклад моделі витрат: ``` # This statement captures the skip value, @@ -729,77 +729,73 @@ query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTE default => 0.1 * $SYSTEM_LOAD; ``` -Example query costing using the above model: +Приклад розрахунку вартості запиту за наведеною вище моделлю: -| Query | Price | +| Запит | Ціна | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### Applying the cost model +#### Застосування вартісної моделі -Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. +Моделі витрат застосовуються за допомогою Indexer CLI, яка передає їх до Indexer Management API агента індексатора для зберігання в базі даних. Потім сервіс-індексатора забирає їх і надає моделі витрат шлюзам, коли вони їх запитують. ```sh indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interacting with the network +## Взаємодія з мережею -### Stake in the protocol +### Стейкінг в протоколі -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ +Перші кроки до участі в мережі в якості індексатора — це затвердження протоколу, стейкінгу коштів і (за бажанням) встановлення адреси оператора для щоденної взаємодії протоколу. _ **Примітка**. 
Для цілей цих інструкцій Remix використовуватиметься для взаємодії за контрактом, але не соромтеся використовувати інструмент за вибором ([OneClickDapp](https:// oneclickdapp.com/), [ABItopic](https://abitopic.io/) і [MyCrypto](https://www.mycrypto.com/account) – це ще кілька відомих інструментів)._ -Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. +Після того, як індексатор застейкав GRT токени у протоколі, [Indexer components](/network/indexing#indexer-components) можна запустити та почати взаємодію з мережею. -#### Approve tokens +#### Approve токенів -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Відкрийте [програму Remix](https://remix.ethereum.org/) у браузері -2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. У `File Explorer` створіть файл під назвою **GraphToken.abi** з [токен ABI](https://raw.githubusercontent.com/graphprotocol /contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. +3. Вибравши `GraphToken.abi` та відкривши його в редакторі, перейдіть до розділу Deploy і `Run Transactions` в інтерфейсі Remix. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. У розділі середовища виберіть `Injected Web3`, а в розділі `Account` виберіть свою адресу індексатора. -5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. +5. Установіть адресу контракту GraphToken. Вставте адресу контракту GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) поруч із полем `At Address` та натисніть кнопку `At Address`, щоб застосувати. -6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). +6. Виберіть функцію `approve(spender, amount)`, для схвалення транзакції про взаємодію зі стейкінг контрактом. Заповніть поле `spender` адресою стейкінг контракта (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) і поле `amount` кількістю токенів, які буду використані для стейка (у wei). -#### Stake tokens +#### Стейкінг токенів -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Відкрийте [програму Remix](https://remix.ethereum.org/) у браузері -2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. +2. У `File Explorer` створіть файл під назвою **Staking.abi** зі стейкінгом ABI. -3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. +3. Вибравши `Staking.abi` та відкривши його в редакторі, перейдіть до розділу `Deploy` і `Run Transactions` в інтерфейсі Remix. -4. Under environment select `Injected Web3` and under `Account` select your Indexer address. +4. У розділі середовища виберіть `Injected Web3`, а в розділі `Account` виберіть свою адресу індексатора. -5. 
Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. +5. Установіть адресу стейкінг контракта. Вставте цю адресу (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) поруч із полем `At Address` та натисніть кнопку `At Address`, щоб застосувати. -6. Call `stake()` to stake GRT in the protocol. +6. Викличте `stake()`, щоб застейкати токени GRT у протоколі. -7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. +7. (Необов’язково) Індексатори можуть схвалити іншу адресу як оператора своєї інфраструктури індексатора, щоб відокремити ключі, які контролюють кошти, від тих, які виконують щоденні дії, такі як розподіл на підграфах і обслуговування (оплачених) запитів. Щоб встановити оператора, викликайте `setOperator()` з адресою оператора. -8. (Optional) In order to control the distribution of rewards and strategically attract Delegators Indexers can update their delegation parameters by updating their indexingRewardCut (parts per million), queryFeeCut (parts per million), and cooldownBlocks (number of blocks). To do so call `setDelegationParameters()`. The following example sets the queryFeeCut to distribute 95% of query rebates to the Indexer and 5% to Delegators, set the indexingRewardCutto distribute 60% of indexing rewards to the Indexer and 40% to Delegators, and set `thecooldownBlocks` period to 500 blocks. +8. (Необов'язково) Для того, щоб контролювати розподіл винагород і стратегічно залучати делегатів, індексатори можуть оновлювати свої параметри делегування, змінюючи indexingRewardCut (частини на мільйон), queryFeeCut (частини на мільйон) і cooldownBlocks (кількість блоків). Для цього викличте `setDelegationParameters()`. У наступному прикладі queryFeeCut налаштовує на розподіл 95% комісії за запити для Індексатора та 5% для Делегатів, та встановлює indexingRewardCutto розподіляти 60% винагород за індексування для Індексатора та 40% для Делегатів, і встановлює `thecooldownBlocks` період до 500 блоків. ``` setDelegationParameters(950000, 600000, 500) ``` -### The life of an allocation - -After being created by an Indexer a healthy allocation goes through four states. - -- **Active** - Once an allocation is created on-chain ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) it is considered **active**. A portion of the Indexer's own and/or delegated stake is allocated towards a subgraph deployment, which allows them to claim indexing rewards and serve queries for that subgraph deployment. The Indexer agent manages creating allocations based on the Indexer rules. +### Термін розподілу -- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). 
+Після створення індексатором правильний розподіл проходить через чотири стани. -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. +- **Active** – після створення розподілу в мережі ([allocateFrom()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/ Staking.sol#L873)) вважається **активним**. Частина власного та/або делегованого стейку Індексатора розподіляється для розгортання підграфа, що дозволяє йому отримувати винагороди за індексування та обслуговувати запити для цього розгортання підграфа. Агент індексатора керує створенням розподілів на основі правил індексування. -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. +- **Closed** – індексатор може вільно закрити виділення, коли мине 1 епоха ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master) /contracts/staking/Staking.sol#L873)) або їхній агент автоматично закриє розподіл після **maxAllocationEpochs** (наразі 28 днів). Коли розподіл закрито за допомогою доказу індексації (POI), їх винагороди за індексацію розподіляються між Індексатором і його Делегатами (див. "how are rewards distributed?" нижче, щоб дізнатися більше). -Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. +Індексаторам рекомендується використовувати функцію offchain синхронізації для синхронізації розгортань підграфів з head of chain перед створенням розподілів у мережі. Ця функція особливо корисна для підграфів, синхронізація яких може зайняти понад 28 епох, або для яких існує ймовірність невизначеного збою. diff --git a/website/pages/uk/network/overview.mdx b/website/pages/uk/network/overview.mdx index bee546908372..ca85a9f5e322 100644 --- a/website/pages/uk/network/overview.mdx +++ b/website/pages/uk/network/overview.mdx @@ -1,15 +1,15 @@ --- -title: Network Overview +title: Загальний огляд мережі --- -The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. +Graph Network - це протокол децентралізованої індексації для організації даних блокчейну. Додатки використовують GraphQL для запитів до відкритих API, які називаються підграфами, щоб отримати дані, які індексуються в мережі. За допомогою The Graph розробники можуть створювати без серверні додатки, які повністю працюють на загальнодоступній інфраструктурі. -## Overview +## Короткий огляд -The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. +Graph Network складається з індексаторів, кураторів та делегатів, які надають послуги мережі та передають дані до web3 додатків. 
Споживачі використовують додатки та користуються даними. -![Token Economics](/img/Network-roles@2x.png) +![Економіка токенів](/img/Network-roles@2x.png) -To ensure economic security of The Graph Network and the integrity of data being queried, participants stake and use Graph Tokens ([GRT](/tokenomics)). GRT is a work utility token that is an ERC-20 used to allocate resources in the network. +Для забезпечення економічної безпеки The Graph Network і цілісності даних, що запитуються, учасники стейкають і використовують Graph токени ([GRT](/tokenomics)). GRT - це функціональний (utility) токен, який існує в мережі ERC-20 та використовується для розподілу ресурсів в мережі. -Active Indexers, Curators and Delegators can provide services and earn income from the network, proportional to the amount of work they perform and their GRT stake. +Активні індексатори, куратори та делегати можуть надавати послуги та отримувати дохід від мережі, пропорційний обсягу виконаної ними роботи та стейку GRT. diff --git a/website/pages/uk/new-chain-integration.mdx b/website/pages/uk/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/uk/new-chain-integration.mdx +++ b/website/pages/uk/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. 
Create a simple example subgraph. Some options are below: diff --git a/website/pages/uk/operating-graph-node.mdx b/website/pages/uk/operating-graph-node.mdx index 832b6cccf347..7bcb6ac9dab6 100644 --- a/website/pages/uk/operating-graph-node.mdx +++ b/website/pages/uk/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. -### Getting started from source +### Початок роботи з базового коду -#### Install prerequisites +#### Встановіть необхідні умови - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Додаткові вимоги для користувачів Ubuntu** - Для запуску Graph Node на Ubuntu може знадобитися декілька додаткових програм. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Налаштування -1. Start a PostgreSQL database server +1. Запуск сервера бази даних PostgreSQL ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Клонуйте [Graph Node](https://github.com/graphprotocol/graph-node) репозиторій і створіть базовий код, запустивши `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Тепер, коли всі необхідні складові налаштовано, запустіть Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Порт | Призначення | Розташування | Аргумент CLI | Перемінна оточення | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | HTTP-сервер GraphQL
    (для запитів до підграфів) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS<br />
    (для підписок на підграфи) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (для керування розгортаннями) | / | --admin-port | - | +| 8030 | API стану індексації підграфів | /graphql | --index-node-port | - | +| 8040 | Метрики Prometheus | /metrics | --metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. diff --git a/website/pages/uk/publishing/publishing-a-subgraph.mdx b/website/pages/uk/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/uk/publishing/publishing-a-subgraph.mdx +++ b/website/pages/uk/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). diff --git a/website/pages/uk/querying/distributed-systems.mdx b/website/pages/uk/querying/distributed-systems.mdx index 85337206bfd3..8c1c765f3e37 100644 --- a/website/pages/uk/querying/distributed-systems.mdx +++ b/website/pages/uk/querying/distributed-systems.mdx @@ -1,37 +1,37 @@ --- -title: Distributed Systems +title: Розподілені системи --- -The Graph is a protocol implemented as a distributed system. +The Graph - це протокол, що реалізований як розподілена система. -Connections fail. Requests arrive out of order. Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests. These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. +З'єднання не вдається. Запити поступають не за порядком. Різні комп'ютери з неузгодженими часовими та становими процесами обробляють пов'язані між собою запити. Сервери перезавантажуються. Між запитами відбуваються реорганізації. Ці проблеми притаманні всім розподіленим системам, але вони поглиблюються в системах, що працюють в глобальному масштабі. -Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. +Розглянемо на цьому прикладі, що може статися, якщо клієнт запросить в індексатора останні дані під час повторної реорганізації. -1. Indexer ingests block 8 -2. Request served to the client for block 8 -3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +1. Індексатор індексує блок 8 +2. Запит від клієнта для блоку 8 обробляється +3. Індексатор індексує блок 9 +4. Індексатор індексує блок 10А +5. Запит від клієнта для блоку 10А обробляється +6. Індексатор знаходить перерозподіл на блоці 10B і відкочується на 10A +7. Запит від клієнта для блоку 9 обробляється +8. Індексатор індексує блок 10В +9. Індексатор індексує блок 11 +10. Запит від клієнта для блоку 11 обробляється -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. 
Along the way, the Indexer serves requests using the latest state it knows about at that time. +З точки зору Індексатора, все розвивається цілком логічно. Час рухається вперед, хоча нам довелося відкотити блок-дядько (uncle block) і відтворити поверх нього блок, узгоджений консенсусом. Протягом цього часу Індексатор обслуговує запити, використовуючи останній відомий йому на той момент стан. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +З точки зору клієнта, щоправда, все виглядає хаотично. Клієнт бачить, що відповіді були для блоків 8, 10, 9 та 11, саме в такому порядку. Ми називаємо це проблемою "block wobble" або "коливанням блоків". Коли клієнт стикається з цим, дані можуть з часом суперечити один одному. Ситуація погіршується, якщо врахувати те, що Індексатори не індексують останні блоки одночасно, і ваші запити можуть бути направлені до декількох різних Індексаторів. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +Клієнт і сервер несуть відповідальність за спільну роботу з метою надання користувачеві узгоджених даних. Залежно від бажаної узгодженості необхідно використовувати різні підходи, оскільки не існує єдиного правильного рішення для кожної проблеми. -Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Міркувати про наслідки роботи розподілених систем складно, але саме рішення може бути зовсім не складним! Ми створили API та шаблони, які допоможуть вам орієнтуватися в найбільш поширених варіантах використання. Наступні приклади ілюструють ці шаблони, але все ж не розкривають деталей, необхідних у робочому коді (наприклад, обробка помилок та скасування запитів), щоб не затьмарити основні ідеї. -## Polling for updated data +## Запит на отримання оновлених даних -The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. +The Graph надає `block: { number_gte: $minBlock }` API, який гарантує, що відповідь стосується одного блоку з номером, що дорівнює або перевищує `$minBlock`. Якщо запит зроблено до інстансу `graph-node`, а min block ще не синхронізовано, то `graph-node` видасть помилку. Якщо `graph-node` вже синхронізував min block, він надішле відповідь для останнього блоку.
Якщо запит надіслано до Edge & Node Gateway, Gateway відфільтрує всіх Індексаторів, які ще не синхронізували min block, і зробить запит на останній блок, який Індексатор синхронізував. -We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: +Ми можемо використовувати `number_gte`, щоб гарантувати, що час ніколи не рухатиметься назад під час циклічного збору даних. Ось приклад: ```javascript /// Updates the protocol.paused variable to the latest @@ -74,11 +74,11 @@ async function updateProtocolPaused() { } ``` -## Fetching a set of related items +## Отримання взаємопов'язаних елементів -Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. +Ще один випадок використання - пошук великого набору або, в більш широкому розумінні, пошук пов'язаних елементів за кількома запитами. На відміну від випадку з запитом (де необхідна узгодженість полягала в тому, щоб переміщатися вперед у часі), бажана узгодженість стосується лише одного конкретного моменту в часі. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Тут ми використаємо `block: { hash: $blockHash }` аргумент, щоб прив'язати всі наші результати до одного блоку. ```javascript /// Gets a list of domain names from a single block using pagination @@ -131,4 +131,4 @@ async function getDomainNames() { } ``` -Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. +Зауважте, що у випадку повторної операції клієнту потрібно буде повторити спробу з першого запиту, щоб оновити хеш блоку до не старого або попереднього блоку. diff --git a/website/pages/uk/querying/querying-best-practices.mdx b/website/pages/uk/querying/querying-best-practices.mdx index 98c0ffb72c61..4edc8bebf95a 100644 --- a/website/pages/uk/querying/querying-best-practices.mdx +++ b/website/pages/uk/querying/querying-best-practices.mdx @@ -1,22 +1,22 @@ --- -title: Querying Best Practices +title: Найкращі практики виконання запитів --- -The Graph provides a decentralized way to query data from blockchains. +The Graph забезпечує децентралізований спосіб запиту даних з блокчейнів. -The Graph network's data is exposed through a GraphQL API, making it easier to query data with the GraphQL language. +Дані мережі The Graph відображаються через GraphQL API, що полегшує запити даних за допомогою мови програмування GraphQL. -This page will guide you through the essential GraphQL language rules and GraphQL queries best practices. +Ця сторінка допоможе вам ознайомитися з основними правилами мови GraphQL та найкращими практиками виконання запитів в GraphQL. --- -## Querying a GraphQL API +## Запити в GraphQL API -### The anatomy of a GraphQL query +### Анатомія запитів на GraphQL -Unlike REST API, a GraphQL API is built upon a Schema that defines which queries can be performed. +На відміну від REST API, GraphQL API побудований на основі Schema, яка визначає, які запити можуть бути виконані. -For example, a query to get a token using the `token` query will look as follows: +Наприклад, запит на отримання токена за допомогою команди `token` буде виглядати наступним чином: ```graphql query GetToken($id: ID!) { @@ -27,7 +27,7 @@ query GetToken($id: ID!) 
{ } ``` -which will return the following predictable JSON response (_when passing the proper `$id` variable value_): +який поверне наступну прогнозовану відповідь JSON (_при проходженні відповідного `$id` variable value_): ```json { @@ -38,9 +38,9 @@ which will return the following predictable JSON response (_when passing the pro } ``` -GraphQL queries use the GraphQL language, which is defined upon [a specification](https://spec.graphql.org/). +Запити GraphQL використовують мову GraphQL, яка визначається [специфікацією](https://spec.graphql.org/). -The above `GetToken` query is composed of multiple language parts (replaced below with `[...]` placeholders): +Вищенаведений запит `GetToken` складається з декількох частин мови програмування (замінено нижче на вставку `[...]`): ```graphql query [operationName]([variableName]: [variableType]) { @@ -52,33 +52,33 @@ query [operationName]([variableName]: [variableType]) { } ``` -While the list of syntactic do's and don'ts is long, here are the essential rules to keep in mind when it comes to writing GraphQL queries: +Хоча список синтаксичних "можна" і "не можна" довгий, ось основні правила, про які слід пам'ятати, коли йдеться про написання запитів в GraphQL: -- Each `queryName` must only be used once per operation. -- Each `field` must be used only once in a selection (we cannot query `id` twice under `token`) -- Some `field`s or queries (like `tokens`) return complex types that require a selection of sub-field. Not providing a selection when expected (or providing one when not expected - for example, on `id`) will raise an error. To know a field type, please refer to [The Graph Explorer](/network/explorer). -- Any variable assigned to an argument must match its type. -- In a given list of variables, each of them must be unique. -- All defined variables must be used. +- Кожне `queryName` може бути використане лише один раз за одну операцію. +- Кожне `field` може бути використане лише один раз за вибірку (ми не можемо запитувати `id` двічі `token`) +- Якийсь `field`s або запити (наприклад `tokens`) повертають складні типи, які вимагають вибору субполя. Не надання вибору, коли це очікується (або надання його, коли цього не очікували, наприклад в `id`) буде видавати помилку. Щоб дізнатися тип поля, будь ласка, зверніться до [The Graph Explorer](/network/explorer). +- Будь-яка змінна, що присвоюється параметру, повинна відповідати його типу. +- У заданому списку змінних кожна з них має бути унікальною. +- Всі задані змінні повинні бути використаними. -Failing to follow the above rules will end with an error from the Graph API. +Недотримання вищевказаних правил призведе до отримання помилки від Graph API. -For a complete list of rules with code examples, please look at our GraphQL Validations guide. +Повний список правил з прикладами коду можна знайти в нашому гайді по валідації GraphQL. -### Sending a query to a GraphQL API +### Відправлення запиту до GraphQL API -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL - це мова програмування і набір механізмів, які передаються через HTTP. -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +Це означає, що ви можете запитувати API GraphQL, використовуючи стандартні команди `fetch` (безпосередньо або через `@whatwg-node/fetch` or `isomorphic-fetch`). 
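For illustration, here is a minimal sketch (an assumption, not part of the original docs) of such a plain-`fetch` request against a subgraph endpoint; the endpoint URL is a placeholder and the query reuses the `GetToken` shape shown earlier:

```javascript
// Minimal sketch: querying a subgraph endpoint with plain fetch.
// The endpoint URL below is a placeholder — substitute your own subgraph's query URL.
const endpoint = 'https://api.thegraph.com/subgraphs/name/<ORGANIZATION>/<SUBGRAPH>'

const query = /* GraphQL */ `
  query GetToken($id: ID!) {
    token(id: $id) {
      id
      owner
    }
  }
`

async function fetchToken(id) {
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    // GraphQL over HTTP: the query string and its variables travel in the JSON body
    body: JSON.stringify({ query, variables: { id } }),
  })
  const { data, errors } = await response.json()
  if (errors) throw new Error(JSON.stringify(errors))
  return data.token
}
```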
-However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +Проте, як зазначено в ["Querying from an Application"](/querying/querying-from-an-application), ми рекомендуємо використовувати `graph-client`, який підтримує такі унікальні функції, як: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Робота з кросс-чейн підграфами: Отримання інформації з декількох підграфів за один запит +- [Автоматичне відстежування блоків](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Автоматична розбивка на сторінки](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Повністю введений результат -Here's how to query The Graph with `graph-client`: +Тут ви можете побачити, як робити запит в The Graph через `graph-client`: ```tsx import { execute } from '../.graphclient' @@ -102,17 +102,17 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +Інші альтернативні GraphQL клієнти описані в ["Querying from an Application"](/querying/querying-from-an-application). -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +Тепер, коли ми розглянули основні правила синтаксису запитів GraphQL, розгляньмо найкращі практики написання запитів в GraphQL. --- -## Writing GraphQL queries +## Написання запитів в GraphQL -### Always write static queries +### Завжди пишіть статичні запити -A common (bad) practice is to dynamically build query strings as follows: +Поширеною (негативною) практикою є динамічна побудова рядків запитів наступним чином: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... 
``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +Хоча наведений вище фрагмент створює коректний запит GraphQL, **він має багато недоліків**: -- it makes it **harder to understand** the query as a whole -- developers are **responsible for safely sanitizing the string interpolation** -- not sending the values of the variables as part of the request parameters **prevent possible caching on server-side** -- it **prevents tools from statically analyzing the query** (ex: Linter, or type generations tools) +- це **ускладнює розуміння** запиту в цілому +- розробник **відповідає за безпечне очищення інтерполяції рядків** +- значення змінних не передаються у складі параметрів запиту, що **унеможливлює кешування на стороні сервера** +- це **не дозволяє інструментам статично аналізувати запит** (Linter, або інструменти для генерації типів) -For this reason, it is recommended to always write queries as static strings: +З цієї причини рекомендується завжди писати запити як статичні рядки: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +Це приносить **багато користі**: -- **Easy to read and maintain** queries -- The GraphQL **server handles variables sanitization** -- **Variables can be cached** at server-level -- **Queries can be statically analyzed by tools** (more on this in the following sections) +- **Легко читати та обслуговувати** запити +- GraphQL **сервер обробляє очищення змінних** +- **Змінні можуть бути кешовані** на рівні сервера +- **Запити можна статично аналізувати за допомогою інструментів** (більше про це в наступних розділах) -**Note: How to include fields conditionally in static queries** +**Примітка: Як умовно додавати поля в статичні запити** -We might want to include the `owner` field only on a particular condition. +Можливо, ми захочемо додати поле `owner` лише за певних умов. -For this, we can leverage the `@include(if:...)` directive as follows: +Для цього ми можемо використовувати директиву `@include(if:...)` наступним чином: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,21 +191,21 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +Примітка: Протилежною директивою є `@skip(if: ...)`. -### Performance tips +### Поради щодо ефективності -**"Ask for what you want"** +**"Проси, що хочеш"** -GraphQL became famous for its "Ask for what you want" tagline. +GraphQL став відомим завдяки своєму слогану "Ask for what you want" ("Проси, що хочеш"). -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +З цієї причини в GraphQL не існує способу отримати всі доступні поля без необхідності перелічувати кожне з них окремо. -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +Запитуючи API GraphQL, завжди запитуйте тільки ті поля, які дійсно будуть використовуватися. -A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities.
+Поширеною причиною надмірної вибірки є колекції об'єктів. За замовчуванням запити отримують 100 об'єктів з колекції, що зазвичай набагато більше, ніж буде використано, наприклад, для демонстрації користувачеві. Тому в запитах майже завжди слід явно встановлювати перше значення, і переконатися, що вони отримують стільки об'єктів, скільки їм насправді потрібно. Це стосується не лише колекцій верхнього рівня в запиті, але й навіть більше - відкладених колекцій об'єктів. -For example, in the following query: +Наприклад, у наступному запиті: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +Результат може містити 100 транзакцій для кожного зі 100 токенів. -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +Якщо програмі потрібно лише 10 транзакцій, у запиті слід явно задати `first: 10` у полі транзакцій. -**Combining multiple queries** +**Поєднання декількох запитів** -Your application might require querying multiple types of data as follows: +Ваша програма може потребувати запитів до декількох типів даних, як показано нижче: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +Хоча ця версія є цілком прийнятною, вона потребуватиме двох раундів звернень до API GraphQL. -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +На щастя, також допустимо надсилати декілька запитів в одному до GraphQL, як показано нижче: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +Такий підхід **покращить загальну продуктивність** за рахунок зменшення часу, витраченого на мережу (заощадить вам час на повернення до API) і забезпечить **компактнішу реалізацію**. -### Leverage GraphQL Fragments +### Використання фрагментів GraphQL -A helpful feature to write GraphQL queries is GraphQL Fragment. +Корисною функцією для написання запитів до GraphQL є GraphQL Fragment. -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +Дивлячись на наступний запит, ви помітите, що деякі поля повторюються в декількох Selection-Sets (`{ ... }`): ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +Такі поля, що повторюються (`id`, `active`, `status`) створюють багато проблем: -- harder to read for more extensive queries -- when using tools that generate TypeScript types based on queries (_more on that in the last section_), `newDelegate` and `oldDelegate` will result in two distinct inline interfaces. +- важче читається для більш розгорнутих запитів +- при використанні інструментів, які генерують типи TypeScript на основі запитів (_детальніше про це в останньому розділі_), `newDelegate` і `oldDelegate` призводять до появи двох різних вбудованих інтерфейсів. 
-A refactored version of the query would be the following: +Рефакторизована версія запиту виглядатиме наступним чином: ```graphql query { @@ -334,15 +334,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. +Використання GraphQL `fragment` покращить зручність читання (особливо в масштабі), а також призведе до кращої генерації типів TypeScript. -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +При використанні інструменту генерації типів, вищенаведений запит згенерує правильний `DelegateItemFragment` тип (_див. попередній розділ "Tools"_). -### GraphQL Fragment do's and don'ts +### Фрагмент GraphQL, що можна і що не можна робити -**Fragment base must be a type** +**База фрагменту повинна бути типом** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +Фрагмент не може ґрунтуватися на незастосовному типі, тобто на типі, що не має полів, тобто **на типі, що не має полів**: ```graphql fragment MyFragment on BigInt { @@ -350,13 +350,13 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt` - це **скалярний** (нативний "звичайний" тип), який не можна використовувати як основу фрагмента. -**How to spread a Fragment** +**Як розповсюдити Фрагмент** -Fragments are defined on specific types and should be used accordingly in queries. +Фрагменти визначені для певних типів і повинні використовуватися в запитах відповідно до цього. -Example: +Приклад: ```graphql query { @@ -377,20 +377,20 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` та `oldDelegate` відносяться до типу `Transcoder`. -It is not possible to spread a fragment of type `Vote` here. +Неможливо розповсюдити фрагмент типу `Vote` тут. -**Define Fragment as an atomic business unit of data** +**Визначте фрагмент як атомну бізнес-одиницю даних** -GraphQL Fragment must be defined based on their usage. +Фрагменти GraphQL повинні бути заданими на основі їх використання. -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +Для більшості випадків використання достатньо визначити один фрагмент для кожного типу (у випадку повторного використання полів або генерації типів). -Here is a rule of thumb for using Fragment: +Ось практичне правило використання Фрагмента: -- when fields of the same type are repeated in a query, group them in a Fragment -- when similar but not the same fields are repeated, create multiple fragments, ex: +- коли поля одного типу повторюються в запиті, згрупуйте їх у Фрагмент +- коли повторюються схожі, але не однакові поля, створіть кілька фрагментів, наприклад: ```graphql # base fragment (mostly used in listing) @@ -413,51 +413,51 @@ fragment VoteWithPoll on Vote { --- -## The essential tools +## Необхідні інструменти ### GraphQL web-based explorers -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +Виконання запитів у вашому додатку може бути трудомістким. 
Саме тому, не соромтеся використовувати [The Graph Explorer](https://thegraph.com/explorer), щоб протестувати свої запити, перш ніж додавати їх до додатка. The Graph Explorer надасть вам попередньо сконфігурований тестовий простір GraphQL для тестування ваших запитів. -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +Якщо ви шукаєте більш гнучкий спосіб для усунення помилок/тестування ваших запитів, можна використати інші подібні веб-інструменти, такі як [Altair](https://altair.sirmuel.design/) і [GraphiQL](https://graphiql-online.com/graphiql). ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +Для того, щоб відповідати вищезгаданим найкращим практикам і синтаксичним правилам, настійно рекомендується використовувати наступні робочі алгоритми та інструменти IDE. **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) допоможе вам залишатися в курсі найкращих практик GraphQL без зайвих зусиль. -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +[Налаштування "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) забезпечить виконання важливих правил, таких як: -- `@graphql-eslint/fields-on-correct-type`: is a field used on a proper type? -- `@graphql-eslint/no-unused variables`: should a given variable stay unused? -- and more! +- `@graphql-eslint/fields-on-correct-type`: чи використовується поле належного типу? +- `@graphql-eslint/no-unused variables`: чи повинна дана змінна залишатися невикористаною? +- та більше! -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +Це дозволить вам **помічати помилки, навіть не тестуючи запити** в тестовому просторі або запускати їх у якості продукту! -### IDE plugins +### Плагіни IDE -**VSCode and GraphQL** +**VSCode і GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +[GraphQL VSCode розширення](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) є чудовим доповненням до вашого процесу розробки, щоб отримати: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets -- go to definition for fragments and input types +- виділення синтаксису +- автозаповнення пропозицій +- валідацію за схемою +- фрагменти +- перехід до визначення для фрагментів і типів вхідних даних -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +Якщо ви використовуєте `graphql-eslint`, [ESLint VSCode розширення](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint), що вкрай необхідне для правильної візуалізації помилок та попереджень, закладених у вашому коді. 
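As a concrete reference for the "operations-recommended" setup mentioned above, a minimal ESLint override for `.graphql` documents might look like the sketch below; treat the exact keys as assumptions to check against the graphql-eslint documentation for the version you install:

```javascript
// .eslintrc.js — minimal sketch, assuming @graphql-eslint/eslint-plugin is installed
module.exports = {
  overrides: [
    {
      // Lint GraphQL documents (queries, fragments) separately from JS/TS sources
      files: ['*.graphql'],
      parser: '@graphql-eslint/eslint-plugin',
      plugins: ['@graphql-eslint'],
      extends: ['plugin:@graphql-eslint/operations-recommended'],
    },
  ],
}
```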
-**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij і GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) значно покращить ваш досвід роботи з GraphQL, надавши: -- syntax highlighting -- autocomplete suggestions -- validation against schema -- snippets +- виділення синтаксису +- автозаповнення пропозицій +- валідацію за схемою +- фрагменти -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +Більше інформації можна знайти тут [у статті від WebStorm](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/), де продемонстровано всі основні функції плагіна. diff --git a/website/pages/uk/querying/querying-from-an-application.mdx b/website/pages/uk/querying/querying-from-an-application.mdx index 30b6c2264d64..90e71e286935 100644 --- a/website/pages/uk/querying/querying-from-an-application.mdx +++ b/website/pages/uk/querying/querying-from-an-application.mdx @@ -1,10 +1,10 @@ --- -title: Querying from an Application +title: Отримання запиту з додатка --- -Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: +Після розгортання підграфа у Subgraph Studio або The Graph Explorer, ви отримаєте кінцеву точку для вашого GraphQL API, яка має виглядати приблизно так: -**Subgraph Studio (testing endpoint)** +**Subgraph Studio (тестування кінцевої точки)** ```sh Queries (HTTP) @@ -18,26 +18,26 @@ Queries (HTTP) https://gateway.thegraph.com/api//subgraphs/id/ ``` -Using the GraphQL endpoint, you can use various GraphQL Client libraries to query the subgraph and populate your app with the data indexed by the subgraph. +Використовуючи кінцеву точку GraphQL, ви можете використовувати різні клієнтські бібліотеки GraphQL для запитів до підграфа і заповнення вашого додатка даними, проіндексованими підграфом. -Here are a couple of the more popular GraphQL clients in the ecosystem and how to use them: +Ось декілька найпопулярніших GraphQL-клієнтів в екосистемі та способи їх використання: ## GraphQL clients ### Graph client -The Graph is providing it own GraphQL client, `graph-client` that supports unique features such as: +Graph надає власний GraphQL client, `graph-client`, який підтримує унікальні можливості, такі як: -- Cross-chain Subgraph Handling: Querying from multiple subgraphs in a single query -- [Automatic Block Tracking](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) -- [Automatic Pagination](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) -- Fully typed result +- Робота з кросс-чейн підграфами: Отримання інформації з декількох підграфів за один запит +- [Автоматичне відстежування блоків](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) +- [Автоматична розбивка на сторінки](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) +- Повністю введений результат -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. 
+Окрім того, інтегрований з популярними GraphQL-клієнтами, такими як Apollo та URQL, і сумісний з усіма середовищами (React, Angular, Node.js, React Native), використання `graph-client` дасть вам найкращий досвід взаємодії з The Graph. -Let's look at how to fetch data from a subgraph with `graphql-client`. +Розгляньмо, як отримати дані з сабграфа за допомогою `graphql-client`. -To get started, make sure to install The Graph Client CLI in your project: +Щоб почати, переконайтеся, що ви встановили The Graph Client CLI у вашому проєкті: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +Опишіть ваш запит у файлі `.graphql` (або вставленому у ваш файл `.js` або `.ts`): ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +Потім створіть файл конфігурації (з назвою `.graphclientrc.yml`) і вкажіть у ньому кінцеві точки GraphQL, запропоновані The Graph, до прикладу: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +Виконання наступної команди The Graph Client CLI згенерує введений і готовий до використання JavaScript код: ```sh graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +Наостанок, оновіть ваш файл `.ts`, щоб використовувати згенеровані типізовані документи GraphQL: ```tsx import React, { useEffect } from 'react' @@ -134,33 +134,33 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ Важливе зауваження** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client` чудово інтегрований з іншими GraphQL-клієнтами, такими як Apollo client, URQL або React Query; ви можете знайти [ приклади в офіційному репозиторії](https://github.com/graphprotocol/graph-client/tree/main/examples). -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +Однак, якщо ви вирішите використовувати інший клієнт, майте на увазі, що **ви не зможете використовувати крос-чейн обробку підграфів або Automatic Pagination на сторінки, які є основними функціями для запитів до The Graph**. ### Apollo client -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[Apollo client](https://www.apollographql.com/docs/) - це розповсюджений клієнт GraphQL у фронт-енд екосистемі. 
-Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +Доступний для React, Angular, Vue, Ember, iOS та Android, Apollo Client, хоча і є найважчим клієнтом, надає багато можливостей для створення розширеного інтерфейсу користувача поверх GraphQL: -- advanced error handling -- pagination -- data prefetching -- optimistic UI -- local state management +- покращена система обробки помилок +- пагінація +- попередня вибірка даних +- оптимістичний інтерфейс користувача +- місцеве публічне керування -Let's look at how to fetch data from a subgraph with Apollo client in a web project. +Розгляньмо, як отримати дані з підграфа за допомогою клієнта Apollo у вебпроєкті. -First, install `@apollo/client` and `graphql`: +Спочатку, установіть `@apollo/client` і `graphql`: ```sh npm install @apollo/client graphql ``` -Then you can query the API with the following code: +Після цього ви можете зробити запит до API за допомогою наступного коду: ```javascript import { ApolloClient, InMemoryCache, gql } from '@apollo/client' @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +Щоб використовувати змінні, ви можете вказати в запиті аргумент `variables`: ```javascript const tokensQuery = ` @@ -226,22 +226,22 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +Іншим варіантом є [URQL](https://formidable.com/open-source/urql/), який доступний у середовищах Node.js, React/Preact, Vue та Svelte, з більш розширеними можливостями: -- Flexible cache system -- Extensible design (easing adding new capabilities on top of it) -- Lightweight bundle (~5x lighter than Apollo Client) -- Support for file uploads and offline mode +- Гнучка кеш-система +- Динамічний дизайн (полегшує додавання нових функцій) +- Полегшений варіант (приблизно в 5 разів легший за Apollo Client) +- Підтримка завантаження файлів та офлайн-режиму -Let's look at how to fetch data from a subgraph with URQL in a web project. +Розгляньмо, як отримати дані з підграфа за допомогою URQL у вебпроєкті. -First, install `urql` and `graphql`: +Для початку, установіть `urql` та `graphql`: ```sh npm install urql graphql ``` -Then you can query the API with the following code: +Після цього ви можете зробити запит до API за допомогою наступного коду: ```javascript import { createClient } from 'urql' diff --git a/website/pages/uk/querying/querying-the-hosted-service.mdx b/website/pages/uk/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/uk/querying/querying-the-hosted-service.mdx +++ b/website/pages/uk/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. 
An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/uk/querying/querying-with-python.mdx b/website/pages/uk/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/uk/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. 
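As a small illustration of the CSV-export workflow linked above, the DataFrame returned by `query_df` in the earlier example can be saved with standard pandas calls; the file name below is an arbitrary assumption:

```python
# Reuses the `sg` and `latest_markets` objects defined in the example above.
df = sg.query_df([
    latest_markets.name,
    latest_markets.totalValueLockedUSD,
])

# `query_df` returns a regular pandas DataFrame, so any pandas export works
df.to_csv("aave_v2_top_markets.csv", index=False)
```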
diff --git a/website/pages/uk/quick-start.mdx b/website/pages/uk/quick-start.mdx new file mode 100644 index 000000000000..adfb2ad62a6a --- /dev/null +++ b/website/pages/uk/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Швидкий старт +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +Цей покроковий посібник написаний з урахуванням того, що у вас уже є: + +- Адреса смартконтракту в мережі, яку ви обрали +- GRT для того, щоб використати для подачі сигналу на підграф, у якості куратора +- Криптогаманець + +## 1. Створення підграфа в Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Як тільки ви це зробите, можете починати, натиснувши кнопку "create a subgraph" Оберіть ту мережу, з якою бажаєте працювати та натисніть продовжити. + +## 2. Встановлення Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +На вашому локальному комп'ютері запустіть одну з наведених нижче команд: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +Коли ви ініціалізуєте ваш підграф, CLI інструмент запитає вас про таку інформацію: + +- Протокол: виберіть протокол, з якого ваш підграф буде індексувати дані +- Підграф мітка: створіть ім'я для вашого підграфа. Ваша підграф мітка є ідентифікатором для вашого підграфа. +- Директорія для створення підграфа в ній: оберіть вашу локальну директорію +- Мережа Ethereum (необов'язково): можливо, вам потрібно буде вказати, з якої EVM-сумісної мережі ваш підграф буде індексувати дані +- Адреса контракту: Вкажіть адресу смарт-контракту, з якого ви хочете запитувати дані +- ABI: Якщо ABI не заповнюється автоматично, вам потрібно буде ввести його вручну у вигляді JSON-файлу +- Стартовий блок: рекомендується вказати стартовий блок, щоб заощадити час, поки ваш підграф індексує дані з блокчейну. Ви можете знайти стартовий блок, знайшовши блок, де був розгорнутий ваш контракт. +- Назва контракту: введіть назву вашого контракту +- Індексація подій контракту у якості елементів: рекомендується встановити значення true, оскільки це автоматично додасть відповідність вашого підграфа для кожної виданої події +- Додання ще одного контракту (необов'язково): ви можете додати ще один контракт + +Ініціалізуйте ваш підграф з наявного контракту, виконавши наступну команду: + +```sh +graph init --studio +``` + +На наступному скриншоті ви можете побачити, чого варто очікувати при ініціалізації вашого підграфа: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Написання вашого підграфа + +Попередні команди створюють так званий "скелет" підграфа, який ви можете використовувати як відправну точку для побудови вашого підграфа. При внесенні змін до підграфа ви будете працювати переважно з трьома файлами: + +- Маніфест (subgraph.yaml) - Маніфест визначає, які джерела даних будуть індексуватися вашими підграфами. +- Схема (schema.graphql) - схема The GraphQL визначає, які дані ви хочете отримати з підграфа. 
+- AssemblyScript Mappings (mapping.ts) - Це код, який транслює дані з ваших джерел даних до елементів, визначених у схемі. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Розгортання в Subgraph Studio + +Як тільки ваш підграф буде написаний, виконайте наступні команди: + +```sh +$ graph codegen +$ graph build +``` + +- Автентифікуйте та розгорніть ваш підграф. Ключ для розгортання можна знайти на сторінці підграфа у Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Тестування вашого підграфа + +Ви можете протестувати свій підграф, зробивши зразок запиту в розділі "playground" (середовище для тестування). + +Журнали покажуть вам, чи є якісь помилки у вашому підграфі. Журнал робочого підграфа матиме такий вигляд: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Публікація вашого підграфа в децентралізованій мережі The Graph + +Після того, як ваш підграф буде розгорнуто в Subgraph Studio, протестовано і ви будете готові запустити його у роботу, ви можете опублікувати його в децентралізованій мережі. + +У Subgraph Studio натисніть на ваш підграф. На сторінці підграфа ви зможете натиснути кнопку "publish" у верхньому правому куті. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Перш ніж ви зможете запитувати ваш підграф, індексатори повинні почати обслуговувати запити до нього. Щоб спростити цей процес, ви можете надіслати сигнал на власний підграф за допомогою токенів GRT. + +На момент написання статті рекомендується використовувати для подання сигналу щонайменше 10 000 GRT, щоб переконатися, що ваш підграф буде проіндексований і доступний для запитів якнайшвидше. + +Щоб заощадити на витратах на газ, ви можете надіслати сигнал на власний підграф у тій самій транзакції, в якій ви його опублікували, вибравши цю функцію під час публікації підграфа в децентралізованій мережі The Graph: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Запит до вашого підграфа + +Тепер ви можете запитувати ваш підграф, надсилаючи GraphQL-запити на URL-адресу запиту вашого підграфа, яку ви можете знайти, натиснувши на кнопку запиту. + +Якщо у вас немає ключа API, ви можете робити запити з вашого додатка через безплатну тимчасову URL-адресу запиту з обмеженим тарифом, яку можна використовувати для розробки та тестування. 
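For example, a minimal sketch of such a request with `curl` might look like this; the query URL is a placeholder taken from your subgraph's page, and `_meta` is used only because it is a metadata field available on every subgraph:

```sh
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{"query": "{ _meta { block { number } } }"}' \
  "<QUERY_URL>"
```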
+ +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/uk/substreams.mdx b/website/pages/uk/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/uk/substreams.mdx +++ b/website/pages/uk/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/uk/sunrise.mdx b/website/pages/uk/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/uk/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. 
If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. 
Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? 
+ +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/uk/tokenomics.mdx b/website/pages/uk/tokenomics.mdx index c62199302ebc..afb0f5d46943 100644 --- a/website/pages/uk/tokenomics.mdx +++ b/website/pages/uk/tokenomics.mdx @@ -11,7 +11,7 @@ Graph - це децентралізований протокол, який за Це схоже на модель B2B2C, за винятком того, що вона працює на основі децентралізованої мережі користувачів. Користувачі мережі працюють разом, щоб надавати дані кінцевим користувачам в обмін на винагороду в токенах GRT. GRT - це робочий ютіліті токен, який координує постачальників і споживачів даних. GRT дає можливість для координації постачальників і споживачів даних у мережі та стимулює учасників протоколу до ефективної організації даних. -Використовуючи The Graph, користувачі з легкістю можуть отримати доступ до даних блокчейну, оплачуючи виключно якусь специфічну інформацію, яка їм потрібна. 
Сьогодні The Graph використовується багатьма [популярними додатками](https://thegraph.com/explorer) в веб-3 екосистемі. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph індексує дані блокчейну подібно до того, як Google індексує дані в інтернеті. Мало того, ви вже могли використовувати The Graph, навіть не розуміючи цього. Якщо ви розглядали інтерфейс децентралізованого застосунку, який отримує дані з підграфа, то ви автоматично зробили запит на дані з підграфа! @@ -75,7 +75,7 @@ Fishermen та Arbitrators також є невіднятною частиною Індексатори можуть заробляти GRT двома способами: -1. Комісія за запити: GRT, що сплачується розробниками або користувачами за запити даних до підграфів. Ця плата вноситься в компенсаційний пул і згодом розподіляється між індексаторами. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Винагороди за індексування: 3% щорічної емісії токенів розподіляється серед індексаторів на основі кількості підграфів, які вони індексують в той момент. Ці винагороди стимулюють індексаторів індексувати підграфи, іноді навіть до того, як почне стягуватися комісія за запити, нараховувати та подавати підтвердження індексування (POI), які підтверджують, що вони точно проіндексували дані. diff --git a/website/pages/ur/arbitrum/arbitrum-faq.mdx b/website/pages/ur/arbitrum/arbitrum-faq.mdx index d16ce3af8eb3..cf04e2f7ce74 100644 --- a/website/pages/ur/arbitrum/arbitrum-faq.mdx +++ b/website/pages/ur/arbitrum/arbitrum-faq.mdx @@ -41,7 +41,7 @@ L2 پر گراف استعمال کرنے کا فائدہ اٹھانے کے لی ## اگر میں L2 پر نیٹ ورک میں حصہ لینا چاہتا ہوں تو مجھے کیا کرنا چاہیے؟ -براہ کرم L2 پر [نیٹ ورک کی جانچ](https://testnet.thegraph.com/explorer) میں مدد کریں اور [ڈسکورڈ](https://discord.gg/graphprotocol) میں اپنے تجربے کے بارے میں تاثرات کی اطلاع دیں۔ +براہ کرم L2 پر [نیٹ ورک](https://testnet.thegraph.com/explorer) کی جانچ کرنے میں مدد کریں اور [Discord](https://discord.gg/graphprotocol) میں اپنے تجربے کے بارے میں تاثرات کی اطلاع دیں. ## کیا نیٹ ورک کو L2 کرنے سے متعلق کوئی خطرہ ہے؟ diff --git a/website/pages/ur/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/ur/arbitrum/l2-transfer-tools-faq.mdx index 726144dc83ea..eb1a3cd20a2c 100644 --- a/website/pages/ur/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/ur/arbitrum/l2-transfer-tools-faq.mdx @@ -3,253 +3,331 @@ title: | L2 ٹرانسفر ٹولز اکثر پوچھے گئے سوالات --- -> L2 ٹرانسفر ٹولز ابھی تک جاری نہیں کیے گئے ہیں۔ ان کے 2023 کے موسم گرما میں دستیاب ہونے کی امید ہے. +## General -## L2 ٹرانسفر ٹولز کیا ہیں؟ +### L2 ٹرانسفر ٹولز کیا ہیں؟ -گراف نے Arbitrum One میں پروٹوکول کو تعینات کرکے شراکت داروں کے لیے نیٹ ورک میں حصہ لینے کے لیے اسے 26 گنا سستا کر دیا ہے۔ L2 ٹرانسفر ٹولز کو کور ڈویلپرز نے L2 میں منتقل کرنا آسان بنانے کے لیے بنایا تھا۔ ہر پروٹوکول شریک کے لیے، منتقلی کے مددگاروں کے ایک سیٹ کا اشتراک کیا جائے گا تاکہ تجربہ کو ہموار بنانے کے لیے L2 میں منتقل ہونے، پگھلنے کے وقفوں سے گریز کرنے یا GRT کو دستی طور پر واپس لینے اور بریج کرنے کے لیے۔ یہ ٹولز آپ سے اس بات پر منحصر ہوں گے کہ آپ کا کردار گراف میں کیا ہے اور آپ L2 میں کیا منتقل کر رہے ہیں. 
+The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## کیا میں وہی والیٹ استعمال کر سکتا ہوں جو میں ایتھیریم مین نیٹ پر استعمال کرتا ہوں؟ +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### کیا میں وہی والیٹ استعمال کر سکتا ہوں جو میں ایتھیریم مین نیٹ پر استعمال کرتا ہوں؟ اگر آپ ایک [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) والیٹ استعمال کر رہے ہیں تو آپ وہی ایڈریس استعمال کر سکتے ہیں۔ اگر آپ کا ایتھیریم مین نیٹ والیٹ ایک کنٹریکٹ ہے (مثلاً ایک ملٹی سگ) تو آپ کو [آربٹرم والیٹ ایڈریس](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) کی وضاحت کرنا ہوگی جہاں آپ کی منتقلی بھیجی جائے گی۔ براہ کرم ایڈریس کو احتیاط سے چیک کریں کیونکہ کسی بھی غلط ایڈریس پر منتقلی کے نتیجے میں مستقل نقصان ہو سکتا ہے۔ اگر آپ L2 پر ملٹی سگ استعمال کرنا چاہتے ہیں، تو یقینی بنائیں کہ آپ Arbitrum One پر ملٹی سگ کنٹریکٹ تعینات کرتے ہیں. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. 
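+
+For example, a minimal sketch (assuming ethers v6 and placeholder RPC URLs) showing that one key pair resolves to the same address on both networks:
+
+```typescript
+// Minimal sketch: the same EOA (key pair) has the same address on
+// Ethereum mainnet and Arbitrum One. The RPC URLs are placeholders.
+import { JsonRpcProvider, Wallet, formatEther } from 'ethers'
+
+const wallet = Wallet.createRandom() // or: new Wallet('<PRIVATE_KEY>')
+
+const mainnet = new JsonRpcProvider('<ETHEREUM_MAINNET_RPC_URL>')
+const arbitrum = new JsonRpcProvider('<ARBITRUM_ONE_RPC_URL>')
+
+async function main(): Promise<void> {
+  // The address is derived from the key pair alone, so it is identical
+  // on both networks; no on-chain deployment is involved.
+  console.log('Address:', wallet.address)
+
+  const [mainnetBalance, arbitrumBalance] = await Promise.all([
+    mainnet.getBalance(wallet.address),
+    arbitrum.getBalance(wallet.address),
+  ])
+  console.log('Mainnet balance (ETH):', formatEther(mainnetBalance))
+  console.log('Arbitrum balance (ETH):', formatEther(arbitrumBalance))
+}
+
+main().catch(console.error)
+```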
+ +### اگر میں 7 دن میں اپنی منتقلی مکمل نہیں کر پاتا تو کیا ہو گا؟ + +L2 ٹرانسفر ٹول L1 کو پیغامات بھیجنے کے لیے Arbitrum کا مقامی طریقہ استعمال کرتے ہیں۔ اس طریقہ کار کو "ریٹری ایبل ٹکٹ" کہا جاتا ہے اور اس کا استعمال تمام مقامی ٹوکن برجز بشمول Arbitrum GRT بریج کے ذریعے کیا جاتا ہے۔ آپ دوبارہ قابل کوشش ٹکٹوں کے بارے میں مزید پڑھ سکتے ہیں [Arbitrum دستاویزات](https://docs.arbitrum.io/arbos/l1-to-l2-messaging) میں۔ + +جب آپ اپنے اثاثے (سب گراف، سٹیک، ڈیلیگیشن یا کیوریشن) L2 پر منتقل کرتے ہیں، تو Arbitrum GRT بریج کے ذریعے ایک پیغام بھیجا جاتا ہے جو L2 میں دوبارہ ریٹری ایبل ٹکٹ بناتا ہے۔ ٹرانسفر ٹول میں ٹرانزیکشن میں کچھ ایتھیریم ویلیو شامل ہوتی ہے، جس کا استعمال 1) ٹکٹ بنانے کے لیے ادائیگی اور 2) L2 میں ٹکٹ کو انجام دینے کے لیے گیس کی ادائیگی کے لیے کیا جاتا ہے۔ تاہم، چونکہ L2 میں ٹکٹ کے مکمل ہونے کے لیے تیار ہونے تک گیس کی قیمتیں مختلف ہو سکتی ہیں، اس لیے یہ ممکن ہے کہ خودکار طریقے سے عمل درآمد کی یہ کوشش ناکام ہو جائے۔ جب ایسا ہوتا ہے، تو Arbitrum بریج دوبارہ کوشش کے قابل ٹکٹ کو 7 دنوں تک زندہ رکھے گا، اور کوئی بھی ٹکٹ کو "چھڑانے" کی دوبارہ کوشش کر سکتا ہے (جس کے لیے Arbitrum کے لیے کچھ ایتھیریم والے والیٹ کی ضرورت ہوتی ہے)۔ + +اسے ہم منتقلی کے تمام ٹولز میں "تصدیق" مرحلہ کہتے ہیں - یہ زیادہ تر معاملات میں خود بخود چلے گا، کیونکہ خود کار طریقے سے عمل اکثر کامیاب ہوتا ہے، لیکن یہ ضروری ہے کہ آپ اس بات کو یقینی بنانے کے لیے دوبارہ چیک کریں۔ اگر یہ کامیاب نہیں ہوتا ہے اور 7 دنوں میں کوئی کامیاب کوشش نہیں ہوتی ہے، تو Arbitrum بریج ٹکٹ کو رد کر دے گا، اور آپ کے اثاثے (سب گراف، سٹیک، ڈیلیگیشن یا کیوریشن) ضائع ہو جائیں گے اور بازیافت نہیں ہو سکیں گے۔ گراف کور ڈویلپرز کے پاس ان حالات کا پتہ لگانے کے لیے ایک نگرانی کا نظام موجود ہے اور بہت دیر ہونے سے پہلے ٹکٹوں کو چھڑانے کی کوشش کریں، لیکن یہ یقینی بنانا آپ کی ذمہ داری ہے کہ آپ کی منتقلی بروقت مکمل ہو جائے۔ اگر آپ کو اپنے ٹرانزیکشن کی تصدیق کرنے میں دشواری ہو رہی ہے، تو براہ کرم [اس فارم](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) اور کور ڈویلپرز کا استعمال کرتے ہوئے رابطہ کریں۔ وہاں آپ کی مدد ہو گی. + +### میں نے اپنا ڈیلیگیشن/سٹیک/کیوریشن کی منتقلی شروع کی ہے اور مجھے یقین نہیں ہے کہ آیا یہ L2 تک پہنچا ہے، میں کیسے تصدیق کر سکتا ہوں کہ اسے صحیح طریقے سے منتقل کیا گیا تھا؟ + +اگر آپ کو اپنے پروفائل پر کوئی بینر نظر نہیں آتا ہے جس میں آپ سے منتقلی مکمل کرنے کے لیے کہا گیا ہو، تو امکان ہے کہ ٹرانزیکشن نے اسے محفوظ طریقے سے L2 تک پہنچا دیا ہے اور مزید کارروائی کی ضرورت نہیں ہے۔ اگر شک ہو تو، آپ چیک کر سکتے ہیں کہ آیا ایکسپلورر آپ کی ڈیلیگیشن، سٹیک یا کیوریشن Arbitrum One پر دکھاتا ہے۔ + +اگر آپ کے پاس L1 ٹرانزیکشن ہیش ہے (جسے آپ اپنے والیٹ میں حالیہ ٹرانزیکشن کو دیکھ کر تلاش کر سکتے ہیں)، تو آپ اس بات کی بھی تصدیق کر سکتے ہیں کہ آیا L2 پر پیغام پہنچانے والے "دوبارہ کوشش کے قابل ٹکٹ" کو یہاں سے چھڑا لیا گیا تھا: https://retryable-dashboard .arbitrum.io/ - اگر خودکار نکالنا ناکام ہو جاتا ہے، تو آپ اپنے والیٹا کو وہاں بھی کنیکٹ کر کے اسے نکال سکتے ہیں۔ یقین رکھیں کہ بنیادی ڈویلپرز ان پیغامات کی بھی نگرانی کر رہے ہیں جو پھنس گئے ہیں، اور ان کی میعاد ختم ہونے سے پہلے انہیں نکالنے کی کوشش کریں گے۔ + ## سب گراف منتقلی -## ایتھیریم میں اپنے سب گراف کو کیسے منتقل کروں؟ +### میں اپنا سب گراف کیسے منتقل کروں؟ + + -اپنے سب گراف کو منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہوگی: +اپنے سب گراف کو منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: 1. ایتھیریم مین نیٹ پر منتقلی شروع کریں 2. تصدیق کے لیے 20 منٹ انتظار کریں -3. Arbitrum پر سب گراف ٹرانسفر کی تصدیق کریں\* +3. Arbitrum پر سب گراف منتقلی کی تصدیق کریں\* 4. Arbitrum پر سب گراف کی اشاعت مکمل کریں -5. 
کیوری URL کو اپ ڈیٹ کریں (تجویز کردہ) +5. کیوری لنک اپ ڈیٹ کریں (تجویز کردہ) -\*نوٹ کریں کہ آپ کو 7 دنوں کے اندر منتقلی کی تصدیق کرنی چاہیے ورنہ آپ کا سب گراف ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [Discord](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں. +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## مجھے اپنی منتقلی کہاں سے شروع کرنی چاہیے؟ +### مجھے اپنی منتقلی کہاں سے شروع کرنی چاہیے؟ -آپ اپنی منتقلی کو [سب گراف اسٹوڈیو](https://thegraph.com/studio/)، [Explorer,](https://thegraph.com/explorer) یا کسی بھی سب گراف کی تفصیلات کے صفحہ سے شروع کر سکتے ہیں۔ منتقلی شروع کرنے کے لیے سب گراف کی تفصیلات کے صفحہ میں "منتقلی سب گراف" بٹن پر کلک کریں. +آپ اپنی منتقلی کو [سب گراف سٹوڈیو](https://thegraph.com/studio/)، [ایکسپلورر](https://thegraph.com/explorer) یا کسی بھی سب گراف کی تفصیلات کے پیج سے شروع کر سکتے ہیں۔ منتقلی شروع کرنے کے لیے سب گراف کی تفصیلات کے پیج میں "سب گراف منتقل کریں" بٹن کلک کریں۔ -## میرا سب گراف منتقل ہونے تک مجھے کتنا انتظار کرنا پڑے گا +### میرا سب گراف منتقل ہونے تک مجھے کتنا انتظار کرنا پڑے گا منتقلی کا وقت تقریباً 20 منٹ لگتا ہے۔ Arbitrum بریج کی منتقلی کو خود بخود مکمل کرنے کے لیے پس منظر میں کام کر رہا ہے۔ کچھ معاملات میں، گیس کی قیمتیں بڑھ سکتی ہیں اور آپ کو دوبارہ ٹرانزیکشن کی تصدیق کرنی ہوگی. -## کیا میرا سب گراف L2 میں منتقل کرنے کے بعد بھی قابل دریافت ہوگا؟ +### کیا میرا سب گراف L2 میں منتقل کرنے کے بعد بھی قابل دریافت ہو گا؟ -آپ کا سب گراف صرف اس نیٹ ورک پر قابل دریافت ہوگا جس پر اسے شائع کیا گیا ہے۔ مثال کے طور پر، اگر آپ کا سب گراف Arbitrum One پر ہے، تو آپ اسے صرف Arbitrum One پر ایکسپلورر میں تلاش کر سکتے ہیں اور اسے ایتھیریم پر تلاش نہیں کر پائیں گے۔ براہ کرم یقینی بنائیں کہ آپ نے نیٹ ورک سوئچر میں صفحہ کے اوپری حصے میں Arbitrum One کا انتخاب کیا ہے تاکہ یہ یقینی بنایا جا سکے کہ آپ درست نیٹ ورک پر ہیں۔ منتقلی کے بعد، L1 سب گراف فرسودہ کے طور پر ظاہر ہوگا. +آپ کا سب گراف صرف اس نیٹ ورک پر قابل دریافت ہوگا جس پر اسے شائع کیا گیا ہے۔ مثال کے طور پر، اگر آپ کا سب گراف Arbitrum One پر ہے، تو آپ اسے صرف Arbitrum One پر ایکسپلورر میں تلاش کر سکتے ہیں اور اسے ایتھیریم پر تلاش نہیں کر پائیں گے۔ براہ کرم یقینی بنائیں کہ آپ نے نیٹ ورک سوئچر میں پیج کے اوپری حصے میں Arbitrum One کا انتخاب کیا ہے تاکہ یہ یقینی بنایا جا سکے کہ آپ درست نیٹ ورک پر ہیں۔ منتقلی کے بعد، L1 سب گراف فرسودہ کے طور پر ظاہر ہوگا. -## کیا میرے سب گراف کو منتقل کرنے کے لیے اسے شائع کرنے کی ضرورت ہے؟ +### کیا میرے سب گراف کو منتقل کرنے کے لیے اسے شائع کرنے کی ضرورت ہے؟ سب گراف ٹرانسفر ٹول سے فائدہ اٹھانے کے لیے، آپ کا سب گراف پہلے سے ہی ایتھیریم مین نیٹ پر شائع ہونا چاہیے اور اس میں کچھ کیوریشن سگنل ہونا چاہیے جو والیٹ کی ملکیت ہے جو سب گراف کا مالک ہے۔ اگر آپ کا سب گراف شائع نہیں ہوا ہے، تو یہ تجویز کیا جاتا ہے کہ آپ براہ راست Arbitrum One پر شائع کریں - متعلقہ گیس کی فیسیں کافی کم ہوں گی۔ اگر آپ شائع شدہ سب گراف کو منتقل کرنا چاہتے ہیں لیکن مالک کے اکاؤنٹ نے اس پر کوئی سگنل کیوریٹ نہیں کیا ہے، تو آپ اس اکاؤنٹ سے ایک چھوٹی رقم (جیسے 1 GRT) کا اشارہ دے سکتے ہیں۔ یقینی بنائیں کہ "خودکار منتقلی" سگنل کا انتخاب کریں. 
-## میرے سب گراف کے ایتھیریم مین نیٹ ورژن کا کیا ہوتا ہے جب میں Arbitrum میں منتقل ہو جاتا ہوں؟ +### میرے سب گراف کے ایتھیریم مین نیٹ ورزن کا کیا ہوتا ہے جب میں Arbitrum میں منتقل ہو جاتا ہوں؟ -آپ کے سب گراف کو Arbitrum میں منتقل کرنے کے بعد، ایتھیریم مین نیٹ ورژن فرسودہ ہو جائے گا۔ ہمارا مشورہ ہے کہ آپ 48 گھنٹوں کے اندر اپنے کیوری کے URL کو اپ ڈیٹ کریں۔ تاہم، ایک رعایتی مدت موجود ہے جو آپ کے مین نیٹ یو آر ایل کو کام کرتی رہتی ہے تاکہ کسی بھی فریق ثالث ڈی اے پی سپورٹ کو اپ ڈیٹ کیا جا سکے. +آپ کے سب گراف کو Arbitrum میں منتقل کرنے کے بعد، ایتھیریم مین نیٹ ورزن فرسودہ ہو جائے گا۔ ہمارا مشورہ ہے کہ آپ 48 گھنٹوں کے اندر اپنے کیوری کے لنک کو اپ ڈیٹ کریں۔ تاہم، ایک رعایتی مدت موجود ہے جو آپ کے مین نیٹ لنک کو کام میں لاتی رہتی ہے تاکہ کسی بھی فریق ثالث ڈیپ سپورٹ کو اپ ڈیٹ کیا جا سکے۔ -## میرے منتقلی کے بعد، کیا مجھے بھی Arbitrum پر دوبارہ شائع کرنے کی ضرورت ہے؟ +### میری منتقلی کے بعد، کیا مجھے بھی Arbitrum پر دوبارہ شائع کرنے کی ضرورت ہے؟ -20 منٹ کی ٹرانسفر ونڈو کے بعد، آپ کو ٹرانسفر کو ختم کرنے کے لیے UI میں ٹرانزیکشن کے ساتھ ٹرانسفر کی تصدیق کرنی ہوگی، لیکن ٹرانسفر ٹول اس میں آپ کی رہنمائی کرے گا۔ آپ کا L1 اینڈ پوائنٹ ٹرانسفر ونڈو اور اس کے بعد ایک رعایتی مدت کے دوران تعاون کرتا رہے گا۔ یہ حوصلہ افزائی کی جاتی ہے کہ جب آپ کے لیے آسان ہو تو آپ اپنا اختتامی نقطہ اپ ڈیٹ کریں. +20 منٹ کی ٹرانسفر ونڈو کے بعد، آپ کو ٹرانسفر کو ختم کرنے کے لیے UI میں ٹرانزیکشن کے ساتھ ٹرانسفر کی تصدیق کرنی ہوگی، لیکن ٹرانسفر ٹول اس میں آپ کی رہنمائی کرے گا۔ آپ کا L1 اینڈ پوائنٹ ٹرانسفر ونڈو اور اس کے بعد ایک رعایتی مدت کے دوران تعاون کرتا رہے گا۔ یہ حوصلہ افزائی کی جاتی ہے کہ جب آپ کے لیے آسان ہو تو آپ اپنا اینڈ پوائنٹ اپ ڈیٹ کریں. -## کیا دوبارہ شائع کرنے کے دوران میرے اختتامی نقطہ پر کوئی کم وقت ہوگا؟ +### کیا دوبارہ شائع کرنے کے دوران میرا اینڈ پوائنٹ ڈاؤن ٹائم کا تجربہ کرے گا؟ -اپنے سب گراف کو L2 میں منتقل کرنے کے لیے ٹرانسفر ٹول کا استعمال کرتے وقت کوئی کم وقت نہیں ہونا چاہیے۔ آپ کا L1 اینڈ پوائنٹ ٹرانسفر ونڈو کے دوران اور اس کے بعد رعایتی مدت کے دوران تعاون کرتا رہے گا۔ یہ حوصلہ افزائی کی جاتی ہے کہ جب آپ کے لیے آسان ہو تو آپ اپنا اختتامی نقطہ اپ ڈیٹ کریں. +اس بات کا امکان نہیں ہے، لیکن اس بات پر منحصر ہے کہ انڈیکسرز L1 پر سب گراف کو سپورٹ کر رہے ہیں اور کیا وہ اس کو انڈیکس کرتے رہیں گے جب تک کہ L2 پر سب گراف مکمل طور پر سپورٹ نہ ہو جائے، مختصر وقت کا تجربہ کرنا ممکن ہے۔ -## کیا L2 پر اشاعت اور ورژن ایتھیریم ایتھیریم مین نیٹ کی طرح ہے؟ +### کیا L2 پر اشاعت اور ورزن ایتھیریم مین نیٹ کی طرح ہے؟ -جی ہاں. سب گراف اسٹوڈیو میں شائع کرتے وقت اپنے شائع شدہ نیٹ ورک کے طور پر Arbitrum One کو منتخب کرنا یقینی بنائیں۔ اسٹوڈیو میں، تازہ ترین اختتامی نقطہ دستیاب ہوگا جو سب گراف کے تازہ ترین اپ ڈیٹ شدہ ورژن کی طرف اشارہ کرتا ہے. +جی ہاں. سب گراف سٹوڈیو میں شائع کرتے وقت اپنے شائع شدہ نیٹ ورک کے طور پر Arbitrum One کو منتخب کریں۔ سٹوڈیو میں، تازہ ترین اختتامی نقطہ دستیاب ہوگا جو سب گراف کے تازہ ترین اپ ڈیٹ شدہ ورژن کی طرف اشارہ کرتا ہے۔ -## کیا میرے سب گراف کا کیوریشن میرے سب گراف کے ساتھ منتقل ہو جائے گا؟ +### کیا میرے سب گراف کا کیوریشن میرے سب گراف کے ساتھ منتقل ہو جائے گا؟ -اگر آپ نے خودکار منتقلی کے سگنل کا انتخاب کیا ہے، تو آپ کی اپنی کیوریشن کا 100% حصہ آپ کے سب گراف کے ساتھ Arbitrum One میں منتقل ہو جائے گا۔ منتقلی کے وقت سب گراف کے تمام کیوریشن سگنل کو GRT میں تبدیل کر دیا جائے گا، اور آپ کے کیوریشن سگنل کے مطابق GRT L2 سب گراف پر ٹکسال سگنل کے لیے استعمال کیا جائے گا. 
+اگر آپ نے خودکار منتقلی کے سگنل کا انتخاب کیا ہے، تو آپ کی اپنی کیوریشن کا 100% حصہ آپ کے سب گراف کے ساتھ Arbitrum One میں منتقل ہو جائے گا۔ منتقلی کے وقت سب گراف کے تمام کیوریشن سگنل کو GRT میں تبدیل کر دیا جائے گا، اور آپ کے کیوریشن سگنل کے مطابق GRT L2 سب گراف پر سگنل منٹ کے لیے استعمال کیا جائے گا. -دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا GRT کا اپنا حصہ واپس لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے. +دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا GRT کا اپنا حصہ واپس لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے۔ -## کیا میں منتقلی کے بعد اپنے سب گراف کو واپس ایتھیریم مین نیٹ پر منتقل کر سکتا ہوں؟ +### کیا میں منتقلی کے بعد اپنے سب گراف کو واپس ایتھیریم مین نیٹ پر منتقل کر سکتا ہوں؟ -ایک بار منتقل ہونے کے بعد، اس سب گراف کا آپ کا ایتھیریم مین نیٹ ورژن فرسودہ ہو جائے گا۔ اگر آپ مین نیٹ پر واپس جانا چاہتے ہیں، تو آپ کو مین نیٹ پر دوبارہ تعینات اور شائع کرنے کی ضرورت ہوگی۔ تاہم، ایتھیریم مین نیٹ پر واپس منتقلی کی سختی سے حوصلہ شکنی کی جاتی ہے کیونکہ انڈیکسنگ انعامات بالآخر Arbitrum One پر مکمل طور پر تقسیم کیے جائیں گے. +ایک بار منتقل ہونے کے بعد، اس سب گراف کا آپ کا ایتھیریم مین نیٹ ورزن فرسودہ ہو جائے گا۔ اگر آپ مین نیٹ پر واپس جانا چاہتے ہیں، تو آپ کو مین نیٹ پر دوبارہ تعینات اور شائع کرنے کی ضرورت ہوگی۔ تاہم، ایتھیریم مین نیٹ پر واپس منتقلی کی سختی سے حوصلہ شکنی کی جاتی ہے کیونکہ انڈیکسنگ انعامات بالآخر Arbitrum One پر مکمل طور پر تقسیم کیے جائیں گے. -## مجھے اپنا ٹرانسفر مکمل کرنے کے لیے برجڈ ETH کی ضرورت کیوں ہے؟ +### مجھے اپنی منتقلی مکمل کرنے کے لیے پریجڈ ایتھیریم کی ضرورت کیوں ہے؟ -Arbitrum One پر گیس کی فیس برجڈ ای ٹی ایچ (یعنی ای ٹی ایچ جسے Arbitrum One پر پلایا گیا ہے) کا استعمال کرتے ہوئے ادا کیا جاتا ہے۔ تاہم، ایتھیریم مینیٹ کے مقابلے میں گیس کی فیس نمایاں طور پر کم ہے. +Arbitrum One پر گیس فیس بریجڈ ایتھیریم (یعنی ایتھیریم جسے Arbitrum One پر بریج کیا گیا ہے) کا استعمال کرتے ہوئے ادا کیا جاتا ہے۔ تاہم، ایتھیریم مین نیٹ کے مقابلے میں گیس فیس نمایاں طور پر کم ہے۔ -## کیوریشن سگنل +## ڈیلیگیشن -## میں اپنا کیوریشن کیسے منتقل کروں؟ +### میں اپنی ڈیلیگیشن کیسے منتقل کر سکتا ہوں؟ -اپنا کیوریشن منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہوگی: + -1. ایتھیریم مین نیٹ پر سگنل کی منتقلی شروع کریں +اپنی ڈیلیگیشن منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: -2. L2 کیوریٹر ایڈریس کی وضاحت کریں\* +1. ایتھیریم مین نیٹ پر ڈیلیگیشن کی منتقلی شروع کریں +2. تصدیق کے لیے 20 منٹ انتظار کریں +3. Arbitrum پر ڈیلیگیشن کی منتقلی کی تصدیق کریں -3. تصدیق کے لیے 20 منٹ انتظار کریں +\*\*\*\* Arbitrum پر ڈیلیگیشن کی منتقلی کو مکمل کرنے کے لیے آپ کو ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ ڈیلیگیشن ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [ڈسکورڈ](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں۔. -"اگر ضروری ہو تو - یعنی آپ کنٹریکٹ ایڈریس استعمال کر رہے ہیں. 
+### اگر میں ایتھیریم مین نیٹ پر کھلی مختص کے ساتھ منتقلی شروع کرتا ہوں تو میرے انعامات کا کیا ہوگا؟ -## مجھے کیسے پتہ چلے گا کہ میں نے جو سب گراف تیار کیا ہے وہ L2 میں چلا گیا ہے؟ +اگر انڈیکسر جس کو آپ ڈیلیگیٹ کر رہے ہیں وہ اب بھی L1 پر کام کر رہا ہے، جب آپ Arbitrum میں منتقل کرتے ہیں تو آپ ایتھیریم مین نیٹ پر کھلی مختص سے کسی بھی ڈیلیگیشن کے انعامات کو ضائع کر دیں گے۔ اس کا مطلب ہے کہ آپ زیادہ سے زیادہ، آخری 28 دن کی مدت کے انعامات سے محروم ہو جائیں گے۔ اگر آپ انڈیکسر کے مختص بند ہونے کے فوراً بعد منتقلی کا وقت دیتے ہیں تو آپ یقینی بنا سکتے ہیں کہ یہ کم سے کم رقم ہے۔ اگر آپ کا اپنے انڈیکسرز کے ساتھ ایک مواصلاتی چینل ہے، تو اپنی منتقلی کے لیے بہترین وقت تلاش کرنے کے لیے ان کے ساتھ بات چیت کرنے پر غور کریں۔. -سب گراف کی تفصیلات کا صفحہ دیکھتے وقت، ایک بینر آپ کو مطلع کرے گا کہ اس سب گراف کو منتقل کر دیا گیا ہے۔ آپ اپنے کیوریشن کو منتقل کرنے کے لیے پرامپٹ پر عمل کر سکتے ہیں۔ آپ یہ معلومات کسی بھی سب گراف کے سب گراف کی تفصیلات کے صفحہ پر بھی حاصل کر سکتے ہیں جو منتقل ہوا ہے. +### اگر میں فی الحال جس انڈیکسر کو ڈیلیگیٹ کرتا ہوں وہ Arbitrum One پر نہیں ہے تو کیا ہو گا؟ -## اگر میں اپنے کیوریشن کو L2 میں منتقل نہیں کرنا چاہتا تو کیا ہوگا؟ +L2 ٹرانسفر ٹول صرف اس صورت میں فعال کیا جائے گا جب انڈیکسر جس کو آپ نے ڈیلیگیٹ کیا ہے اس نے اپنا سٹیک Arbitrum کو منتقل کر دیا ہے۔ -جب سب گراف فرسودہ ہو جاتا ہے تو آپ کے پاس اپنا سگنل واپس لینے کا اختیار ہوتا ہے۔ اسی طرح، اگر کوئی سب گراف L2 میں منتقل ہو گیا ہے، تو آپ ایتھیریم مین نیٹ میں اپنے سگنل کو واپس لینے یا L2 کو سگنل بھیجنے کا انتخاب کر سکتے ہیں. +### کیا ڈیلیگیٹرز کے پاس کسی دوسرے انڈیکسر کو ڈیلیگیٹ کرنے کا اختیار ہے؟ -## میں کیسے جان سکتا ہوں کہ میرا کیوریشن کامیابی سے منتقل ہو گیا ہے؟ +اگر آپ کسی دوسرے انڈیکسر کو ڈیلیگیٹ کرنا چاہتے ہیں، تو آپ Arbitrum پر اسی انڈیکسر کو منتقل کر سکتے ہیں، پھر ان ڈیلیگیٹ کریں اور ختم ہونے کی مدت کا انتظار کریں۔ اس کے بعد، آپ ڈیلیگیٹ کرنے کے لیے ایک اور فعال انڈیکسر منتخب کر سکتے ہیں۔ -L2 ٹرانسفر ٹول شروع ہونے کے تقریباً 20 منٹ بعد سگنل کی تفصیلات ایکسپلورر کے ذریعے قابل رسائی ہوں گی. +### کیا ہوگا اگر مجھے L2 پر انڈیکسر نہیں مل رہا جسے میں ڈیلیگیٹ کر رہا ہوں؟ -## کیا میں ایک وقت میں ایک سے زیادہ سب گراف پر اپنا کیوریشن منتقل کر سکتا ہوں؟ +L2 ٹرانسفر ٹول خود بخود اس انڈیکسر کا ایڈریس لگائے گا جسے آپ نے پہلے ڈیلیگیٹ کیا تھا. -اس وقت بلک ٹرانسفر کا کوئی آپشن نہیں ہے. +### کیا میں اپنے ڈیلیگیشن کو پہلے کے انڈیکسر کی بجائے نئے یا متعدد انڈیکسرز میں مکس اور میچ یا 'پھیلانے' کے قابل ہو جاؤں گا؟ -## انڈیکسر اسٹیک +L2 ٹرانسفر ٹول ہمیشہ آپ کے ڈیلیگیشن کو اسی انڈیکسر کی طرف لے جائے گا جسے آپ نے پہلے ڈیلیگیٹ کر دیا تھا۔ ایک بار جب آپ L2 میں منتقل ہو جائیں تو، آپ ان ڈیلیگیٹ کر سکتے ہیں، ختم ہونے کی مدت کا انتظار کر سکتے ہیں، اور فیصلہ کر سکتے ہیں کہ آیا آپ اپنے ڈیلیگیشن کو الگ کرنا چاہتے ہیں. -## میں اپنے حصص کو Arbitrum میں کیسے منتقل کروں؟ +### کیا میں کول ڈاؤن پیریڈ سے مشروط ہوں یا L2 ڈیلیگیشن ٹرانسفر ٹول استعمال کرنے کے فوراً بعد واپس لے سکتا ہوں؟ -اپنا حصہ منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہوگی: +ٹرانسفر ٹول آپ کو فوری طور پر L2 پر جانے کی اجازت دیتا ہے۔ اگر آپ ان ڈیلیگیٹ کرنا چاہتے ہیں تو آپ کو ختم ہونے کی مدت کا انتظار کرنا پڑے گا۔ تاہم، اگر کسی انڈیکسر نے اپنے تمام سٹیک L2 کو منتقل کر دیے ہیں، تو آپ ایتھیریم مین نیٹ پر فوری طور پر واپس لے سکتے ہیں. -1. ایتھیریم مین نیٹ پر حصص کی منتقلی شروع کریں +### اگر میں اپنے ڈیلیگیشن کو منتقل نہیں کرتا ہوں تو کیا میرے انعامات پر منفی اثر پڑ سکتا ہے؟ -2. تصدیق کے لیے 20 منٹ انتظار کریں +یہ متوقع ہے کہ تمام نیٹ ورک کی شرکت مستقبل میں Arbitrum One میں منتقل ہو جائے گی. -3. 
Arbitrum پر حصص کی منتقلی کی تصدیق کریں +### میرے ڈیلیگیشن کی L2 میں منتقلی کو مکمل کرنے میں کتنا وقت لگتا ہے؟ -\*نوٹ کریں کہ آپ کو 7 دنوں کے اندر منتقلی کی تصدیق کرنی چاہیے ورنہ آپ کا حصہ ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [Discord](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں. +ڈیلیگیشن کی منتقلی کے لیے 20 منٹ کی تصدیق درکار ہے۔ براہ کرم نوٹ کریں کہ 20 منٹ کی مدت کے بعد، آپ کو واپس آنا چاہیے اور 7 دنوں کے اندر منتقلی کے عمل کا مرحلہ 3 مکمل کرنا چاہیے۔ اگر آپ ایسا کرنے میں ناکام رہے تو آپ کا ڈیلیگیشن ضائع ہو سکتا ہے۔ نوٹ کریں کہ زیادہ تر معاملات میں ٹرانسفر ٹول آپ کے لیے یہ مرحلہ خود بخود مکمل کر دے گا۔ ناکام خودکار کوشش کی صورت میں، آپ کو اسے دستی طور پر مکمل کرنا ہوگا۔ اگر اس عمل کے دوران کوئی مسئلہ پیدا ہوتا ہے، تو پریشان نہ ہوں، ہم مدد کے لیے حاضر ہوں گے: ہم سے support@thegraph.com پر یا [ڈسکورڈ](https://discord.gg/graphprotocol) پر رابطہ کریں۔. -## کیا میرا سارا حصہ منتقل ہو جائے گا؟ +### کیا میں اپنی ڈیلیگیشن کو منتقل کر سکتا ہوں اگر میں GRT ویسٹنگ کنٹریکٹ/ٹوکن لاک والیٹ استعمال کر رہا ہوں؟ -آپ یہ منتخب کر سکتے ہیں کہ آپ کا کتنا حصہ منتقل کرنا ہے۔ اگر آپ اپنے تمام حصص کو ایک ساتھ منتقل کرنے کا انتخاب کرتے ہیں، تو آپ کو پہلے کسی بھی کھلے حصے کو بند کرنے کی ضرورت ہوگی. +جی ہاں! یہ عمل تھوڑا مختلف ہے کیونکہ ویسٹنگ کنٹریکٹ L2 گیس کی ادائیگی کے لیے درکار ایتھیریم کو آگے نہیں بھیج سکتے، اس لیے آپ کو اسے پہلے سے جمع کروانا ہوگا۔ اگر آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر محفوظ نہیں ہے، تو آپ کو پہلے L2 پر ایک ہم منصب ویسٹنگ کنٹریکٹ کو بھی شروع کرنا ہوگا اور صرف اس L2 ویسٹنگ کنٹریکٹ میں ڈیلیگیشن کو منتقل کر سکیں گے۔ جب آپ ویسٹنگ لاک والیٹ کا استعمال کرتے ہوئے ایکسپلورر سے منسلک ہوتے ہیں تو ایکسپلورر پر UI اس عمل میں آپ کی رہنمائی کر سکتا ہے. -اگر آپ متعدد ٹرانزیکشن پر اپنے حصص کے حصوں کو منتقل کرنے کا ارادہ رکھتے ہیں، تو آپ کو ہمیشہ ایک ہی فائدہ اٹھانے والے کا ایڈریس بتانا ہوگا. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -نوٹ: جب آپ پہلی بار ٹرانسفر ٹول استعمال کرتے ہیں تو آپ کو L2 پر کم از کم حصص کی ضروریات کو پورا کرنا ہوگا۔ انڈیکسرز کو کم از کم 100k GRT بھیجنا چاہیے (جب اس فنکشن کو پہلی بار کال کریں)۔ اگر L1 پر حصص کا کچھ حصہ چھوڑتے ہیں، تو یہ 100k GRT کم از کم سے زیادہ ہونا چاہیے اور آپ کی کھلی تقسیم کو پورا کرنے کے لیے کافی (آپ کے وفود کے ساتھ) ہونا چاہیے. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## مجھے اپنے حصص کی آربٹرم میں منتقلی کی تصدیق کرنے کے لیے کتنا وقت درکار ہے؟ +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* Arbitrum پر حصص کی منتقلی کو مکمل کرنے کے لیے آپ کو اپنے ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ داؤ کھو سکتا ہے. 
+### کیا کوئی ڈیلیگیشن ٹیکس ہے؟ -## اگر میرے پاس کھلی رقم ہے تو کیا ہوگا؟ +نہیں۔ L2 پر موصول ہونے والے ٹوکن مخصوص ڈیلیگیٹر کی جانب سے ڈیلیگیشن ٹیکس وصول کیے بغیر مخصوص انڈیکسر کو سونپے جاتے ہیں۔ -اگر آپ اپنے تمام حصص نہیں بھیج رہے ہیں تو، L2 ٹرانسفر ٹول اس بات کی توثیق کرے گا کہ کم از کم 100k GRT ایتھیریم مین نیٹ میں باقی ہے اور آپ کا باقی حصہ اور وفد کسی بھی کھلے مختص کو پورا کرنے کے لیے کافی ہے۔ اگر آپ کا GRT بیلنس کم از کم + کھلی مختصات کا احاطہ نہیں کرتا ہے تو آپ کو کھلی تقسیم بند کرنے کی ضرورت پڑسکتی ہے. +### جب میں اپنے ڈیلیگیشن کو منتقل کروں گا تو کیا میرے غیر حقیقی انعامات کو منتقل کیا جائے گا؟ -## ٹرانسفر ٹولز کا استعمال کرتے ہوئے، کیا ٹرانسفر کرنے سے پہلے ایتھیریم مین نیٹ کو ہٹانے کے لیے 28 دن انتظار کرنا ضروری ہے؟ +ہاں! صرف وہ انعامات جو منتقل نہیں کیے جاسکتے ہیں وہ کھلے مختص کے ہیں، کیونکہ وہ اس وقت تک موجود نہیں ہوں گے جب تک کہ انڈیکسر مختص کو بند نہیں کرتا (عام طور پر ہر 28 دن بعد)۔ اگر آپ تھوڑی دیر کے لیے تفویض کر رہے ہیں، تو یہ ممکنہ طور پر انعامات کا ایک چھوٹا سا حصہ ہے۔ -نہیں۔ 28 دن کا انتظار صرف اس صورت میں لاگو ہوتا ہے جب آپ ایتھیریم مینیٹ یا L2 پر اپنے والیٹ میں حصہ واپس لینا چاہتے ہیں. +سمارٹ کنٹریکٹ کی سطح پر، غیر حقیقی انعامات پہلے سے ہی آپ کے ڈیلیگیشن کے بیلنس کا حصہ ہیں، لہذا جب آپ اپنے ڈیلیگیشن کو L2 میں منتقل کریں گے تو انہیں منتقل کر دیا جائے گا۔ -## میرا حصہ منتقل کرنے میں کتنا وقت لگے گا؟ +### کیا ڈیلیگیشنز کو L2 میں منتقل کرنا لازمی ہے؟ کیا کوئی ڈیڈ لائن ہے؟ -L2 ٹرانسفر ٹول کو آپ کے حصص کی منتقلی مکمل کرنے میں تقریباً 20 منٹ لگیں گے. +ڈیلیگیشن کو L2 میں منتقل کرنا لازمی نہیں ہے، لیکن [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193) میں بیان کردہ ٹائم لائن کے بعد L2 پر انڈیکسنگ انعامات بڑھ رہے ہیں۔۔ آخرکار، اگر کونسل اضافے کو منظور کرتی رہتی ہے، تو تمام انعامات L2 میں تقسیم کیے جائیں گے اور L1 پر انڈیکس کرنے والوں اور ڈیلیگیٹرز کے لیے کوئی انڈیکسنگ انعامات نہیں ہوں گے۔ -## کیا مجھے اپنا حصہ منتقل کرنے سے پہلے Arbitrum پر انڈیکس کرنا ہوگا؟ +### اگر میں کسی ایسے انڈیکسر کو ڈیلیگیٹ کر رہا ہوں جس نے پہلے ہی سٹیک L2 کو منتقل کر دیا ہے، تو کیا میں L1 پر انعامات حاصل کرنا بند کر دوں؟ -آپ انڈیکسنگ کو ترتیب دینے سے پہلے مؤثر طریقے سے اپنا حصص منتقل کر سکتے ہیں، لیکن آپ L2 پر کسی بھی انعام کا دعویٰ نہیں کر سکیں گے جب تک کہ آپ L2 پر سب گرافس کے لیے مختص نہیں کر دیتے، ان کو انڈیکس نہیں کرتے اور POIs پیش نہیں کرتے. +بہت سے انڈیکسرز دھیرے دھیرے سٹیک کی منتقلی کر رہے ہیں لہذا L1 پر انڈیکسرز اب بھی L1 پر انعامات اور فیسیں حاصل کر رہے ہوں گے، جو پھر ڈیلیگیٹرز کے ساتھ شیئر کیے جاتے ہیں۔ ایک بار جب ایک انڈیکسر اپنے تمام سٹیک کو منتقل کر دیتا ہے، تو وہ L1 پر کام کرنا بند کر دے گا، اس لیے ڈیلیگیٹرز کو مزید انعامات نہیں ملیں گے جب تک کہ وہ L2 کو منتقل نہ کر دیں۔ -## کیا ڈیلیگیٹرز اپنے وفد کو منتقل کر سکتے ہیں اس سے پہلے کہ میں اپنے انڈیکسنگ کا حصہ منتقل کروں؟ +بالآخر، اگر کونسل L2 میں انڈیکسنگ کے انعامات میں اضافے کی منظوری دیتی رہتی ہے، تو تمام انعامات L2 پر تقسیم کیے جائیں گے اور L1 پر انڈیکسرز اور ڈیلیگیٹرز کے لیے کوئی انڈیکسنگ کے انعامات نہیں ہوں گے۔ -نہیں, ڈیلیگیٹرز کے لیے اپنے ڈیلیگیٹڈ GRT کو Arbitrum میں منتقل کرنے کے لیے، انڈیکسر جس کو وہ ڈیلیگٹ کر رہے ہیں L2 پر فعال ہونا چاہیے. +### مجھے اپنے ڈیلیگیشن کو منتقل کرنے کا بٹن نظر نہیں آرہا ہے۔ ایسا کیوں ہے؟ -## اگر میں GRT ویسٹنگ کنٹریکٹ / ٹوکن لاک والیٹ استعمال کر رہا ہوں تو کیا میں اپنا حصہ منتقل کر سکتا ہوں؟ +آپ کے انڈیکسر نے شاید ابھی تک سٹیک کی منتقلی کے لیے L2 ٹرانسفر ٹولز کا استعمال نہیں کیا ہے۔ -جی ہاں! 
یہ عمل قدرے مختلف ہے، کیونکہ ویسٹنگ کنٹریکٹ L2 گیس کی ادائیگی کے لیے درکار ETH کو آگے نہیں بھیج سکتے، اس لیے آپ کو اسے پہلے سے جمع کروانے کی ضرورت ہے۔ اگر آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر نہیں ہے، تو آپ کو پہلے L2 پر ایک ہم منصب ویسٹنگ کنٹریکٹ بھی شروع کرنا ہو گا اور آپ صرف اس L2 ویسٹنگ کنٹریکٹ میں حصص کو منتقل کر سکیں گے۔ جب آپ ویسٹنگ لاک والیٹ کا استعمال کرتے ہوئے ایکسپلورر سے منسلک ہوتے ہیں تو ایکسپلورر پر UI اس عمل میں آپ کی رہنمائی کر سکتا ہے. +اگر آپ انڈیکسر سے رابطہ کر سکتے ہیں، تو آپ انہیں L2 ٹرانسفر ٹولز استعمال کرنے کی ترغیب دے سکتے ہیں تاکہ ڈیلیگیٹرز ڈیلیگیشنز کو اپنے L2 انڈیکسر ایڈریس پر منتقل کر سکیں۔ -## ڈیلیگیشن +### میرا انڈیکسر Arbitrum پر ہے، لیکن مجھے اپنی پروفائل میں ڈیلیگیشن منتقل کرنے کا بٹن نہیں نظر آ رہا۔ ایسا کیوں ہے؟ -## میں اپنے ڈیلیگیشن کو کیسے منتقل کروں؟ +یہ ممکن ہے کہ انڈیکسر نے L2 پر آپریشنز مرتب کیے ہوں، لیکن اس نے سٹیک کی منتقلی کے لیے L2 ٹرانسفر ٹولز کا استعمال نہیں کیا ہے۔ لہذا L1 سمارٹ کنٹریکٹس انڈیکسر کے L2 ایڈریس کے بارے میں نہیں جانتے ہوں گے۔ اگر آپ انڈیکسر سے رابطہ کر سکتے ہیں، تو آپ انہیں ٹرانسفر ٹول استعمال کرنے کی ترغیب دے سکتے ہیں تاکہ ڈیلیگیٹرز ڈیلیگیشنز کو اپنے L2 انڈیکسر ایڈریس پر منتقل کر سکیں۔ -اپنے ڈیلیگیشن کو منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہوگی: +### کیا میں اپنے ڈیلیگیشنز کو L2 میں منتقل کر سکتا ہوں اگر میں نے غیر ڈیلیگیشنز کا عمل شروع کر دیا ہے اور ابھی تک اسے واپس نہیں لیا ہے؟ -1. ایتھیریم مین نیٹ پر ڈیلیگیشن کی منتقلی شروع کریں +نہیں۔ اگر آپ کی ڈیلیگیشن ختم ہو رہی ہے، آپ کو 28 دنوں تک انتظار کرنا ہو گا اور پھر اسے نکالیں۔ + +جن ٹوکنز کو غیر ڈیلیگیٹ کیا جا رہا ہے وہ "لاک" ہیں اور اس لیے L2 میں منتقل نہیں کیے جا سکتے۔ + +## کیوریشن سگنل + +### میں اپنی کیوریشن کیسے منتقل کروں؟ + +اپنی کیوریشن منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: + +1. ایتھیریم مین نیٹ پر سگنل کی منتقلی شروع کریں + +2. L2 کیوریٹر ایڈریس کی وضاحت کریں\* + +3. تصدیق کے لیے 20 منٹ انتظار کریں + +\*اگر ضروری ہو تو - یعنی آپ کنٹریکٹ ایڈریس استعمال کر رہے ہیں. + +### مجھے کیسے پتہ چلے گا کہ میں نے جو سب گراف تیار کیا ہے وہ L2 میں چلا گیا ہے؟ + +سب گراف کی تفصیلات کا پیج دیکھتے وقت، ایک بینر آپ کو مطلع کرے گا کہ اس سب گراف کو منتقل کر دیا گیا ہے۔ آپ اپنے کیوریشن کو منتقل کرنے کے لیے پرامپٹ پر عمل کر سکتے ہیں۔ آپ یہ معلومات کسی بھی سب گراف کے سب گراف کی تفصیلات کے پیج پر بھی حاصل کر سکتے ہیں جو منتقل ہوا ہے۔ + +### اگر میں اپنے کیوریشن کو L2 میں منتقل نہیں کرنا چاہتا تو کیا ہو گا؟ + +جب سب گراف فرسودہ ہو جاتا ہے تو آپ کے پاس اپنا سگنل واپس لینے کا اختیار ہوتا ہے۔ اسی طرح، اگر کوئی سب گراف L2 میں منتقل ہو گیا ہے، تو آپ ایتھیریم مین نیٹ میں اپنے سگنل کو واپس لینے یا L2 کو سگنل بھیجنے کا انتخاب کر سکتے ہیں. + +### میں کیسے جان سکتا ہوں کہ میری کیوریشن کامیابی سے منتقل ہو گئی ہے؟ + +L2 ٹرانسفر ٹول شروع ہونے کے تقریباً 20 منٹ بعد سگنل کی تفصیلات ایکسپلورر کے ذریعے قابل رسائی ہوں گی. + +### کیا میں ایک وقت میں ایک سے زیادہ سب گراف پر اپنی کیوریشن منتقل کر سکتا ہوں؟ + +اس وقت بلک ٹرانسفر کا کوئی آپشن نہیں ہے. + +## انڈیکسر سٹیک + +### میں اپنا سٹیک Arbitrum پر کیسے منتقل کروں؟ + +> اعلان دستبرداری: اگر آپ فی الحال اپنے انڈیکسر پر اپنے GRT کے کسی بھی سٹیک کو ہٹا رہے ہیں، تو آپ L2 ٹرانسفر ٹولز استعمال نہیں کر سکیں گے۔ + + + +اپنا سٹیک منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: + +1. ایتھیریم مین نیٹ پر سٹیک کی منتقلی شروع کریں 2. تصدیق کے لیے 20 منٹ انتظار کریں -3. Arbitrum پر ڈیلیگیشن کی منتقلی کی تصدیق کریں +3. 
Arbitrum پر سٹیک کی منتقلی کی تصدیق کریں -\*\*\*\*Arbitrum پر ڈیلیگیشن کی منتقلی کو مکمل کرنے کے لیے آپ کو ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ ڈیلیگیشن ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت پڑ سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [Discord](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں. +\*نوٹ کریں کہ آپ کو 7 دنوں کے اندر منتقلی کی تصدیق کرنی چاہیے ورنہ آپ کا سٹیک ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [ڈسکورڈ](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں۔ -## اگر میں ایتھیریم مین نیٹ پر کھلی رقم کے ساتھ منتقلی شروع کرتا ہوں تو میرے انعامات کا کیا ہوگا؟ +### کیا میرا تمام سٹیک منتقل ہو جائے گا؟ -اگر انڈیکسر جس کو آپ ڈیلیٹ کر رہے ہیں وہ ابھی بھی L1 پر کام کر رہا ہے، جب آپ Arbitrum میں منتقل کرتے ہیں تو آپ ایتھیریم مین نیٹ پر کھلی مختص سے کسی بھی ڈیلیگیشن کے انعامات کو ضائع کر دیں گے۔ اس کا مطلب ہے کہ آپ زیادہ سے زیادہ، آخری 28 دن کی مدت کے انعامات سے محروم ہو جائیں گے۔ اگر آپ انڈیکسر کے مختص کرنے کے فورا بعد منتقلی کا وقت دیتے ہیں تو آپ یقینی بنا سکتے ہیں کہ یہ کم سے کم رقم ہے۔ اگر آپ کے پاس اپنے انڈیکسرز کے ساتھ ایک مواصلاتی چینل ہے، تو اپنی منتقلی کے لیے بہترین وقت تلاش کرنے کے لیے ان کے ساتھ بات چیت کرنے پر غور کریں. +آپ یہ منتخب کر سکتے ہیں کہ آپ کا کتنا سٹیک منتقل کرنا ہے۔ اگر آپ اپنے تمام سٹیک کو ایک ساتھ منتقل کرنے کا انتخاب کرتے ہیں، تو آپ کو پہلے کسی بھی کھلے سٹیک کو بند کرنے کی ضرورت پڑے گی۔ -## اگر میں فی الحال جس انڈیکسر کی نمائندگی کرتا ہوں وہ Arbitrum One پر نہیں ہے تو کیا ہوگا؟ +اگر آپ متعدد ٹرانزیکشن پر اپنے سٹیک کے حصوں کو منتقل کرنے کا ارادہ رکھتے ہیں، تو آپ کو ہمیشہ ایک ہی بینیفیشری کا ایڈریس بتانا ہوگا. -L2 ٹرانسفر ٹول صرف اس صورت میں فعال کیا جائے گا جب انڈیکسر جس کو آپ نے ڈیلیگیٹ کیا ہے اس نے اپنا حصہ Arbitrum کو منتقل کر دیا ہے. +نوٹ: جب آپ پہلی بار ٹرانسفر ٹول استعمال کرتے ہیں تو آپ کو L2 پر کم سے کم سٹیک کی ضروریات کو پورا کرنا ہوگا۔ انڈیکسرز کو کم سے کم 100 ہزار GRT بھیجنا چاہیے (جب اس فنکشن کو پہلی بار کال کریں)۔ اگر L1 پر حصص کا کچھ حصہ چھوڑتے ہیں، تو یہ 100 ہزار GRT کم سے کم سے زیادہ ہونا چاہیے اور آپ کی کھلی تقسیم کو پورا کرنے کے لیے کافی (آپ کے ڈیلیگیشنز کے ساتھ) ہونا چاہیے. -## کیا ڈیلیگیٹرز کے پاس کسی دوسرے انڈیکسر کو ڈیلیگیٹ کرنے کا اختیار ہے؟ +### مجھے اپنے سٹیک کو Arbitrum میں منتقلی کی تصدیق کرنے کے لیے کتنا وقت درکار ہے؟ -اگر آپ کسی دوسرے انڈیکسر کو ڈیلیگیٹ کرنا چاہتے ہیں، تو آپ Arbitrum پر اسی انڈیکسر کو منتقل کر سکتے ہیں، پھر غیر منتخب کریں اور پگھلنے کی مدت کا انتظار کریں۔ اس کے بعد، آپ ڈیلیگیٹ کرنے کے لیے ایک اور فعال انڈیکسر منتخب کر سکتے ہیں. +\*\*\* Arbitrum پر سٹیک کی منتقلی کو مکمل کرنے کے لیے آپ کو اپنے ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ سٹیک کھو سکتا ہے. -## کیا ہوگا اگر میں L2 پر انڈیکسر نہیں پا رہا ہوں جس کو میں ڈیلیگیٹ کر رہا ہوں؟ +### اگر میرے پاس کھلی مختص ہے تو کیا ہوگا؟ -L2 ٹرانسفر ٹول خود بخود اس انڈیکسر کا ایڈریس لگائے گا جسے آپ نے پہلے ڈیلیگیٹ کیا تھا. 
+اگر آپ اپنے تمام سٹیک نہیں بھیج رہے ہیں تو، L2 ٹرانسفر ٹول اس بات کی توثیق کرے گا کہ کم سے کم 100 ہزار GRT ایتھیریم مین نیٹ میں باقی ہیں اور آپ کا باقی سٹیک اور ڈیلیگیشن کسی بھی کھلے مختص کو پورا کرنے کے لیے کافی ہے۔ اگر آپ کا GRT بیلنس کم سے کم + کھلی مختصات کا احاطہ نہیں کرتا ہے تو آپ کو کھلی مختص بند کرنے کی ضرورت پڑسکتی ہے. -## کیا میں اپنے ڈیلیگیشن کو پہلے کے انڈیکسر کی بجائے نئے یا متعدد انڈیکسرز میں مکس اور میچ یا 'پھیلانے' کے قابل ہو جاؤں گا؟ +### ٹرانسفر ٹولز کا استعمال کرتے ہوئے، کیا منتقل کرنے سے پہلے ایتھیریم مین نیٹ کو ہٹانے کے لیے 28 دن انتظار کرنا ضروری ہے؟ -L2 ٹرانسفر ٹول ہمیشہ آپ کے ڈیلیگیشن کو اسی انڈیکسر کی طرف لے جائے گا جسے آپ نے پہلے سونپ دیا تھا۔ ایک بار جب آپ L2 میں منتقل ہو جائیں تو، آپ ان ڈیلیگیٹ کر سکتے ہیں، پگھلنے کی مدت کا انتظار کر سکتے ہیں، اور فیصلہ کر سکتے ہیں کہ آیا آپ اپنے ڈیلیگیشن کو الگ کرنا چاہتے ہیں. +نہیں، آپ L2 پر اپنا سٹیک فوری منتقل کر سکتے ہیں، ٹرانسفر ٹول کو استعمال کرنے سے پہلے اسے واپس لینے اور انتظار کرنے کی ضرورت نہیں ہے۔ 28 دن کا انتظار صرف اس صورت میں لاگو ہوتا ہے جب آپ ایتھیریم مین نیٹ یا L2 پر اپنے والیٹ میں سٹیک واپس لینا چاہتے ہیں۔ -## کیا میں کولڈاؤن پیریڈ سے مشروط ہوں یا L2 ڈیلیگیشن ٹرانسفر ٹول استعمال کرنے کے فوراً بعد واپس لے سکتا ہوں؟ +### میرا سٹیک منتقل ہونے میں کتنا وقت لگے گا؟ -منتقلی کا آلہ آپ کو فوری طور پر L2 پر جانے کی اجازت دیتا ہے۔ اگر آپ انڈیلیگیٹ کرنا چاہتے ہیں تو آپ کو پگھلنے کی مدت کا انتظار کرنا پڑے گا۔ تاہم، اگر کسی انڈیکسر نے اپنے تمام حصص L2 کو منتقل کر دیے ہیں، تو آپ ایتھیریم مین نیٹ پر فوری طور پر واپس لے سکتے ہیں. +L2 ٹرانسفر ٹول کو آپ کے سٹیک کی منتقلی مکمل کرنے میں تقریبآٓ 20 منٹ لگیں گے۔ -## اگر میں اپنے ڈیلیگیشن کو منتقل نہیں کرتا ہوں تو کیا میرے انعامات پر منفی اثر پڑ سکتا ہے؟ +### کیا مجھے اپنا سٹیک منتقل کرنے سے پہلے Arbitrum پر انڈیکس کرنا ہو گا؟ -یہ متوقع ہے کہ تمام نیٹ ورک کی شرکت مستقبل میں Arbitrum One میں منتقل ہو جائے گی. +آپ انڈیکسنگ کو ترتیب دینے سے پہلے مؤثر طریقے سے اپنا سٹیک منتقل کر سکتے ہیں، لیکن آپ L2 پر کسی بھی انعام کا دعویٰ نہیں کر سکیں گے جب تک کہ آپ L2 پر سب گرافس کے لیے مختص نہیں کر دیتے، ان کو انڈیکس نہیں کرتے اور POIs پیش نہیں کرتے. -## میرے ڈیلیگیشن کی L2 میں منتقلی کو مکمل کرنے میں کتنا وقت لگتا ہے؟ +### کیا ڈیلیگیٹرز اپنے ڈیلیگیشن کو منتقل کر سکتے ہیں اس سے پہلے کہ میں اپنے انڈیکسنگ کا سٹیک منتقل کروں؟ -ڈیلیگیشن کی منتقلی کے لیے 20 منٹ کی تصدیق درکار ہے۔ براہ کرم نوٹ کریں کہ 20 منٹ کی مدت کے بعد، آپ کو واپس آنا چاہیے اور 7 دنوں کے اندر منتقلی کے عمل کا مرحلہ 3 مکمل کرنا چاہیے۔ اگر آپ ایسا کرنے میں ناکام رہے تو آپ کا ڈیلیگیشن ضائع ہو سکتا ہے۔ نوٹ کریں کہ زیادہ تر معاملات میں ٹرانسفر ٹول آپ کے لیے یہ مرحلہ خود بخود مکمل کر دے گا۔ ناکام خودکار کوشش کی صورت میں، آپ کو اسے دستی طور پر مکمل کرنا ہوگا۔ اگر اس عمل کے دوران کوئی مسئلہ پیدا ہوتا ہے، تو پریشان نہ ہوں، ہم مدد کے لیے حاضر ہوں گے: ہم سے support@thegraph.com پر یا [Discord](https://discord.gg/graphprotocol) پر رابطہ کریں. +نہیں، ڈیلیگیٹرز کے لیے اپنے ڈیلیگیٹڈ GRT کو Arbitrum میں منتقل کرنے کے لیے، انڈیکسر جس کو وہ ڈیلیگیٹ کر رہے ہیں L2 پر فعال ہونا چاہیے۔ -## کیا میں اپنے ڈیلیگیشن کو منتقل کر سکتا ہوں اگر میں GRT ویسٹنگ کنٹریکٹ/ٹوکن لاک والیٹ استعمال کر رہا ہوں؟ +### اگر میں GRT ویسٹنگ کنٹریکٹ / ٹوکن لاک والیٹ استعمال کر رہا ہوں تو کیا میں اپنا سٹیک منتقل کر سکتا ہوں؟ -جی ہاں! 
یہ عمل تھوڑا مختلف ہے کیونکہ ویسٹنگ کنٹریکٹ L2 گیس کی ادائیگی کے لیے درکار ETH کو آگے نہیں بھیج سکتے، اس لیے آپ کو اسے پہلے سے جمع کروانا ہوگا۔ اگر آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر محفوظ نہیں ہے، تو آپ کو پہلے L2 پر ایک ہم منصب ویسٹنگ کنٹریکٹ کو بھی شروع کرنا ہوگا اور صرف اس L2 ویسٹنگ کنٹریکٹ میں ڈیلیگیشن کو منتقل کر سکیں گے۔ جب آپ ویسٹنگ لاک والیٹ کا استعمال کرتے ہوئے ایکسپلورر سے منسلک ہوتے ہیں تو ایکسپلورر پر UI اس عمل میں آپ کی رہنمائی کر سکتا ہے. +جی ہاں! یہ عمل قدرے مختلف ہے، کیونکہ ویسٹنگ کنٹریکٹ L2 گیس کی ادائیگی کے لیے درکار ایتھیریم کو آگے نہیں بھیج سکتے، اس لیے آپ کو اسے پہلے سے جمع کروانے کی ضرورت ہے۔ اگر آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر نہیں ہے، تو آپ کو پہلے L2 پر ایک ہم منصب ویسٹنگ کنٹریکٹ بھی شروع کرنا ہو گا اور آپ صرف اس L2 ویسٹنگ کنٹریکٹ میں سٹیک کو منتقل کر سکیں گے۔ جب آپ ویسٹنگ لاک والیٹ کا استعمال کرتے ہوئے ایکسپلورر سے منسلک ہوتے ہیں تو ایکسپلورر پر UI اس عمل میں آپ کی رہنمائی کر سکتا ہے. -## کیا کوئی ڈیلیگیشن ٹیکس ہے؟ +### میرے پاس پہلے سے ہی L2 پر سٹیک ہے۔ جب میں پہلی بار ٹرانسفر ٹولز استعمال کرتا ہوں تو کیا مجھے اب بھی 100 ہزار GRT بھیجنے کی ضرورت ہے؟ -نہیں۔ L2 پر موصول ہونے والے ٹوکن مخصوص ڈیلیگیٹر کی جانب سے ڈیلیگیشن ٹیکس وصول کیے بغیر مخصوص انڈیکسر کو سونپے جاتے ہیں. +ہاں۔ L1 سمارٹ کنٹریکٹس آپ کے L2 سٹیک سے واقف نہیں ہوں گے، لہذا جب آپ پہلی بار منتقل کریں گے تو وہ آپ سے کم از کم 100 ہزار GRT منتقل کرنے کا تقاضا کریں گے۔ -## ویسٹنگ کنٹریکٹ ٹرانسفر +### کیا میں اپنے سٹیک کو L2 میں منتقل کر سکتا ہوں اگر میں GRT کو ہٹانے کے عمل میں ہوں؟ -## میں اپنا ویسٹنگ کنٹریکٹ کیسے منتقل کروں؟ +نہیں اگر آپ کے سٹیک کا کوئی حصہ ختم ہو رہا ہے، تو آپ کو 28 دن انتظار کرنا ہوگا اور اس سے پہلے کہ آپ سٹیک کی منتقلی کر سکیں اسے واپس لے لیں۔ جو ٹوکن داغے جا رہے ہیں وہ "لاک" ہیں اور L2 میں کسی بھی قسم کی منتقلی یا سٹیک کو روکیں گے۔ -اپنی ویسٹنگ کو منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہوگی: +## ویسٹنگ کنٹریکٹ منتقلی -1. ایتھیریم مین نیٹ پر ویسٹنگ ٹرانسفر شروع کریں. +### میں اپنا ویسٹنگ کنٹریکٹ کیسے منتقل کروں؟ + +اپنی ویسٹنگ منتقل کرنے کے لیے، آپ کو درج ذیل مراحل کو مکمل کرنے کی ضرورت ہو گی: + +1. ایتھیریم مین نیٹ پر ویسٹنگ کی منتقلی شروع کریں 2. تصدیق کے لیے 20 منٹ انتظار کریں -3. Arbitrum پر ویسٹنگ ٹرانسفر کی تصدیق کریں +3. Arbitrum پر ویسٹنگ منتقلی کی تصدیق کرنا + +### اگر میں صرف جزوی طور پر ویسٹڈ ہوں تو میں اپنا ویسٹنگ کا کنٹریکٹ کیسے منتقل کروں؟ + + -## اگر میں صرف جزوی طور پر ہوں تو میں اپنا ویسٹنگ کا کنٹریکٹ کیسے منتقل کروں؟ +1. ٹرانسفر ٹول کے کنٹریکٹ میں کچھ ایتھیریم جمع کریں (UI مناسب رقم کا تخمینہ لگانے میں مدد کر سکتا ہے) -1. منتقلی کے ٹول کے کنٹریکٹ میں کچھ ETH جمع کریں (UI مناسب رقم کا تخمینہ لگانے میں مدد کر سکتا ہے) +2. L2 ویسٹنگ لاک کو شروع کرنے کے لیے کچھ لاک شدہ GRT ٹرانسفر ٹول کنٹریکٹ کے ذریعے L2 کو بھیجیں۔ یہ ان کا L2 بینیفیشری کا ایڈریس بھی ترتیب دے گا. -2. L2 ویسٹنگ لاک کو شروع کرنے کے لیے کچھ لاک شدہ GRT ٹرانسفر ٹول کنٹریکٹ کے ذریعے L2 کو بھیجیں۔ یہ ان کا L2 فائدہ اٹھانے والے کا ایڈریس بھی ترتیب دے گا. +3. L1 سٹیکنگ کنٹریکٹ میں "لاک" ٹرانسفر ٹول فنکشنز کے ذریعے اپنا سٹیک/ڈیلیگیشن L2 کو بھیجیں. -3. L1 سٹیکنگ کنٹریکٹ میں "لاک" ٹرانسفر ٹول فنکشنز کے ذریعے اپنا حصہ/ڈیلیگیشن L2 کو بھیجیں. +4. ٹرانسفر ٹول کنٹریکٹ سے کوئی بھی بچا ہوا ایتھیریم نکال لیں -4. منتقلی کے ٹول کے کنٹریکٹ سے کوئی بھی باقی ETH واپس لے لیں +### میں اپنا ویسٹنگ کنٹریکٹ کیسے منتقل کروں اگر میں مکمل طور پر ویسٹڈ ہوں؟ -## اگر میں مکمل طور پر ویسٹڈ ہوں تو میں اپنا ویسٹنگ کا کنٹریکٹ کیسے منتقل کروں؟ + -ان لوگوں کے لیے جو مکمل طور پر مستعد ہیں، یہ عمل ایک جیسا ہے: +ان لوگوں کے لیے جو مکمل ویسٹڈ ہیں، یہ عمل ایک جیسا ہے: -1. 
منتقلی کے ٹول کے کنٹریکٹ میں کچھ ETH جمع کریں (UI مناسب رقم کا تخمینہ لگانے میں مدد کر سکتا ہے) +1. ٹرانسفر ٹول کے کنٹریکٹ میں کچھ ایتھیریم جمع کریں (UI مناسب رقم کا تخمینہ لگانے میں مدد کر سکتا ہے) 2. ٹرانسفر ٹول کنٹریکٹ پر کال کے ساتھ اپنا L2 ایڈریس سیٹ کریں -3. L1 اسٹیکنگ کنٹریکٹ میں "لاکڈ" ٹرانسفر ٹول فنکشنز کے ذریعے اپنا اسٹیک/ڈیلیگیشن L2 کو بھیجیں. +3. L1 سٹیکنگ کنٹریکٹ میں "لاکڈ" ٹرانسفر ٹول فنکشنز کے ذریعے اپنا سٹیک/ڈیلیگیشن L2 کو بھیجیں. -4. منتقلی کے ٹول کے کنٹریکٹ سے کوئی بھی باقی ETH واپس لے لیں +4. ٹرانسفر ٹول کنٹریکٹ سے کوئی بھی بچا ہوا ایتھیریم نکال لیں -## کیا میں اپنا ویسٹنگ کنٹریکٹ Arbitrum کو منتقل کر سکتا ہوں؟ +### کیا میں اپنا ویسٹنگ کنٹریکٹ Arbitrum پر منتقل کر سکتا ہوں؟ -آپ اپنے ویسٹنگ کنٹریکٹ کے GRT بیلنس کو L2 میں ویسٹنگ کنٹریکٹ میں منتقل کر سکتے ہیں۔ حصص یا ڈیلیگیشن کو آپ کے ویسٹنگ کنٹریکٹ سے L2 میں منتقل کرنے کے لیے یہ ایک شرط ہے۔ ویسٹنگ کنٹریکٹ میں GRT کی غیر صفر رقم ہونی چاہیے (اگر ضرورت ہو تو آپ اس میں 1 GRT جیسی چھوٹی رقم منتقل کر سکتے ہیں). +آپ اپنے ویسٹنگ کنٹریکٹ کے GRT بیلنس کو L2 میں ویسٹنگ کنٹریکٹ میں منتقل کر سکتے ہیں۔ سٹیک یا ڈیلیگیشن کو آپ کے ویسٹنگ کنٹریکٹ سے L2 میں منتقل کرنے کے لیے یہ ایک شرط ہے۔ ویسٹنگ کنٹریکٹ میں GRT کی غیر صفر رقم ہونی چاہیے (ضرورت پڑنے پر آپ اس میں 1 GRT جیسی چھوٹی رقم منتقل کر سکتے ہیں). -جب آپ GRT کو اپنے L1 ویسٹنگ کنٹریکٹ سے L2 میں منتقل کرتے ہیں، تو آپ بھیجنے کے لیے رقم کا انتخاب کر سکتے ہیں اور آپ اسے جتنی بار چاہیں کر سکتے ہیں۔ جب آپ پہلی بار GRT منتقل کریں گے تو L2 ویسٹنگ کا کنٹریکٹ شروع کیا جائے گا. +جب آپ GRT کو اپنے L1 ویسٹنگ کنٹریکٹ سے L2 میں منتقل کرتے ہیں، تو آپ بھیجنے کے لیے رقم کا انتخاب کر سکتے ہیں اور آپ اسے جتنی بار چاہیں کر سکتے ہیں۔ جب آپ پہلی بار GRT منتقل کریں گے تو L2 ویسٹنگ کا کنٹریکٹ شروع کیا جائے گا۔ منتقلی ایک ٹرانسفر ٹول کا استعمال کرتے ہوئے کی جاتی ہے جو آپ کے ایکسپلورر پروفائل پر نظر آئے گا جب آپ ویسٹنگ کنٹریکٹ اکاؤنٹ سے منسلک ہوں گے. @@ -257,60 +335,78 @@ L2 ٹرانسفر ٹول ہمیشہ آپ کے ڈیلیگیشن کو اسی ان اگر آپ نے کوئی ویسٹنگ کنٹریکٹ بیلنس L2 میں منتقل نہیں کیا ہے، اور آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ کو اپنا ویسٹنگ کنٹریکٹ L2 میں منتقل نہیں کرنا چاہیے۔ اس کے بجائے، آپ L2 والیٹ کا ایڈریس سیٹ کرنے کے لیے ٹرانسفر ٹولز کا استعمال کر سکتے ہیں، اور براہ راست اپنا حصہ یا ڈیلیگیشن L2 پر اس باقاعدہ والیٹ میں منتقل کر سکتے ہیں. -## مین نیٹ پر داؤ پر لگانے کے لیے میں اپنا بنیان کنٹریکٹ استعمال کر رہا ہوں۔ کیا میں اپنا حصص Arbitrum کو منتقل کر سکتا ہوں؟ +### مین نیٹ پر سٹیک پر لگانے کے لیے میں اپنا ویسٹنگ کنٹریکٹ استعمال کر رہا ہوں۔ کیا میں اپنا سٹیک Arbitrum کو منتقل کر سکتا ہوں؟ -ہاں، لیکن اگر آپ کا کنٹریکٹ اب بھی ویسٹنگ ہے، تو آپ حصص کو صرف اس لیے منتقل کر سکتے ہیں کہ یہ آپ کے L2 ویسٹنگ کنٹریکٹ کی ملکیت ہو۔ آپ کو پہلے ایکسپلورر پر ویسٹنگ کنٹریکٹ ٹرانسفر ٹول کا استعمال کرتے ہوئے کچھ GRT بیلنس منتقل کرکے اس L2 کنٹریکٹ کو شروع کرنا ہوگا۔ اگر آپ کا کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ اپنا حصہ L2 میں کسی بھی پتے پر منتقل کر سکتے ہیں، لیکن آپ کو اسے پہلے سے طے کرنا ہوگا اور L2 گیس کی ادائیگی کے لیے L2 ٹرانسفر ٹول کے لیے کچھ ETH جمع کرانا ہوگا. +ہاں، لیکن اگر آپ کا کنٹریکٹ اب بھی ویسٹنگ ہے، تو آپ سٹیک کو صرف اس لیے منتقل کر سکتے ہیں کہ یہ آپ کے L2 ویسٹنگ کنٹریکٹ کی ملکیت ہو۔ آپ کو پہلے ایکسپلورر پر ویسٹنگ کنٹریکٹ ٹرانسفر ٹول کا استعمال کرتے ہوئے کچھ GRT بیلنس منتقل کرکے اس L2 کنٹریکٹ کو شروع کرنا ہوگا۔ اگر آپ کا کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ اپنا سٹیک L2 میں کسی بھی ایڈریس پر منتقل کر سکتے ہیں، لیکن آپ کو اسے پہلے سے طے کرنا ہوگا اور L2 گیس کی ادائیگی کے لیے L2 ٹرانسفر ٹول کے لیے کچھ ایتھیریم جمع کرانا ہوگا. 
-## مین نیٹ پر ڈیلیگیٹ کرنے کے لیے میں اپنا ویسٹنگ کنٹریکٹ استعمال کر رہا ہوں۔ کیا میں اپنے ڈیلیگیشن کو Arbitrum میں منتقل کر سکتا ہوں؟ +### مین نیٹ پر ڈیلیگیٹ کرنے کے لیے میں اپنا ویسٹنگ کنٹریکٹ استعمال کر رہا ہوں۔ کیا میں اپنے ڈیلیگیشن کو Arbitrum میں منتقل کر سکتا ہوں؟ -ہاں، لیکن اگر آپ کا کنٹریکٹ اب بھی ویسٹنگ ہے، تو آپ ڈیلیگیشن کو صرف اس لیے منتقل کر سکتے ہیں تاکہ یہ آپ کے L2 ویسٹنگ کنٹریکٹ کی ملکیت ہو۔ آپ کو پہلے ایکسپلورر پر ویسٹنگ کنٹریکٹ ٹرانسفر ٹول کا استعمال کرتے ہوئے کچھ GRT بیلنس منتقل کرکے اس L2 کنٹریکٹس کو شروع کرنا ہوگا۔ اگر آپ کا کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ اپنے ڈیلیگیشن کو L2 میں کسی بھی ایڈریس پر منتقل کر سکتے ہیں، لیکن آپ کو اسے پہلے سے طے کرنا چاہیے اور L2 گیس کی ادائیگی کے لیے L2 ٹرانسفر ٹول کے لیے کچھ ETH جمع کرانا چاہیے. +ہاں، لیکن اگر آپ کا کنٹریکٹ اب بھی ویسٹنگ ہے، تو آپ ڈیلیگیشن کو صرف اس لیے منتقل کر سکتے ہیں تاکہ یہ آپ کے L2 ویسٹنگ کنٹریکٹ کی ملکیت ہو۔ آپ کو پہلے ایکسپلورر پر ویسٹنگ کنٹریکٹ ٹرانسفر ٹول کا استعمال کرتے ہوئے کچھ GRT بیلنس منتقل کرکے اس L2 کنٹریکٹس کو شروع کرنا ہوگا۔ اگر آپ کا کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ اپنے ڈیلیگیشن کو L2 میں کسی بھی ایڈریس پر منتقل کر سکتے ہیں، لیکن آپ کو اسے پہلے سے طے کرنا چاہیے اور L2 گیس کی ادائیگی کے لیے L2 ٹرانسفر ٹول کے لیے کچھ ایتھیریم جمع کرانا چاہیے. -## کیا میں L2 پر اپنے ویسٹنگ کنٹریکٹ کے لیے ایک مختلف فائدہ کنندہ کی وضاحت کر سکتا ہوں؟ +### کیا میں L2 پر اپنے ویسٹنگ کنٹریکٹ کے لیے ایک مختلف بینیفیشری کو چن سکتا ہوں؟ -ہاں، پہلی بار جب آپ بیلنس ٹرانسفر کرتے ہیں اور اپنا L2 ویسٹنگ کنٹریکٹ سیٹ کرتے ہیں، تو آپ ایک L2 فائدہ اٹھانے والے کی وضاحت کر سکتے ہیں۔ یقینی بنائیں کہ یہ فائدہ اٹھانے والا ایک پرس ہے جو Arbitrum One پر ٹرانزیکشن کر سکتا ہے، یعنی یہ EOA یا Arbitrum One میں تعینات ملٹی سگ ہونا چاہیے. +ہاں، پہلی بار جب آپ بیلنس ٹرانسفر کرتے ہیں اور اپنا L2 ویسٹنگ کنٹریکٹ سیٹ کرتے ہیں، تو آپ ایک L2 بینیفیشری کو چن سکتے ہیں۔ یقینی بنائیں کہ یہ بینیفیشری ایک والیٹ ہے جو Arbitrum One پر ٹرانزیکشن کر سکتا ہے، یعنی یہ EOA یا Arbitrum One میں تعینات ملٹی سگ ہونا چاہیے. -اگر آپ کا کنٹریکٹس مکمل طور پر محفوظ ہے، تو آپ L2 پر ویسٹنگ کنٹریکٹ قائم نہیں کریں گے۔ اس کے بجائے، آپ ایک L2 والیٹ ایڈریس سیٹ کریں گے اور یہ Arbitrum پر آپ کے حصص یا ڈیلیگیشن کے لیے وصول کرنے والا والیٹ ہوگا. +اگر آپ کا کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ L2 پر ویسٹنگ کنٹریکٹ قائم نہیں کریں گے؛ اس کے بجائے، آپ ایک L2 والیٹ ایڈریس سیٹ کریں گے اور یہ Arbitrum پر آپ کے سٹیک یا ڈیلیگیشن کے لیے وصول کرنے والا والیٹ ہوگا. -## میرا کنٹریکٹ مکمل طور پر محفوظ ہے۔ کیا میں اپنے حصص یا ڈیلیگیشن کو کسی دوسرے پتے پر منتقل کر سکتا ہوں جو L2 ویسٹنگ کنٹریکٹ نہیں ہے؟ +### میرا کنٹریکٹ مکمل طور پر محفوظ ہے۔ کیا میں اپنے سٹیک یا ڈیلیگیشن کو کسی دوسرے ایڈریس پر منتقل کر سکتا ہوں جو L2 ویسٹنگ کنٹریکٹ نہیں ہے؟ -جی ہاں. اگر آپ نے کوئی ویسٹنگ کنٹریکٹ بیلنس L2 میں منتقل نہیں کیا ہے، اور آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ کو اپنا ویسٹنگ کنٹریکٹ L2 میں منتقل نہیں کرنا چاہیے۔ اس کے بجائے، آپ L2 والیٹ کا ایڈریس سیٹ کرنے کے لیے ٹرانسفر ٹولز کا استعمال کر سکتے ہیں، اور براہ راست اپنا حصہ یا ڈیلیگیشن L2 پر اس باقاعدہ والیٹ میں منتقل کر سکتے ہیں. +جی ہاں. اگر آپ نے کوئی ویسٹنگ کنٹریکٹ بیلنس L2 میں منتقل نہیں کیا ہے، اور آپ کا ویسٹنگ کنٹریکٹ مکمل طور پر محفوظ ہے، تو آپ کو اپنا ویسٹنگ کنٹریکٹ L2 میں منتقل نہیں کرنا چاہیے۔ اس کے بجائے، آپ L2 والیٹ کا ایڈریس سیٹ کرنے کے لیے ٹرانسفر ٹولز کا استعمال کر سکتے ہیں، اور براہ راست اپنا سٹیک یا ڈیلیگیشن L2 پر اس باقاعدہ والیٹ میں منتقل کر سکتے ہیں. -یہ آپ کو اپنے حصص یا ڈیلیگیشن کو کسی بھی L2 ایڈریس پر منتقل کرنے کی اجازت دیتا ہے. +یہ آپ کو اپنے سٹیک یا ڈیلیگیشن کو کسی بھی L2 ایڈریس پر منتقل کرنے کی اجازت دیتا ہے. 
-## میرا ویسٹنگ کنٹریکٹ اب بھی بنی ہوئی ہے۔ میں اپنے ویسٹنگ کنٹریکٹ بیلنس کو L2 میں کیسے منتقل کروں؟ +### میرا ویسٹنگ کنٹریکٹ اب بھی ویسٹنگ کر رہا ہے۔ میں اپنے ویسٹنگ کنٹریکٹ بیلنس کو L2 میں کیسے منتقل کروں؟ -یہ اقدامات صرف اس صورت میں لاگو ہوتے ہیں جب آپ کا کنٹریکٹ اب بھی ویسٹنگ کر رہا ہے، یا اگر آپ نے اس عمل کو اس سے پہلے استعمال کیا ہے جب آپ کا کنٹریکٹ ابھی بھی بنیاد تھا. +یہ اقدامات صرف اس صورت میں لاگو ہوتے ہیں جب آپ کا کنٹریکٹ اب بھی ویسٹنگ کر رہا ہے، یا اگر آپ نے اس عمل کو اس سے پہلے استعمال کیا ہے جب آپ کا کنٹریکٹ ابھی بھی ویسٹنگ کر رہا تھا. -اپنے ویسٹنگ کا کنٹریکٹ L2 میں منتقل کرنے کے لیے، آپ منتقلی کے ٹولز کا استعمال کرتے ہوئے کوئی بھی GRT بیلنس L2 کو بھیجیں گے، جو آپ کے L2 ویسٹنگ کنٹریکٹ کو شروع کرے گا: +اپنے ویسٹنگ کا کنٹریکٹ L2 میں منتقل کرنے کے لیے، آپ ٹرانسفر ٹولز کا استعمال کرتے ہوئے کوئی بھی GRT بیلنس L2 کو بھیجیں گے، جو آپ کے L2 ویسٹنگ کنٹریکٹ کو شروع کرے گا: -1. ٹرانسفر ٹول کنٹریکٹ میں کچھ ETH جمع کریں (یہ L2 گیس کی ادائیگی کے لیے استعمال کیا جائے گا) +1. ٹرانسفر ٹول کنٹریکٹ میں کچھ ایتھیریم جمع کریں (یہ L2 گیس کی ادائیگی کے لیے استعمال کیا جائے گا) 2. ویسٹنگ کنٹریکٹ تک پروٹوکول کی رسائی منسوخ کریں (اگلے مرحلے کے لیے درکار) 3. ویسٹنگ کنٹریکٹ تک پروٹوکول تک رسائی دیں (آپ کے کنٹریکٹ کو ٹرانسفر ٹول کے ساتھ تعامل کرنے کی اجازت دے گا) -4. ایک L2 فائدہ اٹھانے والے کا ایڈریس\* بتائیں اور ایتھیریم مین نیٹ پر بیلنس کی منتقلی شروع کریں +4. ایک L2 بینیفیشری کا ایڈریس بتائیں\* اور ایتھیریم مین نیٹ پر بیلنس کی منتقلی شروع کریں 5. تصدیق کے لیے 20 منٹ انتظار کریں 6. L2 پر بیلنس کی منتقلی کی تصدیق کریں -"اگر ضروری ہو تو - یعنی آپ کنٹریکٹ ایڈریس استعمال کر رہے ہیں. +\*اگر ضروری ہو تو - یعنی آپ کنٹریکٹ ایڈریس استعمال کر رہے ہیں. + +\*\*\*\* Arbitrum پر بیلنس کی منتقلی کو مکمل کرنے کے لیے آپ کو اپنے ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ بیلنس ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [ڈسکورڈ](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں۔ + +### میرا ویسٹنگ کنٹریکٹ 0 GRT دکھاتا ہے لہذا میں اسے منتقل نہیں کر سکتا، یہ کیوں ہے اور میں اسے کیسے ٹھیک کروں؟ + +اپنے L2 ویسٹنگ کنٹریکٹ کو شروع کرنے کے لیے، آپ کو GRT کی غیر صفر رقم L2 میں منتقل کرنے کی ضرورت ہے۔ یہ Arbitrum GRT بریج کے ذریعہ درکار ہے جو L2 ٹرانسفر ٹولز کے ذریعہ استعمال ہوتا ہے۔ GRT لازمی طور پر ویسٹنگ کنٹریکٹ کے بیلنس سے آنا چاہیے، اس لیے اس میں اسٹیک یا ڈیلیگیٹڈ GRT شامل نہیں ہے۔ + +اگر آپ نے ویسٹنگ کنٹریکٹ سے اپنے تمام GRT کو سٹیک پر لگا دیا ہے یا ڈیلیگیٹ کیا ہے، تو آپ دستی طور پر 1 GRT جیسی چھوٹی رقم کسی اور جگہ سے ویسٹنگ کنٹریکٹ ایڈریس پر بھیج سکتے ہیں (مثلاً کسی اور والیٹ، یا ایکسچینج سے)۔ + +### میں سٹیک یا ڈیلیگیٹ کرنے کے لیے ایک ویسٹنگ کنٹریکٹ استعمال کر رہا ہوں، لیکن مجھے اپنے سٹیک یا ڈیلیگیشن کو L2 میں منتقل کرنے کے لیے کوئی بٹن نظر نہیں آ رہا، میں کیا کروں؟ + +اگر آپ کے ویسٹنگ کنٹریکٹ نے ویسٹنگ مکمل نہیں کی ہے، تو آپ کو پہلے ایک L2 ویسٹنگ کنٹریکٹ بنانا ہوگا جو L2 پر آپ کا سٹیک یا ڈیلیگیشن وصول کرے گا۔ ویسٹنگ کا یہ معاہدہ ویسٹنگ ٹائم لائن کے اختتام تک L2 میں ٹوکن جاری کرنے کی اجازت نہیں دے گا، لیکن آپ کو وہاں جاری ہونے والے L1 ویسٹنگ کنٹریکٹ میں GRT کو واپس منتقل کرنے کی اجازت دے گا۔ + +ایکسپلورر پر ویسٹنگ کنٹریکٹ کے ساتھ منسلک ہونے پر، آپ کو اپنے L2 ویسٹنگ کنٹریکٹ کو شروع کرنے کے لیے ایک بٹن دیکھنا چاہیے۔ پہلے اس عمل کی پیروی کریں، اور پھر آپ کو اپنے پروفائل میں اپنا سٹیک یا ڈیلیگیشن منتقل کرنے کے بٹن نظر آئیں گے۔ + +### اگر میں اپنے L2 ویسٹنگ کنٹریکٹ کو شروع کرتا 
ہوں، تو کیا یہ میرے ڈیلیگیشن کو خود بخود L2 میں منتقل کر دے گا؟ + +نہیں، آپ کے L2 ویسٹنگ کنٹریکٹ کو شروع کرنا ویسٹنگ کنٹریکٹ سے سٹیک یا ڈیلیگیشن کو منتقل کرنے کے لیے ایک شرط ہے، لیکن پھر بھی آپ کو ان کو الگ سے منتقل کرنے کی ضرورت ہے۔ -\*\*\*\*Arbitrum پر بیلنس کی منتقلی کو مکمل کرنے کے لیے آپ کو اپنے ٹرانزیکشن کی تصدیق کرنی ہوگی۔ یہ مرحلہ 7 دنوں کے اندر مکمل ہونا چاہیے ورنہ بیلنس ضائع ہو سکتا ہے۔ زیادہ تر معاملات میں، یہ مرحلہ خود بخود چلے گا، لیکن اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو دستی تصدیق کی ضرورت ہو سکتی ہے۔ اگر اس عمل کے دوران کوئی مسئلہ درپیش ہے، تو مدد کے لیے وسائل موجود ہوں گے: support@thegraph.com پر یا [Discord](https://discord.gg/graphprotocol) پر سپورٹ سے رابطہ کریں. +آپ کو اپنے پروفائل پر ایک بینر نظر آئے گا جو آپ کو اپنے سٹیک یا ڈیلیگیشن کو منتقل کرنے کا اشارہ کرے گا جب آپ نے اپنا L2 ویسٹنگ کنٹریکٹ شروع کر دیا ہے۔ -## کیا میں اپنے ویسٹنگ کا کنٹریکٹ واپس L1 میں منتقل کر سکتا ہوں؟ +### کیا میں اپنے ویسٹنگ کا کنٹریکٹ واپس L1 میں منتقل کر سکتا ہوں؟ ایسا کرنے کی ضرورت نہیں ہے کیونکہ آپ کا ویسٹنگ کا کنٹریکٹ ابھی بھی L1 میں ہے۔ جب آپ ٹرانسفر ٹولز استعمال کرتے ہیں، تو آپ صرف L2 میں ایک نیا کنٹریکٹ بناتے ہیں جو آپ کے L1 ویسٹنگ کنٹریکٹ سے منسلک ہوتا ہے، اور آپ دونوں کے درمیان GRT کو آگے پیچھے بھیج سکتے ہیں. -## شروع کرنے کے لیے مجھے اپنے ویسٹنگ کنٹریکٹ منتقل کرنے کی ضرورت کیوں ہے؟ +### شروع کرنے کے لیے مجھے اپنے ویسٹنگ کنٹریکٹ منتقل کرنے کی ضرورت کیوں ہے؟ -آپ کو ایک L2 ویسٹنگ کنٹریکٹ قائم کرنے کی ضرورت ہے تاکہ یہ اکاؤنٹ L2 پر آپ کے حصص یا ڈیلیگیشن کا مالک بن سکے۔ بصورت دیگر، آپ کے پاس ویسٹنگ کنٹریکٹ سے "فرار" ہوئے بغیر حصص/ڈیلیگیشن کو L2 میں منتقل کرنے کا کوئی طریقہ نہیں ہوگا. +آپ کو ایک L2 ویسٹنگ کنٹریکٹ قائم کرنے کی ضرورت ہے تاکہ یہ اکاؤنٹ L2 پر آپ کے سٹیک یا ڈیلیگیشن کا مالک بن سکے۔ بصورت دیگر، آپ کے پاس ویسٹنگ کنٹریکٹ سے "فرار" ہوئے بغیر سٹیک/ڈیلیگیشن کو L2 میں منتقل کرنے کا کوئی طریقہ نہیں ہوگا. -## کیا ہوتا ہے اگر میں اپنے کنٹریکٹ کو کیش آؤٹ کرنے کی کوشش کرتا ہوں جب یہ صرف جزوی طور پر ہوتا ہے؟ کیا یہ ممکن ہے؟ +### کیا ہوتا ہے اگر میں اپنے کنٹریکٹ کو کیش آؤٹ کرنے کی کوشش کرتا ہوں جب یہ صرف جزوی طور پر ہوتا ہے؟ کیا یہ ممکن ہے؟ یہ کوئی امکان نہیں ہے۔ آپ فنڈز واپس L1 میں منتقل کر سکتے ہیں اور انہیں وہاں سے نکال سکتے ہیں. -## اگر میں اپنے ویسٹنگ کا کنٹریکٹ L2 میں منتقل نہیں کرنا چاہتا تو کیا ہوگا؟ +### اگر میں اپنے ویسٹنگ کا کنٹریکٹ L2 میں منتقل نہیں کرنا چاہتا تو کیا ہوگا؟ -آپ L1 پر اسٹیک/ڈیلیگیٹنگ جاری رکھ سکتے ہیں۔ وقت گزرنے کے ساتھ، آپ Arbitrum پر پروٹوکول کے پیمانے کے طور پر وہاں انعامات کو فعال کرنے کے لیے L2 پر جانے پر غور کر سکتے ہیں۔ نوٹ کریں کہ منتقلی کے یہ ٹولز ان کنٹریکٹس کے لیے ہیں جنہیں پروٹوکول میں حصہ لینے اور ان کی نمائندگی کرنے کی اجازت ہے۔ اگر آپ کا کنٹریکٹ اسٹیکنگ یا ڈیلیگیٹنگ کی اجازت نہیں دیتا، یا منسوخی کے قابل ہے، تو پھر کوئی ٹرانسفر ٹول دستیاب نہیں ہے۔ دستیاب ہونے پر بھی آپ L1 سے اپنا GRT واپس لے سکیں گے. +آپ L1 پر سٹیک/ڈیلیگیٹنگ جاری رکھ سکتے ہیں۔ وقت گزرنے کے ساتھ، آپ Arbitrum پر پروٹوکول کے پیمانے کے طور پر وہاں انعامات کو فعال کرنے کے لیے L2 پر جانے پر غور کر سکتے ہیں۔ نوٹ کریں کہ ٹرانسفر ٹولز ان ویسٹنگ کنٹریکٹس کے لیے ہیں جنہیں پروٹوکول میں سٹیک کرنے اور ان کو ڈیلیگیٹ کرنے کی اجازت ہے۔ اگر آپ کا کنٹریکٹ سٹیکنگ یا ڈیلیگیٹنگ کی اجازت نہیں دیتا، یا منسوخی کے قابل ہے، تو پھر کوئی ٹرانسفر ٹول دستیاب نہیں ہے۔ دستیاب ہونے پر بھی آپ L1 سے اپنا GRT واپس لے سکیں گے. 
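As noted earlier in this FAQ, the vesting contract must hold a non-zero GRT balance before the L2 vesting contract can be initialized. A quick way to check that balance is to read it directly from the GRT token contract. This is an illustrative sketch only: it assumes Foundry's `cast`, a public mainnet RPC endpoint, and that the token address shown is mainnet GRT (verify it against the official contract listings before relying on it).

```bash
# Read the unstaked GRT balance held by a vesting contract on Ethereum mainnet.
GRT=0xc944E90C64B2c07662A292be6244BDf05Cda44a7   # assumed mainnet GRT token address - verify before use
VESTING_CONTRACT=0xYourVestingContractAddress     # placeholder
cast call "$GRT" "balanceOf(address)(uint256)" "$VESTING_CONTRACT" \
  --rpc-url https://cloudflare-eth.com
```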
diff --git a/website/pages/ur/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/ur/arbitrum/l2-transfer-tools-guide.mdx index 751ba3878121..66d375d0a80a 100644 --- a/website/pages/ur/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/ur/arbitrum/l2-transfer-tools-guide.mdx @@ -2,35 +2,35 @@ title: L2 ٹرانسفر ٹولز گائڈ --- -> L2 ٹرانسفر ٹولز ابھی تک جاری نہیں کیے گئے ہیں۔ ان کے 2023 کے موسم گرما میں دستیاب ہونے کی امید ہے. - گراف نے Arbitrum One پر L2 پر منتقل ہونا آسان کر دیا ہے۔ پروٹوکول کے شریک کے لیے، تمام نیٹ ورک کے شرکاء کے لیے L2 میں منتقلی کو ہموار بنانے کے لیے L2 ٹرانسفر ٹولز کا ایک سیٹ موجود ہے۔ یہ ٹولز آپ سے اس بات پر منحصر ہوں گے کہ آپ کیا منتقل کر رہے ہیں۔ ان ٹولز کے بارے میں اکثر پوچھے جانے والے سوالات کا جواب [L2 ٹرانسفر ٹولز اکثر پوچھے گئے سوالات](/arbitrum/l2-transfer-tools-faq). اکثر پوچھے جانے والے سوالات میں ٹولز کو استعمال کرنے کے طریقے، وہ کیسے کام کرتے ہیں، اور ان کو استعمال کرتے وقت ذہن میں رکھنے والی چیزوں کی گہرائی سے وضاحت پر مشتمل ہے۔ ## اپنے سب گراف کو Arbitrum (L2) میں کیسے منتقل کریں -## آپ کے سب گرافس کو منتقل کرنے کے فوائد + + +## اپنے سب گرافس منتقل کرنے کے فوائد -گراف کی کمیونٹی اور کور ڈویلپرز پچھلے سال سے Arbitrum پر جانے کے لیے [تیار کر رہے ہیں] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)۔ Arbitrum ایک لیئر 2 یا "L2" بلاکچین، ایتھیریم سے سیکیورٹی وراثت میں ملتی ہے لیکن گیس کی بہت کم فیس فراہم کرتا ہے۔ +گراف کی کمیونٹی اور بنیادی ڈویلپرز پچھلے سال سے Arbitrum پر جانے کے [تیار کر رہے ہیں](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305)۔ Arbitrum، ایک لیئر 2 یا "L2" بلاکچین، ایتھیریم سے سیکورٹی وراثت میں ملتی ہے لیکن گیس کی فیس بہت کم فراہم کرتی ہے. -جب آپ گراف نیٹ ورک پر اپنا سب گراف شائع یا اپ گریڈ کرتے ہیں، آپ پروٹوکول پر سمارٹ کنٹریکٹ کے ساتھ تعامل کر رہے ہوتے ہیں اور اس کے لیے ایتھیریم کا استعمال کرتے ہوئے گیس کی ادائیگی کی ضرورت ہوتی ہے۔ اپنا سب گراف Arbitrum پر منتقل کر کے، آپ کے سب گراف آئندہ کسی بھی اپ ڈیٹ کے لیے گیس کی بہت کم فیس درکار ہو گی۔ کم فیس، اور حقیقت یہ ہے کہ L2 پر کیوریشن بانڈنگ منحنی خطوط فلیٹ ہیں، کیوریٹرز کے لیے آپ کے سب گراف پر کیوریٹ کرنا آسان بناتے ہیں، جس سے آپ کے سب گراف پر انڈیکسرز کے لیے انعامات بڑھ جاتے ہیں۔ یہ کم لاگت والا ماحول بھی انڈیکسرز کے لیے آپ کے سب گراف کو انڈیکس کرنا اور پیش کرنا سستا بناتا ہے۔ Arbitrum پر انڈیکسنگ کے انعامات بڑھیں گے اور آنے والے مہینوں میں ایتھیریم مین نیٹ پر کم ہوں گے، اس لیے زیادہ سے زیادہ انڈیکسرز اپنے حصص کو منتقل کر رہے ہوں گے اور L2 پر اپنی کارروائیاں ترتیب دیں گے۔ +جب آپ گراف نیٹ ورک پر اپنا سب گراف شائع یا اپ گریڈ کرتے ہیں، آپ پروٹوکول پر سمارٹ کنٹریکٹ کے ساتھ تعامل کر رہے ہوتے ہیں اور اس کے لیے ایتھیریم کا استعمال کرتے ہوئے گیس کی ادائیگی کی ضرورت ہوتی ہے۔ اپنا سب گراف Arbitrum پر منتقل کر کے، آپ کے سب گراف کی آئندہ کسی بھی اپ ڈیٹ کے لیے گیس کی بہت کم فیس درکار ہو گی۔ کم فیس، اور حقیقت یہ ہے کہ L2 پر کیوریشن بانڈنگ منحنی خطوط فلیٹ ہیں، کیوریٹرز کے لیے آپ کے سب گراف پر کیوریٹ کرنا آسان بناتے ہیں، جس سے آپ کے سب گراف پر انڈیکسرز کے لیے انعامات بڑھ جاتے ہیں۔ یہ کم لاگت والا ماحول بھی انڈیکسرز کے لیے آپ کے سب گراف کو انڈیکس کرنا اور پیش کرنا سستا بناتا ہے۔ Arbitrum پر انڈیکسنگ کے انعامات بڑھیں گے اور آنے والے مہینوں میں ایتھیریم مین نیٹ پر کم ہوں گے، اس لیے زیادہ سے زیادہ انڈیکسرز اپنے حصص کو منتقل کر رہے ہوں گے اور L2 پر اپنی کارروائیاں ترتیب دیں گے. 
-## یہ سمجھنا کہ سگنل کے ساتھ کیا ہوتا ہے، آپ کا L1 سب گراف اور کیوری لنکس
+## یہ سمجھنا کہ سگنل کے ساتھ کیا ہوتا ہے، آپ کا L1 سب گراف اور کیوری URLs

-سب گراف کو Arbitrum پر منتقل کرنا Arbitrum GRT بریک کا استعمال کرتا ہے، جو بدلے میں مقامی Arbitrum بریج استعمال کرتا ہے، سب گراف کو L2 پر بھیجنے کے لیے۔ "منتقلی" مین نیٹ پر سب گراف کو فرسودہ کر دے گی اور بریج کا استعمال کرتے ہوئے L2 پر سب گراف کو دوبارہ بنانے کے لیے معلومات بھیجے گی۔ اس میں سب گراف کے مالک کا سگنل شدہ GRT بھی شامل ہوگا، جو پل کے لیے منتقلی کو قبول کرنے کے لیے صفر سے زیادہ ہونا چاہیے۔
+سب گراف کو Arbitrum پر منتقل کرنا Arbitrum GRT بریج کا استعمال کرتا ہے، جو بدلے میں مقامی Arbitrum بریج استعمال کرتا ہے، سب گراف کو L2 پر بھیجنے کے لیے۔ "منتقلی" مین نیٹ پر سب گراف کو فرسودہ کر دے گی اور بریج کا استعمال کرتے ہوئے L2 پر سب گراف کو دوبارہ بنانے کے لیے معلومات بھیجے گی۔ اس میں سب گراف کے مالک کا سگنل شدہ GRT بھی شامل ہوگا، جو بریج کے لیے منتقلی کو قبول کرنے کے لیے صفر سے زیادہ ہونا چاہیے.

-جب آپ سب گراف کو منتقل کرنے کا انتخاب کرتے ہیں، یہ تمام سب گراف کے کیوریشن سگنلز کو GRT میں تبدیل کر دے گا۔ یہ مین نیٹ پر سب گراف کو "فرسودہ" کرنے کے مترادف ہے۔ آپ کے کیوریشن کے مطابق GRT سب گراف کے ساتھ L2 کو بھیجا جائے گا، جہاں ان کا استعمال آپ کی جانب سے سگنل دینے کے لیے کیا جائے گا۔
+جب آپ سب گراف کو منتقل کرنے کا انتخاب کرتے ہیں، یہ تمام سب گراف کے کیوریشن سگنلز کو GRT میں تبدیل کر دے گا۔ یہ مین نیٹ پر سب گراف کو "فرسودہ" کرنے کے مترادف ہے۔ آپ کے کیوریشن کے مطابق GRT سب گراف کے ساتھ L2 کو بھیجا جائے گا، جہاں ان کا استعمال آپ کی جانب سے سگنل دینے کے لیے کیا جائے گا.

-دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا اپنے GRT کا حصہ لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے۔ اگر ایک سب گراف کا مالک اپنا سب گراف L2 میں منتقل نہیں کرتا ہے اور اسے کنٹریکٹ کال کے ذریعے دستی طور پر فرسودہ کرتا ہے، تو کیوریٹرز کو مطلع کیا جائے گا اور وہ اپنا کیوریشن واپس لے سکیں گے۔
+دوسرے کیوریٹرز یہ انتخاب کر سکتے ہیں کہ آیا اپنے GRT کا حصہ لینا ہے، یا اسی سب گراف پر اسے L2 پر منٹ سگنل پر منتقل کرنا ہے۔ اگر ایک سب گراف کا مالک اپنا سب گراف L2 میں منتقل نہیں کرتا ہے اور اسے کنٹریکٹ کال کے ذریعے دستی طور پر فرسودہ کرتا ہے، تو کیوریٹرز کو مطلع کیا جائے گا اور وہ اپنا کیوریشن واپس لے سکیں گے.

-جیسے ہی سب گراف منتقل ہو جائے گا، چونکہ تمام کیوریشن GRT میں تبدیل ہو چکی ہے، انڈیکسرزکو سب گراف کو انڈیکس کرنے کے لیے مزید انعامات نہیں ملیں گے۔ البتہ، ایسے انڈیکسرز ہوں گے جو 1) منتقل ہونے والے سب گرافس کو 24 گھنٹوں تک پیش کرتے رہیں گے، اور 2) فوری طور پر L2 پر سب گراف انڈیکسنگ شروع کر دیں گے۔ چونکہ ان انڈیکسرز کے پاس پہلے سے ہی انڈیکسڈ سب گراف موجود ہے، اس لیے سب گراف کے مطابقت پذیر ہونے کا انتظار کرنے کی ضرورت نہیں ہے، اور L2 سب گراف سے تقریباً فوراً کیوری کرنا ممکن ہو گا۔
+جیسے ہی سب گراف منتقل ہو جائے گا، چونکہ تمام کیوریشن GRT میں تبدیل ہو چکی ہے، انڈیکسرز کو سب گراف کو انڈیکس کرنے کے لیے مزید انعامات نہیں ملیں گے۔ البتہ، ایسے انڈیکسرز ہوں گے جو 1) منتقل ہونے والے سب گرافس کو 24 گھنٹوں تک پیش کرتے رہیں گے، اور 2) فوری طور پر L2 پر سب گراف انڈیکسنگ شروع کر دیں گے۔ چونکہ ان انڈیکسرز کے پاس پہلے سے ہی انڈیکسڈ سب گراف موجود ہے، اس لیے سب گراف کے مطابقت پذیر ہونے کا انتظار کرنے کی ضرورت نہیں ہے، اور L2 سب گراف سے تقریباً فوراً کیوری کرنا ممکن ہو گا.
-L2 سب گراف سے متعلق سوالات ایک مختلف لنک پر کرنے کی ضرورت ہوگی (`arbitrum-gateway.thegraph.com` پر)، لیکن L1 لنک کم از کم 48 گھنٹے تک کام کرتا رہے گا۔ اس کے بعد، L1 گیٹ وے کیوریز کو L2 گیٹ وے (کچھ وقت کے لیے) پر بھیجے گا، لیکن اس سے تاخیر میں اضافہ ہو جائے گا، اس لیے یہ تجویز کیا جاتا ہے کہ آپ اپنے تمام کیوریز کو جلد از جلد نئے لنک میں تبدیل کر دیں۔ +L2 سب گراف سے متعلق کیوریز ایک مختلف لنک پر کرنے کی ضرورت ہوگی (`arbitrum-gateway.thegraph.com` پر)، لیکن L1 لنک کم از کم 48 گھنٹے تک کام کرتا رہے گا۔ اس کے بعد، L1 گیٹ وے کیوریز کو L2 گیٹ وے (کچھ وقت کے لیے) پر بھیجے گا، لیکن اس سے تاخیر میں اضافہ ہو جائے گا، اس لیے یہ تجویز کیا جاتا ہے کہ آپ اپنے تمام کیوریز کو جلد از جلد نئے لنک میں تبدیل کر دیں۔ -## اپنے L2 والیٹ کا انتخاب کرنا +## اپنا L2 والیٹ منتخب کرنا -جب آپ نے اپنا سب گراف مین نیٹ پر شائع کر دیں، آپ نے سب گراف بنانے کے لیے کنیکٹڈ والیٹ کا استعمال کیا, اور یہ والیٹ NFT کا مالک ہے جو اس سب گراف کی نمائندگی کرتا ہے اور آپ کو اپ ڈیٹس شائع کرنے کی اجازت دیتا ہے۔ +کب آپ اپنا سب گراف مین نیٹ پر شائع کرتے ہیں، آپ نے اپنا سب گراف بنانے کے لیے کنیکٹڈ والیٹ کا استعمال کیا، اور یہ والیٹ NFT کا مالک ہے جو اس سب گراف کی نمائندگی کرتا ہے اور آپ کو اپ ڈیٹس شائع کرنے کی اجازت دیتا ہے۔ جب سب گراف کو Arbitrum پر منتقل کر رہے ہوں، آپ مختلف والیٹ استعمال کر سکتے ہیں جو L2 پر اس سب گراف NFT کا مالک ہو گا۔ @@ -42,25 +42,25 @@ L2 سب گراف سے متعلق سوالات ایک مختلف لنک پر کر ## منتقلی کی تیاری: کچھ ایتھیریم بریج کرنا -سب گراف کو منتقل کرنا بریج کے ذریعے ٹرانزیکشن بھیجنا، اور پھر Arbitrum پر ایک اور ٹرانزیکشن کو انجام دینا۔ پہلی ٹرانزیکشن مین نیٹ پر ایتھیریم کا استعمال کرتی ہے، اور L2 پر پیغام موصول ہونے پر گیس کی ادائیگی کے لیے کچھ ایتھیریم شامل کرتا ہے۔ تاہم، اگر یہ گیس ناکافی ہے، تو آپ کو ٹرانزیکشن کی دوبارہ کوشش کرنی ہوگی اور براہ راست L2 پر گیس کی ادائیگی کرنی ہوگی (یہ ذیل میں "مرحلہ 3: منتقلی کی تصدیق" ہے)۔ یہ مرحلہ **منتقلی شروع کرنے کے 7 دنوں کے اندر انجام دیا جانا چاہیے**۔ مزید یہ کہ، دوسری ٹرانزیکشن ("مرحلہ 4: L2 پر منتقلی کو ختم کرنا") براہ راست Arbitrum پر کیا جائے گا۔ ان وجوہات کی بناء پر، آپ کو Arbitrum والیٹ پر کچھ ایتھیریم کی ضرورت ہوگی۔ اگر آپ ملٹی سگ یا سمارٹ کنٹریکٹ اکاؤنٹ استعمال کر رہے ہیں، تو ایتھیریم کو باقاعدہ (EOA) والیٹ میں ہونا چاہیے جسے آپ ٹرانزیکشن کو انجام دینے کے لیے استعمال کر رہے ہیں، نہ کہ ملٹی سگ والیٹ پر۔ +سب گراف کی منتقلی میں بریج کے ذریعے ٹرانزیکشن بھیجنا شامل ہے، اور پھر Arbitrum پر ایک اور ٹرانزیکشن کو انجام دینا۔ پہلی ٹرانزیکشن مین نیٹ پر ایتھیریم کا استعمال کرتی ہے، اور L2 پر پیغام موصول ہونے پر گیس کی ادائیگی کے لیے کچھ ایتھیریم شامل کرتا ہے۔ تاہم، اگر یہ گیس ناکافی ہے، تو آپ کو ٹرانزیکشن کی دوبارہ کوشش کرنی ہوگی اور براہ راست L2 پر گیس کی ادائیگی کرنی ہوگی (یہ ذیل میں "مرحلہ 3: منتقلی کی تصدیق" ہے)۔ یہ مرحلہ **منتقلی شروع کرنے کے 7 دنوں کے اندر انجام دیا جانا چاہیے**۔ مزید یہ کہ، دوسری ٹرانزیکشن ("مرحلہ 4: L2 پر منتقلی کو ختم کرنا") براہ راست Arbitrum پر کیا جائے گا۔ ان وجوہات کی بناء پر، آپ کو Arbitrum والیٹ پر کچھ ایتھیریم کی ضرورت ہوگی۔ اگر آپ ملٹی سگ یا سمارٹ کنٹریکٹ اکاؤنٹ استعمال کر رہے ہیں، تو ایتھیریم کو باقاعدہ (EOA) والیٹ میں ہونا چاہیے جسے آپ ٹرانزیکشن کو انجام دینے کے لیے استعمال کر رہے ہیں، نہ کہ ملٹی سگ والیٹ پر۔ -آپ ایتھیریم کو کچھ ایکسچینجیز سے خرید سکتے ہیں اور سیدھا Arbitrum میں مگوا سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر Arbitrum بریج کا استعمال کرتے ہوئے کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io)۔ چونکہ Arbitrum پر گیس فیس کم ہوتی ہے، آپ کو صرف چھوٹی سی مقدار کی ضرورت پڑے گی۔ یہ تجویز کیا جاتا ہے کہ آپ اپنی ٹرانزیکشن کی منظوری کے لیے کم حد (0. 
01 ایتھیریم) سے شروع کریں۔
+آپ کچھ ایکسچینجیز سے ایتھیریم خرید سکتے ہیں اور سیدھا اسے Arbitrum میں منگوا سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر Arbitrum بریج کا استعمال کرتے ہوئے بریج کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io)۔ چونکہ Arbitrum پر گیس فیس کم ہوتی ہے، آپ کو صرف چھوٹی سی مقدار کی ضرورت پڑے گی۔ یہ تجویز کیا جاتا ہے کہ آپ اپنی ٹرانزیکشن کی منظوری کے لیے کم حد (مثال کے طور پر 0.01 ایتھیریم) سے شروع کریں۔

## سب گراف ٹرانسفر ٹول تلاش کرنا

آپ L2 ٹرانسفر ٹول تلاش کر سکتے ہیں جب آپ سب گراف سٹوڈیو پر اپنا سب گراف کا پیج دیکھ رہے ہوں گے۔

-![transfer tool](/img/L2-transfer-tool1.png)
+![ٹرانسفر ٹول](/img/L2-transfer-tool1.png)

-یہ ایکسپلورر پر بھی دستیاب ہے اگر آپ اس والیٹ سے کنیکٹڈ ہیں جس کے پاس سب گراف ہے اور ایکسپلورر پر اس سب گراف کے صفحہ پر:
+یہ ایکسپلورر پر بھی دستیاب ہے اگر آپ اس والیٹ سے کنیکٹڈ ہیں جس کے پاس سب گراف ہے اور ایکسپلورر پر اس سب گراف کے پیج پر:

-![Transferring to L2](/img/transferToL2.png)
+![L2 پر منتقل کرنا](/img/transferToL2.png)

-ٹرانسفر ٹو L2 بٹن پر کلک کرنے سے ٹرانسفر ٹول کھل جائے گا جہاں آپ منتقلی کا عمل شروع کر سکتے ہیں۔
+L2 پر منتقل کرنے کے بٹن پر کلک کرنے سے ٹرانسفر ٹول کھل جائے گا جہاں آپ منتقلی کا عمل شروع کر سکتے ہیں۔

-## مرحلہ 1: منتقلی کا عمل شروع کرنا
+## مرحلہ 1: منتقلی شروع کرنا

-منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہوگا کہ L2 پر کون سا ایڈریس سب گراف کا مالک ہوگا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ پرزور مشورہ دیا جاتا ہے کہ Arbitrum پر پہلے سے ہی گیس کے لیے کچھ ایتھیریم رکھیں (دیکھیں "منتقلی کی تیاری: کچھ ایتھیریم بریج کرنا" اوپر)۔
+منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہو گا کہ L2 پر کون سا ایڈریس سب گراف کا مالک ہو گا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ پرزور مشورہ دیا جاتا ہے کہ Arbitrum پر پہلے سے ہی گیس کے لیے کچھ ایتھیریم رکھیں (دیکھیں "منتقلی کی تیاری: کچھ ایتھیریم بریج کرنا" اوپر)۔

یہ بھی نوٹ کریں کہ سب گراف کی منتقلی کے لیے سب گراف پر اسی اکاؤنٹ کے ساتھ سگنل کی غیر صفر مقدار کی ضرورت ہوتی ہے جس کے پاس سب گراف ہے۔ اگر آپ نے سب گراف پر اشارہ نہیں کیا ہے تو آپ کو تھوڑا سا کیوریشن شامل کرنا پڑے گا (ایک چھوٹی سی رقم جیسے ایک GRT شامل کرنا کافی ہوگا)۔

@@ -68,98 +68,98 @@ L2 سب گراف سے متعلق سوالات ایک مختلف لنک پر کر

اگر آپ اس قدم پر عمل کرتے ہیں، تو **یقینی بنائیں کہ آپ 7 دنوں سے بھی کم وقت میں مرحلہ 3 مکمل کرنے تک آگے بڑھیں، ورنہ سب گراف اور آپ کا سگنل GRT ضائع ہو جائے گا۔** یہ اس وجہ سے ہے کہ L1-L2 پیغام رسانی Arbitrum پر کیسے کام کرتی ہے: پیغامات جو بریج کے ذریعے بھیجے گئے "دوبارہ کوشش کے قابل ٹکٹ" ہیں جن پر عمل درآمد 7 دنوں کے اندر ہونا ضروری ہے، اور اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو ابتدائی عمل درآمد کے لیے دوبارہ کوشش کی ضرورت پڑ سکتی ہے۔

-![Start the trnasfer to L2](/img/startTransferL2.png)
+![L2 میں منتقلی شروع کریں](/img/startTransferL2.png)

## مرحلہ 2: سب گراف کے L2 تک پہنچنے کا انتظار کرنا

-منتقلی شروع کرنے کے بعد، وہ پیغام جو آپ کا L1 سب گراف L2 کو بھیجتا ہے اسے آربٹرم بریج کے ذریعے پھیلانا چاہیے۔ اس میں تقریباً 20 منٹ لگتے ہیں (بریج مین نیٹ بلاک کا انتظار کرتا ہے جس میں ٹرانزیکشن کو ممکنہ چین کی بحالی سے "محفوظ" رکھا جاتا ہے)۔
+منتقلی شروع کرنے کے بعد، وہ پیغام جو آپ کا L1 سب گراف L2 کو بھیجتا ہے اسے Arbitrum بریج کے ذریعے پھیلانا چاہیے۔ اس میں تقریباً 20 منٹ لگتے ہیں (بریج مین نیٹ بلاک کا انتظار کرتا ہے جس میں ٹرانزیکشن کو ممکنہ چین کی بحالی سے "محفوظ" رکھا جاتا ہے)۔

-انتظار کا وقت ختم ہونے کے بعد، Arbitrum L2 کنٹریکٹس پر منتقلی کو خود کار طریقے سے انجام دینے کی کوشش کرے گا۔
+انتظار کا وقت ختم ہونے کے بعد، Arbitrum L2 کنٹریکٹس پر منتقلی کو خودکار طریقے سے انجام دینے کی کوشش کرے گا۔

-![Wait screen](/img/screenshotOfWaitScreenL2.png)
+![ویٹ 
سکرین](/img/screenshotOfWaitScreenL2.png) ## مرحلہ 3: منتقلی کی تصدیق کرنا زیادہ تر معاملات میں، یہ مرحلہ خود بخود عمل میں آجائے گا کیونکہ مرحلہ 1 میں شامل L2 گیس اس ٹرانزیکشن کو انجام دینے کے لیے کافی ہونی چاہیے جو Arbitrum کنٹریکٹس پر سب گراف وصول کرتی ہے۔ تاہم، بعض صورتوں میں، یہ ممکن ہے کہ Arbitrum پر گیس کی قیمتوں میں اضافہ اس خود کار طریقے سے عمل کو ناکام بنادے۔ اس صورت میں، "ٹکٹ" جو آپ کے سب گراف کو L2 پر بھیجتا ہے زیر التواء رہے گا اور اسے 7 دنوں کے اندر دوبارہ کوشش کرنے کی ضرورت ہوگی۔ -اس صورت میں، آپ کو L2 والیٹ کنیکٹ کرنے کی ضرورت پڑے گی جس میں Arbitrum میں تھوڑا ایتھیریم موجود ہو، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں، اور "کنفرم ٹرانسفر" کو ٹرانزیکشن دہرانے کے لیے دبائیں۔ +اس صورت میں، آپ کو L2 والیٹ کنیکٹ کرنے کی ضرورت پڑے گی جس میں Arbitrum میں تھوڑا ایتھیریم موجود ہو، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں، اور "کنفرم ٹرانسفر" کو ٹرانزیکشن دہرانے کے لیے دبائیں. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![L2 میں منتقلی کی تصدیق کریں](/img/confirmTransferToL2.png) -## مرحلہ 4: L2 پر منتقلی ختم کرنا +## مرحلہ 4: L2 پر منتقلی ختم کریں -اس موقع پر، آپ کا سب گراف اور GRT آپ کے Arbitrum میں موصول ہو چکے ہیں، لیکن سب گراف ابھی تک شائع نہیں ہوا۔ آپ کو L2 والیٹ کا استعمال کرتے ہوئے منسلک کرنے کی ضرورت ہوگی جسے آپ نے وصول کرنے والے والیٹ کے طور پر منتخب کیا ہے، اپنے والیٹ نیٹ ورک کو Arbitrum میں تبدیل کریں، اور "پبلش سب گراف" پر کلک کریں۔ +اس موقع پر، آپ کا سب گراف اور GRT آپ کے Arbitrum میں موصول ہو چکے ہیں، لیکن سب گراف ابھی تک شائع نہیں ہوا۔ آپ کو L2 والیٹ کا استعمال کرتے ہوئے منسلک کرنے کی ضرورت ہو گی جسے آپ نے وصول کرنے والے والیٹ کے طور پر منتخب کیا ہے، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں ، اور "سب گراف شائع کریں" پر کلک کریں۔ -![Publish the subgraph](/img/publishSubgraphL2TransferTools.png) +![سب گراف شائع کریں](/img/publishSubgraphL2TransferTools.png) ![سب گراف کے شائع ہونے کا انتظار کریں](/img/waitForSubgraphToPublishL2TransferTools.png) -یہ سب گراف کو شائع کرے گا تاکہ انڈیکسرز جو Arbitrum پر کام کر رہے ہیں اسے پیش کرنا شروع کر سکیں۔ یہ GRT کا استعمال کرتے ہوئے کیوریشن سگنل بھی دے گا جو L1 سے منتقل کیا گیا تھا۔ +یہ سب گراف کو شائع کرے گا تا کہ انڈیکسرز جو Arbitrum پر کام کر رہے ہیں اسے پیش کرنا شروع کر سکیں۔ یہ GRT کا استعمال کرتے ہوئے کیوریشن سگنل بھی دے گا جو L1 سے منتقل کیا گیا ہے. ## مرحلہ 5: کیوری لنک اپ ڈیٹ کریں -آپ کا سب گراف کامیابی کے ساتھ Arbitrum پر منتقل کر دیا گیا ہے! سب گراف کو کیوری کرنے کے لیے، نیا لنک ہو گا: +آپ کا سب گراف کامیابی کے ساتھ Arbitrum پر منتقل کر دیا گیا ہے! سب گراف کو کیوری کرنے کے لیے، نیا لنگ ہو گا: `https://arbitrum-gateway.thegraph.com/api/[api-key]/subgraphs/id/[l2-subgraph-id]` -نوٹ کریں کہ Arbitrum پر سب گراف ID آپ کے مین نیٹ پر موجود ایک سے مختلف ہوگی، لیکن آپ اسے ہمیشہ ایکسپلورر یا سٹوڈیو پر تلاش کر سکتے ہیں۔ جیسا کہ اوپر بتایا گیا ہے (دیکھیں "سگنل کے ساتھ کیا ہوتا ہے، آپ کے L1 سب گراف اور کیوری والے لنکس") پرانا L1 لنک تھوڑی دیر کے لیے سپورٹ کیا جائے گا، لیکن آپ کو اپنی کیوریز کو نئے ایڈریس پر تبدیل کر دینا چاہیے جیسے ہی سب گراف کی مطابقت پذیری L2 پر ہو جائے گی۔ +نوٹ کریں کہ Arbitrum پر سب گراف ID آپ کے مین نیٹ پر موجود ایک سے مختلف ہوگی، لیکن آپ اسے ہمیشہ ایکسپلورر یا سٹوڈیو پر تلاش کر سکتے ہیں۔ جیسا کہ اوپر بتایا گیا ہے (دیکھیں "سگنل کے ساتھ کیا ہوتا ہے، آپ کے L1 سب گراف اور کیوری والے لنکس") پرانا L1 لنک تھوڑی دیر کے لیے سپورٹ کیا جائے گا، لیکن آپ کو اپنی کیوریز کو نئے ایڈریس پر تبدیل کر دینا چاہیے جیسے ہی سب گراف کی مطابقت پذیری L2 پر ہو جائے گی. 
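Once the subgraph is synced on L2, one quick way to confirm the new URL is working is a plain GraphQL request against the Arbitrum gateway. The sketch below is only an example: `API_KEY` and `L2_SUBGRAPH_ID` are placeholders for your own values, and the `_meta` query simply returns the latest indexed block rather than data specific to your schema.

```bash
# Query the transferred subgraph through the Arbitrum gateway.
API_KEY="your-api-key"                 # placeholder - create one in Subgraph Studio
L2_SUBGRAPH_ID="your-l2-subgraph-id"   # placeholder - the new ID shown in Explorer/Studio
curl -X POST \
  -H "Content-Type: application/json" \
  -d '{"query": "{ _meta { block { number } } }"}' \
  "https://arbitrum-gateway.thegraph.com/api/$API_KEY/subgraphs/id/$L2_SUBGRAPH_ID"
```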
-## اپنی کیوریشن Arbitrum (L2) پر کیسے منتقل کی جائے
+## اپنی کیوریشن کو کیسے Arbitrum (L2) پر منتقل کیا جائے

## یہ سمجھنا کہ L2 میں سب گراف کی منتقلی پر کیوریشن کا کیا ہوتا ہے

-جب سب گراف کا مالک سب گراف کو Arbitrum پر منتقل کرتا ہے، سب گراف کے تمام سگنلز اسی وقت GRT میں تبدیل ہو جاتے ہیں۔ یہ "آٹو مائیگریٹڈ" سگنل پر لاگو ہوتا ہے، یعنی وہ سگنل جو سب گراف ورژن یا تعیناتی کے لیے مخصوص نہیں ہے لیکن یہ سب گراف کے تازہ ترین ورژن کی پیروی کرتا ہے۔
+جب سب گراف کا مالک سب گراف کو Arbitrum پر منتقل کرتا ہے، سب گراف کے تمام سگنلز اسی وقت GRT میں تبدیل ہو جاتے ہیں۔ یہ "آٹو مائیگریٹڈ" سگنل پر لاگو ہوتا ہے، یعنی وہ سگنل جو سب گراف ورژن یا تعیناتی کے لیے مخصوص نہیں ہے لیکن یہ سب گراف کے تازہ ترین ورژن کی پیروی کرتا ہے۔

سگنل سے GRT میں یہ تبدیلی وہی ہے جیسا کہ اگر سب گراف کے مالک نے L1 میں سب گراف کو فرسودہ کیا تو کیا ہوگا۔ جب سب گراف کو فرسودہ یا منتقل کیا جاتا ہے، تو تمام کیوریشن سگنل بیک وقت "برن" ہو جاتے ہیں (کیوریشن بانڈنگ کریو کا استعمال کرتے ہوئے) اور نتیجے میں GRT سمارٹ کنٹریکٹ GNS کے پاس ہوتا ہے (یہ وہ کنٹریکٹ ہے جو سب گراف اپ گریڈ اور خودکار منتقلی سگنل کو ہینڈل کرتا ہے)۔ اس لیے اس سب گراف پر ہر کیوریٹر کا دعویٰ ہے کہ وہ سب گراف کے حصص کی مقدار کے متناسب GRT پر ہے۔ سب گراف کے مالک کے مطابق ان GRT کا ایک حصہ سب گراف کے ساتھ L2 کو بھیجا جاتا ہے۔

-اس مقام پر، کیوریٹڈ GRT مزید کیوری کی فیس جمع نہیں کرے گا، لہذا کیوریٹرز اپنا GRT واپس لینے یا اسے L2 پر اسی سب گراف میں منتقل کرنے کا انتخاب کر سکتے ہیں، جہاں اسے نئے کیوریشن سگنل کے لیے استعمال کیا جا سکتا ہے۔ ایسا کرنے میں کوئی جلدی نہیں ہے کیونکہ GRT غیر معینہ مدت کے لیے مدد کی جا سکتی ہے اور ہر کسی کو ان کے حصص کے متناسب رقم ملتی ہے، چاہے وہ ایسا کرتے ہی کیوں نہ ہوں۔
+اس مقام پر، کیوریٹڈ GRT مزید کیوری کی فیس جمع نہیں کرے گا، لہذا کیوریٹرز اپنا GRT واپس لینے یا اسے L2 پر اسی سب گراف میں منتقل کرنے کا انتخاب کر سکتے ہیں، جہاں اسے نئے کیوریشن سگنل کے لیے استعمال کیا جا سکتا ہے۔ ایسا کرنے میں کوئی جلدی نہیں ہے کیونکہ GRT غیر معینہ مدت کے لیے مدد کی جا سکتی ہے اور ہر کسی کو ان کے حصص کے متناسب رقم ملتی ہے، چاہے وہ ایسا کرتے ہی کیوں نہ ہوں۔

-## اپنے L2 والیٹ کا انتخاب کرنا
+## اپنا L2 والیٹ منتخب کرنا

-اگر آپ اپنے کیوریٹڈ GRT کو L2 پر منتقل کرنے کا فیصلہ کرتے ہیں، آپ الگ والیٹ کا انتخاب کر سکتے ہیں جو کیوریشن سگنل کا مالک ہوگا۔
+اگر آپ اپنے کیوریٹڈ GRT کو L2 پر منتقل کرنے کا فیصلہ کرتے ہیں، آپ الگ والیٹ کا انتخاب کر سکتے ہیں جو کیوریشن سگنل کا مالک ہو گا۔

اگر آپ میٹا ماسک کی طرح "عام" والیٹ استعمال کر رہے ہیں (ایک بیرونی ملکیتی اکاؤنٹ یا EOA، یعنی ایک والیٹ جو سمارٹ کنٹریکٹ نہیں ہے)، تو یہ اختیاری ہے اور یہ نصیحت کی جاتی ہے کہ وہی کیوریٹر ایڈریس استعمال کیا جائے جو L1 میں استعمال کیا تھا۔

-اگر آپ سمارٹ کنٹریکٹ والیٹ استعمال کر رہے ہیں، جیسے ملٹی سگ(مثال کے طور پر سیف)، پھر کوئی الک والیٹ استعمال کرنا ضروری ہو گا، کیوں کے زیادہ امکان ہے کہ یہ اکاؤنٹ صرف مین نیٹ پر ہے اور آپ اس والیٹ کا استعمال کرتے ہوئے Arbitrum پر کوئی ٹرانزیکشن نہیں کر پائیں گے۔ اگر آپ سمارٹ کنٹریکٹ یا ملٹی سگ کا استعمال جاری رکھنا چاہتے ہیں، Arbitrum پر نیا والیٹ بنائیں اور اس کا ایڈریس L2 وصول کرنے والے والیٹ ایڈریس کے طور پر استعمال کریں۔
+اگر آپ سمارٹ کنٹریکٹ والیٹ استعمال کر رہے ہیں، جیسے ملٹی سگ (مثال کے طور پر سیف)، پھر کوئی الگ والیٹ استعمال کرنا ضروری ہو گا، کیوں کے زیادہ امکان ہے کہ یہ اکاؤنٹ صرف مین نیٹ پر ہے اور آپ اس والیٹ کا استعمال کرتے ہوئے Arbitrum پر کوئی ٹرانزیکشن نہیں کر پائیں گے۔ اگر آپ سمارٹ کنٹریکٹ یا ملٹی سگ کا استعمال جاری رکھنا چاہتے ہیں، Arbitrum پر نیا والیٹ بنائیں اور اس کا ایڈریس L2 وصول کرنے والے والیٹ ایڈریس کے طور پر استعمال کریں۔

-**یہ بہت ضروری ہے کہ آپ وہی والیٹ استعمال کریں جس پر آپ کا کنٹرول ہے، اور جو Arbitrum پر ٹرانزیکشنز کر پائیں، کیونکہ بصورت دیگر 
کیوریشن ضائع ہو جائے گی اور بازیافت نہیں ہو سکتی۔**
+**یہ بہت ضروری ہے کہ آپ وہی والیٹ استعمال کریں جس پر آپ کا کنٹرول ہے، اور جو Arbitrum پر ٹرانزیکشن کر پائیں، کیونکہ بصورت دیگر کیوریشن ضائع ہو جائے گی اور بازیافت نہیں ہو سکتی۔**

-## کیوریشن کو L2 پر بھیجنا: مرحلہ 1
+## کیوریشن سگنل L2 پر بھیجنا: مرحلہ 1

-منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہوگا کہ L2 پر کیوریشن کا کون سا ایڈریس ہوگا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ تجویز کی جاتی ہے کہ اگر آپ کو L2 پر پیغام کے نفاذ کی دوبارہ کوشش کرنے کی ضرورت ہو تو Arbitrum پر پہلے سے ہی بریج شدہ گیس کے لیے کچھ ایتھیریم رکھیں۔ آپ کچھ ایکسچینجز پر ایتھیریم خرید سکتے ہیں اور اسے براہ راست Arbitrum میں واپس لے سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر بھیجنے کے لیے Arbitrum برج کا استعمال کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io) - چونکہ Arbitrum پر گیس کی فیسیں بہت کم ہیں، آپ کو صرف تھوڑی سی رقم کی ضرورت ہوگی، جیسے۔ 0.01 ایتھیریم شاید کافی سے زیادہ ہو گا۔
+منتقلی شروع کرنے سے پہلے، آپ کو یہ فیصلہ کرنا ہوگا کہ L2 پر کیوریشن کا کون سا ایڈریس ہوگا (اوپر "اپنے L2 والیٹ کا انتخاب" دیکھیں)، اور یہ تجویز کی جاتی ہے کہ اگر آپ کو L2 پر پیغام کے نفاذ کی دوبارہ کوشش کرنے کی ضرورت ہو تو Arbitrum پر پہلے سے ہی بریج شدہ گیس کے لیے کچھ ایتھیریم رکھیں۔ آپ کچھ ایکسچینجز پر ایتھیریم خرید سکتے ہیں اور اسے براہ راست Arbitrum میں واپس لے سکتے ہیں، یا آپ ایتھیریم کو مین نیٹ والیٹ سے L2 پر بھیجنے کے لیے Arbitrum بریج کا استعمال کر سکتے ہیں: [bridge.arbitrum.io](http://bridge.arbitrum.io) - چونکہ Arbitrum پر گیس کی فیسیں بہت کم ہیں، آپ کو صرف تھوڑی سی رقم کی ضرورت ہوگی، مثال کے طور پر 0.01 ایتھیریم شاید کافی سے زیادہ ہو گا۔

-اگر جو سب گراف آپ کیوریٹ کر رہے ہیں L2 پر منتقل ہو گیا ہے، آپ ایکسپلورر پر ایک میسج دیکھیں گے جو بتا رہا ہو گا کہ آپ منتقل ہوئے سب گراف پر کیوریٹ کر رہے ہیں۔
+اگر جو سب گراف آپ کیوریٹ کر رہے ہیں L2 پر منتقل ہو گیا ہے، آپ ایکسپلورر پر ایک میسج دیکھیں گے جو بتا رہا ہو گا کہ آپ منتقل ہوئے سب گراف پر کیوریٹ کر رہے ہیں۔

-سب گراف کے پیج پر دیکھتے ہوئے، آپ کیوریشن واپس لینے یا منتقل کرنے کا انتخاب کر سکتے ہیں۔ "Arbitrum پر سگنل ٹرانسفر کریں" پر کلک کرنے سے ٹرانسفر ٹول کھل جائے گا۔
+سب گراف پیج پر دیکھتے ہوئے، آپ کیوریشن واپس لینے یا منتقل کرنے کا انتخاب کر سکتے ہیں۔ "Arbitrum پر سگنل منتقل کریں" پر کلک کرنے سے ٹرانسفر ٹول کھل جائے گا۔

-![Transfer signal](/img/transferSignalL2TransferTools.png)
+![ٹرانسفر سگنل](/img/transferSignalL2TransferTools.png)

ٹرانسفر ٹول کھولنے کے بعد، اگر آپ کے پاس نہیں ہے تو آپ کو اپنے والیٹ میں کچھ ایتھیریم شامل کرنے کے لیے کہا جا سکتا ہے۔ پھر آپ L2 والیٹ ایڈریس کو "ریسیونگ والیٹ ایڈریس" فیلڈ میں داخل کرنے کے قابل ہو جائیں گے - **یقینی بنائیں کہ آپ نے یہاں درست ایڈریس درج کیا ہے**۔ ٹرانسفر سگنل پر کلک کرنے سے آپ کو اپنے والیٹ پر ٹرانزیکشن کرنے کا اشارہ ملے گا (نوٹ کریں کہ L2 گیس کی ادائیگی کے لیے کچھ ایتھیریم ویلیو شامل ہے)؛ یہ منتقلی شروع کرے گا.
-اگر آپ اس قدم پر عمل کرتے ہیں، تو **یقینی بنائیں کہ آپ 7 دنوں سے بھی کم وقت میں مرحلہ 3 مکمل کرنے تک آگے بڑھیں گے، ورنہ آپ کا سگنل GRT ضائع ہو جائے گا۔** یہ Arbitrum پر L1-L2 پیغام رسانی کے کام کرنے کے طریقے کی وجہ سے ہے: پیغامات جو اس کے ذریعے بھیجے جاتے ہیں۔ بریج "دوبارہ کوشش کے قابل ٹکٹ" ہیں جن کو 7 دنوں کے اندر مکمل کرنا ضروری ہے، اور اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو ابتدائی عملدرآمد کے لیے دوبارہ کوشش کی ضرورت پڑ سکتی ہے۔
+اگر آپ اس قدم پر عمل کرتے ہیں، **یقینی بنائیں کہ آپ 7 دنوں سے بھی کم وقت میں مرحلہ 3 مکمل کرنے تک آگے بڑھیں گے، ورنہ آپ کے سگنل GRT ضائع ہو جائیں گے۔** یہ Arbitrum پر L1-L2 پیغام رسانی کے کام کرنے کے طریقے کی وجہ سے ہے: پیغامات جو بریج کے ذریعے بھیجے جاتے ہیں وہ "دوبارہ کوشش کے قابل ٹکٹس" ہیں جن کو 7 دنوں کے اندر مکمل کرنا ضروری ہے، اور اگر Arbitrum پر گیس کی قیمت میں اضافہ ہوتا ہے تو ابتدائی عملدرآمد کے لیے دوبارہ کوشش کی ضرورت پڑ سکتی ہے۔

## L2 پر کیوریشن بھیجنا: مرحلہ 2

منتقلی شروع کرنا:

-![Send signal to L2](/img/sendingCurationToL2Step2First.png)
+![L2 پر سگنل بھیجیں](/img/sendingCurationToL2Step2First.png)

منتقلی شروع کرنے کے بعد، وہ پیغام جو آپ کا L1 کیوریشن L2 کو بھیجتا ہے اسے Arbitrum بریج کے ذریعے پھیلانا چاہیے۔ اس کو تقریباً 20 منٹ لگیں گے (بریج مین نیٹ بلاک کا انتظار کرتا ہے جس میں ٹرانزیکشن کو ممکنہ چین کی بحالی سے "محفوظ" رکھا جاتا ہے)۔

-انتظار کا وقت ختم ہونے کے بعد، Arbitrum L2 کنٹریکٹس پر منتقلی کو خود کار طریقے سے انجام دینے کی کوشش کرے گا۔
+انتظار کا وقت ختم ہونے کے بعد، Arbitrum L2 کنٹریکٹس پر منتقلی کو خودکار طریقے سے انجام دینے کی کوشش کرے گا۔

-!L2 [پر کیوریشن سگنل بھیجنا](/img/sendingCurationToL2Step2Second.png)
+![L2 پر کیوریشن سگنل بھیجنا](/img/sendingCurationToL2Step2Second.png)

## L2 پر کیوریشن بھیجنا: مرحلہ 3

زیادہ تر معاملات میں، یہ مرحلہ خود بخود عمل میں آجائے گا کیونکہ مرحلہ 1 میں شامل L2 گیس اس ٹرانزیکشن کو انجام دینے کے لیے کافی ہونی چاہیے جو Arbitrum کنٹریکٹس پر کیوریشن حاصل کرتی ہے۔ تاہم، بعض صورتوں میں، یہ ممکن ہے کہ Arbitrum پر گیس کی قیمتوں میں اضافہ اس خود کار طریقے سے عمل کو ناکام بنادے۔ اس صورت میں، "ٹکٹ" جو آپ کی کیوریشن L2 کو بھیجتی ہے وہ زیر التواء رہے گی اور 7 دنوں کے اندر دوبارہ کوشش کی ضرورت ہوگی۔

-اس صورت میں، آپ کو L2 والیٹ کنیکٹ کرنے کی ضرورت پڑے گی جس میں Arbitrum میں تھوڑا ایتھیریم موجود ہو، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں، اور "کنفرم ٹرانسفر" کو ٹرانزیکشن دہرانے کے لیے دبائیں۔
+اس صورت میں، آپ کو L2 والیٹ کنیکٹ کرنے کی ضرورت پڑے گی جس میں Arbitrum میں تھوڑا ایتھیریم موجود ہو، اپنے والیٹ نیٹ ورک کو Arbitrum میں سویچ کریں، اور "کنفرم ٹرانسفر" کو ٹرانزیکشن دہرانے کے لیے دبائیں.
-![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) +![L2 پر سگنل بھیجیں](/img/L2TransferToolsFinalCurationImage.png) ## L1 پر اپنی کیوریشن واپس لینا -اگر آپ اپنے GRT کو L2 پر نہیں بھیجنا پسند کرتے ہیں، یا آپ GRT کو دستی طور پر بریج کرنا چاہتے ہیں، تو آپ L1 پر اپنا کیوریٹ شدہ GRT واپس لے سکتے ہیں۔ سب گراف کے صفحے پر بینر پر، "سگنل واپس لیں" کا انتخاب کریں اور ٹرانزیکشن کی تصدیق کریں۔ GRT آپ کے کیوریٹر کے ایڈریس پر بھیج دیا جائے گا۔ +اگر آپ اپنے GRT کو L2 پر نہیں بھیجنا پسند کرتے ہیں، یا آپ GRT کو دستی طور پر بریج کرنا چاہتے ہیں، تو آپ L1 پر اپنا کیوریٹ شدہ GRT واپس لے سکتے ہیں۔ سب گراف کے پیج پر بینر پر، "سگنل واپس لیں" کا انتخاب کریں اور ٹرانزیکشن کی تصدیق کریں۔ GRT آپ کے کیوریٹر کے ایڈریس پر بھیج دیا جائے گا۔ diff --git a/website/pages/ur/billing.mdx b/website/pages/ur/billing.mdx index 4cacebd43194..4c0042614f16 100644 --- a/website/pages/ur/billing.mdx +++ b/website/pages/ur/billing.mdx @@ -37,8 +37,12 @@ title: بلنگ ### کرپٹو والیٹ کا استعمال کرتے ہوئے GRT شامل کرنا + + > یہ سیکشن یہ فرض کرتے ہوئے لکھا گیا ہے کہ آپ کے کرپٹو والیٹ میں پہلے سے ہی GRT موجود ہے، اور آپ پر ہیں۔ اگر آپ ایتھریم مینیٹ کے پاس GRT نہیں ہے، تو آپ GRT حاصل کرنے کا طریقہ [یہاں](#getting-grt) سیکھ سکتے ہیں. +کرپٹو والیٹ کا استعمال کرتے ہوئے اپنے بلنگ بیلنس میں GRT شامل کرنے کے ویڈیو واک تھرو کے لیے، یہ [ویڈیو](https://youtu.be/4Bw2sh0FxCg) دیکھیں۔ + 1. [سب گراف اسٹوڈیو بلنگ پیج](https://thegraph.com/studio/billing/) پر جائیں. 2. صفحہ کے اوپری دائیں کونے میں "کنیکٹ والیٹ" بٹن پر کلک کریں۔ آپ کو والیٹ کے انتخاب کے صفحہ پر بھیج دیا جائے گا۔ اپنا والیٹ منتخب کریں اور "کنیکٹ" پر کلک کریں. @@ -71,6 +75,8 @@ title: بلنگ ### ملٹی سگ والیٹ کا استعمال کرتے ہوئے GRT شامل کرنا + + 1. [سب گراف اسٹوڈیو بلنگ پیج](https://thegraph.com/studio/billing/) پر جائیں. 2. صفحہ کے اوپری دائیں کونے میں "کنیکٹ والیٹ" بٹن پر کلک کریں۔ اپنا والیٹ منتخب کریں اور "کنیکٹ" پر کلک کریں۔ اگر آپ [Gnosis-Safe](https://gnosis-safe.io/) استعمال کر رہے ہیں، تو آپ اپنے ملٹی سگ کے ساتھ ساتھ اپنے دستخط والے والیٹ کو بھی جوڑ سکیں گے۔ پھر، متعلقہ پیغام پر دستخط کریں۔ اس سے کوئی گیس فیس خرچ نہیں ہوگی. @@ -153,6 +159,50 @@ title: بلنگ آپ [یہاں](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-) Uniswap پر GRT حاصل کرنے کے بارے میں مزید جان سکتے ہیں. -## آربٹرم بریج +## ایتھیریم حاصل کرنا + +یہ سیکشن آپ کو دکھائے گا کہ ٹرانزیکشن کی فیس یا گیس کے اخراجات کی ادائیگی کے لیے ایتھیریم کیسے حاصل کیا جائے۔ ایتھیریم نیٹ ورک پر کارروائیوں کو انجام دینے کے لیے ضروری ہے جیسے ٹوکن کی منتقلی یا کنٹریکٹ کے ساتھ تعامل. + +### کوائن بیس + +کوائن بیس پر ایتھیریم کی خریداری کے لیے یہ مرحلہ وار گائیڈ ہوگا. + +1. [کوائن بیس](https://www.coinbase.com/) پر جائیں اور ایک اکاؤنٹ بنائیں. +2. ایک بار جب آپ نے ایک اکاؤنٹ بنا لیا، تو KYC (یا اپنے صارف کو جانیں) کے نام سے جانے والے عمل کے ذریعے اپنی شناخت کی تصدیق کریں۔ یہ تمام سینٹرلائزڈ یا کسٹوڈیل کرپٹو ایکسچینجز کے لیے ایک معیاری طریقہ کار ہے. +3. اپنی شناخت کی تصدیق کرنے کے بعد، صفحہ کے اوپری دائیں جانب "خرید/فروخت" بٹن پر کلک کرکے ایتھریم خریدیں. +4. وہ کرنسی منتخب کریں جسے آپ خریدنا چاہتے ہیں۔ ایتھریم کو منتخب کریں. +5. اپنا پسندیدہ ادائیگی کا طریقہ منتخب کریں. +6. ایتھیریم کی وہ مقدار درج کریں جو آپ خریدنا چاہتے ہیں. +7. اپنی خریداری کا جائزہ لیں اور "ایتھیریم خریدیں" پر کلک کریں. +8. اپنی خریداری کی تصدیق کریں اور آپ کامیابی سے ایتھیریم خرید چکے ہوں گے. +9. آپ ایتھیریم کو اپنے کوائن بیس اکاؤنٹ سے اپنے کرپٹو والیٹ جیسے [میٹا ماسک](https://metamask.io/) میں منتقل کر سکتے ہیں. + - ایتھیریم کو اپنے کرپٹو والیٹ میں منتقل کرنے کے لیے، پیج کے اوپری دائیں جانب "اکاؤنٹس" بٹن پر کلک کریں. 
+   - ایتھیریم اکاؤنٹ کے آگے "بھیجیں" بٹن پر کلک کریں.
+   - ایتھیریم کی وہ رقم درج کریں جسے آپ بھیجنا چاہتے ہیں اور والیٹ کا پتہ جس پر آپ اسے بھیجنا چاہتے ہیں.
+   - "جاری رکھیں" پر کلک کریں اور اپنے ٹرانزیکشن کی تصدیق کریں.
+
+کوائن بیس پر ایتھیریم حاصل کرنے کے بارے میں آپ [یہاں](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency) مزید جان سکتے ہیں.
+
+### بائنینس
+
+یہ بائنینس پر ایتھیریم کی خریداری کے لیے مرحلہ وار گائیڈ ہوگا.
+
+1. [بائنینس](https://www.binance.com/en) پر جائیں اور ایک اکاؤنٹ بنائیں.
+2. ایک بار جب آپ نے ایک اکاؤنٹ بنا لیا، تو KYC (یا اپنے صارف کو جانیں) کے نام سے جانے والے عمل کے ذریعے اپنی شناخت کی تصدیق کریں۔ یہ تمام سینٹرلائزڈ یا کسٹوڈیل کرپٹو ایکسچینجز کے لیے ایک معیاری طریقہ کار ہے.
+3. اپنی شناخت کی تصدیق کرنے کے بعد، ہوم پیج بینر پر "اب خریدیں" بٹن پر کلک کرکے ایتھیریم خریدیں.
+4. وہ کرنسی منتخب کریں جسے آپ خریدنا چاہتے ہیں۔ ایتھیریم کو منتخب کریں.
+5. اپنا پسندیدہ ادائیگی کا طریقہ منتخب کریں.
+6. ایتھیریم کی وہ مقدار درج کریں جو آپ خریدنا چاہتے ہیں.
+7. اپنی خریداری کا جائزہ لیں اور "ایتھیریم خریدیں" پر کلک کریں.
+8. اپنی خریداری کی تصدیق کریں اور آپ اپنے بائننس اسپاٹ والیٹ میں اپنا ایتھیریم دیکھیں گے.
+9. آپ اپنے اکاؤنٹ سے اپنے کرپٹو والیٹ جیسے [میٹا ماسک](https://metamask.io/) میں ایتھریم واپس لے سکتے ہیں.
+   - ایتھیریم کو اپنے کرپٹو والیٹ میں واپس لینے کے لیے، اپنے کریپٹو والیٹ کا پتہ نکالنے کی وائٹ لسٹ میں شامل کریں.
+   - "والیٹ" بٹن پر کلک کریں، واپس لینے پر کلک کریں، اور ایتھیریم کو منتخب کریں.
+   - ایتھیریم کی وہ رقم درج کریں جسے آپ بھیجنا چاہتے ہیں اور وائٹ لسٹ شدہ والیٹ ایڈریس جس پر آپ اسے بھیجنا چاہتے ہیں.
+   - "جاری رکھیں" پر کلک کریں اور اپنے ٹرانزیکشن کی تصدیق کریں.
+
+آپ بائنینس پر ایتھیریم حاصل کرنے کے بارے میں مزید جان سکتے ہیں [یہاں](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582).
+
+## Arbitrum بریج

-بلنگ کنٹریکٹ صرف GRT کو ایتھریم مین نیٹ سے آربٹرم نیٹ ورک تک پہنچانے کے لیے ڈیزائن کیا گیا ہے۔ اگر آپ اپنا آربٹرم GRT سے ایتھریم مینیٹ پر واپس منتقل کرنا چاہتے ہیں، تو آپ کو [آربٹرم بریج](https://bridge.arbitrum.io/?l2ChainId=42161) استعمال کرنا ہوگا.
+بلنگ کنٹریکٹ صرف GRT کو ایتھریم مین نیٹ سے Arbitrum نیٹ ورک تک پہنچانے کے لیے ڈیزائن کیا گیا ہے۔ اگر آپ اپنا Arbitrum GRT سے ایتھیریم مینیٹ پر واپس منتقل کرنا چاہتے ہیں، تو آپ کو [Arbitrum بریج](https://bridge.arbitrum.io/?l2ChainId=42161) استعمال کرنا ہوگا.
diff --git a/website/pages/ur/chain-integration-overview.mdx b/website/pages/ur/chain-integration-overview.mdx
new file mode 100644
index 000000000000..1339264a7525
--- /dev/null
+++ b/website/pages/ur/chain-integration-overview.mdx
@@ -0,0 +1,49 @@
+---
+title: چین انٹیگریشن کے عمل کا جائزہ
+---
+
+ایک شفاف اور گورننس پر مبنی انٹیگریشن کا عمل بلاک چین ٹیموں کے لیے ڈیزائن کیا گیا تھا جو [گراف پروٹوکول کے ساتھ انٹیگریشن](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468) کی تلاش میں تھے۔ یہ ایک 3 مرحلے کا عمل ہے، جیسا کہ ذیل میں خلاصہ کیا گیا ہے.
+
+## مرحلہ 1. تکنیکی انٹیگریشن
+
+- ٹیمیں غیر ای وی ایم پر مبنی چینز کے لیے گراف نوڈ انٹیگریشن اور فائر ہوز پر کام کرتی ہیں۔ [یہ طریقہ ہے](/new-chain-integration/).
+- ٹیمیں فورم تھریڈ بنا کر پروٹوکول انٹیگریشن کا عمل شروع کرتی ہیں [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (گورننس اور GIPs کے تحت نئے ڈیٹا ذرائع ذیلی زمرہ)۔ پہلے سے طے شدہ فورم ٹیمپلیٹ کا استعمال لازمی ہے.
+
+## مرحلہ 2۔ انٹیگریشن کی توثیق
+
+- ٹیمیں ہموار انٹیگریشن کے عمل کو یقینی بنانے کے لیے بنیادی ڈویلپرز، گراف فاؤنڈیشن اور GUIs اور نیٹ ورک گیٹ ویز کے آپریٹرز، جیسے کہ [Subgraph Studio](https://thegraph.com/studio/) کے ساتھ تعاون کرتی ہیں۔ اس میں ضروری بیک اینڈ انفراسٹرکچر فراہم کرنا شامل ہے، جیسے انٹیگریٹنگ چین کے JSON RPC یا فائر ہوز اینڈ پوائنٹس۔ ایسی ٹیمیں جو اس طرح کے بنیادی ڈھانچے کی خود میزبانی سے گریز کرنا چاہتی ہیں وہ ایسا کرنے کے لیے گراف کی کمیونٹی آف نوڈ آپریٹرز (انڈیکسرز) سے فائدہ اٹھا سکتی ہیں، جس میں فاؤنڈیشن مدد کر سکتی ہے.
+- گراف انڈیکسرز گراف کے ٹیسٹ نیٹ پر انٹیگریشن کی جانچ کرتے ہیں.
+- کور ڈویلپرز اور انڈیکسرز استحکام، کارکردگی، اور ڈیٹا کے تعین کی نگرانی کرتے ہیں.
+
+## مرحلہ 3۔ مین نیٹ انٹیگریشن
+
+- ٹیمیں گراف میں بہتری کی تجویز (GIP) جمع کر کے اور [فیچر سپورٹ میٹرکس](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) پر پل کی درخواست (PR) شروع کرکے مین نیٹ انٹیگریشن کی تجویز پیش کرتی ہیں۔ (مزید تفصیلات لنک پر).
+- گراف کونسل درخواست کا جائزہ لیتی ہے اور مین نیٹ سپورٹ کو منظور کرتی ہے، ایک کامیاب مرحلہ 2 اور مثبت کمیونٹی فیڈ بیک فراہم کرتی ہے.
+
+---
+
+اگر عمل مشکل لگتا ہے، تو پریشان نہ ہوں! گراف فاؤنڈیشن تعاون کو فروغ دے کر، ضروری معلومات کی پیشکش کرکے، اور مختلف مراحل میں ان کی رہنمائی کرکے انٹیگریٹرز کی مدد کرنے کے لیے پرعزم ہے، بشمول گراف کی بہتری کی تجاویز (GIPs) اور پل کی درخواستوں جیسے گورننس کے عمل کو نیویگیٹ کرنا۔ اگر آپ کے سوالات ہیں، تو براہ کرم [info@thegraph.foundation](mailto:info@thegraph.foundation) یا ڈسکارڈ (یا تو Pedro، گراف فاؤنڈیشن کے رکن، انڈیکسرDAO، یا دیگر بنیادی ڈویلپرز) کے ذریعے رابطہ کریں.
+
+گراف نیٹ ورک کے مستقبل کی تشکیل کے لیے تیار ہیں؟ [اپنی تجویز شروع کریں](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) ابھی اور ویب 3 انقلاب کا حصہ بنیں!
+
+---
+
+## اکثر پوچھے گئے سوالات
+
+### 1. اس کا [ڈیٹا سروسز کی دنیا GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761) سے کیا تعلق ہے؟
+
+یہ عمل سب گراف ڈیٹا سروس سے متعلق ہے، جو صرف نئے سب گراف `ڈیٹا سورسز` پر لاگو ہوتا ہے.
+
+### 2. اگر مین نیٹ پر نیٹ ورک سپورٹ ہونے کے بعد فائر ہوز اور سب سٹریم سپورٹ آجائے تو کیا ہوگا؟
+
+یہ صرف سب سٹریمز سے چلنے والے سب گرافس پر انڈیکسنگ کے انعامات کے لیے پروٹوکول سپورٹ کو متاثر کرے گا۔ اس GIP میں اسٹیج 2 کے لیے بیان کردہ طریقہ کار کے بعد، نئے فائر ہوز کے نفاذ کو ٹیسٹ نیٹ پر جانچ کی ضرورت ہوگی۔ اسی طرح، یہ فرض کرتے ہوئے کہ نفاذ پرفارمنس اور قابل اعتماد ہے، [فیچر سپورٹ میٹرکس](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) پر ایک PR کی ضرورت ہوگی ('سب سٹریمز ڈیٹا سورسز' سب گراف فیچر)، نیز انڈیکسنگ انعامات کے لیے پروٹوکول سپورٹ کے لیے ایک نیا GIP۔ کوئی بھی PR اور GIP بنا سکتا ہے۔ فاؤنڈیشن کونسل کی منظوری میں مدد کرے گی.
+
+### 3. اس عمل میں کتنا وقت لگے گا؟
+
+مین نیٹ تک پہنچنے کا وقت کئی ہفتوں کا متوقع ہے، انٹیگریشن کی ترقی کے وقت کی بنیاد پر مختلف ہوتا ہے، چاہے اضافی تحقیق کی ضرورت ہو، جانچ اور بگ فکسز، اور ہمیشہ کی طرح، گورننس کے عمل کا وقت جس کے لیے کمیونٹی فیڈ بیک کی ضرورت ہوتی ہے.
+
+انڈیکسنگ انعامات کے لیے پروٹوکول سپورٹ اسٹیک ہولڈرز کی بینڈوتھ پر منحصر ہے کہ اگر قابل اطلاق ہو تو بنیادی کوڈبیس میں ٹیسٹنگ، فیڈ بیک اکٹھا کرنے، اور تعاون کو سنبھالنے کے لیے۔ یہ براہ راست انٹیگریشن کی پختگی سے منسلک ہے اور انٹیگریشن کی ٹیم کتنی ذمہ دار ہے (جو RPC/فائر ہوز کے نفاذ کے پیچھے ٹیم ہوسکتی ہے یا نہیں)۔ فاؤنڈیشن پورے عمل میں مدد کے لیے حاضر ہے.
+
+### 4. ترجیحات کو کس طرح سنبھالا جائے گا؟
+
+Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth.
For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/ur/cookbook/arweave.mdx b/website/pages/ur/cookbook/arweave.mdx index ec0683b0ac96..7c3dd08a9411 100644 --- a/website/pages/ur/cookbook/arweave.mdx +++ b/website/pages/ur/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: بناۓ گئے سب گرافز آرویو(Arweave) پر --- -> آرویو سپورٹ گراف نوڈ اور ہوسٹڈ سروس بیٹا میں ہے: براہ مہربانی ہم تک پھنچیں [ڈسکورڈ](https://discord.gg/graphprotocol) آرویو کے سب گراف بنانے جی کسی بھی سوال پوچھنے کے لئے! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! اس گائڈ میں، آپ سیکھیں گے کہ آرویو(Arweave) بلاکچین کو انڈیکس کرنے کیلئے سب گرافز بنانے اور مستعمل کرنے کا طریقہ کار کیسے ہے۔ @@ -83,7 +83,7 @@ dataSources: ``` - آرویو سب گراف ایک نئی قسم کے ڈیٹا سورس (`arweave`) کو متعارف کراتے ہیں۔ -- نیٹ ورک کو ہوسٹنگ گراف نوڈ پر موجود نیٹ ورک کے مطابق ہونا چاہیے۔ ہوسٹڈ سروس پر، آرویو کا مین نیٹ `arweave-mainnet` ہے +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - آرویو ڈیٹا کے ذرائع ایک اختیاری source.owner فیلڈ متعارف کراتے ہیں، جو آرویو والیٹ کی عوامی کلید ہے آرویو ڈیٹا کے ذرائع دو قسم کے ہینڈلرز کو سپورٹ کرتے ہیں: @@ -150,9 +150,9 @@ class Transaction { آرویو سب گراف کی میپنگ لکھنا ایتھریم سب گراف کی میپنگ لکھنے کے مترادف ہے۔ مزید معلومات کے لیے، [یہاں](/developing/creating-a-subgraph/#writing-mappings) کلک کریں. -## ہوسٹڈ سروس پر آرویو سب گراف کی تعیناتی +## Deploying an Arweave Subgraph on the hosted service -ہوزڈ سروس ڈیش بورڈ پر آپ کا سب گراف بن جانے کے بعد، آپ `graph deploy` CLI کمانڈ استعمال کر کے تعینات کر سکتے ہیں. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/ur/cookbook/grafting.mdx b/website/pages/ur/cookbook/grafting.mdx index b58733e6ec9f..b10b48f93b0d 100644 --- a/website/pages/ur/cookbook/grafting.mdx +++ b/website/pages/ur/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: ایک کنٹریکٹ کو تبدیل کریں اور اس کی تاری اس ٹیوٹوریل میں، ہم استعمال کے ایک بنیادی کیس کا احاطہ کریں گے۔ ہم موجودہ معاہدے کو ایک جیسے کنٹریکٹ سے بدل دیں گے (ایک نئے پتہ کے ساتھ، لیکن ایک ہی کوڈ کے ساتھ)۔ اس کے بعد، موجودہ سب گراف کو "بیس" سب گراف پر گرافٹ کریں جو نئے کنٹریکٹ کو ٹریک کرتا ہے. +## نیٹ ورک میں اپ گریڈ کرتے وقت گرافٹنگ پر اہم نوٹ + +> **احتیاط**: اگر آپ سب گراف اسٹوڈیو یا ہوسٹڈ سروس سے ڈیسینٹرلائزڈ نیٹ ورک پر اپنا سب گراف اپ گریڈ کر رہے ہیں، تو اپ گریڈ کے عمل کے دوران گرافٹنگ کے استعمال سے گریز کرنے کی سختی سے سفارش کی جاتی ہے. 
+ +### یہ کیوں اہم ہے؟ + +گرافٹنگ ایک طاقتور خصوصیت ہے جو آپ کو ایک سب گراف کو دوسرے پر "گرافٹ" کرنے کی اجازت دیتی ہے، مؤثر طریقے سے تاریخی ڈیٹا کو موجودہ سب گراف سے نئے ورژن میں منتقل کرتی ہے۔ اگرچہ یہ ڈیٹا کو محفوظ رکھنے اور انڈیکسنگ پر وقت بچانے کا ایک مؤثر طریقہ ہے، لیکن گرافٹنگ کسی میزبان ماحول سے ڈیسنٹرالا ئزڈ نیٹ ورک کی طرف ہجرت کرتے وقت پیچیدگیوں اور ممکنہ مسائل کو پیش کر سکتی ہے۔ گراف نیٹ ورک سے سب گراف کو ہوسٹڈ سروس یا سب گراف اسٹوڈیو میں واپس کرنا ممکن نہیں ہے. + +### بہترین طریقے + +**ابتدائی منتقلی**: جب آپ پہلی بار اپنے سب گراف کو ڈیسنٹرالا ئزڈ نیٹ ورک پر تعینات کرتے ہیں، تو بغیر گرافٹنگ کے ایسا کریں۔ یقینی بنائیں کہ سب گراف مستحکم ہے اور توقع کے مطابق کام کر رہا ہے. + +**بعد کے اپ ڈیٹس**: ایک بار جب آپ کا سب گراف ڈیسینٹرلائزڈ نیٹ ورک پر لائیو اور مستحکم ہو جاتا ہے، تو آپ منتقلی کو ہموار بنانے اور تاریخی ڈیٹا کو محفوظ رکھنے کے لیے مستقبل کے ورژنز کے لیے گرافٹنگ کا استعمال کر سکتے ہیں. + +ان رہنما خطوط پر عمل پیرا ہو کر، آپ خطرات کو کم کرتے ہیں اور منتقلی کے ایک ہموار عمل کو یقینی بناتے ہیں. + ## ایک موجودہ سب گراف بنانا سب گراف بنانا گراف کا ایک لازمی حصہ ہے، جس کی تفصیل [یہاں](http://localhost:3000/en/cookbook/quick-start/) ہے۔ اس ٹیوٹوریل میں استعمال شدہ موجودہ سب گراف کی تعمیر اور تعیناتی کے قابل ہونے کے لیے، درج ذیل ریپو فراہم کیا گیا ہے: diff --git a/website/pages/ur/cookbook/near.mdx b/website/pages/ur/cookbook/near.mdx index c0e4352d1068..5fe26e7fa413 100644 --- a/website/pages/ur/cookbook/near.mdx +++ b/website/pages/ur/cookbook/near.mdx @@ -277,7 +277,7 @@ accounts: ### میرے سوال کا جواب نہیں دیا گیا ہے، مجھے NEAR سب گراف بنانے میں مزید مدد کہاں سے مل سکتی ہے؟ -اگر یہ سب گراف ڈیولپمنٹ کے بارے میں عمومی سوال ہے تو باقی [ڈیولپر دستاویزات](/cookbook/quick-start) میں بہت زیادہ معلومات موجود ہیں۔ بصورت دیگر براہ کرم [گراف پروٹوکول ڈسکورڈ](https://discord.gg/graphprotocol) میں شامل ہوں اور #near چینل پر پوچھیں یا near@thegraph.com پر ای میل کریں. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## حوالہ جات diff --git a/website/pages/ur/cookbook/substreams-powered-subgraphs.mdx b/website/pages/ur/cookbook/substreams-powered-subgraphs.mdx index 6b84c84358c8..9e2f2d129e49 100644 --- a/website/pages/ur/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/ur/cookbook/substreams-powered-subgraphs.mdx @@ -1,30 +1,30 @@ --- -title: Substreams-powered subgraphs +title: سب سٹریمز سے چلنے والے سب گرافس --- -[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. +[Substreams]\(/سب سٹریمز) بلاکچین ڈیٹا کی پروسیسنگ کے لیے ایک نیا فریم ورک ہے، جسے سٹریمنگ فاسٹ نے گراف نیٹ ورک کے لیے تیار کیا ہے۔ سب سٹریمز کے ماڈیولز ہستی کی تبدیلیوں کو آؤٹ پٹ کر سکتے ہیں، جو سب گراف اداروں کے ساتھ ہم آہنگ ہیں۔ ایک سب گراف اس طرح کے سب سٹریمز ماڈیول کو ڈیٹا سورس کے طور پر استعمال کر سکتا ہے، جس سے انڈیکسنگ کی رفتار اور سب سٹریمز کا اضافی ڈیٹا سب گراف ڈویلپرز تک پہنچ جاتا ہے. 
-## Requirements +## تقاضے -This cookbook requires [yarn](https://yarnpkg.com/), [the dependencies necessary for local Substreams development](https://substreams.streamingfast.io/developers-guide/installation-requirements), and the latest version of Graph CLI (>=0.52.0): +اس کک بک کے لیے [yarn](https://yarnpkg.com/)، [مقامی سب سٹریمز کی ترقی کے لیے ضروری انحصار](https://substreams.streamingfast.io/developers-guide/installation-requirements)، اور تازہ ترین ورژن کی ضرورت ہے.گراف CLI (>=0.52.0): ``` npm install -g @graphprotocol/graph-cli ``` -## Get the cookbook +## کک بک حاصل کریں -> This cookbook uses this [Substreams-powered subgraph as a reference](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). +> یہ کک بک اس [سب اسٹریم سے چلنے والے سب گراف کو بطور حوالہ استعمال کرتی ہے](https://github.com/graphprotocol/graph-tooling/tree/main/examples/substreams-powered-subgraph). ``` graph init --from-example substreams-powered-subgraph ``` -## Defining a Substreams package +## سب سٹریمز پیکیج کی وضاحت کرنا -A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. +ایک سب سٹریمز پیکیج اقسام پر مشتمل ہے (جس کی وضاحت [پروٹوکول بفرز](https://protobuf.dev/))، ماڈیولز (Rust میں لکھی گئی ہے)، اور ایک `substreams.yaml` فائل جو اقسام کا حوالہ دیتی ہے، اور یہ بتاتی ہے کہ ماڈیول کیسے ہیں متحرک ہیں. [سب سٹریمز ڈویلپمنٹ کے بارے میں مزید جاننے کے لیے سب سٹریمز کی دستاویزات دیکھیں](/substreams)، اور [wesome-substreams](https://github.com/pinax-network/awesome-substreams) اور [Substreams cookbook]\(https مزید مثالوں کے لیے://github.com/pinax-network/substreams-cookbook). -The Substreams package in question detects contract deployments on Mainnet Ethereum, tracking the creation block and timestamp for all newly deployed contracts. To do this, there is a dedicated `Contract` type in `/proto/example.proto` ([learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): +زیر بحث سب اسٹریمز پیکیج مینیٹ ایتھیریم پر کنٹریکٹ کی تعیناتیوں کا پتہ لگاتا ہے، تمام نئے تعینات کردہ کنٹریکٹس کے لیے تخلیق بلاک اور ٹائم اسٹیمپ کا پتہ لگاتا ہے۔ ایسا کرنے کے لیے، `/proto/example.proto` ([پروٹوکول بفرز کی تعریف کرنے کے بارے میں مزید جانیں](https://protobuf.dev/programming-guides/proto3/#simple)) میں ایک مخصوص `Contract` قسم ہے: ```proto syntax = "proto3"; @@ -43,7 +43,7 @@ message Contract { } ``` -The core logic of the Substreams package is a `map_contract` module in `lib.rs`, which processes every block, filtering for Create calls which did not revert, returning `Contracts`: +سب اسٹریمز پیکج کی بنیادی منطق `lib.rs` میں ایک `map_contract` ماڈیول ہے، جو ہر بلاک پر کارروائی کرتا ہے، ایسی کالز تخلیق کرنے کے لئے فلٹر کرتا ہے جو واپس نہیں آتیں، `Contracts` واپس کرتی ہیں: ``` #[substreams::handlers::map] @@ -67,9 +67,9 @@ fn map_contract(block: eth::v2::Block) -> Result The `substreams_entity_change` crate also has a dedicated `Tables` function for simply generating entity changes ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). 
The Entity Changes generated must be compatible with the `schema.graphql` entities defined in the `subgraph.graphql` of the corresponding subgraph. +> 'substreams_entity_change' کریٹ میں صرف ہستی کی تبدیلیاں پیدا کرنے کے لیے ایک وقف کردہ 'Tables' فنکشن ہے ([documentation](https://docs.rs/substreams-entity-change/1.2.2/substreams_entity_change/tables/index.html)). تخلیق کردہ ہستی کی تبدیلیاں متعلقہ سب گراف کے `subgraph.graphql` میں بیان کردہ `schema.graphql` اداروں کے ساتھ ہم آہنگ ہونی چاہئیں. ``` #[substreams::handlers::map] @@ -88,7 +88,7 @@ pub fn graph_out(contracts: Contracts) -> Result graph_out; ``` -To prepare this Substreams package for consumption by a subgraph, you must run the following commands: +سب اسٹریمز کے ذریعے استعمال کے لیے اس سب اسٹریم پیکیج کو تیار کرنے کے لیے، آپ کو درج ذیل کمانڈز چلانے چاہئیں: ```bash -yarn substreams:protogen # generates types in /src/pb -yarn substreams:build # builds the substreams -yarn substreams:package # packages the substreams in a .spkg file +yarn substreams:protogen # /src/pb میں قسمیں تیار کرتا ہے +yarn substreams:build # سب سٹریمز بناتا ہے +yarn substreams:package # سب اسٹریمز کو .spkg فائل میں پیک کرتا ہے -# alternatively, yarn substreams:prepare calls all of the above commands +# متبادل طور پر، یارن سب سٹریمز: مندرجہ بالا تمام کمانڈز کو کالز تیار کریں ``` -> These scripts are defined in the `package.json` file if you want to understand the underlying substreams commands +> ان اسکرپٹس کی وضاحت `package.json` فائل میں کی گئی ہے اگر آپ بنیادی سب سٹریمز کی کمانڈ کو سمجھنا چاہتے ہیں -This generates a `spkg` file based on the package name and version from `substreams.yaml`. The `spkg` file has all the information which Graph Node needs to ingest this Substreams package. +یہ 'substreams.yaml' سے پیکیج کے نام اور ورژن کی بنیاد پر ایک `spkg` فائل تیار کرتا ہے۔ `spkg` فائل میں وہ تمام معلومات ہیں جن کی گراف نوڈ کو اس سب سٹریمز پیکیج کو ہضم کرنے کی ضرورت ہے. -> If you update the Substreams package, depending on the changes you make, you may need to run some or all of the above commands so that the `spkg` is up to date. +> اگر آپ سب اسٹریم پیکج کو اپ ڈیٹ کرتے ہیں تو، آپ کی تبدیلیوں پر منحصر ہے، آپ کو مندرجہ بالا کچھ یا سبھی کمانڈز چلانے کی ضرورت پڑسکتی ہے تاکہ `spkg` اپ ٹو ڈیٹ رہے. -## Defining a Substreams-powered subgraph +## سب اسٹریمز سے چلنے والے سب گراف کی وضاحت کرنا -Substreams-powered subgraphs introduce a new `kind` of data source, "substreams". Such subgraphs can only have one data source. +سب اسٹریمز سے چلنے والے سب گرافس ڈیٹا کے ذریعہ کی ایک نئی `kind` متعارف کراتے ہیں، "سب اسٹریمز"۔ اس طرح کے سب گرافس میں صرف ایک ڈیٹا سورس ہو سکتا ہے. -This data source must specify the indexed network, the Substreams package (`spkg`) as a relative file location, and the module within that Substreams package which produces subgraph-compatible entity changes (in this case `map_entity_changes`, from the Substreams package above). The mapping is specified, but simply identifies the mapping kind ("substreams/graph-entities") and the apiVersion. +اس ڈیٹا سورس کو انڈیکسڈ نیٹ ورک، سب اسٹریم پیکیج (`spkg`) کو متعلقہ فائل لوکیشن کے طور پر اور اس سب اسٹریم پیکیج کے اندر موجود ماڈیول کی وضاحت کرنی چاہیے جو سب سٹریمز سے مطابقت رکھنے والی ہستی کی تبدیلیاں (اس صورت میں `map_entity_changes`، اوپر سب اسٹریمز پیکیج سے)۔ میپنگ کی وضاحت کی گئی ہے، لیکن صرف میپنگ کی قسم ("substreams/graph-entities") اور apiVersion کی شناخت کرتا ہے. -> Currently the Subgraph Studio and The Graph Network support Substreams-powered subgraphs which index `mainnet` (Mainnet Ethereum). 
+> فی الحال سب گراف اسٹوڈیو اور گراف نیٹ ورک سب اسٹریمز سے چلنے والے سب گرافس کو سپورٹ کرتے ہیں جو انڈیکس `مین نیٹ` (مینیٹ ایتھیریم) کرتے ہیں. ```yaml specVersion: 0.0.4 @@ -180,7 +180,7 @@ dataSources: apiVersion: 0.0.5 ``` -The `subgraph.yaml` also references a schema file. The requirements for this file are unchanged, but the entities specified must be compatible with the entity changes produced by the Substreams module referenced in the `subgraph.yaml`. +`subgraph.yaml` ایک سکیما فائل کا بھی حوالہ دیتا ہے۔ اس فائل کے تقاضے غیر تبدیل شدہ ہیں، لیکن متعین کردہ ہستیوں کو `subgraph.yaml` میں حوالہ کردہ سب سٹریمز ماڈیول کے ذریعہ تیار کردہ ہستی کی تبدیلیوں کے ساتھ ہم آہنگ ہونا چاہئے. ```graphql type Contract @entity { @@ -194,21 +194,21 @@ type Contract @entity { } ``` -Given the above, subgraph developers can use Graph CLI to deploy this Substreams-powered subgraph. +مندرجہ بالا کو دیکھتے ہوئے، سب گراف ڈویلپرز اس سب اسٹریمز سے چلنے والے سب گراف کو تعینات کرنے کے لیے گراف CLI استعمال کر سکتے ہیں. -> Substreams-powered subgraphs indexing mainnet Ethereum can be deployed to the [Subgraph Studio](https://thegraph.com/studio/). +> سب اسٹریمز سے چلنے والے سب گرافس کو انڈیکس کرنے والے مین نیٹ ایتھیریم کو [سب گراف اسٹوڈیو](https://thegraph.com/studio/) میں تعینات کیا جا سکتا ہے. ```bash -yarn install # install graph-cli -yarn subgraph:build # build the subgraph -yarn subgraph:deploy # deploy the subgraph +yarn install # graph-cli انسٹال کریں +yarn subgraph:build # سب گراف کی تعمیر کریں +yarn subgraph:deploy # سب گراف کو تعینات کریں ``` -That's it! You have built and deployed a Substreams-powered subgraph. +یہی ہے! آپ نے سب اسٹریمز سے چلنے والا سب گراف بنایا اور تعینات کیا ہے. -## Serving Substreams-powered subgraphs +## سب اسٹریمز سے چلنے والے سب گرافس کی خدمت کرنا -In order to serve Substreams-powered subgraphs, Graph Node must be configured with a Substreams provider for the relevant network, as well as a Firehose or RPC to track the chain head. These providers can be configured via a `config.toml` file: +سب اسٹریمز سے چلنے والے سب اسٹریمز کو پیش کرنے کے لیے، گراف نوڈ کو متعلقہ نیٹ ورک کے لیے سب اسٹریمز فراہم کنندہ کے ساتھ کنفیگر کیا جانا چاہیے، ساتھ ہی ساتھ چین ہیڈ کو ٹریک کرنے کے لیے فائر ہوز یا RPC کا ہونا چاہیے۔ ان فراہم کنندگان کو ایک `config.toml` فائل کے ذریعے ترتیب دیا جا سکتا ہے: ```toml [chains.mainnet] diff --git a/website/pages/ur/cookbook/upgrading-a-subgraph.mdx b/website/pages/ur/cookbook/upgrading-a-subgraph.mdx index b240dd54bb49..9a5b3b5ff787 100644 --- a/website/pages/ur/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/ur/cookbook/upgrading-a-subgraph.mdx @@ -1,23 +1,23 @@ --- -title: Upgrading an Existing Subgraph to The Graph Network +title: موجودہ سب گراف کو گراف نیٹ ورک میں اپ گریڈ کرنا --- ## تعارف -This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! +یہ ایک گائیڈ ہے کہ آپ اپنے سب گراف کو ہوسٹڈ سروس سے گراف کے ڈیسنٹرالا ئزڈ نیٹ ورک میں کیسے اپ گریڈ کریں۔ گراف نیٹ ورک میں 1,000 سے زیادہ سب گرافس کامیابی کے ساتھ اپ گریڈ ہو چکے ہیں جن میں اسنیپ شاٹ، لوپرنگ، آڈیئس، پریمیا، لائیوپیر، اوما، کریو، لڈو، اور بہت سارے پروجیکٹس شامل ہیں! -The process of upgrading is quick and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. 
+اپ گریڈ کرنے کا عمل تیز ہے اور آپ کے سب گرافس ہمیشہ کے لیے قابل اعتماد اور کارکردگی سے مستفید ہوں گے جو آپ صرف گراف نیٹ ورک پر حاصل کر سکتے ہیں. ### شرطیں - آپ نے پہلے ہی ہوسٹڈ سروس پر سب گراف تعینات کر دیا ہے. -- سب گراف گراف نیٹ ورک پر دستیاب (یا بیٹا میں دستیاب) چین کو ترتیب دے رہا ہے. -- You have a wallet with ETH to publish your subgraph on-chain. -- You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. +- The subgraph is indexing a chain available on The Graph Network. +- آپ کے پاس اپنے سب گراف کو آن چین شائع کرنے کے لیے ایتھیریم کے ساتھ ایک والیٹ ہے. +- آپ کے پاس اپنے سب گراف کو کیوریٹ کرنے کے لیے ~10,000 GRT ہے تاکہ انڈیکسرز اسے انڈیکس کرنا شروع کر سکیں. -## Upgrading an Existing Subgraph to The Graph Network +## موجودہ سب گراف کو گراف نیٹ ورک میں اپ گریڈ کرنا -> You can find specific commands for your subgraph in the [Subgraph Studio](https://thegraph.com/studio/). +> آپ اپنے سب گراف کے لیے مخصوص کمانڈز [سب گراف اسٹوڈیو](https://thegraph.com/studio/) میں حاصل کر سکتے ہیں. 1. graph-cli کا جدید ورژن انسٹال کریں: @@ -29,7 +29,7 @@ npm install -g @graphprotocol/graph-cli yarn global add @graphprotocol/graph-cli ``` -Make sure your `apiVersion` in subgraph.yaml is `0.0.5` or greater. +یقینی بنائیں کہ subgraph.yaml میں آپ کا `apiVersion` `0.0.5` یا اس سے زیادہ ہے. 2. سب گراف کے مرکزی پروجیکٹ کے ذخیرے کے اندر، سٹوڈیو پر تعینات اور تعمیر کرنے کے لیے سب گراف کی تصدیق کریں: @@ -40,18 +40,18 @@ graph auth --studio 3. فائلیں اور سب گراف بنائیں: ```sh -graph codegen && graph build +graph codegen && graph build ``` -If your subgraph has build errors, refer to the [AssemblyScript Migration Guide](/release-notes/assemblyscript-migration-guide/). +اگر آپ کے سب گراف میں تعمیراتی خامیاں ہیں، تو [اسمبلی سکرپٹ مائیگریشن گائڈ](/release-notes/assemblyscript-migration-guide/) سے رجوع کریں. -4. Sign into [Subgraph Studio](https://thegraph.com/studio/) with your wallet and deploy the subgraph. You can find your `` in the Studio UI, which is based on the name of your subgraph. +4. اپنے والیٹ سے [سب گراف اسٹوڈیو](https://thegraph.com/studio/) میں سائن ان کریں اور سب گراف کو تعینات کریں۔ آپ اسٹوڈیو UI میں اپنا `` تلاش کر سکتے ہیں، جو آپ کے سب گراف کے نام پر مبنی ہے. ```sh -graph deploy --studio +graph deploy --studio ``` -5. Test queries on the Studio's playground. Here are some examples for the [Sushi - Mainnet Exchange Subgraph](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground): +5. اسٹوڈیو کے پلے گراؤنڈ پر ٹیسٹ کیوریز۔ یہاں [Sushi - مین نیٹ ایکسچینج سب گراف](https://thegraph.com/explorer/subgraph?id=0x4bb4c1b0745ef7b4642feeccd0740dec417ca0a0-0&view=Playground) کی کچھ مثالیں ہیں: ```sh { @@ -70,23 +70,23 @@ graph deploy --studio 6. اس وقت، آپ کا سب گراف اب سب گراف سٹوڈیو پر تعینات ہے، لیکن ابھی تک ڈیسینٹرالائزڈ نیٹ ورک پر شائع نہیں ہوا ہے۔ اب آپ سب گراف کی جانچ کر سکتے ہیں تاکہ یہ یقینی بنایا جا سکے کہ یہ عارضی کیوری کے URL کا استعمال کرتے ہوئے جیسا کہ اوپر دائیں کالم کے اوپر دیکھا گیا ہے کام کر رہا ہے۔ جیسا کہ یہ نام پہلے ہی تجویز کرتا ہے، یہ ایک عارضی URL ہے اور اسے پروڈکشن میں استعمال نہیں کیا جانا چاہیے. -- Updating is just publishing another version of your existing subgraph on-chain. -- Because this incurs a cost, it is highly recommended to deploy and test your subgraph in the Subgraph Studio, using the "Development Query URL" before publishing. See an example transaction [here](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b). Prices are roughly around 0.0425 ETH at 100 gwei.
-- Any time you need to update your subgraph, you will be charged an update fee. Because this incurs a cost, it is highly recommended to deploy and test your subgraph on Goerli before deploying to mainnet. It can, in some cases, also require some GRT if there is no signal on that subgraph. In the case there is signal/curation on that subgraph version (using auto-migrate), the taxes will be split. +- اپ ڈیٹ کرنا صرف آپ کے موجودہ سب گراف آن چین کا دوسرا ورژن شائع کرنا ہے. +- چونکہ اس پر لاگت آتی ہے، اس لیے یہ انتہائی سفارش کی جاتی ہے کہ شائع کرنے سے پہلے "ترقیاتی کیوری URL" کا استعمال کرتے ہوئے سب گراف اسٹوڈیو میں اپنے سب گراف کو تعینات اور جانچیں۔ ٹرانزیکشن کی ایک مثال دیکھیں [یہاں](https://etherscan.io/tx/0xd0c3fa0bc035703c9ba1ce40c1862559b9c5b6ea1198b3320871d535aa0de87b)۔ قیمتیں 100 gwei پر تقریباً 0.0425 ایتھیریم ہیں. +- جب بھی آپ کو اپنا سب گراف اپ ڈیٹ کرنے کی ضرورت ہو، آپ سے اپ ڈیٹ فیس وصول کی جائے گی۔ چونکہ اس پر لاگت آتی ہے، اس لیے مین نیٹ پر تعینات کرنے سے پہلے اپنے سب گراف کو Goerli پر تعینات کرنے اور جانچنے کی انتہائی سفارش کی جاتی ہے۔ اگر اس سب گراف پر کوئی سگنل نہیں ہے تو اسے، بعض صورتوں میں، کچھ GRT کی بھی ضرورت پڑ سکتی ہے۔ اس سب گراف ورژن پر سگنل/کیوریشن ہونے کی صورت میں (آٹو مائیگریٹ کا استعمال کرتے ہوئے)، ٹیکس تقسیم ہو جائیں گے. 7. "شائع کریں" کے بٹن کو دبا کر گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف کو شائع کریں. -You should curate your subgraph with GRT to ensure that it is indexed by Indexers. To save on gas costs, you can curate your subgraph in the same transaction that you publish it to the network. It is recommended to curate your subgraph with at least 10,000 GRT for high quality of service. +آپ کو اپنے سب گراف کو GRT کے ساتھ کیوریٹ کرنا چاہیے تاکہ یہ یقینی بنایا جا سکے کہ اسے انڈیکسرز کے ذریعے ترتیب دیا گیا ہے۔ گیس کے اخراجات کو بچانے کے لیے، آپ اپنے سب گراف کو اسی ٹرانزیکشن میں درست کر سکتے ہیں جسے آپ نیٹ ورک پر شائع کرتے ہیں۔ اعلی معیار کی خدمت کے لیے کم از کم 10,000 GRT کے ساتھ اپنے سب گراف کو درست کرنے کی سفارش کی جاتی ہے. -And that's it! After you are done publishing, you'll be able to view your subgraphs live on the decentralized network via [The Graph Explorer](https://thegraph.com/explorer). +اور یہ بات ہے! آپ کی اشاعت مکمل ہونے کے بعد، آپ اپنے سب گرافس کو ڈیسینٹرلائزڈ نیٹ ورک پر [گراف ایکسپلورر](https://thegraph.com/explorer) کے ذریعے لائیو دیکھ سکیں گے. -Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. +ڈسکورڈ پر [#Curators چینل](https://discord.gg/s5HfGMXmbW) کا فائدہ اٹھانے کے لیے بلا جھجھک کیوریٹرز کو یہ بتانے کے لیے کہ آپ کا سب گراف سگنل کیے جانے کے لیے تیار ہے۔ اگر آپ اپنے متوقع کیوری کا حجم ان کے ساتھ شیئر کریں تو یہ بھی مددگار ثابت ہوگا۔ لہذا، وہ اندازہ لگا سکتے ہیں کہ انہیں آپ کے سب گراف پر کتنا GRT سگنل دینا چاہیے. -### ایک API کلید بنائیں +### ایک API key بنائیں -You can generate an API key in Subgraph Studio [here](https://thegraph.com/studio/apikeys/). +آپ سب گراف اسٹوڈیو میں ایک API کلید بنا سکتے ہیں [here](https://thegraph.com/studio/apikeys/). 
-![API key creation page](/img/api-image.png) +![API کلیدی تخلیق کا پیج](/img/api-image.png) ہر ہفتے کے آخر میں، کیوری کی فیس کی بنیاد پر ایک رسید تیار کی جائے گی جو اس مدت کے دوران خرچ کی گئی ہیں۔ یہ انوائس آپ کے بیلنس میں دستیاب جی آر ٹی کا استعمال کرتے ہوئے خود بخود ادا کی جائے گی۔ آپ کی کیوری فیس کی لاگت واپس لینے کے بعد آپ کا بیلنس اپ ڈیٹ ہو جائے گا۔ کیوری کی فیس آربٹرم نیٹ ورک کے ذریعے جی آر ٹی میں ادا کی جاتی ہے۔ آپ کو درج ذیل مراحل کے ذریعے اپنی API کلید کو فعال کرنے کے لیے آربٹرم بلنگ کنٹریکٹ میں جی آر ٹی شامل کرنے کی ضرورت ہوگی: @@ -94,14 +94,14 @@ You can generate an API key in Subgraph Studio [here](https://thegraph.com/studi - اپنے والیٹ پر جی آر ٹی بھیجیں. - اسٹوڈیو میں بلنگ صفحہ پر، جی آر ٹی شامل کریں پر کلک کریں. -![Add GRT in billing](/img/Add-GRT-New-Page.png) +![بلنگ میں GRT شامل کریں](/img/Add-GRT-New-Page.png) - اپنے بلنگ بیلنس میں اپنا GRT شامل کرنے کے لیے اقدامات پر عمل کریں. - آپ کا GRT خود بخود آربٹرم نیٹ ورک سے منسلک ہو جائے گا اور آپ کے بلنگ بیلنس میں شامل ہو جائے گا. -![Billing pane](/img/New-Billing-Pane.png) +![بلنگ پین](/img/New-Billing-Pane.png) -> Note: see the [official billing page](../billing.mdx) for full instructions on adding GRT to your billing balance. +> نوٹ: اپنے بلنگ بیلنس میں GRT شامل کرنے سے متعلق مکمل ہدایات کے لیے [آفیشل بلنگ صفحہ](../billing.mdx) دیکھیں. ### اپنی API کلید کو محفوظ کرنا @@ -110,13 +110,13 @@ You can generate an API key in Subgraph Studio [here](https://thegraph.com/studi 1. مجاز سب گراف 2. مجاز ڈومین -You can secure your API key [here](https://thegraph.com/studio/apikeys/test/). +آپ اپنی API کلید کو[یہاں](https://thegraph.com/studio/apikeys/test/) محفوظ کر سکتے ہیں. -![Subgraph lockdown page](/img/subgraph-lockdown.png) +![سب گراف لاک ڈاؤن پیج](/img/subgraph-lockdown.png) ### ڈیسینٹرالائزڈ نیٹ ورک پر آپ کے سب گراف سے کیوری کرنا -Now you can check the indexing status of the Indexers on the network in Graph Explorer (example [here](https://thegraph.com/explorer/subgraph?id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers)). The green line at the top indicates that at the time of posting 8 Indexers successfully indexed that subgraph. Also in the Indexer tab you can see which Indexers picked up your subgraph. +اب آپ گراف ایکسپلورر (مثال کے طور پر [یہاں]\(https://thegraph.com/explorer/subgraph؟ id=S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo&view=Indexers))سب سے اوپر کی سبز لکیر اس بات کی نشاندہی کرتی ہے کہ پوسٹ کرنے کے وقت 8 انڈیکسرز نے اس سب گراف کو کامیابی کے ساتھ انڈیکس کیا۔ انڈیکس ٹیب میں بھی آپ دیکھ سکتے ہیں کہ کن انڈیکسرز نے آپ کا سب گراف اٹھایا. ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) @@ -124,13 +124,13 @@ Now you can check the indexing status of the Indexers on the network in Graph Ex `https://gateway.thegraph.com/api/[api-key]/subgraphs/id/S9ihna8D733WTEShJ1KctSTCvY1VJ7gdVwhUujq4Ejo` -Important: Make sure to replace `[api-key]` with an actual API key generated in the section above. +اہم: یقینی بنائیں کہ `[api-key]` کو اوپر والے سیکشن میں تیار کردہ ایک حقیقی API key سے تبدیل کریں. اب آپ اپنی GraphQL کی درخواستیں بھیجنے کے لیے اپنے ڈیپ میں اس کیوری URL استعمال کر سکتے ہیں. مبارک ہو! اب آپ ڈیسینٹرالائزیشن کے علمبردار ہیں! -> Note: Due to the distributed nature of the network it might be the case that different Indexers have indexed up to different blocks. 
In order to only receive fresh data you can specify the minimum block an Indexer has to have indexed in order to serve your query with the block: `{ number_gte: $minBlock }` field argument as shown in the example below: +> نوٹ: نیٹ ورک کی تقسیم شدہ نوعیت کی وجہ سے ایسا ہو سکتا ہے کہ مختلف انڈیکسرز نے مختلف بلاکس تک انڈیکس کیا ہو۔ صرف تازہ ڈیٹا حاصل کرنے کے لیے آپ کم از کم بلاک کی وضاحت کر سکتے ہیں کہ ایک انڈیکسر کو بلاک کے ساتھ آپ کے کیوریز کو پیش کرنے کے لیے انڈیکس کیا جانا چاہیے: `{ number_gte: $minBlock }` فیلڈ آرگیومنٹ جیسا کہ ذیل کی مثال میں دکھایا گیا ہے: ```graphql { @@ -140,74 +140,74 @@ Important: Make sure to replace `[api-key]` with an actual API key generated in } ``` -More information about the nature of the network and how to handle re-orgs are described in the documentation article [Distributed Systems](/querying/distributed-systems/). +نیٹ ورک کی نوعیت اور دوبارہ تنظیموں کو سنبھالنے کے طریقہ کے بارے میں مزید معلومات دستاویزی مضمون [ڈسٹری بیوٹڈ سسٹمز](/querying/distributed-systems/) میں بیان کی گئی ہیں. -## Updating a Subgraph on the Network +## نیٹ ورک پر سب گراف کو اپ ڈیٹ کرنا -If you would like to update an existing subgraph on the network, you can do this by deploying a new version of your subgraph to the Subgraph Studio using the Graph CLI. +اگر آپ نیٹ ورک پر موجودہ سب گراف کو اپ ڈیٹ کرنا چاہتے ہیں، تو آپ گراف CLI کا استعمال کرتے ہوئے اپنے سب گراف کا نیا ورژن سب گراف اسٹوڈیو میں تعینات کر کے ایسا کر سکتے ہیں. 1. اپنے موجودہ سب گراف میں بدلاو کریں. ایک اچھا خیال یہ ہے کہ Goerli پر شائع کرکے سب گراف اسٹوڈیو پر چھوٹی اصلاحات کی جانچ کریں. 2. درج ذیل کو تعینات کریں اور نۓ ورزن کی کمانڈ میں وضاحت کریں (eg. v0.0.1, v0.0.2, etc): ```sh -graph deploy --studio +graph deploy --studio ``` 3. پلے گراونڈ میں کیوری کرکے سب گراف اسٹوڈیو میں نئے ورژن کی جانچ کریں۔ 4. گراف نیٹ ورک پر نۓ ورزن کو شائع کریں. یاد رکھیں کہ اس کو گیس فیس کی ضرورت ہوتی ہے (جیسا کہ اوپر والے حصے میں بیان کیا گیا ہے). -### Owner Update Fee: Deep Dive +### مالک کی تازہ کاری کی فیس: ڈیپ ڈائیو -> Note: Curation on Arbitrum does not use bonding curves. Learn more about Arbitrum [here](/arbitrum/arbitrum-faq/). +> نوٹ: Arbitrum پر کیوریشن بانڈنگ کریوز کا استعمال نہیں کرتا ہے۔ Arbitrum کے بارے میں مزید جانیں [یہاں](/arbitrum/arbitrum-faq/). -An update requires GRT to be migrated from the old version of the subgraph to the new version. This means that for every update, a new bonding curve will be created (more on bonding curves [here](/network/curating#bonding-curve-101)). +ایک اپ ڈیٹ کے لیے GRT کو سب گراف کے پرانے ورژن سے نئے ورژن میں منتقل کرنے کی ضرورت ہوتی ہے۔ اس کا مطلب یہ ہے کہ ہر اپ ڈیٹ کے لیے، ایک نیا بانڈنگ کریو بنایا جائے گا (بانڈنگ منحنی خطوط پر مزید [یہاں](/network/curating#bonding-curve-101)). -The new bonding curve charges the 1% curation tax on all GRT being migrated to the new version. The owner must pay 50% of this or 1.25%. The other 1.25% is absorbed by all the curators as a fee. This incentive design is in place to prevent an owner of a subgraph from being able to drain all their curator's funds with recursive update calls. If there is no curation activity, you will have to pay a minimum of 100 GRT in order to signal your own subgraph.
+نیا بانڈنگ کریو نئے ورژن میں منتقل ہونے والے تمام GRT پر 1% کیوریشن ٹیکس وصول کرتا ہے۔ مالک کو اس کا 50% یا 1.25% ادا کرنا ہوگا۔ باقی 1.25% تمام کیوریٹرز فیس کے طور پر جذب کر لیتے ہیں۔ یہ ترغیبی ڈیزائن ایک سب گراف کے مالک کو اپنے کیوریٹر کے تمام فنڈز کو بار بار آنے والی اپ ڈیٹ کالوں کے ذریعے نکالنے سے روکنے کے لیے بنایا گیا ہے۔ اگر کوئی کیوریشن سرگرمی نہیں ہے، تو آپ کو اپنا سب گراف سگنل کرنے کے لیے کم از کم 100 GRT ادا کرنا ہوگا. چلو ایک مثال بناتے ہیں، یہ صرف اس صورت میں ہے جب آپ کا سب گراف فعال طور پر تیار کیا جا رہا ہے: - 100,000 GRT کا سگنل سب گراف کے v1 پر خودکار منتقلی کا استعمال کرتے ہوئے کیا جاتا ہے -- Owner updates to v2. 100,000 GRT is migrated to a new bonding curve, where 97,500 GRT get put into the new curve and 2,500 GRT is burned -- The owner then has 1250 GRT burned to pay for half the fee. The owner must have this in their wallet before the update, otherwise, the update will not succeed. This happens in the same transaction as the update. +- V2 میں مالک کی تازہ کاری۔ 100,000 GRT کو ایک نئے بانڈنگ وکر میں منتقل کیا جاتا ہے، جہاں 97,500 GRT کو نئے وکر میں ڈال دیا جاتا ہے اور 2,500 GRT کو جلا دیا جاتا ہے +- اس کے بعد مالک نے آدھی فیس ادا کرنے کے لیے 1250 GRT جلا دیے ہیں۔ اپ ڈیٹ کرنے سے پہلے مالک کے پاس اپنے والیٹ میں یہ ہونا ضروری ہے، بصورت دیگر، اپ ڈیٹ کامیاب نہیں ہوگا۔ یہ اسی ٹرانزیکشن میں ہوتا ہے جیسا کہ اپ ڈیٹ ہوتا ہے. -_While this mechanism is currently live on the network, the community is currently discussing ways to reduce the cost of updates for subgraph developers._ +_جبکہ یہ طریقہ کار فی الحال نیٹ ورک پر رواں ہے، کمیونٹی فی الحال سب گراف ڈویلپرز کے لیے اپ ڈیٹس کی لاگت کو کم کرنے کے طریقوں پر تبادلہ خیال کر رہی ہے._ ### سب گراف کے مستحکم ورزن کو برقرار رکھنا -If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. +اگر آپ اپنے سب گراف میں بہت زیادہ تبدیلیاں کر رہے ہیں، تو اسے مسلسل اپ ڈیٹ کرنا اور اپ ڈیٹ کی لاگت کو سامنے رکھنا اچھا خیال نہیں ہے۔ اپنے سب گراف کے ایک مستحکم اور مستقل ورژن کو برقرار رکھنا بہت ضروری ہے، نہ صرف لاگت کے نقطہ نظر سے بلکہ اس لیے بھی کہ انڈیکسرز اپنے مطابقت پذیری کے اوقات میں اعتماد محسوس کر سکیں۔ جب آپ اپ ڈیٹ کا ارادہ رکھتے ہیں تو انڈیکسرز کو جھنڈا لگانا چاہیے تاکہ انڈیکسر کی مطابقت پذیری کے اوقات متاثر نہ ہوں۔ ڈسکارڈ پر بلا جھجھک [#Indexers چینل](https://discord.gg/JexvtHa7dq) سے فائدہ اٹھائیں تاکہ انڈیکسرز کو یہ معلوم ہو سکے کہ آپ اپنے سب گرافس کو کب ورژن بنا رہے ہیں. -Subgraphs are open APIs that external developers are leveraging. Open APIs need to follow strict standards so that they do not break external developers' applications. In The Graph Network, a subgraph developer must consider Indexers and how long it takes them to sync a new subgraph **as well as** other developers who are using their subgraphs. 
+سب گراف اوپن APIs ہیں جن کا بیرونی ڈویلپر فائدہ اٹھا رہے ہیں۔ اوپن APIs کو سخت معیارات پر عمل کرنے کی ضرورت ہے تاکہ وہ بیرونی ڈویلپرز کی ایپلیکیشنز کو نہ توڑیں۔ گراف نیٹ ورک میں، ایک سب گراف ڈویلپر کو انڈیکسرز پر غور کرنا چاہیے اور یہ کہ ایک نئے سب گراف کو ** اور ساتھ ہی** دوسرے ڈویلپرز جو اپنے سب گرافس استعمال کر رہے ہیں ہم آہنگ ہونے میں کتنا وقت لگتا ہے. ### سب گراف کے میٹا ڈیٹا کو اپ ڈیٹ کرنا آپ نیا ورزن شائع کیے بغیر اپنے سب گراف کا میٹا ڈیٹا اپڈیٹ کر سکتے ہیں. میٹا ڈیٹا میں سب گراف کا نام، تصویر، تفصیل، ویب سائٹ URL, سورس کوڈ URL, اور اقسام شامل ہیں. ڈویلپرز سب گراف سٹوڈیو میں اپنے سب گراف کی تفصیلات اپڈیٹ کر کے یہ کر سکتے ہیں جہاں آپ تمام قابل اطلاق فیلڈز میں ترمیم کر سکتے ہیں. -Make sure **Update Subgraph Details in Explorer** is checked and click on **Save**. If this is checked, an on-chain transaction will be generated that updates subgraph details in the Explorer without having to publish a new version with a new deployment. +یقینی بنائیں کہ **ایکسپلورر میں سب گراف کی تفصیلات اپ ڈیٹ کریں** کو چیک کیا گیا ہے اور **محفوظ کریں** پر کلک کریں۔ اگر اس کی جانچ پڑتال کی جاتی ہے تو، ایک آن چین ٹرانزیکشن تیار کیا جائے گا جو ایکسپلورر میں سب گراف کی تفصیلات کو نئی تعیناتی کے ساتھ نیا ورژن شائع کیے بغیر اپ ڈیٹ کرتا ہے. ## سب گراف کو گراف نیٹ ورک پر تعینات کرنے کے بہترین طریقے 1. سب گراف ڈیولپمنٹ کے لیے ENS نام کا فائدہ اٹھانا: -- Set up your ENS [here](https://app.ens.domains/) -- Add your ENS name to your settings [here](https://thegraph.com/explorer/settings?view=display-name). +- اپنا ENS [یہاں](https://app.ens.domains/) سیٹ کریں +- اپنی ترتیبات میں اپنا ENS نام شامل کریں [یہاں](https://thegraph.com/explorer/settings?view=display-name). 2. جتنی زیادہ آپ کی پروفائل بہری ہے گی, اتنے ہی زیادہ امکانات ہوں گے آپ کے سب گراف کے انڈیکس اور کیوریٹ ہونے کے. ## گراف نیٹ ورک پر سب گراف کو فرسودہ کرنا -Follow the steps [here](/managing/deprecating-a-subgraph) to deprecate your subgraph and remove it from The Graph Network. +اپنے سب گراف کو فرسودہ کرنے اور اسے گراف نیٹ ورک سے ہٹانے کے لیے [یہاں](/managing/deprecating-a-subgraph) کے مراحل پر عمل کریں. ## سب گراف کا کیوری کرنا + گراف نیٹ ورک پر بلنگ -The hosted service was set up to allow developers to deploy their subgraphs without any restrictions. +ہوسٹڈ سروس ڈویلپرز کو بغیر کسی پابندی کے اپنے سب گرافس کو تعینات کرنے کی اجازت دینے کے لیے ترتیب دی گئی تھی. -In order for The Graph Network to truly be decentralized, query fees have to be paid as a core part of the protocol's incentives. For more information on subscribing to APIs and paying the query fees, check out billing documentation [here](/billing/). +گراف نیٹ ورک کو صحیح معنوں میں ڈیسنٹرالا ئزڈ بنانے کے لیے، کیوری کی فیس کو پروٹوکول کی ترغیبات کے بنیادی حصے کے طور پر ادا کرنا ہوگا۔ APIs کو سبسکرائب کرنے اور کیوری فیس ادا کرنے کے بارے میں مزید معلومات کے لیے، بلنگ دستاویزات [یہاں](/billing/) دیکھیں. ### نیٹ ورک پر کیوری کی فیس کا تخمینہ لگائیں اگرچہ یہ پروڈکٹ UI میں لائیو خصوصیت نہیں ہے، آپ فی کیوری اپنا زیادہ سے زیادہ بجٹ مقرر کر سکتے ہیں اس رقم کو لے کر جو آپ ہر ماہ ادا کرنا چاہتے ہیں اور اسے اپنے متوقع کیوری کی رقم سے تقسیم کر سکتے ہیں. -While you get to decide on your query budget, there is no guarantee that an Indexer will be willing to serve queries at that price. If a Gateway can match you to an Indexer willing to serve a query at, or lower than, the price you are willing to pay, you will pay the delta/difference of your budget **and** their price. As a consequence, a lower query price reduces the pool of Indexers available to you, which may affect the quality of service you receive. 
It's beneficial to have high query fees, as that may attract curation and big-name Indexers to your subgraph. +جب آپ اپنے کیوری کے بجٹ کا فیصلہ کرتے ہیں، تو اس بات کی کوئی گارنٹی نہیں ہے کہ انڈیکسر اس قیمت پر کیوریز پیش کرنے کے لیے تیار ہوگا۔ اگر کوئی گیٹ وے آپ کو کسی ایسے انڈیکسر سے ملا سکتا ہے جو آپ جس قیمت کی ادائیگی کے لیے تیار ہیں، یا اس سے کم قیمت پر کیوری پیش کرنے کے لیے تیار ہے، تو آپ اپنے بجٹ کے ڈیلٹا/فرق **اور** ان کی قیمت ادا کریں گے۔ نتیجے کے طور پر، کم کیوری کی قیمت آپ کے لیے دستیاب انڈیکسرز کے پول کو کم کر دیتی ہے، جو آپ کو موصول ہونے والی سروس کے معیار کو متاثر کر سکتی ہے۔ کیوریز کی زیادہ فیس لینا فائدہ مند ہے، کیونکہ یہ آپ کے سب گراف میں کیوریشن اور بڑے نام کے انڈیکسرز کو راغب کر سکتا ہے. یاد رکھیں کہ یہ ایک متحرک اور بڑھتی ہوئی مارکیٹ ہے، لیکن آپ اس کے ساتھ کیسے تعامل کرتے ہیں یہ آپ کے اختیار میں ہے۔ پروٹوکول یا گیٹ ویز میں کوئی زیادہ سے زیادہ یا کم از کم قیمت نہیں بتائی گئی ہے۔ مثال کے طور پر، آپ ذیل میں نیٹ ورک پر (فی ہفتہ کی بنیاد پر) چند ڈیپ کے ذریعے ادا کی گئی قیمت کو دیکھ سکتے ہیں۔ آخری کالم دیکھیں، جو GRT میں کیوری کی فیس دکھاتا ہے. @@ -215,11 +215,11 @@ While you get to decide on your query budget, there is no guarantee that an Inde ## اضافی وسائل -If you're still confused, fear not! Check out the following resources or watch our video guide on upgrading subgraphs to the decentralized network below: +اگر آپ اب بھی الجھن میں ہیں، تو ڈرو نہیں! درج ذیل وسائل کو دیکھیں یا ذیل میں ڈیسنٹرالائزڈ نیٹ ورک میں سب گرافس کو اپ گریڈ کرنے کے بارے میں ہماری ویڈیو گائیڈ دیکھیں: -- [The Graph Network Contracts](https://github.com/graphprotocol/contracts) -- [Curation Contract](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - the underlying contract that the GNS wraps around +- [گراف نیٹ ورک کنٹریکٹس](https://github.com/graphprotocol/contracts) +- [کیوریشن کنٹریکٹ](https://github.com/graphprotocol/contracts/blob/dev/contracts/curation/Curation.sol) - بنیادی کنٹریکٹ جسے GNS لپیٹتا ہے - Address - `0x8fe00a685bcb3b2cc296ff6ffeab10aca4ce1538` -- [Subgraph Studio documentation](/deploying/subgraph-studio) +- [سب گراف اسٹوڈیو دستاویزات](/deploying/subgraph-studio) diff --git a/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx index 35067134e210..fcf3b70e080e 100644 --- a/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/ur/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: سب گراف سٹوڈیو پر سب گراف تعینات کرنا --- -> یقینی بنائیں کہ آپ کا سب گراف جس نیٹ ورک سے ڈیٹا انڈیکس کر رہا ہے وہ ڈیسینٹرالائزڈ نیٹ ورک پر [تعاون یافتہ](/developing/supported-chains) ہے. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294).
اپنے سب گراف کو سب گراف سٹوڈیو میں تعینات کرنے کے یہ اقدامات ہیں: diff --git a/website/pages/ur/deploying/hosted-service.mdx b/website/pages/ur/deploying/hosted-service.mdx index c5d033025fc4..f9fafebee077 100644 --- a/website/pages/ur/deploying/hosted-service.mdx +++ b/website/pages/ur/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / مثال کا سب گراف Dani گرانٹ کے گریوٹی کنٹریکٹ پر مبنی ہے جو صارف کے اوتاروں کا انتظام کرتا ہے اور `NewGravatar` یا `UpdateGravatar` ایونٹس کو خارج کرتا ہے جب بھی اوتار بنائے یا اپ ڈیٹ ہوتے ہیں۔ سب گراف گراف نوڈ اسٹور پر `Gravatar` اداروں کو لکھ کر اور اس بات کو یقینی بنا کر کہ یہ واقعات کے مطابق اپ ڈیٹ ہو کر ان ایونٹس کو سنبھالتا ہے۔ بہتر طور پر یہ سمجھنے کے لیے کہ آپ کے سمارٹ کنٹریکٹ میں سے کن ایونٹس، میپنگز وغیرہ پر توجہ دینا ہے، [سب گراف مینی فیسٹ](/developing/creating-a-subgraph#the-subgraph-manifest) پر جاری رکھیں. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## ہوسٹڈ سروس پر سپورٹڈ نیٹ ورکس آپ سپورٹڈ نیٹ ورکس کی فہرست [یہاں](/developing/supported-networks) تلاش کر سکتے ہیں. diff --git a/website/pages/ur/deploying/subgraph-studio.mdx b/website/pages/ur/deploying/subgraph-studio.mdx index ddc17b4d7ba1..74215597a62c 100644 --- a/website/pages/ur/deploying/subgraph-studio.mdx +++ b/website/pages/ur/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ title: سب گراف سٹوڈیو کیسے استعمال کرتے ہیں 1. اپنے والیٹ سے سائن ان کریں - آپ یہ MetaMask یا WalletConnect کے ذریعے کر سکتے ہیں 1. ایک بار جب آپ سائن ان کریں گے، آپ کو اپنے اکاؤنٹ کے ہوم پیج پر اپنی منفرد تعیناتی کلید نظر آئے گی۔ یہ آپ کو اپنے سب گراف شائع کرنے یا اپنی API کیز + بلنگ کا نظم کرنے کی اجازت دے گا۔ آپ کے پاس ایک منفرد تعیناتی کلید ہوگی جسے دوبارہ تیار کیا جا سکتا ہے اگر آپ کو لگتا ہے کہ اس سے سمجھوتہ کیا گیا ہے. -## سب گراف سٹوڈیو میں آپ اپنا سب گراف کیسے بنا سکتے ہیں +## How to Create a Subgraph in Subgraph Studio -بہترین حصہ! 
جب آپ پہلی بار سب گراف بناتے ہیں، تو آپ کو پُر کرنے کی ہدایت کی جائے گی: - -- آپ کے سب گراف کا نام -- تصویر -- تفصیل -- اقسام (جیسے `DeFi`, `NFTs`, `Governance`) -- ویب سائٹ + ## گراف نیٹ ورک کے ساتھ سب گراف مطابقت diff --git a/website/pages/ur/developing/creating-a-subgraph.mdx b/website/pages/ur/developing/creating-a-subgraph.mdx index cb2cb835582c..4db28da404c5 100644 --- a/website/pages/ur/developing/creating-a-subgraph.mdx +++ b/website/pages/ur/developing/creating-a-subgraph.mdx @@ -12,7 +12,7 @@ title: سب گراف بنانا - `schema.graphql`: ایک GraphQL اسکیما جو اس بات کی وضاحت کرتا ہے کہ آپ کے سب گراف کے لیے کون سا ڈیٹا محفوظ ہے، اور GraphQL کے ذریعے اسے کیوری کیسے کیا جائے -- `AssemblyScript Mappings`: [اسمبلی اسکرپٹ](https://github.com/AssemblyScript/assemblyscript) کوڈ جو ایونٹ کے ڈیٹا سے آپ کے اسکیما کی اینٹیٹیز میں تبدیل کرتا ہے (جیسے `mapping.ts` اس ٹیوٹوریل میں) +- `AssemblyScript Mappings`: [اسمبلی اسکرپٹ](https://github.com/AssemblyScript/assemblyscript) کوڈ جو ایونٹ کے ڈیٹا سے آپ کے اسکیما کی ہستیوں میں تبدیل کرتا ہے (جیسے `mapping.ts` اس ٹیوٹوریل میں) > گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر اپنا سب گراف استعمال کرنے کے لیے، آپ کو [ایک API کلید بنانا ہوگا](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key)۔ یہ تجویز کیا جاتا ہے کہ آپ کم از کم [10,000 جی آر ٹی](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network) کے ساتھ اپنے سب گراف میں [سگنل شامل کریں](/network/curating/#how-to-signal). @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: مینی فیسٹ کے لیے اپ ڈیٹ کرنے کے لیے اہم اندراجات یہ ہیں: -- `description`: سب گراف کی پڑھنے کے قابل ایک وضاحت. یہ تفصیل گراف ایکسپلورر کے ذریعہ اس وقت ظاہر ہوتی ہے جب سب گراف کو ہوسٹڈ سروس پر تعینات کیا جاتا ہے. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: ریپوزٹری کا URL جہاں سب گراف مینی فیسٹ پایا جا سکتا ہے. یہ گراف ایکسپلورر کے ذریعہ بھی ظاہر ہوتا ہے. @@ -146,6 +154,10 @@ dataSources: - `dataSources.source.startBlock`: بلاک کا اختیاری نمبر جس سے ڈیٹا سورس انڈیکس کرنا شروع کرتا ہے. زیادہ تر معاملات میں، ہم اس بلاک کو استعمال کرنے کا مشورہ دیتے ہیں جس میں کنٹریکٹ بنایا گیا تھا. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: وہ اینٹیٹیز جنہیں ڈیٹا سورس اسٹور کو لکھتا ہے۔ schema.graphql فائل میں ہر اینٹیٹی کے لیے اسکیما کی وضاحت کی گئی ہے. - `dataSources.mapping.abis`: سورس کنٹریکٹ کے لیے ایک یا زیادہ ABI فائلیں اور ساتھ ہی کسی دوسرے سمارٹ کنٹریکٹ کے لیے جن کے ساتھ آپ میپنگ کے اندر سے تعامل کرتے ہیں. 
@@ -156,7 +168,7 @@ dataSources: - `dataSources.mapping.blockHandlers`: ان بلاکس کی فہرست بناتا ہے جن پر یہ سب گراف رد عمل ظاہر کرتا ہے اور میپنگ میں ہینڈلرز کو چلانے کے لیے جب ایک بلاک کو چین میں شامل کیا جاتا ہے. فلٹر کے بغیر، بلاک ہینڈلر ہر بلاک کو چلایا جائے گا. ایک اختیاری کال فلٹر ہینڈلر کو `kind: call` کے ساتھ ایک `filter` فیلڈ شامل کرکے فراہم کیا جاسکتا ہے. یہ صرف ہینڈلر کو چلائے گا اگر بلاک میں ڈیٹا سورس کنٹریکٹ پر کم از کم ایک کال ہو. -ایک واحد سب گراف متعدد سمارٹ کنٹریکٹ سے ڈیٹا کو انڈیکس کر سکتا ہے. ہر کنٹریکٹ کے لیے ایک اندراج شامل کریں جس سے ڈیٹا کو `dataSources` array میں انڈیکس کرنے کی ضرورت ہے. +ایک واحد سب گراف متعدد سمارٹ کنٹریکٹ سے ڈیٹا کو انڈیکس کر سکتا ہے. ہر کنٹریکٹ کے لیے ایک اندراج شامل کریں جس سے ڈیٹا کو `dataSources` ایرے میں انڈیکس کرنے کی ضرورت ہے. بلاک کے اندر ڈیٹا سورس کے لیے محرکات درج ذیل عمل کا استعمال کرتے ہوئے ترتیب دیے گئے ہیں: @@ -242,6 +254,7 @@ type GravatarDeclined @entity { | `String` | `string` ویلیوز کے لیے اسکیلر. خالی حروف تعاون یافتہ نہیں ہیں اور خود بخود ہٹا دیے جاتے ہیں. | | `Boolean` | `Boolean` ویلیوز کے لیے اسکیلر. | | `Int` | GraphQL spec `Int` کی وضاحت کرتا ہے جس کا سائز 32 bytes ہے. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | بڑے integers۔ Ethereum کی `uint32`، `int64`، `uint64`، ..., `uint256` اقسام کے لیے استعمال کیا جاتا ہے. نوٹ: `uint32` کے نیچے ہر چیز، جیسے `int32`، `uint24` یا `int8` کو `i32` کے طور پر دکھایا گیا ہے. | | `BigDecimal` | `BigDecimal` اعلی درستگی والے اعشاریہ ایک significand اور ایک exponent کے طور پر پیش کیا جاتے ہہیں. Exponent رینج −6143 سے +6144 تک ہے۔ 34 سگنیفیکینڈ ہندسوں پر rounded کیا گیا۔. | @@ -770,6 +783,8 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { ### معاون فلٹرز +#### کال فلٹر + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### پولنگ فلٹر + +> **Requires `specVersion` >= 0.0.8** + +> **نوٹ:** پولنگ فلٹرز صرف ڈیٹا سورس آف `kind: ethereum` پر دستیاب ہیں. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +متعین ہینڈلر کو ہر `n` بلاکس کے لیے ایک بار بلایا جائے گا، جہاں `n` `every` فیلڈ میں فراہم کردہ قدر ہے۔ یہ ترتیب سب گراف کو باقاعدہ بلاک وقفوں پر مخصوص آپریشن کرنے کی اجازت دیتی ہے. + +#### ونس فلٹر + +> **Requires `specVersion` >= 0.0.8** + +> **نوٹ:** ایک بار جب فلٹرز صرف ڈیٹا سورس آف `kind: ethereum` پر دستیاب ہوتے ہیں. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +ایک بار فلٹر کے ساتھ متعین ہینڈلر کو دوسرے تمام ہینڈلرز کے چلنے سے پہلے صرف ایک بار کال کیا جائے گا۔ یہ کنفیگریشن سب گراف کو انڈیکسنگ کے آغاز میں مخصوص کاموں کو انجام دیتے ہوئے، ہینڈلر کو ابتدائیہ ہینڈلر کے طور پر استعمال کرنے کی اجازت دیتی ہے. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### میپنگ فنکشن میپنگ فنکشن کو اس کی واحد آرگومینٹ کے طور پر ایک `ethereum.Block` ملے گا۔ ایونٹس کے لیے میپنگ کے فنکشنز کی طرح، یہ فنکشن اسٹور میں موجود سب گراف ہستیوں تک رسائی حاصل کر سکتا ہے، سمارٹ کنٹریکٹس کو کال کر سکتا ہے اور ہستیوں کو تخلیق یا اپ ڈیٹ کر سکتا ہے. 
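For illustration, a minimal block handler mapping might look like the sketch below. It assumes a hypothetical `Block` entity with `number` and `timestamp` fields defined in `schema.graphql`; the handler name matches the `handleBlock` handler referenced in the manifest snippets above.

```typescript
import { ethereum } from '@graphprotocol/graph-ts'
// Hypothetical `Block` entity generated from schema.graphql (assumed for this sketch)
import { Block } from '../generated/schema'

export function handleBlock(block: ethereum.Block): void {
  // Use the block hash as the entity ID and persist a few block fields
  let entity = new Block(block.hash.toHex())
  entity.number = block.number
  entity.timestamp = block.timestamp
  entity.save()
}
```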
@@ -934,6 +988,8 @@ _meta { ### موجودہ سب گرافس پر گرافٹنگ +> **نوٹ:** ابتدائی طور پر گراف نیٹ ورک میں اپ گریڈ کرتے وقت گرافٹنگ استعمال کرنے کی سفارش نہیں کی جاتی ہے۔ مزید جانیں [یہاں](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + جب سب گراف کو پہلی بار تعینات کیا جاتا ہے، تو یہ متعلقہ چین کے جینیسس بلاک (یا ہر ڈیٹا سورس کے ساتھ بیان کردہ `startBlock` پر) ایوینٹس کو انڈیکس کرنا شروع کرتا ہے. کچھ حالات میں; موجودہ سب گراف سے ڈیٹا کو دوبارہ استعمال کرنا اور بعد کے بلاک میں انڈیکس کرنا شروع کرنا فائدہ مند ہے۔ انڈیکسنگ کے اس موڈ کو _گرافٹنگ_ کہا جاتا ہے. گرافٹنگ, مثال کے طور پر، ڈیویلاپمنٹ کے دوران میپنگ میں ماضی کی سادہ غلطیوں کو تیزی سے حاصل کرنے کے لیے یا موجودہ سب گراف کے ناکام ہونے کے بعد اسے عارضی طور پر دوبارہ کام کرنے کے لیے مفید ہے. سب گراف کو بیس سب گراف پر اس وقت گرافٹ کیا جاتا ہے جب `subgraph.yaml` میں سب گراف مینی فیسٹ میں اوپر کی سطح پر `graft` بلاک ہوتا ہے: @@ -963,7 +1019,7 @@ graft: ## فائل ڈیٹا سورسز -فائل ڈیٹا سورسز ایک مضبوط، قابل توسیع طریقے سے، IPFS سے شروع کرتے ہوئے، انڈیکسنگ کے دوران آف چین ڈیٹا تک رسائی کے لیے ایک نئی سب گراف کی فعالیت ہے. +فائل ڈیٹا کے ذرائع ایک مضبوط، قابل توسیع طریقے سے انڈیکسنگ کے دوران آف چین ڈیٹا تک رسائی کے لیے ایک نئی سب گراف کی فعالیت ہے۔ فائل ڈیٹا کے ذرائع IPFS اور Arweave سے فائلیں لانے میں معاونت کرتے ہیں. > یہ آف چین ڈیٹا کی تعییناتی انڈیکسنگ کے ساتھ ساتھ صوابدیدی HTTP سے حاصل کردہ ڈیٹا کے ممکنہ تعارف کی بنیاد بھی رکھتا ہے. @@ -975,7 +1031,7 @@ graft: > یہ موجودہ `ipfs.cat` API کی جگہ لے لیتا ہے -### Upgrade guide +### اپ گریڈ گائیڈ #### `graph-ts` اور `graph-cli` کو اپ ڈیٹ کریں @@ -1030,7 +1086,7 @@ type TokenMetadata @entity { > آپ پیرنٹ ہستیوں کو فلٹر کرنے کے لیے [نیسٹڈ فلٹرز](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) استعمال کر سکتے ہیں ان نیسٹڈ اداروں کی بنیاد پر. -#### `kind: file/ipfs` کے ساتھ ایک نیا ٹیمپلیٹڈ ڈیٹا ماخذ شامل کریں +#### `kind: file/ipfs` یا `kind: file/arweave` کے ساتھ ایک نیا ٹیمپلیٹڈ ڈیٹا ماخذ شامل کریں یہ ڈیٹا سورس ہے جو دلچسپی کی فائل کی شناخت ہونے پر پیدا کیا جائے گا. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { اب آپ چین پر مبنی ہینڈلرز کے عمل کے دوران فائل ڈیٹا کے ذرائع بنا سکتے ہیں: - خود کار طریقے سے تیار کردہ `templates` سے ٹیمپلیٹ درآمد کریں -- `TemplateName.create(cid: string)` کو میپنگ کے اندر سے کال کریں، جہاں cid ایک درست IPFS مواد شناخت کنندہ ہے +- میپنگ کے اندر سے `TemplateName.create(cid: string)` کو کال کریں، جہاں cid IPFS یا Arweave کے لیے مواد کا ایک درست شناخت کنندہ ہے + +IPFS کے لیے، گراف نوڈ [v0 اور v1 مواد کے شناخت کنندگان](https://docs.ipfs.tech/concepts/content-addressing/) کو سپورٹ کرتا ہے، اور ڈائرکٹریز کے ساتھ مواد کی شناخت کرنے والوں کو سپورٹ کرتا ہے (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> فی الحال گراف نوڈ [v0 اور v1 مواد کے شناخت کنندگان](https://docs.ipfs.tech/concepts/content-addressing/) کو سپورٹ کرتا ہے، اور ڈائریکٹریز کے ساتھ مواد کے شناخت کنندگان (جیسے `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). 
Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). مثال: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -یہ ایک نیا فائل ڈیٹا کا ذریعہ بنائے گا، جو گراف نوڈ کے کنفیگر کردہ IPFS اینڈ پوائنٹ کو پول کرے گا، اگر یہ نہیں ملا تو دوبارہ کوشش کرے گا۔ جب فائل مل جائے گی، فائل ڈیٹا سورس ہینڈلر کو عمل میں لایا جائے گا. +یہ ایک نیا فائل ڈیٹا سورس بنائے گا، جو گراف نوڈ کے کنفیگر کردہ آئی پی ایف ایس یا Arweave اینڈ پوائنٹ کو پول کرے گا، اگر یہ نہ ملا تو دوبارہ کوشش کریں۔ جب فائل مل جائے گی، فائل ڈیٹا سورس ہینڈلر کو عمل میں لایا جائے گا. یہ مثال CID کو پیرنٹ `Token` ہستی اور نتیجے میں `TokenMetadata` ہستی کے درمیان تلاش کے طور پر استعمال کر رہی ہے. diff --git a/website/pages/ur/developing/developer-faqs.mdx b/website/pages/ur/developing/developer-faqs.mdx index 903c4ad79513..3565c0b6e593 100644 --- a/website/pages/ur/developing/developer-faqs.mdx +++ b/website/pages/ur/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } فی الحال، ڈیپ کے لیے تجویز کردہ طریقہ یہ ہے کہ کلید کو فرنٹ اینڈ میں شامل کیا جائے اور اسے اختتامی صارفین کے سامنے لایا جائے۔ اس نے کہا، آپ اس کلید کو میزبان نام تک محدود کر سکتے ہیں، جیسے _yourdapp.io_ اور سب گراف۔ گیٹ وے فی الحال ایج اور نوڈ گیٹ وے کی ذمہ داری کا حصہ بدسلوکی پر نظر رکھنا اور بدسلوکی والے کلائنٹس سے ٹریفک کو روکنا ہے. -## 25. میں ہوسٹڈ سروس پر اپنا موجودہ سب گراف تلاش کرنے کے لیے کہاں جاؤں؟ +## 25. Where do I go to find my current subgraph on the hosted service? سب گراف تلاش کرنے کے لیے ہوسٹڈ سروس کی طرف جائیں جو آپ یا دوسروں نے ہوسٹڈ سروس میں تعینات کیے ہیں۔ آپ اسے [یہاں](https://thegraph.com/hosted-service) تلاش کر سکتے ہیں۔ -## 26. کیا ہوسٹڈ سروس کیوری فیس لینا شروع کر دے گا؟ +## 26. Will the hosted service start charging query fees? گراف ہوسٹڈ سروس کے لیے کبھی بھی پیسے نہیں لے گا۔ گراف ایک ڈیسینٹرالائزڈ پروٹوکول ہے، اور سینٹرالائزڈ سروسز کے لیے پیسے لینا گراف کی اقدار کے ساتھ موافق نہیں ہے۔ ہوسٹڈ سروس ہمیشہ سے ہی ایک عارضی قدم ہوتا ہے تاکہ ڈیسینترالائزڈ نیٹ ورک تک پہنچنے میں مدد کی جا سکے۔ ڈویلپرز کے پاس ڈیسینٹرالائزڈ نیٹ ورک میں اپ گریڈ کرنے کے لیے کافی وقت ہوگا کیونکہ وہ آرام دہ ہیں۔ -## 27. ہوسٹڈ سروس کب بند کی جائے گی؟ - -ہوسٹڈ سروس 2023 میں بند ہو جائے گی۔ اعلان کی بلاگ پوسٹ [یہاں](https://thegraph.com/blog/sunsetting-hosted-service) پڑھیں۔ تمام ڈیپس جو ہوسٹڈ سروس استعمال کر رہی ہیں ان کو ڈیسینٹرالائزڈ نیٹ ورک پر منتقل ہونے کی ترغیب دی جاتے ہے۔ مائیگریشن گرانٹس ڈویلپرز کے لیے اپنے سب گراف کو منتقل کرنے میں مدد کے لیے دستیاب ہیں۔ اگر آپ کا ڈیپ سب گراف اپ گریڈ کر رہا ہے آپ [یہاں](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com) درخواست دے سکتے ہیں۔ - -## 28. میں مین نیٹ پر سب گراف کیسے اپ ڈیٹ کروں؟ +## 27. How do I update a subgraph on mainnet? 
اگر آپ سب گراف ڈویلپر ہیں، تو آپ CLI کا استعمال کرتے ہوئے اپنے سب گراف کے نئے ورزن کو سٹوڈیو میں اپ گریڈ کر سکتے ہیں۔ اس وقت یہ پرائیویٹ ہو جائے گا، لیکن اگر آپ اس سے خوش ہیں، تو آپ ڈیسینٹرالائزڈ گراف ایکسپلورر پر شائع کر سکتے ہیں۔ یہ آپ کے سب گراف کا ایک نیا ورزن بنا دے گا جس پر کیوریٹرز سگنل دینا شروع کر سکتے ہیں۔ diff --git a/website/pages/ur/developing/graph-ts/api.mdx b/website/pages/ur/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..d2c95c6053d8 --- /dev/null +++ b/website/pages/ur/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: اسمبلی اسکرپٹ API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +یہ صفحہ دستاویز کرتا ہے کہ سب گراف میپنگ لکھتے وقت کیا بلٹ ان APIs استعمال کیا جا سکتا ہے۔ دو قسم کے APIs باکس سے باہر دستیاب ہیں: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API حوالہ + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- مختلف قسم کے سسٹمز جیسے کہ ایتھیریم، JSON، GraphQL اور اسمبلی اسکرپٹ کے درمیان ترجمہ کرنے کے لیے نچلی سطح کے قدیم. + +### ورژنز + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| ورزن | جاری کردہ نوٹس | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### بلٹ ان اقسام + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### سٹور API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### ہستیوں کی تخلیق + +ایتھیریم ایونٹس سے ہستیوں کو بنانے کے لیے درج ذیل ایک عام نمونہ ہے. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. 
+ +دیگر ہستیوں کے ساتھ ٹکراؤ سے بچنے کے لیے ہر ایک کے پاس ایک منفرد ID ہونا ضروری ہے۔ ایونٹ کے پیرامیٹرز میں ایک منفرد شناخت کنندہ شامل کرنا کافی عام ہے جسے استعمال کیا جا سکتا ہے۔ نوٹ: ٹرانزیکشن ہیش کو ID کے طور پر استعمال کرنے سے یہ فرض ہوتا ہے کہ ایک ہی ٹرانزیکشن میں کوئی اور ایونٹ اس ہیش کے ساتھ ID کے طور پر نہیں بنتا ہے. + +#### اسٹور سے ہستیوں کو لوڈ کرنا + +اگر کوئی ہستی پہلے سے موجود ہے تو اسے اسٹور سے درج ذیل کے ساتھ لوڈ کیا جا سکتا ہے: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### بلاک کے ساتھ تخلیق کردہ ہستیوں کو تلاش کرنا + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +اسٹور API ان ہستیوں کی بازیافت میں سہولت فراہم کرتا ہے جو موجودہ بلاک میں تخلیق یا اپ ڈیٹ کی گئی تھیں۔ اس کے لیے ایک عام صورت حال یہ ہے کہ ایک ہینڈلر کسی آن چین ایونٹ سے ٹرانزیکشن بناتا ہے، اور بعد کا ہینڈلر اس ٹرانزیکشن تک رسائی حاصل کرنا چاہتا ہے اگر یہ موجود ہو۔ ایسی صورت میں جہاں ٹرانزیکشن موجود نہیں ہے، سب گراف کو صرف یہ جاننے کے لیے ڈیٹا بیس میں جانا پڑے گا کہ ہستی موجود نہیں ہے۔ اگر سب گراف مصنف پہلے ہی جانتا ہے کہ ہستی کو اسی بلاک میں بنایا گیا ہو گا، تو loadInBlock کا استعمال اس ڈیٹا بیس راؤنڈ ٹرپ سے گریز کرتا ہے۔ کچھ سب گرافس کے لیے، یہ کھوئی ہوئی تلاشیں انڈیکسنگ کے وقت میں اہم کردار ادا کر سکتی ہیں. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### موجودہ ہستیوں کو اپ ڈیٹ کرنا + +موجودہ ہستی کو اپ ڈیٹ کرنے کے دو طریقے ہیں: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +پراپرٹیز کو تبدیل کرنا زیادہ تر معاملات میں سیدھا آگے ہے، جنریٹڈ پراپرٹی سیٹرز کی بدولت: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... 
+transfer.to = ... +transfer.amount = ... +``` + +درج ذیل دو ہدایات میں سے کسی ایک کے ساتھ پراپرٹیز کو غیر سیٹ کرنا بھی ممکن ہے: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### اسٹور سے ہستیوں کو ہٹانا + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### ایتھیریم API + +ایتھیریم API سمارٹ کنٹریکٹس، پبلک سٹیٹ ویری ایبلز، کنٹریکٹ فنکشنز، ایونٹس، ٹرانزیکشنز، بلاکس اور انکوڈنگ/ڈی کوڈنگ ایتھیریم ڈیٹا تک رسائی فراہم کرتا ہے. + +#### ایتھیریم کی اقسام کے لیے سپورٹ + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +مندرجہ ذیل مثال اس کی وضاحت کرتی ہے۔ جیسا کہ سب گراف اسکیما دیا گیا + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### ایونٹس اور بلاک/ٹرانزیکشن ڈیٹا + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`):
+
+```typescript
+class Event {
+  address: Address
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string | null
+  block: Block
+  transaction: Transaction
+  parameters: Array<EventParam>
+  receipt: TransactionReceipt | null
+}
+
+class Block {
+  hash: Bytes
+  parentHash: Bytes
+  unclesHash: Bytes
+  author: Address
+  stateRoot: Bytes
+  transactionsRoot: Bytes
+  receiptsRoot: Bytes
+  number: BigInt
+  gasUsed: BigInt
+  gasLimit: BigInt
+  timestamp: BigInt
+  difficulty: BigInt
+  totalDifficulty: BigInt
+  size: BigInt | null
+  baseFeePerGas: BigInt | null
+}
+
+class Transaction {
+  hash: Bytes
+  index: BigInt
+  from: Address
+  to: Address | null
+  value: BigInt
+  gasLimit: BigInt
+  gasPrice: BigInt
+  input: Bytes
+  nonce: BigInt
+}
+
+class TransactionReceipt {
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  blockHash: Bytes
+  blockNumber: BigInt
+  cumulativeGasUsed: BigInt
+  gasUsed: BigInt
+  contractAddress: Address
+  logs: Array<Log>
+  status: BigInt
+  root: Bytes
+  logsBloom: Bytes
+}
+
+class Log {
+  address: Address
+  topics: Array<Bytes>
+  data: Bytes
+  blockHash: Bytes
+  blockNumber: Bytes
+  transactionHash: Bytes
+  transactionIndex: BigInt
+  logIndex: BigInt
+  transactionLogIndex: BigInt
+  logType: string
+  removed: bool | null
+}
+```
+
+#### سمارٹ کنٹریکٹ اسٹیٹ تک رسائی
+
+The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block.
+
+ایک عام نمونہ اس کنٹریکٹ تک رسائی حاصل کرنا ہے جہاں سے کوئی واقعہ شروع ہوتا ہے۔ یہ مندرجہ ذیل کوڈ کے ساتھ حاصل کیا جاتا ہے:
+
+```typescript
+// Import the generated contract class and generated Transfer event class
+import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract'
+// Import the generated entity class
+import { Transfer } from '../generated/schema'
+
+export function handleTransfer(event: TransferEvent): void {
+  // Bind the contract to the address that emitted the event
+  let contract = ERC20Contract.bind(event.address)
+
+  // Access state variables and functions by calling them
+  let erc20Symbol = contract.symbol()
+}
+```
+
+`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type.
+
+As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables, a method with the same name is created automatically.
+
+کوئی بھی دوسرا کنٹریکٹ جو سب گراف کا حصہ ہے، تیار کردہ کوڈ سے درآمد کیا جا سکتا ہے اور اسے ایک درست ایڈریس کا پابند کیا جا سکتا ہے.
+
+#### واپس آنے والی کالوں کو ہینڈل کرنا
+
+If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method:
+
+```typescript
+let gravity = Gravity.bind(event.address)
+let callResult = gravity.try_gravatarToOwner(gravatar)
+if (callResult.reverted) {
+  log.info('gravatarToOwner reverted', [])
+} else {
+  let owner = callResult.value
+}
+```
+
+نوٹ کریں کہ گیتھ یا انفورا کلائنٹ سے منسلک گراف نوڈ تمام ریورٹس کا پتہ نہیں لگا سکتا، اگر آپ اس پر بھروسہ کرتے ہیں تو ہم پیراٹی کلائنٹ سے منسلک گراف نوڈ استعمال کرنے کی تجویز کرتے ہیں.
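+
+The snippet below is a hedged sketch (not from the original docs) showing how the `try_` pattern can be combined with an entity write, so that a reverted call is skipped instead of failing the handler. It assumes the generated classes from the standard Gravity example ABI and a hypothetical `GravatarOwner` entity with an `owner: Bytes!` field in the schema:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+// Assumed generated classes: the Gravity example contract and a hypothetical GravatarOwner entity
+import { Gravity, NewGravatar } from '../generated/Gravity/Gravity'
+import { GravatarOwner } from '../generated/schema'
+
+export function handleNewGravatar(event: NewGravatar): void {
+  // Bind the contract that emitted the event
+  let gravity = Gravity.bind(event.address)
+
+  // try_gravatarToOwner returns a result object instead of aborting the handler on revert
+  let callResult = gravity.try_gravatarToOwner(event.params.id)
+  if (callResult.reverted) {
+    // Skip this event rather than failing the whole subgraph
+    log.info('gravatarToOwner reverted for gravatar {}', [event.params.id.toString()])
+    return
+  }
+
+  let owner = new GravatarOwner(event.params.id.toString())
+  owner.owner = callResult.value // hypothetical owner: Bytes! field
+  owner.save()
+}
+```
+
+Returning early on `callResult.reverted` lets indexing continue instead of marking the subgraph as failed.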
+
+#### انکوڈنگ/ڈی کوڈنگ ABI
+
+Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module.
+
+```typescript
+import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts'
+
+let tupleArray: Array<ethereum.Value> = [
+  ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')),
+  ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)),
+]
+
+let tuple = tupleArray as ethereum.Tuple
+
+let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))!
+
+let decoded = ethereum.decode('(address,uint256)', encoded)
+```
+
+مزید معلومات کے لیے:
+
+- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types)
+- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi)
+- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72).
+
+### لاگنگ API
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+```
+
+The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from arguments.
+
+The `log` API includes the following functions:
+
+- `log.debug(fmt: string, args: Array<string>): void` - logs a debug message.
+- `log.info(fmt: string, args: Array<string>): void` - logs an informational message.
+- `log.warning(fmt: string, args: Array<string>): void` - logs a warning.
+- `log.error(fmt: string, args: Array<string>): void` - logs an error message.
+- `log.critical(fmt: string, args: Array<string>): void` – logs a critical message _and_ terminates the subgraph.
+
+The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value, and so on.
+
+```typescript
+log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string'])
+```
+
+#### ایک یا زیادہ اقدار کو لاگ کرنا
+
+##### ایک سنگل ویلیو لاگ کرنا
+
+In the example below, the string value "A" is passed into an array to become `['A']` before being logged:
+
+```typescript
+let myValue = 'A'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A"
+  log.info('My value is: {}', [myValue])
+}
+```
+
+##### موجودہ صف سے ایک ہی اندراج کو لاگ کرنا
+
+نیچے دی گئی مثال میں، تین ویلیوس پر مشتمل ایرے کے باوجود، آرگیومینٹ ایرے کی صرف پہلی ویلیو لاگ ان ہوتی ہے.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My value is: A" (Even though three values are passed to `log.info`)
+  log.info('My value is: {}', myArray)
+}
+```
+
+##### موجودہ ایرے سے متعدد اندراجات کو لاگ کرنا
+
+Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged.
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### موجودہ صف سے مخصوص اندراج کو لاگ کرنا
+
+صف میں ایک مخصوص قدر ظاہر کرنے کے لیے، انڈیکس قدر فراہم کی جانی چاہیے.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My third value is: C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### لاگنگ ایونٹ کی معلومات
+
+ذیل کی مثال کسی ایونٹ سے بلاک نمبر، بلاک ہیش اور ٹرانزیکشن ہیش کو لاگ کرتی ہے:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+IPFS ہیش یا پاتھ کو دیکھتے ہوئے، IPFS سے فائل کو پڑھنا اس طرح کیا جاتا ہے:
+
+```typescript
+// Put this inside an event handler in the mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// that include files in directories are also supported
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+data = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { ipfs, JSONValue, Value } from '@graphprotocol/graph-ts'
+import { Item } from '../generated/schema'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### کرپٹو API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### تبادلوں کا حوالہ ٹائپ کریں + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### ڈیٹا ماخذ میٹا ڈیٹا + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### ہستی اور ڈیٹا سورس سیاق و سباق + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/ur/developing/graph-ts/common-issues.mdx b/website/pages/ur/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..3496f69d86ba --- /dev/null +++ b/website/pages/ur/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: مشترکہ اسمبلی اسکرپٹ کے مسائل +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/ur/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/ur/developing/substreams-powered-subgraphs-faq.mdx index 02592fd21457..be2c57055f10 100644 --- a/website/pages/ur/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/ur/developing/substreams-powered-subgraphs-faq.mdx @@ -1,91 +1,91 @@ --- -title: Substreams-powered subgraphs FAQ +title: سب اسٹریمز سے چلنے والے سب گرافس FAQ --- -## What are Substreams? +## سب اسٹریمز کیا ہیں؟ Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. -Go to the [Substreams Documentation](/substreams) to learn more about Substreams. +سب سٹریمز کے بارے میں مزید جاننے کے لیے [سب سٹریمز دستاویزات](/substreams) پر جائیں. -## What are Substreams-powered subgraphs? +## سب سٹریمز سے چلنے والے سب گرافس کیا ہیں؟ -[Substreams-powered subgraphs](/cookbook/substreams-powered-subgraphs/) combine the power of Substreams with the queryability of subgraphs. When publishing a Substreams-powered Subgraph, the data produced by the Substreams transformations, can [output entity changes](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs), which are compatible with subgraph entities. +[سب اسٹریمز سے چلنے والے سب گرافس](/cookbook/substreams-powered-subgraphs/)سب اسٹریمز کی طاقت کو سب گرافس کی کیوری کے ساتھ جوڑتا ہے۔ سب اسٹریمز سے چلنے والے سب گراف کو شائع کرتے وقت، سب سٹریمز کی تبدیلیوں سے تیار کردہ ڈیٹا، [آؤٹ پٹ ہستی میں تبدیلیاں](https://github.com/streamingfast/substreams-sink-entity-changes/blob/develop/substreams-entity-change/src/tables.rs),کر سکتا ہے جو سب گراف اداروں کے ساتھ مطابقت رکھتا ہے. -If you are already familiar with subgraph development, then note that Substreams-powered subgraphs can then be queried, just as if it had been produced by the AssemblyScript transformation layer, with all the Subgraph benefits, like providing a dynamic and flexible GraphQL API. +اگر آپ سب گراف ڈیولپمنٹ سے پہلے ہی واقف ہیں، تو نوٹ کریں کہ سب سٹریمز سے چلنے والے سب گرافس کے بارے میں کیوری کیا جا سکتا ہے، بالکل اسی طرح جیسے اسے اسمبلی اسکرپٹ ٹرانسفارمیشن لیئر کے ذریعے تیار کیا گیا ہو، جیسے کہ ایک متحرک اور لچکدار GraphQL API فراہم کرنا. -## How are Substreams-powered subgraphs different from subgraphs? +## سب سٹریمز سے چلنے والے سب گرافس سب گراف سے کیسے مختلف ہیں؟ -Subgraphs are made up of datasources which specify on-chain events, and how those events should be transformed via handlers written in Assemblyscript. These events are processed sequentially, based on the order in which events happen on-chain. 
+سب گرافس ڈیٹا سورسز پر مشتمل ہوتے ہیں جو آن چین ایونٹس کی وضاحت کرتے ہیں، اور ان واقعات کو اسمبلی اسکرپٹ میں لکھے گئے ہینڈلرز کے ذریعے کیسے تبدیل کیا جانا چاہیے۔ ان واقعات پر ترتیب وار کارروائی کی جاتی ہے، اس ترتیب کی بنیاد پر جس میں ایونٹس آن چین ہوتے ہیں. -By contrast, substreams-powered subgraphs have a single datasource which references a substreams package, which is processed by the Graph Node. Substreams have access to additional granular on-chain data compared to conventional subgraphs, and can also benefit from massively parallelised processing, which can mean much faster processing times. +اس کے برعکس، سب سٹریمز سے چلنے والے سب گرافس میں ایک واحد ڈیٹا سورس ہوتا ہے جو سب سٹریم پیکیج کا حوالہ دیتا ہے، جس پر گراف نوڈ کے ذریعے کارروائی کی جاتی ہے۔ روایتی سب گرافس کے مقابلے سب سٹریمز کو اضافی دانے دار آن چین ڈیٹا تک رسائی حاصل ہے، اور یہ بڑے پیمانے پر متوازی پروسیسنگ سے بھی فائدہ اٹھا سکتے ہیں، جس کا مطلب بہت تیز پروسیسنگ کے اوقات ہو سکتا ہے. -## What are the benefits of using Substreams-powered subgraphs? +## سب سٹریمز سے چلنے والے سب گرافس استعمال کرنے کے کیا فوائد ہیں؟ -Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. +سب سٹریمز سے چلنے والے سب گراف سب سٹریمز کے تمام فوائد کو سب گراف کی کیوری کے ساتھ یکجا کرتے ہیں۔ وہ گراف میں زیادہ کمپوز ایبلٹی اور اعلی کارکردگی کا انڈیکسنگ لاتے ہیں۔ وہ ڈیٹا کے استعمال کے نئے کیسز کو بھی اہل بناتے ہیں۔ مثال کے طور پر، ایک بار جب آپ اپنا سب سٹریمزسے چلنے والا سب گراف بنا لیتے ہیں، تو آپ اپنے [سب سٹریم ماڈیولز] \(https://substreams.streamingfast.io/developers-guide/modules) کو مختلف [sinks] \(https://substreams.streamingfast.io/developers-guide/sink-targets)کو دوبارہ استعمال کر سکتے ہیں جیسے PostgreSQL، MongoDB، اور Kafka. -## What are the benefits of Substreams? +## سب سٹریمز کے فوائد کہاں ہیں؟ -There are many benefits to using Substreams, including: +سب سٹریمز کو استعمال کرنے کے بہت سے فوائد ہیں، بشمول: -- Composable: You can stack Substreams modules like LEGO blocks, and build upon community modules, further refining public data. +- کمپوز ایبل: آپ سب سٹریمز ماڈیولز جیسے LEGO بلاکس کو اسٹیک کر سکتے ہیں، اور عوامی ڈیٹا کو مزید بہتر کرتے ہوئے کمیونٹی ماڈیول بنا سکتے ہیں. -- High-performance indexing: Orders of magnitude faster indexing through large-scale clusters of parallel operations (think BigQuery). +- اعلی کارکردگی کی انڈیکسنگ: متوازی کارروائیوں کے بڑے پیمانے پر کلسٹرز کے ذریعے تیز تر انڈیکسنگ کے آرڈرز (سوچیں BigQuery). -- Sink anywhere: Sink your data to anywhere you want: PostgreSQL, MongoDB, Kafka, subgraphs, flat files, Google Sheets. +- کہیں بھی سینک: اپنے ڈیٹا کو جہاں چاہیں سینک: PostgreSQL، MongoDB، Kafka، سب گرافس، فلیٹ فائلز، Google Sheets. -- Programmable: Use code to customize extraction, do transformation-time aggregations, and model your output for multiple sinks. +- قابل پروگرام: اسے اپنی مرضی کے مطابق بنانے کے لیے کوڈ کریں، دو ٹرانسفارم ٹائم ایگریگیشنز، اور متعدد حواس کے لیے اپنے آؤٹ پٹ کو ماڈل کریں. 
-- Access to additional data which is not available as part of the JSON RPC +- اضافی ڈیٹا تک رسائی جو JSON RPC کے حصے کے طور پر دستیاب نہیں ہے -- All the benefits of the Firehose. +- Firehose کے تمام فوائد. -## What is the Firehose? +## Firehose کیا ہے؟ -Developed by [StreamingFast](https://www.streamingfast.io/), the Firehose is a blockchain data extraction layer designed from scratch to process the full history of blockchains at speeds that were previously unseen. Providing a files-based and streaming-first approach, it is a core component of StreamingFast's suite of open-source technologies and the foundation for Substreams. +[StreamingFast](https://www.streamingfast.io/) کے ذریعے تیار کردہ، Firehose ایک بلاکچین ڈیٹا نکالنے کی پرت ہے جسے شروع سے بلاکچینز کی مکمل تاریخ کو اس رفتار سے پروسیس کرنے کے لیے ڈیزائن کیا گیا ہے جو پہلے نظر نہیں آتی تھیں۔ فائلوں پر مبنی اور سٹریمنگ فرسٹ اپروچ فراہم کرنا، یہ سٹریمنگ فاسٹ کے اوپن سورس ٹیکنالوجیز کے سوٹ کا بنیادی جزو اور سب اسٹریمز کی بنیاد ہے. -Go to the [documentation](https://firehose.streamingfast.io/) to learn more about the Firehose. +Firehose کے بارے میں مزید جاننے کے لیے[documentation] (https://firehose.streamingfast.io/) پر جائیں. -## What are the benefits of the Firehose? +## Firehose کے کیا فوائد ہیں؟ -There are many benefits to using Firehose, including: +Firehose استعمال کرنے کے بہت سے فوائد ہیں، بشمول: -- Lowest latency & no polling: In a streaming-first fashion, the Firehose nodes are designed to race to push out the block data first. +- سب سے کم تاخیر اور کوئی پولنگ نہیں: اسٹریمنگ کے پہلے انداز میں، Firehose نوڈس کو پہلے بلاک ڈیٹا کو آگے بڑھانے کی دوڑ کے لیے ڈیزائن کیا گیا ہے. -- Prevents downtimes: Designed from the ground up for High Availability. +- ڈاؤن ٹائمز کو روکتا ہے: اعلی دستیابی کے لیے زمین سے ڈیزائن کیا گیا ہے. -- Never miss a beat: The Firehose stream cursor is designed to handle forks and to continue where you left off in any condition. +- کبھی بھی بیٹ مت چھوڑیں: Firehose سٹریم کرسر کو فورکس ہینڈل کرنے اور کسی بھی حالت میں وہیں سے جاری رکھنے کے لیے بنایا گیا ہے جہاں آپ نے چھوڑا تھا. -- Richest data model:  Best data model that includes the balance changes, the full call tree, internal transactions, logs, storage changes, gas costs, and more. +- امیرترین ڈیٹا ماڈل: بہترین ڈیٹا ماڈل جس میں بیلنس کی تبدیلیاں، مکمل کال ٹری، اندرونی ٹرانزیکشن، لاگز، اسٹوریج کی تبدیلیاں، گیس کی قیمتیں اور بہت کچھ شامل ہے. -- Leverages flat files: Blockchain data is extracted into flat files, the cheapest and most optimized computing resource available. +- فلیٹ فائلوں کا فائدہ اٹھاتا ہے: بلاکچین ڈیٹا کو فلیٹ فائلوں میں نکالا جاتا ہے، جو دستیاب سب سے سستا اور بہترین کمپیوٹنگ وسیلہ ہے. -## Where can developers access more information about Substreams-powered subgraphs and Substreams? +## ڈویلپرز سب سٹریمز سے چلنے والے سب گرافس اور سب سٹریمز کے بارے میں مزید معلومات کہاں تک رسائی حاصل کرسکتے ہیں؟ -The [Substreams documentation](/substreams) will teach you how to build Substreams modules. +[سب سٹریمز دستاویزات](/substreams) آپ کو سکھائے گا کہ سب سٹریمز کے ماڈیول کیسے بنائے جائیں. -The [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) will show you how to package them for deployment on The Graph. +[سب سٹریمز سے چلنے والے سب گرافس کے دستاویزات](/cookbook/substreams-powered-subgraphs/) آپ کو دکھائے گا کہ انہیں گراف پر تعیناتی کے لیے کیسے پیک کیا جائے. -## What is the role of Rust modules in Substreams? 
+## سب سٹریمز میں Rust ماڈیولز کا کیا کردار ہے؟ -Rust modules are the equivalent of the AssemblyScript mappers in subgraphs. They are compiled to WASM in a similar way, but the programming model allows for parallel execution. They define the sort of transformations and aggregations you want to apply to the raw blockchain data. +زنگ ماڈیول سب گراف میں اسمبلی اسکرپٹ میپرز کے مساوی ہیں۔ وہ اسی طرح WASM پر مرتب کیے گئے ہیں، لیکن پروگرامنگ ماڈل متوازی عمل درآمد کی اجازت دیتا ہے۔ وہ اس قسم کی تبدیلیوں اور مجموعوں کی وضاحت کرتے ہیں جسے آپ خام بلاکچین ڈیٹا پر لاگو کرنا چاہتے ہیں. -See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. +تفصیلات کے لیے [ماڈیول دستاویزات](https://substreams.streamingfast.io/developers-guide/modules) دیکھیں. -## What makes Substreams composable? +## سب سٹریمز کو کمپوز ایبل کیا بناتا ہے؟ -When using Substreams, the composition happens at the transformation layer enabling cached modules to be re-used. +سب سٹریمز کا استعمال کرتے وقت، کمپوزیشن ٹرانسفارمیشن لیئر پر ہوتی ہے جو کیشڈ ماڈیولز کو دوبارہ استعمال کرنے کے قابل بناتی ہے. -As an example, Alice can build a DEX price module, Bob can use it to build a volume aggregator for some tokens of his interest, and Lisa can combine four individual DEX price modules to create a price oracle. A single Substreams request will package all of these individual's modules, link them together, to offer a much more refined stream of data. That stream can then be used to populate a subgraph, and be queried by consumers. +مثال کے طور پر، ایلس DEX پرائس ماڈیول بنا سکتی ہے، باب اپنی دلچسپی کے کچھ ٹوکنز کے لیے حجم ایگریگیٹر بنانے کے لیے اسے استعمال کر سکتا ہے، اور لیزا قیمت اوریکل بنانے کے لیے چار انفرادی DEX قیمت ماڈیول کو جوڑ سکتی ہے۔ ایک واحد سب سٹریمز کی درخواست ان تمام افراد کے ماڈیولز کو پیک کرے گی، ان کو آپس میں جوڑ دے گی، تاکہ ڈیٹا کا بہت زیادہ بہتر سلسلہ پیش کیا جا سکے۔ اس سلسلے کو پھر سب گراف کو آباد کرنے کے لیے استعمال کیا جا سکتا ہے، اور صارفین اس سے کیوریز کر سکتے ہیں. -## How can you build and deploy a Substreams-powered Subgraph? +## آپ سب سٹریمز سے چلنے والے سب گراف کو کیسے بنا اور تعینات کر سکتے ہیں؟ -After [defining](/cookbook/substreams-powered-subgraphs/) a Substreams-powered Subgraph, you can use the Graph CLI to deploy it in [Subgraph Studio](https://thegraph.com/studio/). +سب سٹریمز سے چلنے والے سب گراف کی [defining](/cookbook/substreams-powered-subgraphs/) کے بعد، آپ گراف CLI کو [Subgraph Studio](https://thegraph.com/studio/) میں تعینات کرنے کے لیے استعمال کر سکتے ہیں. -## Where can I find examples of Substreams and Substreams-powered subgraphs? +## مجھے سب سٹریمز اور سب سٹریمز سے چلنے والے سب گرافس کی مثالیں کہاں مل سکتی ہیں؟ -You can visit [this Github repo](https://github.com/pinax-network/awesome-substreams) to find examples of Substreams and Substreams-powered subgraphs. +آپ سب سٹریمز اور سب سٹریم سے چلنے والے سب گرافس کی مثالیں تلاش کرنے کے لیے [یہ گٹ ہب ریپو](https://github.com/pinax-network/awesome-substreams) ملاحظہ کر سکتے ہیں. -## What do Substreams and Substreams-powered subgraphs mean for The Graph Network? +## گراف نیٹ ورک کے لیے سب سٹریمز اور سب سٹریمز سے چلنے والے سب گرافس کا کیا مطلب ہے؟ -The integration promises many benefits, including extremely high-performance indexing and greater composability by leveraging community modules and building on them. +انضمام بہت سے فوائد کا وعدہ کرتا ہے، بشمول انتہائی اعلی کارکردگی کی انڈیکسنگ اور کمیونٹی ماڈیولز کا فائدہ اٹھا کر اور ان پر تعمیر کرنے کے ذریعے زیادہ کمپوز ایبلٹی. 
diff --git a/website/pages/ur/developing/supported-networks.json b/website/pages/ur/developing/supported-networks.json index 5e12392b8c7d..1286c4859e34 100644 --- a/website/pages/ur/developing/supported-networks.json +++ b/website/pages/ur/developing/supported-networks.json @@ -1,5 +1,5 @@ { - "network": "Network", + "network": "نیٹ ورک", "cliName": "CLI Name", "chainId": "Chain ID", "studioAndHostedService": "Studio and Hosted Service", diff --git a/website/pages/ur/developing/supported-networks.mdx b/website/pages/ur/developing/supported-networks.mdx index 13889d91723d..dee72d2419e3 100644 --- a/website/pages/ur/developing/supported-networks.mdx +++ b/website/pages/ur/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: تعاون یافتہ نیٹ ورکس +title: سپورٹڈ نیٹ ورکس --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## گراف نوڈ -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. گراف نوڈ دوسرے پروٹوکول کو بھی انڈیکس کر سکتا ہے، فائرہوز انٹیگریشن کے ذریعے. NEAR، Arweave اور Cosmos پر مبنی نیٹ ورکس کے لیے فائرہوز انٹیگریشن بنائے گئے ہیں. diff --git a/website/pages/ur/firehose.mdx b/website/pages/ur/firehose.mdx index e9f3804f1629..ec5def6a02e5 100644 --- a/website/pages/ur/firehose.mdx +++ b/website/pages/ur/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose بلاکچین ڈیٹا پر کاروائی کرنے کے لیے فائلوں پر مبنی اور streaming-first طریقہ فراہم کرتا ہے. 
+![Firehose Logo](/img/firehose-logo.png) -Ethereum (اور بہت سی EVM chains)، NEAR، Solana، Cosmos اور Arweave کے لیے firehose integrations بنائی گئ ہیں، جن میں مزید کام جاری ہے. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -گراف نوڈ کے انٹیگریشن کو متعدد چینز کے لیے بنایا گیا ہے، اس لیے سب گرافس ڈیٹا کو فائر ہوز سے پاور پرفارمنٹ اور اسکیل ایبل انڈیکسنگ تک منتقل کر سکتے ہیں۔ فائر ہوز [سب اسٹریمز](/substreams) کو بھی طاقت دیتا ہے، جو کہ گراف کور ڈویلپرز کے ذریعہ بنائی گئی ایک نئی تبدیلی کی ٹیکنالوجی ہے. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -مزید جاننے کے لیے [firehose دستاویزات](https://firehose.streamingfast.io/) ملاحظہ کریں. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### شروع ہوا چاہتا ہے + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/ur/glossary.mdx b/website/pages/ur/glossary.mdx index b3b8e237ce6d..c6e26d302757 100644 --- a/website/pages/ur/glossary.mdx +++ b/website/pages/ur/glossary.mdx @@ -12,7 +12,7 @@ title: لغت - **سب گراف**: ایک حسب ضرورت API جو بلاکچین ڈیٹا پر بنایا گیا ہے جس سے [GraphQL](https://graphql.org/) کا استعمال کر کے کیوری کیا جا سکتا ہے۔ ڈویلپرز گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف بنا، تعینات اور شائع کر سکتے ہیں۔ اس کے بعد، انڈیکسرز سب گرافس کو انڈیکس کرنا شروع کر سکتے ہیں تاکہ انہیں سب گراف صارفین کی طرف سے کیوری کرنے کے لیے دستیاب ہو سکے. -- **ہوسٹڈ سروس**: گراف کے ڈیسینٹرالائزڈ نیٹ ورک کے طور پر سب گراف کی تعمیر اور کیوری کے لیے ایک عارضی سہاروں کی خدمت اس کی سروس کی لاگت، سروس کے معیار، اور ڈویلپر کے تجربے کو پختہ کر رہی ہے. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **انڈیکسرز**: نیٹ ورک کے شرکاء جو بلاکچینز سے ڈیٹا کو انڈیکس کرنے کے لیے انڈیکسنگ نوڈس چلاتے ہیں اور GraphQL کی کیوریز پیش کرتے ہیں. @@ -24,6 +24,8 @@ title: لغت - **انڈیکسر سیلف سٹیک**: GRT کی وہ مقدار جو انڈیکسرز ڈیسینٹرالائزڈ نیٹ ورک میں حصہ لینے کے لیے لگاتے ہیں۔ کم از کم 100,000 GRT ہے، اور کوئی اوپری حد نہیں ہے. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. 
+ - **ڈیلیگیٹرز**: نیٹ ورک کے شرکاء جو GRT کے مالک ہیں اور اپنی GRT انڈیکسرز کو تفویض کرتے ہیں۔ یہ انڈیکسرز کو نیٹ ورک پر سب گراف میں اپنا حصہ بڑھانے کی اجازت دیتا ہے۔ بدلے میں، ڈیلیگیٹرز کو انڈیکسنگ کے انعامات کا ایک حصہ ملتا ہے جو انڈیکسرز سب گراف پر کارروائی کرنے کے لیے وصول کرتے ہیں. - **ڈیلیگیشن ٹیکس**: ڈیلیگیٹرز کی طرف سے 0.5% فیس ادا کی جاتی ہے جب وہ انڈیکسرز کو GRT تفویض کرتے ہیں۔ فیس کی ادائیگی کے لیے استعمال ہونے والی GRT کو جلا دیا جاتا ہے. @@ -38,27 +40,21 @@ title: لغت - **سب گراف مینی فیسٹ**: ایک JSON فائل جو سب گراف کے GraphQL اسکیما، ڈیٹا کے ذرائع اور دیگر میٹا ڈیٹا کو بیان کرتی ہے.[ یہاں](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) ایک مثال ہے. -- **ری بیٹ پول**: ایک معاشی حفاظتی اقدام جس میں سب گراف صارفین کی طرف سے ادا کی جانے والی کیوری کی فیس ہوتی ہے جب تک کہ انڈیکسرز کے ذریعہ کیوری کی فیس میں چھوٹ کے طور پر ان کا دعوی نہ کیا جائے۔ بقایا GRT کو جلا دیا گیا ہے. - -- **ایپوک**: وقت کی اکائی جو نیٹ ورک میں ہے۔ ایک عہد اس وقت 6,646 بلاکس یا تقریباً 1 دن کا ہے. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **ایلوکیشن**: ایک انڈیکسر اپنا کل GRT حصص (بشمول ڈیلیگیٹرز کا حصہ) ان سب گرافوں کے لیے مختص کر سکتا ہے جو گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر شائع کیے گئے ہیں۔ مختصات چار مراحل میں سے ایک میں موجود ہیں. 1. **فعال**: ایک مختص کو فعال سمجھا جاتا ہے جب اسے آن چین بنایا جاتا ہے۔ اسے ایلوکیشن کھولنا کہا جاتا ہے، اور یہ نیٹ ورک کی طرف اشارہ کرتا ہے کہ انڈیکسر کسی خاص سب گراف کے لیے فعال طور پر انڈیکس کر رہا ہے اور کیوریز پیش کر رہا ہے۔ فعال مختصات سب گراف پر سگنل کے متناسب انڈیکسنگ انعامات اور مختص کردہ GRT کی رقم جمع کرتی ہیں. - 2. **بند**: ایک انڈیکسر حالیہ، اور درست، انڈیکسنگ کا ثبوت (POI) جمع کر کے دیئے گئے سب گراف پر جمع شدہ انڈیکسنگ کے انعامات کا دعویٰ کر سکتا ہے۔ یہ ایک مختص بند کرنے کے طور پر جانا جاتا ہے. ایک مختص کو بند کرنے سے پہلے کم از کم ایک دور کے لیے کھلا ہونا چاہیے۔ زیادہ سے زیادہ مختص کی مدت 28 دور ہے۔ اگر کوئی انڈیکسر 28 ایپوکس سے آگے کسی مختص کو کھلا چھوڑ دیتا ہے، تو اسے باسی مختص کے طور پر جانا جاتا ہے۔ جب کوئی مختص **بند** حالت میں ہوتا ہے، تو ایک ماہی گیر اب بھی غلط ڈیٹا پیش کرنے کے لیے انڈیکسر کو چیلنج کرنے کے لیے تنازعہ کھول سکتا ہے. - - 3. ** حتمی**: تنازعہ کی مدت ختم ہو گئی ہے، اور کیوری کی فیس کی چھوٹ انڈیکسرز کے ذریعے دعوی کرنے کے لیے دستیاب ہے. - - 4. **وصول**: مختص کے آخری مرحلے میں، تمام اہل انعامات تقسیم کر دیے گئے ہیں اور اس کے کیوری فیس ریبیٹس وصول کیے کۓ ہیں. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **سب گراف اسٹوڈیو**: سب گراف کی تعمیر، تعیناتی اور اشاعت کے لیے ایک طاقتور ڈیپ. -- **فشرمین**: نیٹ ورک کے شرکاء انڈیکسرز کے کیوریز کے جوابات اور POIs پر تنازعہ کر سکتے ہیں۔ اسے کہتے ہیں فشرمین ہونا۔ فشرمین کے حق میں حل ہونے والے تنازعہ کے نتیجے میں انڈیکسر کے لیے مالی جرمانے کے ساتھ ساتھ فشرمین کو ایوارڈ دیا جاتا ہے، اس طرح نیٹ ورک میں انڈیکسرز کے ذریعہ انجام دیے گئے انڈیکسنگ اور کیوری کے کام کی سالمیت کو ترغیب دیتا ہے۔ جرمانہ (سلیشنگ) فی الحال ایک انڈیکسر کے سیلف سٹیک کا 2.5% مقرر کیا گیا ہے، جس میں کٹے ہوئے GRT کا 50% فشرمین کو جائے گا، اور باقی 50% جلا دیا جائے گا. 
+- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **آربٹریٹرز**: آربٹریٹرز نیٹ ورک کے شرکاء ہوتے ہیں جو گورننس کے ذریعے سیٹ ہوتے ہیں۔ ثالث کا کردار انڈیکسنگ اور کیوری کے تنازعات کے نتائج کا فیصلہ کرنا ہے۔ ان کا مقصد گراف نیٹ ورک کی افادیت اور بھروسے کو زیادہ سے زیادہ کرنا ہے. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **سلیشنگ**: انڈیکسرز کو انڈیکسنگ (POI) کا غلط ثبوت فراہم کرنے یا غلط ڈیٹا پیش کرنے پر ان کے اسٹیکڈ GRT کو کم کیا جا سکتا ہے۔ سلیشنگ پرسینٹیج ایک پروٹوکول پیرامیٹر ہے جو فی الحال انڈیکسر کے سیلف اسٹیک کے 2.5% پر سیٹ ہے۔ کٹے ہوئے GRT کا 50% فشرمین کو جاتا ہے جس نے غلط ڈیٹا یا غلط POI پر اختلاف کیا۔ باقی 50 فیصد جل چکا ہے. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **انڈیکسنگ انعامات**: وہ انعامات جو انڈیکسرز کو انڈیکس کرنے والے سب گراف کے لیے موصول ہوتے ہیں۔ انڈیکسنگ کے انعامات GRT میں تقسیم کیے جاتے ہیں. @@ -66,7 +62,7 @@ title: لغت - **GRT**: گراف کے کام کا یوٹیلیٹی ٹوکن۔ GRT نیٹ ورک میں حصہ ڈالنے کے لیے نیٹ ورک کے شرکاء کو اقتصادی مراعات فراہم کرتا ہے. -- **POI یا انڈیکسنگ کا ثبوت**: جب کوئی انڈیکسر اپنا مختص بند کرتا ہے اور کسی دیے گئے سب گراف پر اپنے جمع کردہ انڈیکسر انعامات کا دعوی کرنا چاہتا ہے، تو انہیں انڈیکسنگ کا ایک درست اور حالیہ ثبوت (POI) فراہم کرنا چاہیے۔ ماہی گیر انڈیکسر کے ذریعہ فراہم کردہ POI پر تنازعہ کر سکتے ہیں۔ فشرمین کے حق میں حل ہونے والے تنازعہ کے نتیجے میں انڈیکسر میں کمی واقع ہو گی. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **گراف نوڈ**: گراف نوڈ وہ جزو ہے جو سب گراف کو انڈیکس کرتا ہے، اور نتیجے میں ڈیٹا کو GraphQL API کے ذریعے کیوری کے لیے دستیاب کرتا ہے۔ اس طرح یہ انڈیکسر اسٹیک میں مرکزی حیثیت رکھتا ہے، اور ایک کامیاب انڈیکسر چلانے کے لیے گراف نوڈ کا درست آپریشن بہت ضروری ہے. @@ -80,10 +76,10 @@ title: لغت - **کولڈاؤن کا دورانیہ**: ایک انڈیکسر جس نے اپنے ڈیلیگیشن پیرامیٹرز کو تبدیل کیا ہے اس وقت تک باقی وقت دوبارہ ایسا کر سکتا ہے. 
-- **L2 ٹرانسفر ٹولز**: سمارٹ کنٹریکٹس اور UI جو نیٹ ورک کے شرکاء کو Ethereum مین نیٹ سے Arbitrum One میں منتقل کرنے کے قابل بناتے ہیں۔ نیٹ ورک کے شرکاء ڈیلیگیٹڈ GRT، سب گرافس، کیوریشن شیئرز، اور انڈیکسر کا سیلف سٹیک منتقل کر سکتے ہیں۔ +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **گراف نیٹ ورک پر سب گراف کو _اپ گریڈ_ کرنا**: ہوسٹڈ سروس سے گراف نیٹ ورک پر سب گراف منتقل کرنے کا عمل۔ - **سب گراف کو _اپ ڈیٹ_ کرنا**: سب گراف کے مینی فیسٹ، سکیما، یا میپنگز میں اپ ڈیٹس کے ساتھ ایک نیا سب گراف ورزن جاری کرنے کا عمل۔ -- **مائیگریٹنگ**: سب گراف کے پرانے ورزن سے سب گراف کے نئے ورزن میں منتقل ہونے والے کیوریشن شیئرز کا عمل (یعنی، کیوریشن شیئرز تازہ ترین ورزن میں منتقل ہوتے ہیں جب v0.0.1 v0.0.2 میں اپ ڈیٹ کیا جاتا ہے)۔ +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/ur/graphcast.mdx b/website/pages/ur/graphcast.mdx index 58a8f3c75147..dc18415188b3 100644 --- a/website/pages/ur/graphcast.mdx +++ b/website/pages/ur/graphcast.mdx @@ -10,7 +10,7 @@ title: گراف کاسٹ گراف کاسٹ SDK (سافٹ ویئر ڈویلپمنٹ کٹ) ڈویلپرز کو ریڈیو بنانے کی اجازت دیتا ہے، جو گپ شپ سے چلنے والی ایپلیکیشنز ہیں جنہیں انڈیکسرز ایک مقررہ مقصد کی تکمیل کے لیے چلا سکتے ہیں۔ ہم مندرجہ ذیل استعمال کے معاملات کے لیے چند ریڈیوز بنانے کا ارادہ رکھتے ہیں (یا دیگر ڈویلپرز/ٹیموں کو مدد فراہم کرتے ہیں جو ریڈیو بنانا چاہتے ہیں): -- سب گراف ڈیٹا کی سالمیت کی ریئل ٹائم کراس چیکنگ ([POI ریڈیو](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - دوسرے انڈیکسرز سے وارپ سنکنگ سب گرافس، سب اسٹریمز، اور فائر ہوز ڈیٹا کے لیے نیلامی اور کوآرڈینیشن کا انعقاد. - فعال کیوری کے تجزیات پر خود رپورٹنگ، بشمول سب گراف کی درخواست والیوم، فیس والیوم وغیرہ. - انڈیکسنگ کے تجزیات پر خود رپورٹنگ، بشمول سب گراف انڈیکسنگ کا وقت، ہینڈلر گیس کے اخراجات، انڈیکسنگ کی غلطیوں کا سامنا کرنا وغیرہ. diff --git a/website/pages/ur/index.json b/website/pages/ur/index.json index 9dcaeb5af3aa..9d7220cbfe80 100644 --- a/website/pages/ur/index.json +++ b/website/pages/ur/index.json @@ -23,8 +23,8 @@ "description": "سب گراف بنانے کے لیے سٹوڈیو کا استعمال کریں" }, "migrateFromHostedService": { - "title": "ہوسٹڈ سروس سے منتقلی", - "description": "گراف نیٹ ورک میں سب گرافس کی منتقلی" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "ہوسٹڈ سروس", - "description": "ہوسٹڈ سروس پر سب گراف بنائیں اور دریافت کریں" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "تعاون یافتہ نیٹ ورکس", - "description": "گراف، گراف نیٹ ورک اور ہوسٹڈ سروس پر درج ذیل نیٹ ورکس کو سپورٹ کرتا ہے.", - "graphNetworkAndHostedService": "گراف نیٹ ورک اور ہوسٹڈ سروس", - "hostedService": "ہوسٹڈ سروس", - "betaWarning": "بیٹا میں." 
+ "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/ur/mips-faqs.mdx b/website/pages/ur/mips-faqs.mdx index 63b595920baa..e59e86551e55 100644 --- a/website/pages/ur/mips-faqs.mdx +++ b/website/pages/ur/mips-faqs.mdx @@ -4,17 +4,19 @@ title: MIPs کے اکثر پوچھے گئے سوالات ## تعارف -گراف ایکوسسٹم میں حصہ لینے کا یہ ایک دلچسپ وقت ہے! [گراف ڈے 2022](https://thegraph.com/graph-day/2022/) کے دوران یانیو تل نے اعلان کیا کہ [ہوسٹڈ سروس کے غروب آفتاب](https://thegraph.com/blog/sunsetting-hosted-service/) ایک لمحہ جس کی طرف گراف ایکوسسٹم کئی سالوں سے کام کر رہا ہے. +> نوٹ: MIPs پروگرام مئی 2023 سے بند ہے۔ حصہ لینے والے تمام انڈیکسرز کا شکریہ! -ہوسٹڈ سروس کے غروب ہونے اور اس کی تمام سرگرمیوں کی ڈیسینٹرالائزڈ نیٹ ورک میں منتقلی کی حمایت کرنے کے لیے، گراف فاؤنڈیشن نے [مائیگریشن انفراسٹرکچر پرووائیڈرز (MIPs) پروگرام] \(https://thegraph.com/blog/mips-multi) کا اعلان کیا ہے۔ -چین-انڈیکسنگ-حوصلہ افزائی-پروگرام. +گراف کا ایکو سسٹم میں حصہ لینے کا یہ ایک دلچسپ وقت ہے! [گراف ڈے 2022](https://thegraph.com/graph-day/2022/) کے دوران Yaniv Tal نے اعلان کیا کہ [ہوسٹڈ سروس کے غروب آفتاب](https://thegraph.com/blog/sunsetting-hosted-service/)ایک لمحہ جس کی گراف کا ایکو سسٹم کئی سالوں سے کام کر رہا ہے. -MIPs پروگرام انڈیکسرز کے لیے ایک ترغیب دینے والا پروگرام ہے جو ایتھیریم مین نیٹ سے آگے انڈیکس چینز کے لیے وسائل کے ساتھ ان کی مدد کرتا ہے اور گراف پروٹوکول کو ڈیسینٹرالائزڈ نیٹ ورک کو ملٹی چین انفراسٹرکچر پرت میں پھیلانے میں مدد کرتا ہے. +ہوسٹڈ سروس کے غروب ہونے اور اس کی تمام سرگرمیوں کی ڈیسنٹرالا ئزڈ نیٹ ورک میں منتقلی کی حمایت کرنے کے لیے، گراف فاؤنڈیشن نے [مائیگریشن انفراسٹرکچر پرووائیڈرز (MIPs) پروگرام](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program) کا اعلان کیا ہے. -MIPs پروگرام نے GRT سپلائی (75 ملین GRT) کا 0.75% مختص کیا ہے، 0.5% انڈیکسرز کو انعام دینے کے لیے جو نیٹ ورک کو بوٹسٹریپ کرنے میں حصہ ڈالتے ہیں اور 0.25% ملٹی چین سب گراف استعمال کرنے والے سب گراف ڈویلپرز کے لیے مائیگریشن گرانٹس کے لیے مختص کیے گئے ہیں. +MIPs پروگرام انڈیکسر کے لیے ایک ترغیب دینے والا پروگرام ہے جو انہیں ایتھیریم مین نیٹ سے آگے انڈیکس چینز کے لیے وسائل کے ساتھ مدد فراہم کرتا ہے اور گراف پروٹوکول کو ڈیسنٹرالا ئزڈ نیٹ ورک کو ایک ملٹی چین انفراسٹرکچر پرت میں پھیلانے میں مدد کرتا ہے. + +MIPs پروگرام نے GRT سپلائی (75M GRT) کا 0.75% مختص کیا ہے، 0.5% انڈیکسرز کو انعام دینے کے لیے جو نیٹ ورک کو بوٹسٹریپ کرنے میں حصہ ڈالتے ہیں اور 0.25% نیٹ ورک گرانٹس کے لیے مختص کیے گئے ہیں جو ملٹی چین سب گراف استعمال کرنے والے سب گراف ڈویلپرز کے لیے ہیں. ### مفید وسائل -- [انڈیکسرر ٹولز ونسنٹ سے (وکٹر) ٹیگلیہ](https://indexer-2ools.vincenttaglia.com/#/) +- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) - [گراف نیٹ ورک پر ایک مؤثر انڈیکسر کیسے بنیں](https://thegraph.com/blog/how-to-become-indexer/) - [انڈیکسر نالج ہب](https://thegraph.academy/indexers/) - [آلوکیشن آپٹیمائزر](https://github.com/graphprotocol/allocationopt.jl) @@ -26,9 +28,9 @@ MIPs پروگرام نے GRT سپلائی (75 ملین GRT) کا 0.75% مختص سیاق و سباق کے لیے، ثالثی چارٹر، [چارٹر کے بارے میں یہاں مزید جانیں](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract)، ناکام سب گراف کے لیے POI بنانے کے طریقہ کار کی وضاحت کرتا ہے. 
-کمیونٹی کے ایک رکن، [SunTzu](https://github.com/suntzu93) نے ثالثی چارٹر کے طریقہ کار کے مطابق اس عمل کو خودکار بنانے کے لیے ایک اسکرپٹ بنایا ہے۔ ریپو چیک کریں [یہاں](https://github.com/suntzu93/get_valid_poi_subgraph). +کمیونٹی کے ایک رکن، [SunTzu](https://github.com/suntzu93) نے ثالثی چارٹر کے طریقہ کار کے مطابق اس عمل کو خودکار بنانے کے لیے ایک اسکرپٹ بنایا ہے۔ ریپو چیک کریں [here](https://github.com/suntzu93/get_valid_poi_subgraph). -### 2. MIPs پروگرام سب سے پہلے کس سلسلہ کو ترغیب دے گا؟ +### 2. MIPs پروگرام سب سے پہلے کس چین کو ترغیب دے گا؟ پہلا چین جو ڈیسینٹرالائزڈ نیٹ ورک پر سپورٹ کیا جائے گا وہ ہے Gnosis چین! پہلے xDAI کے نام سے جانا جاتا تھا، Gnosis چین ایک EVM پر مبنی چین ہے۔ Gnosis Chain کو پہلے کے طور پر منتخب کیا گیا تھا کیونکہ اس کے چلانے والے نوڈس، انڈیکسر کی تیاری، گراف کے ساتھ سیدھ میں ہونا اور ویب 3 میں اپنانے کی صارف دوستی ہے. @@ -88,7 +90,7 @@ MIPs کے انعامات فی چین تقسیم کیے جائیں گے جب کا فیز 3 کے دوران مین نیٹ انڈیکسر رکھنے کی ضرورت ہوگی۔ اس پر مزید معلومات [جلد ہی اس تصور کے صفحے پر شیئر کی جائیں گی۔](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -### 12. کیا انعامات بنیان سے مشروط ہوں گے؟ +### 12. کیا انعامات ویسٹنگ سے مشروط ہوں گے؟ پروگرام کے اختتام پر تقسیم کی جانے والی فیصد بنیان سے مشروط ہوگی۔ اس پر مزید انڈیکسر معاہدے میں شیئر کیا جائے گا. diff --git a/website/pages/ur/network/benefits.mdx b/website/pages/ur/network/benefits.mdx index f358076ea185..dd0d9d2b3f80 100644 --- a/website/pages/ur/network/benefits.mdx +++ b/website/pages/ur/network/benefits.mdx @@ -14,7 +14,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg - 60-98% کم ماہانہ لاگت - $0 بنیادی ڈھانچے کے سیٹ اپ کے اخراجات - اعلی اپ ٹائم -- 438 انڈیکسرز تک رسائی (اور گنتی) +- Access to hundreds of independent Indexers around the world - عالمی برادری کی طرف سے 24/7 تکنیکی مدد ## فوائد کی وضاحت کی @@ -90,7 +90,7 @@ socialImage: https://thegraph.com/docs/img/seo/benefits.jpg ## اعتبار اور لچک -گراف کا ڈیسینٹرالائزڈ نیٹ ورک صارفین کو جغرافیائی فالتو پن تک رسائی فراہم کرتا ہے جو `graph-node` کی خود ہوسٹڈ کے وقت موجود نہیں ہے۔ 99.9%+ اپ ٹائم کی بدولت کیوریز قابل اعتماد طریقے سے پیش کیے جاتے ہیں، جو 168 انڈیکسرز (اور گنتی) کے ذریعے عالمی سطح پر نیٹ ورک کو محفوظ بناتے ہیں. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. نیچے کی سطر: گراف نیٹ ورک کم مہنگا ہے، استعمال میں آسان ہے، اور مقامی طور پر `graph-node` کو چلانے کے مقابلے میں بہتر نتائج پیدا کرتا ہے. diff --git a/website/pages/ur/network/explorer.mdx b/website/pages/ur/network/explorer.mdx index 345b310f59bc..b7ea891bf525 100644 --- a/website/pages/ur/network/explorer.mdx +++ b/website/pages/ur/network/explorer.mdx @@ -74,7 +74,7 @@ title: گراف ایکسپلورر ڈیلیگیٹرز گراف نیٹ ورک کی حفاظت اور ڈیسینٹرالائزیشن کو برقرار رکھنے میں کلیدی کردار ادا کرتے ہیں۔ وہ ایک یا ایک سے زیادہ انڈیکسرز کو GRT ٹوکن ڈیلیگیٹ (یعنی "اسٹیک")کر کے نیٹ ورک میں حصہ لیتے ہیں۔ ڈیلیگیٹرز کے بغیر، انڈیکسرز کے لیے اہم انعامات اور فیسیں حاصل کرنے کا امکان کم ہوتا ہے۔ لہٰذا، انڈیکسرز ڈیلیگیٹرزکو انڈیکسنگ کے انعامات اور استفسار کی فیس کا ایک حصہ پیش کرکے اپنی طرف متوجہ کرنے کی کوشش کرتے ہیں جو وہ کماتے ہیں. -ڈیلیگیٹرز ، بدلے میں، متعدد مختلف متغیرات کی بنیاد پر انڈیکسرز کو منتخب کرتے ہیں، جیسے کہ ماضی کی کارکردگی، انڈیکسنگ کے انعام کی شرح، اور استفسار کی فیس میں کمی۔ کمیونٹی کے اندر ساکھ بھی اس میں اہم کردار ادا کر سکتی ہے! 
[گراف کا ڈسکورڈ](https://discord.gg/graphprotocol) یا [ کے ذریعے منتخب کردہ انڈیکسرز کے ساتھ مربوط ہونے کی سفارش کی جاتی ہے۔ گراف فورم](https://forum.thegraph.com/)! +ڈیلیگیٹرز، بدلے میں، متعدد مختلف متغیرات کی بنیاد پر انڈیکسرز کو منتخب کرتے ہیں، جیسے کہ ماضی کی کارکردگی، انڈیکسنگ کے انعام کی شرح، اور کیوری فیس میں کمی۔ کمیونٹی کے اندر ساکھ بھی اس میں اہم کردار ادا کر سکتی ہے! [گراف ڈسکورڈ](https://discord.gg/graphprotocol) یا [گراف فورم](https://forum.thegraph.com/) کے ذریعے منتخب کردہ انڈیکسر کے ساتھ مربوط ہونے کی سفارش کی جاتی ہے! ![ایکسپلورر امیج 7](/img/Delegation-Overview.png) diff --git a/website/pages/ur/network/indexing.mdx b/website/pages/ur/network/indexing.mdx index ce4e6936c9e9..6b396ceccca7 100644 --- a/website/pages/ur/network/indexing.mdx +++ b/website/pages/ur/network/indexing.mdx @@ -2,7 +2,7 @@ title: انڈیکسنگ --- -انڈیکسرز گراف نیٹ ورک میں نوڈ آپریٹرز ہیں جو انڈیکسنگ اور کیوری پراسیسنگ کی خدمات فراہم کرنے کے لیے گراف ٹوکنز (GRT) کو داؤ پر لگاتے ہیں. انڈیکسرز اپنی خدمات کے بدلے میں کیوری فیس اور انڈیکسنگ کے انعامات حاصل کرتے ہیں. وہ ایک ریبیٹ پول سے بھی کماتے ہیں جو کوب-ڈگلس ری بیٹ فنکشن کی پیروی کرتے ہوۓ ان کے کام کے متناسب سے تمام نیٹ ورک شراکت داروں میں تقسیم کیا جاتا ہے. +انڈیکسرز گراف نیٹ ورک میں نوڈ آپریٹرز ہیں جو انڈیکسنگ اور کیوری پراسیسنگ کی خدمات فراہم کرنے کے لیے گراف ٹوکنز (GRT) کو داؤ پر لگاتے ہیں۔ انڈیکسرز اپنی خدمات کے لیے کیوری فیس اور انڈیکسنگ کے انعامات حاصل کرتے ہیں۔ وہ کیوری فیس بھی کماتے ہیں جو ایک کفایتی چھوٹ کی تقریب کے مطابق چھوٹ دی جاتی ہیں. پروٹوکول میں داؤ پر لگائی گئی GRT پگھلنے کی مدت سے مشروط ہے اور اگر انڈیکسرز بدنیتی پر مبنی ہوں اور ایپلیکیشنز کو غلط ڈیٹا پیش کرتے ہیں یا اگر وہ غلط طریقے سے انڈیکس کرتے ہیں تو اسے کم کیا جا سکتا ہے. انڈیکسرز نیٹ ورک میں حصہ ڈالنے کے لیے ڈیلیگیٹرز کی جانب سے دیے گئے سٹیک کے لیے بھی انعامات حاصل کرتے ہیں. @@ -26,7 +26,7 @@ title: انڈیکسنگ انڈیکسنگ کے انعامات پروٹوکول کے افراط زر سے آتے ہیں جو کہ %3 سالانہ جاری کرنے پر مقر ر ہے. وہ ہر ایک پر تمام کیوریشن سگنل کے تناسب کی بنیاد پر سب گرافس میں تقسیم کیے جاتے ہیں, پھر اس سب گراف پر ان کے مختص سٹیک کی بنیاد پر انڈیکسرز کو متناسب طور پر تقسیم کیے جاتے ہیں. **ایک مختص کرنے کو انڈیکسنگ کے درست ثبوت (POI) کے ساتھ مختص کرنا ضروری ہے جو ثالثی چارٹر کے ذریعہ مقرر کردہ معیارات پر پورا اترتا ہے تاکہ انعامات کا اہل ہو.** -انعامات کا حساب لگانے کے لیے کمیونٹی کی طرف سے متعدد ٹولز بنائے گئے ہیں; آپ کو ان کا ایک مجموعہ [کمیونٹی گائیڈز کا مجموعہ](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c) میں مل جائے گا. آپ [ڈسکورڈ سرور](https://discord.gg/graphprotocol) پر Delegators# اور Indexers# چینلز میں ٹولز کی تازہ ترین فہرست بھی تلاش کر سکتے ہیں. یہاں ہم ایک [تجویز کردہ ایلوکیشن آپٹیمائزر](https://github.com/graphprotocol/AllocationOpt.jl) کو مربوط کرتے ہیں جو کہ انڈیکسر سافٹ ویئر اسٹیک کے ساتھ مرکوب ہے. +انعامات کا حساب لگانے کے لیے کمیونٹی کی طرف سے متعدد ٹولز بنائے گئے ہیں۔ آپ کو ان کا ایک مجموعہ [کمیونٹی گائیڈز کلیکشن](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c) میں مل جائے گا۔ آپ [ڈسکورڈ سرور](https://discord.gg/graphprotocol) پر #Delegators اور #Indexers چینلز میں ٹولز کی تازہ ترین فہرست بھی تلاش کر سکتے ہیں۔ یہاں ہم ایک [تجویز کردہ ایلوکیشن آپٹیمائزر](https://github.com/graphprotocol/AllocationOpt.jl) کو انڈیکسر سافٹ ویئر سٹیک کے ساتھ مربوط کرتے ہیں. ### انڈئکسنگ کا ثبوت (POI) کیا ہے؟ @@ -81,17 +81,17 @@ query indexerAllocations { ### کیوری فیس ری بیٹس کیا ہیں اور وہ کب تقسیم کی جاتی ہیں? 
-کیوری فیس گیٹ وے کے ذریعہ جمع کی جاتی ہے جب بھی کبھی کوئی ایلوکیشن بند کی جاتی ہے اور سب گراف کی کیوری فی ری بیٹ پول میں جمع کی جاتی ہے. ری بیٹ پول کو انڈیکسرز کی حوصلہ افزائی کے لیے ڈیزائن کیا گیا ہے کہ وہ نیٹ ورک کے لیے کمائی جانے والی کیوری فیس کے کسی نہ کسی تناسب سے سٹیک مختص کریں. پول میں کیوری فیس کا وہ حصہ جو کسی خاص انڈیکسر کو مختص کیا جاتا ہے کوب ڈگلس پروڈکشن فنکشن کا استعمال کرتے ہوئے شمار کیا جاتا ہے; فی انڈیکسر کی تقسیم شدہ رقم پول میں ان کی شراکت اور سب گراف پر ان کے سٹیک کی ایلوکیشن کا ایک فنکشن ہے. +کیوری فیس گیٹ وے کے ذریعے جمع کی جاتی ہیں اور انڈیکسرز میں ایکسپونینشل ریبیٹ فنکشن کے مطابق تقسیم کی جاتی ہیں (جی آئی پی [یہاں](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162) دیکھیں)۔ ایکسپونینشل ریبیٹ فنکشن اس بات کو یقینی بنانے کے طریقے کے طور پر تجویز کیا گیا ہے کہ انڈیکسرز ایمانداری کے ساتھ کیوریز کو پیش کرتے ہوئے بہترین نتائج حاصل کریں۔ یہ انڈیکسرز کو حصص کی ایک بڑی رقم مختص کرنے کی ترغیب دے کر کام کرتا ہے (جسے کیوری کرتے وقت غلطی کی وجہ سے کم کیا جا سکتا ہے) کیوری فیس کی مقدار کے مطابق جو وہ جمع کر سکتے ہیں. -ایک بار ایلوکیشن کے بند ہونے اور تنازعہ کی مدت گزر جانے کے بعد انڈیکسر کے ذریعہ دعویٰ کرنے کے لیے ری بیٹس دستیاب ہوتی ہیں. دعویٰ کرنے پر، کیوری فی ری بیٹس انڈیکسر اور ان کے ڈیلیگیٹرز کو کیوری فیس میں کٹوتی اور ڈیلیگیشن کے پول کے تناسب کی بنیاد پر تقسیم کی جاتی ہے. +ایک بار مختص کرنے کے بعد ریبیٹ انڈیکسر کے ذریعہ دعوی کرنے کے لئے دستیاب ہے۔ دعویٰ کرنے پر،کیوری فیس کی چھوٹ انڈیکسر اور ان کے ڈیلیگیٹرز کو کیوری فیس میں کٹوتی اور ایکسپونینشل ریبیٹ فنکشن کی بنیاد پر تقسیم کی جاتی ہے. ### کیوری فی کٹ اور انڈیکسنگ ریوارڈ کٹ کیا ہے؟ `queryFeeCut` اور `indexingRewardCut` قدریں ڈیلی گیشن پیرامیٹر ہیں جنہیں انڈیکسر, انڈیکسر اور ان کے ڈیلیگیٹرز کے درمیان GRT کی تقسیم کو کنٹرول کرنے کے لیے cooldownBlocks کے ساتھ سیٹ کر سکتا ہے. ڈیلیگیشن کے پیرامیٹرز کو ترتیب دینے کے لیے ہدایات کے لیے [پروٹوکول میں حصہ لینا](/network/indexing#stake-in-the-protocol) میں آخری مراحل دیکھیں. -- **queryFeeCut** - ایک سب گراف پر جمع ہونے والی کیوری فیس کی چھوٹ کا % جو انڈیکسر میں تقسیم کیا جائے گا. اگر اسے 95% پر سیٹ کیا جاتا ہے، تو انڈیکسر کو 95% کیوری فیس ری بیٹ پول ملے گا جب ایک ایلوکیشن کا دعویٰ کیا جائے گا اور دیگر 5% ڈیلیگیٹرز کو جائیں گے. +- **کیوری فیس کٹ** - کیوری فیس کی چھوٹ کا % جو انڈیکسر میں تقسیم کیا جائے گا۔ اگر اسے 95% پر سیٹ کیا جاتا ہے، تو انڈیکسر کو 95% کیوری فیس موصول ہو گی جب ایک مختص بند ہو جائے گا اور باقی 5% ڈیلیگیٹرز کو جائے گا. -- **indexingRewardCut** - ایک سب گراف پر جمع ہونے والے انڈیکسنگ کے انعامات کا % جو انڈیکسرز میں تقسیم کیا جائے گا. اگر اسے 95% پر سیٹ کیا جاتا ہے تو، ایلوکیشن کے بند ہونے پر انڈیکسر کو انڈیکسنگ ریوارڈز پول کا 95% ملے گا اور ڈیلیگیٹرز باقی 5% کو تقسیم کر لیں گے. +- **انڈیکسنگ ریوارڈ کٹ** - انڈیکسنگ کے انعامات کا % جو انڈیکسر میں تقسیم کیے جائیں گے۔ اگر اسے 95% پر سیٹ کیا جاتا ہے تو، مختص کرنے کے بند ہونے پر انڈیکس کرنے والے کو انڈیکسنگ انعامات کا 95% ملے گا اور ڈیلیگیٹرز باقی 5% کو تقسیم کر دیں گے. ### انڈیکسر کیسے جانتے ہیں کہ کون سے سب گرافس کو انڈیکس کرنا ہے؟ @@ -112,7 +112,7 @@ query indexerAllocations { - **درمیانہ** - پروڈکشن انڈیکسر 100 سب گراف اور 200-500 درخواستیں فی سیکنڈ کو اٹھا سکتا ہے. - **بڑا** - تمام فی الحال زیر استعمال سب گرافس کو انڈیکس کرنے اور متعلقہ ٹریفک کے لیے درخواستیں پیش کرنے کے لیے تیار ہے. -| سیٹ اپ | Postgres
    (CPUs) | Postgres
    (GBs میں memory) | Postgres
    (TBs میں disk) | VMs
    (CPUs) | VMs
    (GBs میں میموری) | +| سیٹ اپ | Postgres
    (CPUs) | Postgres
    (GBs میں میموری) | Postgres
    (TBs میں ڈسک) | VMs
    (CPUs) | VMs
    (GBs میں میموری) | | --- | :-: | :-: | :-: | :-: | :-: | | چھوٹا | 4 | 8 | 1 | 4 | 16 | | معیاری | 8 | 30 | 1 | 12 | 48 | @@ -152,7 +152,7 @@ query indexerAllocations { | Port | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | | 8000 | GraphQL HTTP server
    (سب گراف کی کیوریز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (سب گراف subscriptions کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8001 | GraphQL WS
    (سب گراف سبسکرپشنز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | | 8020 | JSON-RPC
    (تعیناتیوں کے انتظام کے لیے) | / | --admin-port | - | | 8030 | سب گراف انڈیکسنگ اسٹیٹس API | /graphql | --index-node-port | - | | 8040 | Prometheus میٹرکس | /metrics | --metrics-port | - | @@ -297,7 +297,7 @@ kubectl config use-context $(kubectl config get-contexts --output='name' ### گراف نوڈ -[گراف نوڈ](https://github.com/graphprotocol/graph-node) ایک اوپن سورس Rust کا نفاذ ہے جو Ethereum بلاکچین کو ڈیٹا سٹور کو متعین طور پر اپ ڈیٹ کرنے کے لیے فراہم کرتا ہے جس سے GraphQL اینڈ پوائنٹ کے ذریعے کیوری کیا جا سکتا ہے. ڈویلپرز اپنے اسکیما کی وضاحت کرنے کے لیے سب گراف کا استعمال کرتے ہیں، اور بلاک چین سے حاصل کردہ ڈیٹا کو تبدیل کرنے کے لیے میپنگ کا ایک سیٹ اور گراف نوڈ پوری چین کو ہم آہنگ کرنے، نئے بلاکس کی مانیٹرنگ، اور GraphQL اینڈ پوائنٹ کے ذریعے اسے پیش کرنے کے لیے استعمال کرتے ہیں. +[گراف نوڈ](https://github.com/graphprotocol/graph-node) ایک اوپن سورس Rust کا نفاذ ہے جو ایتھیریم بلاکچین کو ڈیٹا سٹور کو متعین طور پر اپ ڈیٹ کرنے کے لیے فراہم کرتا ہے جس سے GraphQL اینڈ پوائنٹ کے ذریعے کیوری کیا جا سکتا ہے. ڈویلپرز اپنے اسکیما کی وضاحت کرنے کے لیے سب گراف کا استعمال کرتے ہیں، اور بلاک چین سے حاصل کردہ ڈیٹا کو تبدیل کرنے کے لیے میپنگ کا ایک سیٹ اور گراف نوڈ پوری چین کو ہم آہنگ کرنے، نئے بلاکس کی مانیٹرنگ، اور GraphQL اینڈ پوائنٹ کے ذریعے اسے پیش کرنے کے لیے استعمال کرتے ہیں. #### سورس سے شروع کرنا @@ -375,7 +375,7 @@ docker-compose up #### شروع کرتے ہوئے -انڈیکسر ایجنٹ اور انڈیکسر سروس آپ کے گراف نوڈ انفراسٹرکچر کے ساتھ مل کر واقع ہونی چاہیے۔ آپ کے انڈیکسر کمپونینٹس کے لیے ورچوئل ایگزیکیوشن ماحول قائم کرنے کے بہت سے طریقے ہیں۔ یہاں ہم وضاحت کریں گے کہ انہیں NPM پیکجز یا سورس کا استعمال کرتے ہوئے، یا گوگل کلاؤڈ kubernetes انجن پر kubernetes اور docker کے ذریعے بیئر میٹل پر کیسے چلایا جائے۔ اگر سیٹ اپ کی یہ مثالیں آپ کے بنیادی ڈھانچے میں اچھی طرح سے شامل نہیں ہوتی ہیں تو ممکنہ طور پر حوالہ دینے کے لیے کمیونٹی گائیڈ ہو گا، آئیں [ڈسکورڈ](https://discord.gg/graphprotocol) پر ہیلو کہیں! اپنے انڈیکسر اجزاء کو شروع کرنے سے پہلے [پروٹوکول میں سٹیک کرناا](/network/indexing#stake-in-the-protocol) یاد رکھیں! +انڈیکسر ایجنٹ اور انڈیکسر سروس آپ کے گراف نوڈ انفراسٹرکچر کے ساتھ مل کر واقع ہونی چاہیے۔ آپ کے انڈیکسر اجزاء کے لیے ورچوئل ایگزیکیوشن ماحول قائم کرنے کے بہت سے طریقے ہیں۔ یہاں ہم وضاحت کریں گے کہ انہیں NPM پیکجز یا سورس کا استعمال کرتے ہوئے، یا گوگل کلاؤڈ کبرنیٹس انجن پر کبرنیٹس اور ڈوکر کے ذریعے بیری میٹل پر کیسے چلایا جائے۔ اگر سیٹ اپ کی یہ مثالیں آپ کے بنیادی ڈھانچے میں اچھی طرح سے ترجمہ نہیں کرتی ہیں تو ممکنہ طور پر حوالہ دینے کے لیے کمیونٹی گائیڈ ہو گا، آئیں [ڈسکورڈ](https://discord.gg/graphprotocol) پر ہیلو کہیں! اپنے انڈیکسر اجزاء کو شروع کرنے سے پہلے [پروٹوکول میں حصہ لینا](/network/indexing#stake-in-the-protocol) یاد رکھیں! #### NPM پیکیجیز سے @@ -621,8 +621,8 @@ Indexer-cli کارروائی کی قطار کے ساتھ دستی طور پر ک - فریق ثالث کا اصلاح کنندہ ٹول یا indexer-cli صارف کے ذریعے قطار میں ایکشن شامل کیا گیا - انڈیکسر تمام queued کارروائیوں کو دیکھنے کے لیے `indexer-cli` استعمال کر سکتا ہے -- Indexer (یا دیگر software) `indexer-cli` کا استعمال کرتے ہوئے queue میں کارروائیوں کو منظور یا منسوخ کر سکتا ہے۔ approve اور cancel کی کمانڈز کے بطور ان پٹ action ids کی ایک array لیتی ہیں. -- Execution worker باقاعدگی سے منظور شدہ کارروائیوں کے لیے قطار میں polling کرتا ہے۔ یہ قطار سے `approved` کارروائیوں کو پکڑے گا، ان کو execute کرنے کی کوشش کرے گا، اور db میں اقدار کو `success` یا `failed` پر عمل درآمد کی حیثیت کے لحاظ سے اپ ڈیٹ کرے گا. 
+- انڈیکسر (یا دیگر سافٹ ویئر) `indexer-cli` کا استعمال کرتے ہوئے queue میں کارروائیوں کو منظور یا منسوخ کر سکتا ہے۔ approve اور cancel کی کمانڈز کے بطور ان پٹ action ids کی ایک ایرے لیتی ہیں. +- Execution worker باقاعدگی سے منظور شدہ کارروائیوں کے لیے قطار میں پولنگ کرتا ہے۔ یہ قطار سے `approved` کارروائیوں کو پکڑے گا، ان کو چلانے کی کوشش کرے گا، اور db میں اقدار کو `success` یا `failed` پر عمل درآمد کی حیثیت کے لحاظ سے اپ ڈیٹ کرے گا. - اگر کوئی کارروائی کامیاب ہوتی ہے تو کارکن اس بات کو یقینی بنائے گا کہ ایک انڈیکسنگ کا اصول موجود ہے جو ایجنٹ کو بتاتا ہے کہ ایلوکیشن کو آگے بڑھنے کا طریقہ کس طرح منظم کرنا ہے، جب ایجنٹ `auto` یا `oversight` موڈ میں ہوتا ہے تو دستی کارروائی کرتے وقت مفید ہوتا ہے. - انڈیکسر ایکشن کے عمل کی تاریخ دیکھنے کے لیے action queue کی نگرانی کر سکتا ہے اور اگر ضرورت ہو تو ایکشن کے اجزاء کو دوبارہ منظور اور اپ ڈیٹ کر سکتا ہے اگر وہ عمل درآمد میں ناکام رہے. action queue تمام queued اور taken اعمال کی تاریخ فراہم کرتی ہے. @@ -662,21 +662,21 @@ ActionType { سورس سے استعمال کی مثال: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` نوٹ کریں کہ ایلوکیشن کے انتظام کے لیے معاون کارروائی کی اقسام میں مختلف ان پٹ تقاضے ہوتے ہیں: @@ -798,8 +798,4 @@ setDelegationParameters(950000, 600000, 500) - **Closed** - ایک انڈیکسر 1 ایپوک گزر جانے کے بعد ایلوکیشن کو بند کرنے کے لیے آزاد ہوتا ہے ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) یا ان کا انڈیکسر ایجنٹ **maxAllocationEpochs** (فی الحال 28 دن) کے بعد مختص کو خود بخود بند کر دے گا. جب کوئی ایلوکیشن انڈیکسنگ کے درست ثبوت (POI) کے ساتھ بند کر دیا جاتا ہے تو ان کے انڈیکسنگ کے انعامات انڈیکسر اور اس کے ڈیلیگیٹرز میں تقسیم کیے جاتے ہیں (مزید جاننے کے لیے نیچے "انعامات کیسے تقسیم کیے جاتے ہیں؟" دیکھیں). -- **Finalized** - ایک بار ایلوکیشن کے بند ہونے کے بعد ایک تنازعہ کی مدت ہوتی ہے جس کے بعد ایلوکیشن کو **finalized** سمجھا جاتا ہے اور اس کی کیوری کی فیس کی چھوٹ دعوی کرنے کے لیے دستیاب ہوتی ہے (claim()). انڈیکسر ایجنٹ **finalized** ایکوکیشنز کا پتہ لگانے کے لیے نیٹ ورک کی نگرانی کرتا ہے اور ان کا دعوی کرتا ہے اگر وہ قابل ترتیب (اور اختیاری) حد سے اوپر ہیں، **—-allocation-claim-threshold**. - -- **Claimed** - ایلوکیشن کی حتمی حالت؛ اس نے اپنا کورس ایک فعال ایلوکیشن کے طور پر چلایا ہے، تمام اہل انعامات تقسیم کر دیے گئے ہوتے ہیں اور اس کی کیوری کی فیس میں چھوٹ کا دعوی کیا گیا ہوتا ہے. 
- انڈیکسرز کو تجویز کی جاتی ہے کہ وہ offchain مطابقت پذیری کی فعالیت کو استعمال کریں تاکہ on-chain ایلوکیشن سے پہلے سب گراف کی تعیناتیوں کو chainhead سے ہم آہنگ کیا جا سکے. یہ خصوصیت خاص طور پر ان سب گرافوں کے لیے مفید ہے جن کی مطابقت پذیری میں 28 ایپوک سے زیادہ وقت لگ سکتا ہے یا غیر یقینی طور پر ناکام ہونے کے کچھ امکانات ہوتے ہیں. diff --git a/website/pages/ur/new-chain-integration.mdx b/website/pages/ur/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/ur/new-chain-integration.mdx +++ b/website/pages/ur/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then the integration of that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. 
Some options are below: diff --git a/website/pages/ur/operating-graph-node.mdx b/website/pages/ur/operating-graph-node.mdx index aef53211bc1b..91ace3b3a74f 100644 --- a/website/pages/ur/operating-graph-node.mdx +++ b/website/pages/ur/operating-graph-node.mdx @@ -22,7 +22,7 @@ title: گراف نوڈ کو آپریٹ کرنا اگرچہ کچھ سب گراف کو صرف ایک مکمل نوڈ کی ضرورت ہو سکتی ہے، کچھ میں انڈیکسنگ کی خصوصیات ہوسکتی ہیں جن کے لیے اضافی RPC فعالیت کی ضرورت ہوتی ہے۔ خاص طور پر سب گراف جو `eth_calls` کو انڈیکسنگ کے حصے کے طور پر بناتے ہیں ان کے لیے آرکائیو نوڈ کی ضرورت ہوگی جو [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898) کو سپورٹ کرتا ہو، اور `callHandlers`، یا `blockHandlers` کے ساتھ `call` فلٹر کے ساتھ سب گراف، `trace_filter` سپورٹ کی ضرورت ہے ([ٹریس ماڈیول کی دستاویزات یہاں دیکھیں](https://openethereum.github.io/JSONRPC-trace-module)). -**آنے والا: نیٹ ورک فائر ہوزس** - فائر ہوز ایک gRPC سروس ہے جو ایک ترتیب شدہ، ابھی تک fork-aware، بلاکس کا سلسلہ فراہم کرتی ہے، جسے گراف کے بنیادی ڈویلپرز نے پیمانے پر کارکردگی کا مظاہرہ کرنے والی انڈیکسنگ کو بہتر طریقے سے سپورٹ کرنے کے لیے تیار کیا ہے. یہ فی الحال انڈیکسنگ کی ضرورت نہیں ہے، لیکن انڈیکسرز کی حوصلہ افزائی کی جاتی ہے کہ وہ مکمل نیٹ ورک سپورٹ سے پہلے خود کو ٹیکنالوجی سے واقف کر لیں. فائر ہوز کے بارے میں مزید [یہاں](https://firehose.streamingfast.io/) جانیں. +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS نوڈس @@ -80,7 +80,7 @@ cargo run -p graph-node --release -- \ | Port | Purpose | Routes | CLI Argument | Environment Variable | | --- | --- | --- | --- | --- | | 8000 | GraphQL HTTP server
    (سب گراف کی کیوریز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (سب گراف subscriptions کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8001 | GraphQL WS
    (سب گراف سبسکرپشنز کے لیے) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | | 8020 | JSON-RPC
    (تعیناتیوں کے انتظام کے لیے) | / | --admin-port | - | | 8030 | سب گراف انڈیکسنگ اسٹیٹس API | /graphql | --index-node-port | - | | 8040 | Prometheus میٹرکس | /metrics | --metrics-port | - | diff --git a/website/pages/ur/publishing/publishing-a-subgraph.mdx b/website/pages/ur/publishing/publishing-a-subgraph.mdx index f77b75fea6de..b92fc89f20c4 100644 --- a/website/pages/ur/publishing/publishing-a-subgraph.mdx +++ b/website/pages/ur/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ title: ڈیسینٹرلائزڈ نیٹ ورک پر سب گراف شائع کرن ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف شائع کرنا اسے [کیوریٹرز](/network/curating) کے لیے اس پر کیوریٹنگ شروع کرنے کے لیے، اور [انڈیکسرز](/network/indexing) کے لیے دستیاب کرتا ہے۔ اس کی ترتیب شروع کرنے کے لیے. -ڈیسینٹرالائزڈ نیٹ ورک پر سب گراف کو کیسے شائع کیا جائے اس بارے میں واک تھرو کے لیے، [یہ ویڈیو](https://youtu.be/HfDgC2oNnwo?t=580) دیکھیں. + آپ سپورٹڈ نیٹ ورکس کی فہرست [یہاں](/developing/supported-networks) تلاش کر سکتے ہیں. diff --git a/website/pages/ur/querying/querying-best-practices.mdx b/website/pages/ur/querying/querying-best-practices.mdx index 8c8858a6a09a..144a23d1f309 100644 --- a/website/pages/ur/querying/querying-best-practices.mdx +++ b/website/pages/ur/querying/querying-best-practices.mdx @@ -67,18 +67,18 @@ query [operationName]([variableName]: [variableType]) { ### GraphQL API کو ایک کیوری بھیجنا -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL ایک لینگویج اور کنونشنز کا مجموعہ ہے جو HTTP پر نقل و حمل کرتا ہے. -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +اس کا مطلب ہے کہ آپ معیاری `fetch` (مقامی طور پر یا `@whatwg-node/fetch` یا `isomorphic-fetch` کے ذریعے) کا استعمال کرتے ہوئے GraphQL API سے کیوری کرسکتے ہیں. -However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +تاہم، جیسا کہ ["ایک درخواست سے کیوری کرنا"](/querying/querying-from-an-application) میں بتایا گیا ہے، ہم آپ کو ہمارا `graph-client` استعمال کرنے کی تجویز کرتے ہیں جو منفرد خصوصیات کی حمایت کرتا ہے جیسے: - کراس چین سب گراف ہینڈلنگ: ایک کیوری میں متعدد سب گرافس سے کیوری کرنا - [خودکار بلاک ٹریکنگ](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [خودکار صفحہ بندی](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - مکمل طور پر ٹائپ شدہ نتیجہ -Here's how to query The Graph with `graph-client`: +`graph-client` کے ساتھ گراف سے کیوری کرنے کا طریقہ یہاں ہے: ```tsx import { execute } from '../.graphclient' @@ -102,9 +102,9 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +مزید GraphQL کلائنٹ متبادلات کا احاطہ ["ایک درخواست سے کیوری کرنا"](/querying/querying-from-an-application) میں کیا گیا ہے. -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +اب جب کہ ہم نے GraphQL کیوریز کی ترکیب کے بنیادی اصولوں کا احاطہ کیا ہے، آئیے اب GraphQL کیوری تحریر کے بہترین طریقوں کو دیکھتے ہیں. 
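As a companion to the `graph-client` snippet above, here is a minimal sketch of the plain-`fetch` approach mentioned earlier. The endpoint URL, query, and field names are illustrative placeholders rather than values taken from this PR; in practice you would substitute your own subgraph's query URL and whatever authentication your gateway requires.

```javascript
// Minimal sketch: querying a subgraph's GraphQL endpoint with standard fetch.
// The endpoint and entity fields below are illustrative placeholders.
const endpoint = 'https://api.thegraph.com/subgraphs/name/<org>/<subgraph>'

const query = /* GraphQL */ `
  query GetTokens($first: Int!) {
    tokens(first: $first) {
      id
      owner
    }
  }
`

async function fetchTokens() {
  const response = await fetch(endpoint, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    // Variables travel alongside the static query string (see the best practices below).
    body: JSON.stringify({ query, variables: { first: 10 } }),
  })
  const { data, errors } = await response.json()
  if (errors) {
    throw new Error(`GraphQL errors: ${JSON.stringify(errors)}`)
  }
  return data.tokens
}

fetchTokens().then((tokens) => console.log(tokens))
```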
--- @@ -112,7 +112,7 @@ Now that we covered the basic rules of GraphQL queries syntax, let's now look at ### ہمیشہ جامد کیوریز لکھیں -A common (bad) practice is to dynamically build query strings as follows: +ایک عام (خراب) پریکٹس مندرجہ ذیل طور پر کیوری کے سٹرنگس کو متحرک طور پر بنانا ہے: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... ``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +جبکہ مذکورہ بالا ٹکڑا ایک درست GraphQL کیوری پیدا کرتا ہے، **اس میں بہت سی خرابیاں ہیں**: - یہ مجموعی طور پر کیوری کو **سمجھنا مشکل** بناتا ہے - ڈویلپرز **سٹرنگ انٹرپولیشن کو محفوظ طریقے سے صاف کرنے کے ذمہ دار ہیں** - متغیر کی ویلیوس کو درخواست کے پیرامیٹرز کے حصے کے طور پر نہ بھیجنا **سرور سائیڈ پر ممکنہ کیشنگ کو روکتا ہے** - یہ **ٹولز کو کیوری کا مستحکم تجزیہ کرنے سے روکتا ہے** (مثال کے طور پر: Linter، یا جنریشنز ٹولز ٹائپ کریں) -For this reason, it is recommended to always write queries as static strings: +اس وجہ سے، کیوریز کو ہمیشہ جامد سٹرنگس کے طور پر لکھنے کی سفارش کی جاتی ہے: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +ایسا کرنے سے **بہت سے فائدے** ہوتے ہیں: - کیوریز کو **پڑھنے اور برقرار رکھنے میں آسانی** - GraphQL **سرور متغیرات کو صاف کرتا ہے** - سرور کی سطح پر **متغیرات کو کیش کیا جا سکتا ہے** - **کیوریز کا مستحکم طور پر ٹولز کے ذریعے تجزیہ کیا جا سکتا ہے** (مندرجہ ذیل حصوں میں اس پر مزید) -**Note: How to include fields conditionally in static queries** +**نوٹ: جامد کیوریز میں فیلڈز کو مشروط طور پر کیسے شامل کیا جائے** -We might want to include the `owner` field only on a particular condition. +ہم صرف ایک خاص شرط پر `owner` فیلڈ کو شامل کرنا چاہتے ہیں. -For this, we can leverage the `@include(if:...)` directive as follows: +اس کے لیے، ہم ذیل میں `@include(if:...)` ہدایت کا فائدہ اٹھا سکتے ہیں: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,21 +191,21 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +نوٹ: مخالف ہدایت `@skip(if: ...)` ہے. ### کارکردگی کی تجاویز -**"Ask for what you want"** +**"جو چاہو مانگو"** -GraphQL became famous for its "Ask for what you want" tagline. +GraphQL اپنی "جو چاہو مانگو" ٹیگ لائن کے لیے مشہور ہوا. -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +اس وجہ سے، GraphQL میں، تمام دستیاب فیلڈز کو انفرادی طور پر فہرست بنائے بغیر حاصل کرنے کا کوئی طریقہ نہیں ہے. -When querying GraphQL APIs, always think of querying only the fields that will be actually used. +GraphQL APIs سے کیوری کرتے وقت، ہمیشہ صرف ان فیلڈز سے کیوری کرنے کے بارے میں سوچیں جو حقیقت میں استعمال ہوں گے. -A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. 
+اوور فیچنگ کی ایک عام وجہ ہستیوں کا مجموعہ ہے۔ پہلے سے طے شدہ طور پر، کیوریز ایک مجموعہ میں 100 ہستیوں کو حاصل کریں گے، جو عام طور پر اس سے کہیں زیادہ ہوتا ہے جو اصل میں استعمال کیا جائے گا، مثلاً، صارف کو دکھانے کے لیے۔ اس لیے کیوریز کو تقریباً ہمیشہ پہلے واضح طور پر سیٹ کرنا چاہیے، اور اس بات کو یقینی بنانا چاہیے کہ وہ صرف اتنی ہی ہستیوں کو حاصل کریں جتنی انھیں درحقیقت ضرورت ہے۔ اس کا اطلاق نہ صرف کیوری میں اعلیٰ سطحی مجموعوں پر ہوتا ہے، بلکہ اس سے بھی زیادہ ہستیوں کے گھریلو مجموعوں پر ہوتا ہے. -For example, in the following query: +مثال کے طور پر، درج ذیل کیوری میں: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +جواب میں 100 ٹوکنز میں سے ہر ایک کے لیے 100 ٹرانزیکشن ہو سکتے ہیں. -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +اگر ایپلیکیشن کو صرف 10 ٹرانزیکشنز کی ضرورت ہے، کیوری کو واضح طور پر ٹرانزیکشنز فیلڈ پر `first: 10` سیٹ کرنا چاہیے. -**Combining multiple queries** +**متعدد کیوریز کو یکجا کرنا** -Your application might require querying multiple types of data as follows: +آپ کی درخواست کو درج ذیل کے طور پر متعدد قسم کے ڈیٹا سے کیوری کرنے کی ضرورت ہو سکتی ہے: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +جب کہ یہ نفاذ مکمل طور پر درست ہے، اس کے لیے GraphQL API کے ساتھ دو چکر لگانے کی ضرورت ہوگی. -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +خوش قسمتی سے، درج ذیل کے طور پر ایک ہی GraphQL درخواست میں متعدد کیوریز بھیجنا بھی درست ہے: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +یہ نقطہ نظر نیٹ ورک پر گزارے جانے والے وقت کو کم کرکے (آپ کو API کا ایک راؤنڈ ٹرپ بچاتا ہے) **مجموعی کارکردگی کو بہتر بنائے گا** اور ایک **مزید جامع نفاذ** فراہم کرے گا. ### لیوریج GraphQL فریگمنٹس -A helpful feature to write GraphQL queries is GraphQL Fragment. +GraphQL کیوریز لکھنے کے لیے ایک مددگار خصوصیت GraphQL فریگمنٹ ہے. -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +درج ذیل کیوری کو دیکھتے ہوئے، آپ دیکھیں گے کہ کچھ فیلڈز کو متعدد سلیکشن سیٹس (`{ ... }`) میں دہرایا جاتا ہے: ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +اس طرح کے دہرائے جانے والے فیلڈز (`id`, `active`, `status`) بہت سے مسائل لاتے ہیں: - مزید وسیع کیوریز کے لیے پڑھنا مشکل ہے - ایسے ٹولز کا استعمال کرتے وقت جو کیوریز کی بنیاد پر ٹائپ اسکرپٹ کی قسمیں تیار کرتے ہیں (_آخری حصے میں اس پر مزید_)، `newDelegate` اور `oldDelegate` کے نتیجے میں دو الگ الگ ان لائن انٹرفیس ہوں گے. -A refactored version of the query would be the following: +کیوری کا ایک ریفیکٹر ورژن درج ذیل ہوگا: ```graphql query { @@ -334,15 +334,15 @@ fragment DelegateItem on Transcoder { } ``` -Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. 
+GraphQL `fragment` کا استعمال پڑھنے کی اہلیت کو بہتر بنائے گا (خاص طور پر پیمانے پر) لیکن اس کے نتیجے میں ٹائپ اسکپٹ ٹائپس جینریشن بہتر ہوں گی. -When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +ٹائپ جنریشن ٹول کا استعمال کرتے وقت، مندرجہ بالا کیوری ایک مناسب `DelegateItemFragment` قسم پیدا کرے گا (_آخری "ٹولز" سیکشن دیکھیں_). ### GraphQL فریگمنٹ کیا کریں اور نہ کریں -**Fragment base must be a type** +**فریگمنٹ بیس ایک ٹائپ کا ہونا چاہیے** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +ایک فریگمینٹ غیر قابل اطلاق ٹائپ پر مبنی نہیں ہو سکتا، مختصراً، **اس ٹائپ پر جس میں فیلڈز نہیں ہیں**: ```graphql fragment MyFragment on BigInt { @@ -350,11 +350,11 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt` ایک **اسکالر** (مقامی "سادہ" ٹائپ) ہے جسے فریگمینٹس کی بنیاد کے طور پر استعمال نہیں کیا جاسکتا. -**How to spread a Fragment** +**فریگمینٹ پھیلانے کا طریقہ** -Fragments are defined on specific types and should be used accordingly in queries. +فریگمینٹس کی وضاحت مخصوص ٹائپس پر کی جاتی ہے اور اس کے مطابق کیوریز میں استعمال کیا جانا چاہیے. مثال: @@ -377,17 +377,17 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` اور `oldDelegate` ٹائپ `Transcoder` ہیں. -It is not possible to spread a fragment of type `Vote` here. +یہاں `Vote` ٹائپ کے فریگمینٹس کو پھیلانا ممکن نہیں ہے. -**Define Fragment as an atomic business unit of data** +**فریگمنٹ کو ڈیٹا کی ایٹم بزنس یونٹ کے طور پر بیان کریں** -GraphQL Fragment must be defined based on their usage. +GraphQL فریگمنٹ کو ان کے استعمال کی بنیاد پر بیان کیا جانا چاہیے. -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +زیادہ تر استعمال کے کیس کے لیے، فی ٹائپ کے ایک فریگمینٹ کی وضاحت کرنا (بار بار فیلڈز کے استعمال یا ٹائپ جنریشن کی صورت میں) کافی ہے. -Here is a rule of thumb for using Fragment: +فریگمینٹ استعمال کرنے کے لیے یہاں انگوٹھے کا اصول ہے: - جب ایک ہی قسم کے فیلڈز کو ایک کیوری میں دہرایا جاتا ہے، تو انہیں ایک فریگمینٹ میں گروپ کریں - جب ایک جیسے لیکن ایک جیسے فیلڈز کو دہرایا نہیں جاتا ہے، تو متعدد فریگمینٹس بنائیں، مثال کے طور پر: @@ -417,31 +417,31 @@ fragment VoteWithPoll on Vote { ### GraphQL ویب پر مبنی ایکسپلوررز -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +کیوریز کو اپنی درخواست میں چلا کر ان پر تکرار کرنا بوجھل ہو سکتا ہے۔ اس وجہ سے، اپنی درخواست میں شامل کرنے سے پہلے اپنے کیوریز کو جانچنے کے لیے [گراف ایکسپلورر](https://thegraph.com/explorer) کا استعمال کرنے میں ہچکچاہٹ محسوس نہ کریں۔ گراف ایکسپلورر آپ کے کیوریز کو جانچنے کے لیے آپ کو پہلے سے ترتیب شدہ GraphQL پلے گراؤنڈ فراہم کرے گا. -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). 
+اگر آپ اپنے سوالات کو ڈیبگ/ٹیسٹ کرنے کے لیے مزید لچکدار طریقہ تلاش کر رہے ہیں، تو اسی طرح کے دیگر ویب پر مبنی ٹولز دستیاب ہیں جیسے کہ [Altair](https://altair.sirmuel.design/) اور [GraphiQL](https://graphiql-online.com/graphiql). ### GraphQL لنٹنگ -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +مذکورہ بالا بہترین طریقوں اور نحوی اصولوں کو برقرار رکھنے کے لیے، درج ذیل ورک فلو اور IDE ٹولز کو استعمال کرنے کی انتہائی سفارش کی جاتی ہے. **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) صفر کی کوشش کے ساتھ GraphQL کے بہترین طریقوں میں سرفہرست رہنے میں آپ کی مدد کرے گا. -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +["operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) کو ترتیب دینا ضروری اصولوں کو نافذ کرے گا جیسے: - `@graphql-eslint/fields-on-correct-type`: کیا فیلڈ ایک مناسب ٹائپ پر استعمال ہوتی ہے؟ - `@graphql-eslint/no-unused variables`: کیا دیئے گئے متغیر کو غیر استعمال شدہ رہنا چاہئے؟ - اور مزید! -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +یہ آپ کو پلے گراؤنڈ میں یا انہیں پروڈکشن میں چلانے کے **کیوریز کی جانچ کیے بغیر بھی غلطیوں کو پکڑنے** کی اجازت دے گا! ### IDE plugins -**VSCode and GraphQL** +**VSCode اور GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +[GraphQL VSCode ایکسٹینشن](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) حاصل کرنے کے لیے آپ کے ترقیاتی ورک فلو میں ایک بہترین اضافہ ہے: - نحو کو نمایاں کرنا - خودکار تکمیل کی تجاویز @@ -449,15 +449,15 @@ The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemNa - ٹکڑے - ٹکڑوں اور ان پٹ کی اقسام کے لیے تعریف پر جائیں -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +اگر آپ `graphql-eslint` استعمال کر رہے ہیں، تو [ESLint VSCode ایکسٹینشن](https://marketplace. visualstudio. com/items? itemName=dbaeumer. vscode-eslint) ہے آپ کے کوڈ میں موجود غلطیوں اور انتباہات کو درست طریقے سے دیکھنا ضروری ہے. -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij اور GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQL پلگ ان](https://plugins.jetbrains.com/plugin/8097-graphql/) فراہم کر کے GraphQL کے ساتھ کام کرتے ہوئے آپ کے تجربے کو نمایاں طور پر بہتر بنائے گا: - نحو کو نمایاں کرنا - خودکار تکمیل کی تجاویز - اسکیما کے خلاف توثیق - ٹکڑے -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +اس [ویب سٹورم مضمون](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) پر مزید معلومات جو پلگ ان کی تمام اہم خصوصیات کو ظاہر کرتا ہے. 
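To make the linting section above concrete, the following is a hedged sketch of a classic ESLint configuration that enables the `operations-recommended` rule set for standalone `.graphql` operation files. The file name, schema path, and exact option names are assumptions to verify against the version of `@graphql-eslint/eslint-plugin` you have installed.

```javascript
// Hypothetical .eslintrc.cjs — one common way to wire up graphql-eslint (assumed v3-style config).
module.exports = {
  overrides: [
    {
      // Lint standalone GraphQL operation and fragment files.
      files: ['*.graphql'],
      parser: '@graphql-eslint/eslint-plugin',
      plugins: ['@graphql-eslint'],
      extends: ['plugin:@graphql-eslint/operations-recommended'],
      parserOptions: {
        // Assumed path: point the parser at your subgraph schema so rules such as
        // `@graphql-eslint/fields-on-correct-type` can validate selections against it.
        schema: './schema.graphql',
      },
    },
  ],
}
```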
diff --git a/website/pages/ur/querying/querying-from-an-application.mdx b/website/pages/ur/querying/querying-from-an-application.mdx index 1e472c6710d0..6c71ca139ac0 100644 --- a/website/pages/ur/querying/querying-from-an-application.mdx +++ b/website/pages/ur/querying/querying-from-an-application.mdx @@ -33,11 +33,11 @@ GraphQL اینڈ پوائنٹ کا استعمال کرتے ہوئے، آپ سب - [خودکار صفحہ بندی](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - مکمل طور پر ٹائپ شدہ نتیجہ -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +نیز مقبول GraphQL کلائنٹس جیسے کہ اپولو اور URQL کے ساتھ مربوط اور تمام ماحول کے ساتھ مطابقت رکھتا ہے (ری ایکٹ، اینگولر، نوڈ جے ایس، ری ایکٹ نیٹیو)، `graph-client` کا استعمال آپ کو گراف کے ساتھ بات چیت کرنے کا بہترین تجربہ فراہم کرے گا. -Let's look at how to fetch data from a subgraph with `graphql-client`. +آئیے دیکھتے ہیں کہ `graphql-client` کے ساتھ سب گراف سے ڈیٹا کیسے حاصل کیا جائے. -To get started, make sure to install The Graph Client CLI in your project: +شروع کرنے کے لیے، اپنے پروجیکٹ میں گراف کلائنٹ CLI کو میں انسٹال کرنا یقینی بنائیں: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +اپنے کیوری کی وضاحت `.graphql` فائل میں کریں (یا آپ کی `.js` یا `.ts` فائل میں ان لائن): ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +پھر، ایک کنفیگریشن فائل بنائیں (جسے `.graphclientrc.yml` کہا جاتا ہے) اور گراف کی طرف سے فراہم کردہ اپنے GraphQL اینڈ پوائنٹس کی طرف اشارہ کریں، مثال کے طور پر: ```yaml # .graphclientrc.yml @@ -90,13 +90,13 @@ documents: - ./src/example-query.graphql ``` -Running the following The Graph Client CLI command will generate typed and ready to use JavaScript code: +درج ذیل گراف کلائنٹ CLI کمانڈ کو چلانے سے ٹائپ شدہ اور جاوا اسکرپٹ کوڈ استعمال کرنے کے لیے تیار ہو جائے گا: ```sh گراف کلائنٹ کی تعمیر ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +آخر میں، اپنی `.ts` فائل کو اپ ڈیٹ کریں تاکہ تیار کردہ ٹائپ شدہ GraphQL دستاویزات استعمال کریں: ```tsx import React, { useEffect } from 'react' @@ -134,17 +134,17 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️ اہم اطلاع** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client` بالکل دوسرے GraphQL کلائنٹس جیسے اپولو کلائنٹ، URQL، یا React کیوری کے ساتھ مربوط ہے۔ آپ کو [آفیشل ریپوزٹری میں مثالیں ملیں گی](https://github.com/graphprotocol/graph-client/tree/main/examples). -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +تاہم، اگر آپ کسی دوسرے کلائنٹ کے ساتھ جانے کا انتخاب کرتے ہیں، ذہن میں رکھیں کہ **آپ کراس چین سب گراف ہینڈلنگ یا خودکار صفحہ بندی استعمال کرنے کے قابل نہیں ہوں گے، جو گراف سے کیوری کرنے کی بنیادی خصوصیات ہیں**. 
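Building on the notice above, here is a rough sketch of pairing the `graph-client` `execute` helper with TanStack React Query. The hook name and query are illustrative, and the example assumes `@tanstack/react-query` (v4 or later) is installed and that the Graph Client CLI build step shown earlier has already generated the `.graphclient` artifacts.

```javascript
// Rough sketch: wrapping the generated graph-client `execute` helper in a React Query hook.
// Assumes `@tanstack/react-query` v4+ is installed; the query below is illustrative only.
import { useQuery } from '@tanstack/react-query'
import { execute } from '../.graphclient'

const exampleQuery = /* GraphQL */ `
  query ExampleQuery {
    tokens(first: 5) {
      id
    }
  }
`

export function useExampleQuery() {
  return useQuery({
    queryKey: ['ExampleQuery'],
    queryFn: async () => {
      const result = await execute(exampleQuery, {})
      return result?.data
    },
  })
}
```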
### اپولو کلائنٹ -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[اپولو کلائنٹ](https://www.apollographql.com/docs/) فرنٹ اینڈ ایکو سسٹم پر ہر جگہ موجود GraphQL کلائنٹ ہے. -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +React، Angular، Vue، Ember، iOS، اور Android کے لیے دستیاب، اپولو کلائنٹ، اگرچہ سب سے بھاری کلائنٹ ہے، GraphQL کے اوپری حصے میں جدید UI بنانے کے لیے بہت سی خصوصیات لاتا ہے: - ایڈوانس ایرر ہینڈلنگ - صفحہ بندی @@ -152,9 +152,9 @@ Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, altho - پر امید UI - مقامی ریاستی انتظام -Let's look at how to fetch data from a subgraph with Apollo client in a web project. +آئیے دیکھتے ہیں کہ ویب پروجیکٹ میں اپولو کلائنٹ کے ساتھ سب گراف سے ڈیٹا کیسے حاصل کیا جائے. -First, install `@apollo/client` and `graphql`: +پہلے، `@apollo/client` اور `graphql` انسٹال کریں: ```sh npm install @apollo/client graphql @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +متغیرات کو استعمال کرنے کے لیے، آپ پر کیوری `variables` دلیل دے سکتے ہیں: ```javascript const tokensQuery = ` @@ -226,16 +226,16 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +ایک اور آپشن [URQL](https://formidable.com/open-source/urql/) ہے جو Node.js، React/Preact، Vue، اور Svelte ماحول میں دستیاب ہے، جس میں زیادہ جدید ہے۔ خصوصیات: - لچکدار کیش سسٹم - قابل توسیع ڈیزائن (اس کے اوپر نئی صلاحیتوں کو شامل کرنے میں آسانی) - ہلکا پھلکا بنڈل (اپولو کلائنٹ سے ~5x ہلکا) - فائل اپ لوڈز اور آف لائن موڈ کے لیے سپورٹ -Let's look at how to fetch data from a subgraph with URQL in a web project. +آئیے دیکھتے ہیں کہ ویب پروجیکٹ میں URQL کے ساتھ سب گراف سے ڈیٹا کیسے حاصل کیا جائے. -First, install `urql` and `graphql`: +پہلے، `urql` اور `graphql` انسٹال کریں: ```sh npm install urql graphql diff --git a/website/pages/ur/querying/querying-the-hosted-service.mdx b/website/pages/ur/querying/querying-the-hosted-service.mdx index 1c4efa468717..6c0ad182dc84 100644 --- a/website/pages/ur/querying/querying-the-hosted-service.mdx +++ b/website/pages/ur/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: ہوسٹڈ سروس سے کیوری کرنا --- -سب گراف کے تعینات ہونے کے ساتھ، [GraphiQL](https://github.com/graphql/graphiql) کھولنے کے لیے [ہوسٹڈ سروس](https://thegraph.com/hosted-service/) ملاحظہ کریں۔ انٹرفیس جہاں آپ کیوریز جاری کرکے اور اسکیما کو دیکھ کر سب گراف کے لیے تعینات GraphQL API کو تلاش کرسکتے ہیں. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. ذیل میں ایک مثال فراہم کی گئی ہے، لیکن براہ کرم سب گراف کی ہستیوں سے کیوری کرنے کے مکمل حوالہ کے لیے [کیوری API](/querying/graphql-api) دیکھیں. @@ -19,9 +19,9 @@ title: ہوسٹڈ سروس سے کیوری کرنا } ``` -## ہوسٹڈ سروس کا استعمال +## Using the hosted service -گراف ایکسپلورر اور اس کا GraphQL پلے گراؤنڈ ہوسٹڈ سروس پر تعینات سب گراف کو دریافت کرنے اور کیوری کرنے کا ایک مفید طریقہ ہے. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 
کچھ اہم خصوصیات ذیل میں تفصیلی ہیں: diff --git a/website/pages/ur/querying/querying-with-python.mdx b/website/pages/ur/querying/querying-with-python.mdx new file mode 100644 index 000000000000..68437fbc2636 --- /dev/null +++ b/website/pages/ur/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## شروع ہوا چاہتا ہے + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/ur/quick-start.mdx b/website/pages/ur/quick-start.mdx new file mode 100644 index 000000000000..5179a2f47abf --- /dev/null +++ b/website/pages/ur/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: فورا شروع کریں +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). 
+ +یہ گائیڈ یہ فرض کرتے ہوئے لکھی گئ ہے کہ آپ کے پاس ہے: + +- ایک سمارٹ کنٹریکٹ ایڈریس جو آپ کی مرضی کے نیٹ ورک پر ہے +- آپ کے سب گراف کو کیوریٹ کرنے کے لۓ جی آر ٹی +- ایک کرپٹو والیٹ + +## 1. سب گراف سٹوڈیو پر سب گراف بنائیں + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +ایک بار منسلک ہو جاۓ، آپ "سب گراف بنائیں" کو دبا کر شروع کر سکتے ہیں. اپنی مرظی کے نیٹ ورک کو چنیں اور جاری رکھیں پر کلک کریں. + +## 2. گراف CLI انسٹال کریں + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +اپنی مقامی مشین پر، درج زیل کمانڈز میں سے ایک کو رن کریں: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. اپنا سب گراف شروع کریں + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +جب آپ اپنے سب گراف کو شروع کرتے ہیں, CLI ٹول درج ذیل معلومات کے لۓ آپ سے پوچھے گا: + +- پروٹوکول: پروٹوکول منتخب جس سے آپ کا سب گراف ڈیٹا انڈیکس کرے گا +- سب گراف سلگ: اپنے سب گراف کے لیےؑ نام رکھیں. آپ کا سب گراف سلگ آپ کع سب گراف کا شناخت کنندہ ہے. +- سب گراف بنانے کے لیۓ ڈائریکٹری: اپنی مقامی ڈائریکٹری منتخب کریں +- ایتھیریم نیٹ ورک(اختیاری): آپ کو یہ بتانے کی ضرورت ہو سکتی ہے کہ آپ کا سب گراف کس EVM سے مطابقت رکھنے والے نیٹ ورک سے ڈیٹا کو انڈیکس کرے گا +- کنٹریکٹ ایڈریس: وہ سمارٹ کنٹریکٹ ایڈریس تلاش کریں جس سے آپ ڈیٹا کیوری کرنا چاہتے ہیں +- ABI: اگر ABI خود بخود نہیں ہے، آپ کو اسے JSON فائل کے طور پر دستی طور پر ان پٹ کرنے کی ضرورت ہوگی +- سٹارٹ بلاک: یہ تجویز کیا جاتا ہے کے آپ وقت بچانے کے لیۓ سٹارٹ بلاک میں ان پٹ کریں جبکہ آپ کا سب گراف بلاکچین ڈیٹا کو انڈیکس کرتا ہے۔ آپ اس بلاک کو تلاش کر کے سٹارٹ بلاک کا پتہ لگا سکتے ہیں جہاں آپ کا کنٹریکٹ تعینات کیا گیا تھا. +- کنٹریکٹ کا نام: اپنے کنٹریکٹ کا نام درج کریں +- کنٹریکٹ کے واقعات کو انڈیکس کریں بطور ادارے: یہ تجویز کیا جاتا ہے کہ آپ اسے درست پر سیٹ کریں کیونکہ یہ خود بخود ہر خارج ہونے والے واقع کے لیے آپ کے سب گراف میں میپنگس کا اضافہ کر دے گا۔ +- ایک اور کنٹریکٹ شامل کریں(اختیاری): آپ ایک اور کنٹریکٹ شامل کر سکتے ہیں۔ + +درج ذیل کمانڈ کو رن کر کے اپنے سب گراف کو موجودہ کنٹریکٹ سے شروع کریں: + +```sh +graph init --studio +``` + +اپنے سب گراف کو شروع کرتے وقت کیا توقع کی جائے اس کی مثال کے لیے درج ذیل اسکرین شاٹ دیکھیں: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. اپنا سب گراف لکھیں + +پچھلی کمانڈز ایک سکیفولڈ سب گراف بناتی ہیں جسے آپ اپنے سب گراف کی تعمیر کے لیے نقطہ آغاز کے طور پر استعمال کر سکتے ہیں۔ سب گراف میں تبدیلی کرتے وقت، آپ بنیادی طور پر تین فائلوں کے ساتھ کام کریں گے: + +- مینی فیسٹ (subgraph.yaml) - مینی فیسٹ اس بات کی وضاحت کرتا ہے کہ آپ کے سب گراف کس ڈیٹا سورسز کو انڈیکس کریں گے. +- سکیما (schema.graphql) - GraphQL سکیما اس بات کی وضاحت کرتا ہے کہ آپ سب گراف سے کون سا ڈیٹا حاصل کرنا چاہتے ہیں. +- اسمبلی اسکرپٹ میپنگ (mapping.ts) - یہ وہ کوڈ ہے جو آپ کے ڈیٹا سورس سے ڈیٹا کو اسکیما میں بیان کردہ اداروں میں ترجمہ کرتا ہے. + +اپنا سب گراف لکھنے کے طریقے کے بارے میں مزید معلومات کے لیے، دیکھیں [سب گراف بنانا](/developing/creating-a-subgraph). + +## 5. سب گراف سٹوڈیو پر تعینات کریں + +ایک بار آپ کا سب گراف لکھا جائے، درج ذیل کمانڈز رن کریں: + +```sh +$ graph codegen +$ graph build +``` + +- اپنے سب گراف کی تصدیق اور اسے تعینات کریں. تعیناتی کی کلید آپ کو سب گراف پیج پر ملے گی جو سب گراف سٹوڈیو میں موجود ہے. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. 
It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. اپنے سب گراف کو ٹیسٹ کریں + +آپ پلے گراؤنڈ سیکشن میں اپنا سب گراف ایک سامپل کیوری بنا کر ٹیسٹ کر سکتے ہیں. + +لوگز آپ کو بتائیں گے اکر آپ کے سب گراف میں مسائل ہیں۔ آپریشنل سب گراف کے لوگز اس طرح کے دکھیں گے: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. اپنے سب گراف کو گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر شائع کریں + +ایک بار آپ کا سب گراف سب گراف سٹوڈیو میں تعینات ہو جاتا ہے، آپ نے اسے ٹیسٹ کر لیا ہے، اور اسے پروڈکشن میں ڈالنے کے لیے تیار ہیں، پھر آپ اسے ڈیسینٹرالائزڈ نیٹ ورک پر شائع کر سکتے ہیں. + +سب گراف سٹوڈیو میں، سب گراف پر کلک کریں۔ سب گراف کے پیج پر، آپ اوپر دائیں جانب شائع کے بٹن پر کلک کر سکیں گے. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +اس سے پہلے کے آپ اپنا سب گراف کیوری کریں، انڈیکسرز کو اس پر کیوریز پیش کرنا شروع کرنے کی ضرورت ہے۔ اس عمل کو ہموار کرنے کے لیے، آپ جی آر ٹی کا استعمال کرتے ہوئے اپنا سب گراف کیوریٹ کر سکتے ہیں. + +لکھنے کے وقت، یہ تجویز کیا جاتا ہے کہ آپ 10,000 GRT کے ساتھ اپنا سب گراف کیوریٹ کریں تاکہ یہ یقینی بنایا جا سکے کہ یہ انڈیکسڈ ہے اور جلد از جلد کیوری کے لیے دستیاب ہے. + +گیس کی قیمتیں بچانے کے لیے، جب آپ اپنا سب گراف گراف کے ڈیسینٹرالائزڈ نیٹ ورک پر شائع کرتے ہیں تو آپ اس بٹن کو منتخب کرکے اپنے سب گراف کو اسی ٹرانزیکشن میں درست کر سکتے ہیں جسے آپ نے شائع کیا تھا: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. اپنے سب گراف کو کیوری کریں + +اب، آپ اپنے سب گراف کی کیوریز کو اپنے سب گراف کے کیوری URL پر بھیج کر اپنے سب گراف سے کیوری کر سکتے ہیں، جسے آپ کیوری کے بٹن پر کلک کر کے تلاش کر سکتے ہیں. + +اگر آپ کے پاس اپنی API کلید فری کے ذریعے نہیں ہے تو آپ اپنے ڈیپ سے کیوری کر سکتے ہیں، ریٹ محدود عارضی کیوری URL کے ذریعے نہیں ہے جسے ترقی اور سٹیجنگ کے لیے استعمال کیا جا سکتا ہے. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/ur/substreams.mdx b/website/pages/ur/substreams.mdx index 715dd765c51a..01f9333f5460 100644 --- a/website/pages/ur/substreams.mdx +++ b/website/pages/ur/substreams.mdx @@ -2,8 +2,43 @@ title: سب اسٹریمز --- -سب اسٹریمز ایک نئی ٹیکنالوجی ہے جسے گراف پروٹوکول کور ڈویلپرز نے تیار کیا ہے، جو کہ انڈیکسڈ بلاکچین ڈیٹا کی انتہائی تیزی سے کھپت اور پروسیسنگ کو قابل بنانے کے لیے بنایا گیا ہے۔ سب اسٹریمز فی الحال کھلے بیٹا میں ہیں، متعدد بلاکچینز میں جانچ اور ترقی کے لیے دستیاب ہیں. 
+![Substreams Logo](/img/substreams-logo.png) -مزید جاننے اور سب اسٹریمز کی تعمیر شروع کرنے کے لیے [سب اسٹریم دستاویزات](https://substreams.streamingfast.io/) ملاحظہ کریں. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### شروع ہوا چاہتا ہے + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/ur/sunrise.mdx b/website/pages/ur/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/ur/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? 
+ +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. 
In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. 
As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/ur/tokenomics.mdx b/website/pages/ur/tokenomics.mdx index 2ad6a83ade4b..87d3dcf6aefa 100644 --- a/website/pages/ur/tokenomics.mdx +++ b/website/pages/ur/tokenomics.mdx @@ -11,7 +11,7 @@ description: گراف نیٹ ورک کو طاقتور ٹوکنومکس کے ذر یہ ایک B2B2C ماڈل سے ملتا جلتا ہے، سوائے اس کے کہ یہ شرکا کے ڈیسینٹرالائزڈ نیٹ ورک سے چلتا ہے۔ نیٹ ورک کے شرکاء GRT انعامات کے بدلے اختتامی صارفین کو ڈیٹا فراہم کرنے کے لیے مل کر کام کرتے ہیں۔ GRT ورک یوٹیلیٹی ٹوکن ہے جو ڈیٹا فراہم کرنے والوں اور صارفین کو مربوط کرتا ہے۔ GRT نیٹ ورک کے اندر ڈیٹا فراہم کرنے والوں اور صارفین کو مربوط کرنے کے لیے ایک افادیت کے طور پر کام کرتا ہے اور پروٹوکول کے شرکاء کو ڈیٹا کو مؤثر طریقے سے ترتیب دینے کی ترغیب دیتا ہے. -گراف کا استعمال کرتے ہوئے، صارفین آسانی سے بلاکچین سے ڈیٹا تک رسائی حاصل کر سکتے ہیں، صرف اپنی ضرورت کی مخصوص معلومات کے لیے ادائیگی کر سکتے ہیں۔ گراف آج ویب 3 ایکو سسٹم میں بہت سی [مقبول ایپلیکیشنز](https://thegraph.com/explorer) کے ذریعے استعمال کیا جاتا ہے. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. گراف اسی طرح بلاکچین ڈیٹا کو انڈیکس کرتا ہے جس طرح گوگل ویب کو انڈیکس کرتا ہے۔ درحقیقت، ہو سکتا ہے کہ آپ پہلے ہی گراف کو سمجھے بغیر استعمال کر رہے ہوں۔ اگر آپ نے ڈیپ کے سامنے والے حصے کو دیکھا ہے جو اس کا ڈیٹا سب گراف سے حاصل کرتا ہے، تو آپ نے سب گراف سے ڈیٹا دریافت کیا! @@ -75,7 +75,7 @@ description: گراف نیٹ ورک کو طاقتور ٹوکنومکس کے ذر انڈیکسرز دو طریقوں سے GRT انعامات حاصل کر سکتے ہیں: -1. 
کیوری کی فیس: ڈیولپرز یا صارفین کے ذریعے سب گراف ڈیٹا کے کیوریز کے لیے GRT ادا کیا جاتا ہے۔ کیوری کی فیس ایک ریبیٹ پول میں جمع کی جاتی ہے اور انڈیکسرز میں تقسیم کی جاتی ہے. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. انڈیکسنگ کے انعامات: 3% سالانہ اجراء انڈیکسرز میں تقسیم کیا جاتا ہے اس بنیاد پر کہ وہ انڈیکس کر رہے ہیں۔ یہ انعامات انڈیکسرز کو انڈیکس سب گراف کی ترغیب دیتے ہیں، کبھی کبھار کیوری کی فیس شروع ہونے سے پہلے، انڈیکسنگ کے ثبوت (POIs) جمع کرنے اور جمع کروانے کے لیے اس بات کی تصدیق کرتے ہیں کہ انھوں نے ڈیٹا کو درست طریقے سے ترتیب دیا ہے. diff --git a/website/pages/vi/about.mdx b/website/pages/vi/about.mdx index c1f7c886900f..9b7d45a8ce53 100644 --- a/website/pages/vi/about.mdx +++ b/website/pages/vi/about.mdx @@ -1,47 +1,47 @@ --- -title: About The Graph +title: Về The Graph --- -This page will explain what The Graph is and how you can get started. +Trang này sẽ giải thích The Graph là gì và cách bạn có thể bắt đầu. ## What is The Graph? The Graph is a decentralized protocol for indexing and querying blockchain data. The Graph makes it possible to query data that is difficult to query directly. -Projects with complex smart contracts like [Uniswap](https://uniswap.org/) and NFTs initiatives like [Bored Ape Yacht Club](https://boredapeyachtclub.com/) store data on the Ethereum blockchain, making it really difficult to read anything other than basic data directly from the blockchain. +Các dự án với các hợp đồng thông minh phức tạp như [Uniswap](https://uniswap.org/) và các sáng kiến NFT như [Bored Ape Yacht Club](https://boredapeyachtclub.com/) lưu trữ dữ liệu trên chuỗi khối Ethereum, khiến việc đọc bất kỳ thứ gì khác ngoài dữ liệu cơ bản trực tiếp từ chuỗi khối này thực sự khó khăn. -In the case of Bored Ape Yacht Club, we can perform basic read operations on [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) like getting the owner of a certain Ape, getting the content URI of an Ape based on their ID, or the total supply, as these read operations are programmed directly into the smart contract, but more advanced real-world queries and operations like aggregation, search, relationships, and non-trivial filtering are not possible. For example, if we wanted to query for apes that are owned by a certain address, and filter by one of its characteristics, we would not be able to get that information by interacting directly with the contract itself. +Trong trường hợp của Bored Ape Yacht Club, chúng ta có thể thực hiện các thao tác đọc cơ bản trên [the contract](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code) chẳng hạn như truy vấn chủ sở hữu của một con Ape nhất định, truy vấn nội dung của một con Ape dựa trên ID của chúng hoặc tổng nguồn cung, vì các hoạt động đọc này được lập trình trực tiếp vào hợp đồng thông minh, nhưng các truy vấn và hoạt động trong thế giới thực nâng cao hơn như tổng hợp, tìm kiếm, các mối quan hệ và lọc bất thường khó thực hiện. Ví dụ: nếu chúng ta muốn truy vấn những con ape thuộc sở hữu của một địa chỉ nhất định và lọc theo một trong những đặc điểm của nó, chúng ta sẽ không thể lấy thông tin đó bằng cách tương tác trực tiếp với chính hợp đồng. 
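+For illustration, here is a minimal sketch of the basic read operations mentioned above, assuming ethers.js v5, a placeholder RPC URL, and standard ERC-721 function fragments for the BAYC contract. Single lookups like these work, but queries such as "all Apes owned by an address, filtered by a trait" cannot be answered by the contract directly.
+
+```typescript
+import { ethers } from "ethers";
+
+// Placeholder RPC endpoint - substitute your own provider URL (assumes ethers v5).
+const provider = new ethers.providers.JsonRpcProvider("https://eth-rpc.example.com");
+
+// Human-readable ABI fragments for the read functions the contract exposes directly.
+const abi = [
+  "function ownerOf(uint256 tokenId) view returns (address)",
+  "function tokenURI(uint256 tokenId) view returns (string)",
+  "function totalSupply() view returns (uint256)",
+];
+
+const bayc = new ethers.Contract("0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d", abi, provider);
+
+async function basicReads() {
+  // Single-value lookups like these are programmed into the contract, so they work:
+  const owner = await bayc.ownerOf(1);
+  const uri = await bayc.tokenURI(1);
+  const supply = await bayc.totalSupply();
+  console.log({ owner, uri, supply: supply.toString() });
+  // Aggregation, search, and non-trivial filtering are not available here -
+  // answering those requires indexing events and metadata off-chain.
+}
+
+basicReads();
+```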
-To get this data, you would have to process every single [`transfer`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) event ever emitted, read the metadata from IPFS using the Token ID and IPFS hash, and then aggregate it. Even for these types of relatively simple questions, it would take **hours or even days** for a decentralized application (dapp) running in a browser to get an answer. +Để có được dữ liệu này, bạn sẽ phải xử lý từng sự kiện [`chuyển khoản`](https://etherscan.io/address/0xbc4ca0eda7647a8ab7c2061c2e118a18a936f13d#code#L1746) đã từng được phát ra, hãy đọc siêu dữ liệu (metadata) từ IPFS bằng cách sử dụng Token ID và mã băm IPFS, sau đó tổng hợp lại. Ngay cả đối với những loại câu hỏi tương đối đơn giản này, nó sẽ mất **hàng giờ thậm chí hàng ngày** cho một ứng dụng phi tập trung (dapp) chạy trong trình duyệt để nhận được câu trả lời. You could also build out your own server, process the transactions there, save them to a database, and build an API endpoint on top of it all in order to query the data. However, this option is [resource intensive](/network/benefits/), needs maintenance, presents a single point of failure, and breaks important security properties required for decentralization. -**Indexing blockchain data is really, really hard.** +**Lập chỉ mục dữ liệu blockchain thực sự rất rất khó.** -Blockchain properties like finality, chain reorganizations, or uncled blocks complicate this process further, and make it not just time consuming but conceptually hard to retrieve correct query results from blockchain data. +Các thuộc tính của blockchain như tính hoàn thiện, tổ chức lại chuỗi hoặc các khối không có tiêu đề làm phức tạp thêm quá trình này và khiến nó không chỉ tốn thời gian mà còn khó về mặt khái niệm để truy xuất kết quả truy vấn chính xác từ dữ liệu blockchain. -The Graph solves this with a decentralized protocol that indexes and enables the performant and efficient querying of blockchain data. These APIs (indexed "subgraphs") can then be queried with a standard GraphQL API. Today, there is a hosted service as well as a decentralized protocol with the same capabilities. Both are backed by the open source implementation of [Graph Node](https://github.com/graphprotocol/graph-node). +The Graph giải quyết vấn đề này bằng một giao thức phi tập trung có thể lập chỉ mục và cho phép truy vấn dữ liệu blockchain một cách hiệu quả và tác dụng tốt. Sau đó, các API này ("subgraphs" được lập chỉ mục) có thể được truy vấn bằng API GraphQL tiêu chuẩn. Ngày nay, có một dịch vụ lưu trữ cũng như một giao thức phi tập trung với các khả năng tương tự. Cả hai đều được hỗ trợ bởi việc triển khai mã nguồn mở của [Graph Node](https://github.com/graphprotocol/graph-node). -## How The Graph Works +## Cách thức hoạt động của The Graph -The Graph learns what and how to index Ethereum data based on subgraph descriptions, known as the subgraph manifest. The subgraph description defines the smart contracts of interest for a subgraph, the events in those contracts to pay attention to, and how to map event data to data that The Graph will store in its database. +The Graph tìm hiểu những gì và cách thức lập chỉ mục dữ liệu Ethereum dựa trên mô tả subgraph, được gọi là bản kê khai subgraph (subgraph manifest). Mô tả subgraph xác định các hợp đồng thông minh quan tâm cho một subgraph, các sự kiện trong các hợp đồng đó cần chú ý và cách ánh xạ dữ liệu sự kiện với dữ liệu mà The Graph sẽ lưu trữ trong cơ sở dữ liệu của nó. 
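+To make the contrast concrete, here is a minimal sketch of querying an indexed subgraph over its standard GraphQL API, assuming a hypothetical endpoint URL and hypothetical `apes`/`owner`/`tokenURI` schema fields; a real subgraph's fields depend entirely on the schema it defines.
+
+```typescript
+// Hypothetical subgraph endpoint and entity fields - adjust to a real deployment's schema.
+const SUBGRAPH_URL = "https://api.example.com/subgraphs/name/example/bayc";
+
+const query = `
+  query ApesByOwner($owner: Bytes!) {
+    apes(where: { owner: $owner }, first: 10) {
+      id
+      owner
+      tokenURI
+    }
+  }
+`;
+
+async function fetchApes(owner: string) {
+  const res = await fetch(SUBGRAPH_URL, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({ query, variables: { owner } }),
+  });
+  const { data } = await res.json();
+  // Filtering and relationships are resolved by the indexed store, not on-chain.
+  return data.apes;
+}
+
+fetchApes("0x0000000000000000000000000000000000000000").then(console.log);
+```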
-Once you have written a `subgraph manifest`, you use the Graph CLI to store the definition in IPFS and tell the indexer to start indexing data for that subgraph. +Khi bạn đã viết một `subgraph manifest`, bạn sử dụng Graph CLI để lưu trữ định nghĩa trong IPFS và yêu cầu indexer bắt đầu lập chỉ mục dữ liệu cho subgraph đó. -This diagram gives more detail about the flow of data once a subgraph manifest has been deployed, dealing with Ethereum transactions: +Biểu đồ này cung cấp chi tiết hơn về luồng dữ liệu khi một tệp kê khai subgraph đã được triển khai, xử lý các giao dịch Ethereum: ![A graphic explaining how The Graph uses Graph Node to serve queries to data consumers](/img/graph-dataflow.png) -The flow follows these steps: +Quy trình thực hiện theo các bước sau: 1. A dapp adds data to Ethereum through a transaction on a smart contract. -2. The smart contract emits one or more events while processing the transaction. -3. Graph Node continually scans Ethereum for new blocks and the data for your subgraph they may contain. -4. Graph Node finds Ethereum events for your subgraph in these blocks and runs the mapping handlers you provided. The mapping is a WASM module that creates or updates the data entities that Graph Node stores in response to Ethereum events. +2. Hợp đồng thông minh phát ra một hoặc nhiều sự kiện trong khi xử lý giao dịch. +3. Graph Node liên tục quét Ethereum để tìm các khối mới và dữ liệu cho subgraph của bạn mà chúng có thể chứa. +4. Graph Node tìm các sự kiện Ethereum cho subgraph của bạn trong các khối này và chạy các trình xử lý ánh xạ mà bạn đã cung cấp. Ánh xạ là một mô-đun WASM tạo hoặc cập nhật các thực thể dữ liệu mà Graph Node lưu trữ để đáp ứng với các sự kiện Ethereum. 5. The dapp queries the Graph Node for data indexed from the blockchain, using the node's [GraphQL endpoint](https://graphql.org/learn/). The Graph Node in turn translates the GraphQL queries into queries for its underlying data store in order to fetch this data, making use of the store's indexing capabilities. The dapp displays this data in a rich UI for end-users, which they use to issue new transactions on Ethereum. The cycle repeats. -## Next Steps +## Bước tiếp theo -In the following sections we will go into more detail on how to define subgraphs, how to deploy them, and how to query data from the indexes that Graph Node builds. +Trong các phần sau, chúng ta sẽ đi vào chi tiết hơn về cách xác định subgraph, cách triển khai chúng và cách truy vấn dữ liệu từ các chỉ mục mà Graph Node xây dựng. -Before you start writing your own subgraph, you might want to have a look at the Graph Explorer and explore some of the subgraphs that have already been deployed. The page for each subgraph contains a playground that lets you query that subgraph's data with GraphQL. +Trước khi bắt đầu viết subgraph của riêng mình, bạn có thể muốn xem Trình khám phá Graph (Graph Explorer) và khám phá một số subgraph đã được triển khai. Trang cho mỗi subgraph chứa một sân chơi (playground) cho phép bạn truy vấn dữ liệu của subgraph đó bằng GraphQL. diff --git a/website/pages/vi/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/vi/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..f92096a8a216 100644 --- a/website/pages/vi/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/vi/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## Khái quát -## What are L2 Transfer Tools? 
+### What are L2 Transfer Tools? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### Can I use the same wallet I use on Ethereum mainnet? If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### What happens if I don’t finish my transfer in 7 days? + +The L2 Transfer Tools use Arbitrum’s native mechanism to send messages from L1 to L2. This mechanism is called a “retryable ticket” and is used by all native token bridges, including the Arbitrum GRT bridge. You can read more about retryable tickets in the [Arbitrum docs](https://docs.arbitrum.io/arbos/l1-to-l2-messaging). + +When you transfer your assets (subgraph, stake, delegation or curation) to L2, a message is sent through the Arbitrum GRT bridge which creates a retryable ticket in L2. The transfer tool includes some ETH value in the transaction, that is used to 1) pay to create the ticket and 2) pay for the gas to execute the ticket in L2. However, because gas prices might vary in the time until the ticket is ready to execute in L2, it is possible that this auto-execution attempt fails. When that happens, the Arbitrum bridge will keep the retryable ticket alive for up to 7 days, and anyone can retry “redeeming” the ticket (which requires a wallet with some ETH bridged to Arbitrum). 
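+The redemption status of such a retryable ticket can also be checked programmatically. The sketch below is a rough illustration, assuming ethers v5 providers, the `@arbitrum/sdk` package, and placeholder RPC URLs and transaction hash.
+
+```typescript
+import { providers } from "ethers";
+import { L1TransactionReceipt, L1ToL2MessageStatus } from "@arbitrum/sdk";
+
+// Placeholder endpoints and L1 transaction hash - substitute your own values.
+const l1Provider = new providers.JsonRpcProvider("https://eth-rpc.example.com");
+const l2Provider = new providers.JsonRpcProvider("https://arbitrum-rpc.example.com");
+const L1_TX_HASH = "0x..."; // hash of the L1 transaction that started the transfer
+
+async function checkRetryableTicket() {
+  const receipt = await l1Provider.getTransactionReceipt(L1_TX_HASH);
+  const l1TxReceipt = new L1TransactionReceipt(receipt);
+
+  // The transfer tools send one L1 -> L2 message (the retryable ticket).
+  const [message] = await l1TxReceipt.getL1ToL2Messages(l2Provider);
+  const status = await message.status();
+
+  if (status === L1ToL2MessageStatus.REDEEMED) {
+    console.log("Ticket executed on L2 - the transfer arrived.");
+  } else if (status === L1ToL2MessageStatus.FUNDS_DEPOSITED_ON_L2) {
+    console.log("Auto-execution failed - redeem the ticket manually, e.g. via the retryable dashboard.");
+  } else {
+    console.log("Ticket status:", L1ToL2MessageStatus[status]);
+  }
+}
+
+checkRetryableTicket();
+```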
+ +This is what we call the “Confirm” step in all the transfer tools - it will run automatically in most cases, as the auto-execution is most often successful, but it is important that you check back to make sure it went through. If it doesn’t succeed and there are no successful retries in 7 days, the Arbitrum bridge will discard the ticket, and your assets (subgraph, stake, delegation or curation) will be lost and can’t be recovered. The Graph core devs have a monitoring system in place to detect these situations and try to redeem the tickets before it’s too late, but it is ultimately your responsibility to ensure your transfer is completed in time. If you’re having trouble confirming your transaction, please reach out using [this form](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) and core devs will be there to help you. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## Subgraph Transfer -## How do I transfer my subgraph? +### How do I transfer my subgraph? + + 
Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. -## Does my subgraph need to be published to transfer it? +### Does my subgraph need to be published to transfer it? To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. -## After I transfer, do I also need to re-publish on Arbitrum? +### After I transfer, do I also need to re-publish on Arbitrum? After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Is publishing and versioning the same on L2 as on Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Will my subgraph's curation move with my subgraph? If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. 
-## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Can I move my subgraph back to Ethereum mainnet after I transfer? Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. -## Why do I need bridged ETH to complete my transfer? +### Why do I need bridged ETH to complete my transfer? Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +## Delegation + +### How do I transfer my delegation? + + + +To transfer your delegation, you will need to complete the following steps: + +1. Initiate delegation transfer on Ethereum mainnet +2. Wait 20 minutes for confirmation +3. Confirm delegation transfer on Arbitrum + +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? + +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. + +### What happens if the Indexer I currently delegate to isn't on Arbitrum One? + +The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. + +### Do Delegators have the option to delegate to another Indexer? + +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. + +### What if I can't find the Indexer I'm delegating to on L2? + +The L2 transfer tool will automatically detect the Indexer you previously delegated to. + +### Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? + +The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. + +### Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? + +The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. 
+ +### Can my rewards be negatively impacted if I do not transfer my delegation? + +It is anticipated that all network participation will move to Arbitrum One in the future. + +### How long does it take to complete the transfer of my delegation to L2? + +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? + +Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. + +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? + +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. + +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. + +### Is there any delegation tax? + +No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. + +### Will my unrealized rewards be transferred when I transfer my delegation? + +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. + +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ + +### Is moving delegations to L2 mandatory? Is there a deadline? + +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? 
+ +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. + +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ + +### I don't see a button to transfer my delegation. Why is that? + +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. + +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? + +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ + +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? + +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + ## Curation Signal -## How do I transfer my curation? +### How do I transfer my curation? To transfer your curation, you will need to complete the following steps: @@ -90,25 +206,29 @@ To transfer your curation, you will need to complete the following steps: \*If necessary - i.e. you are using a contract address. -## How will I know if the subgraph I curated has moved to L2? +### How will I know if the subgraph I curated has moved to L2? When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. -## What if I do not wish to move my curation to L2? +### What if I do not wish to move my curation to L2? When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. -## How do I know my curation successfully transferred? +### How do I know my curation successfully transferred? Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. -## Can I transfer my curation on more than one subgraph at a time? +### Can I transfer my curation on more than one subgraph at a time? There is no bulk transfer option at this time. ## Indexer Stake -## How do I transfer my stake to Arbitrum? +### How do I transfer my stake to Arbitrum? + +> Disclaimer: If you are currently unstaking any portion of your GRT on your Indexer, you will not be able to use L2 Transfer Tools. + + To transfer your stake, you will need to complete the following steps: @@ -120,7 +240,7 @@ To transfer your stake, you will need to complete the following steps: \*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. 
In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Will all of my stake transfer? You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. @@ -128,91 +248,45 @@ If you plan on transferring parts of your stake over multiple transactions, you Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. -## How much time do I have to confirm my stake transfer to Arbitrum? +### How much time do I have to confirm my stake transfer to Arbitrum? \*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. -## What if I have open allocations? +### What if I have open allocations? If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. -## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +### Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. -## How long will it take to transfer my stake? +### How long will it take to transfer my stake? It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. -## Do I have to index on Arbitrum before I transfer my stake? +### Do I have to index on Arbitrum before I transfer my stake? You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. -## Can Delegators move their delegation before I move my indexing stake? +### Can Delegators move their delegation before I move my indexing stake? No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +### Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. 
The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. -## Delegation - -## How do I transfer my delegation? - -To transfer your delegation, you will need to complete the following steps: - -1. Initiate delegation transfer on Ethereum mainnet - -2. Wait 20 minutes for confirmation - -3. Confirm delegation transfer on Arbitrum - -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? - -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. - -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? - -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. - -## Do Delegators have the option to delegate to another Indexer? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -## What if I can't find the Indexer I'm delegating to on L2? +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -The L2 transfer tool will automatically detect the Indexer you previously delegated to. - -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? - -The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. - -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? - -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. - -## Can my rewards be negatively impacted if I do not transfer my delegation? - -It is anticipated that all network participation will move to Arbitrum One in the future. - -## How long does it take to complete the transfer of my delegation to L2? - -A 20-minute confirmation is required for delegation transfer. 
Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). - -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? - -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. - -## Is there any delegation tax? - -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## Vesting Contract Transfer -## How do I transfer my vesting contract? +### How do I transfer my vesting contract? To transfer your vesting, you will need to complete the following steps: @@ -222,7 +296,9 @@ To transfer your vesting, you will need to complete the following steps: 3. Confirm vesting transfer on Arbitrum -## How do I transfer my vesting contract if I am only partially vested? +### How do I transfer my vesting contract if I am only partially vested? + + 1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) @@ -232,7 +308,9 @@ To transfer your vesting, you will need to complete the following steps: 4. Withdraw any remaining ETH from the transfer tool contract -## How do I transfer my vesting contract if I am fully vested? +### How do I transfer my vesting contract if I am fully vested? + + For those that are fully vested, the process is similar: @@ -244,7 +322,7 @@ For those that are fully vested, the process is similar: 4. Withdraw any remaining ETH from the transfer tool contract -## Can I transfer my vesting contract to Arbitrum? +### Can I transfer my vesting contract to Arbitrum? You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). @@ -256,27 +334,27 @@ Please note that you will not be able to release/withdraw GRT from the L2 vestin If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +### I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? 
Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +### I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. -## Can I specify a different beneficiary for my vesting contract on L2? +### Can I specify a different beneficiary for my vesting contract on L2? Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +### My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. This allows you to transfer your stake or delegation to any L2 address. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +### My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. @@ -298,18 +376,36 @@ To transfer your vesting contract to L2, you will send any GRT balance to L2 usi \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. 
The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Can I move my vesting contract back to L1? There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. -## Why do I need to move my vesting contract to begin with? +### Why do I need to move my vesting contract to begin with? You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### What happens if I try to cash out my contract when it is only partially vested? Is this possible? This is not a possibility. You can move funds back to L1 and withdraw them there. -## What if I don't want to move my vesting contract to L2? +### What if I don't want to move my vesting contract to L2? You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. diff --git a/website/pages/vi/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/vi/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..c3227cf3cf8e 100644 --- a/website/pages/vi/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/vi/arbitrum/l2-transfer-tools-guide.mdx @@ -1,15 +1,15 @@ --- -title: L2 Transfer Tools Guide +title: Hướng dẫn sử dụng công cụ chuyển L2 --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. - The Graph has made it easy to move to L2 on Arbitrum One. 
For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +Một số câu hỏi thường gặp về những công cụ này được trả lời trong [Câu hỏi thường gặp về Công cụ chuyển L2](/arbitrum/l2-transfer-tools-faq). Câu hỏi thường gặp chứa các giải thích sâu sắc về cách sử dụng các công cụ, cách chúng hoạt động và những điều cần lưu ý khi sử dụng chúng. ## How to transfer your subgraph to Arbitrum (L2) + + ## Benefits of transferring your subgraphs The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. @@ -28,13 +28,13 @@ As soon as the subgraph is transferred, since all curation is converted to GRT, Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. -## Choosing your L2 wallet +## Chọn ví L2 của bạn When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Nếu bạn đang sử dụng ví "thông thường" như MetaMask (Tài khoản thuộc sở hữu bên ngoài hoặc EOA, tức là ví không phải là hợp đồng thông minh), thì đây là tùy chọn và bạn nên giữ cùng địa chỉ chủ sở hữu như trong L1. If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. @@ -50,13 +50,13 @@ You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you c You can find the L2 Transfer Tool when you're looking at your subgraph's page on Subgraph Studio: -![transfer tool](/img/L2-transfer-tool1.png) +![công cụ chuyển](/img/L2-transfer-tool1.png) It is also available on Explorer if you're connected with the wallet that owns a subgraph and on that subgraph's page on Explorer: -![Transferring to L2](/img/transferToL2.png) +![Chuyển sang L2](/img/transferToL2.png) -Clicking on the Transfer to L2 button will open the transfer tool where you can start the transfer process. +Nhấp vào nút Chuyển sang L2 sẽ mở công cụ chuyển nơi bạn có thể bắt đầu quá trình chuyển. 
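+One practical note before the step-by-step walkthrough: the query URL change described above is usually a one-line change in the dapp. The sketch below is illustrative only, with placeholder endpoint URLs; the real L2 query URL is shown in Subgraph Studio once the subgraph is published on Arbitrum One.
+
+```typescript
+// Placeholder endpoints - copy the real query URLs from Subgraph Studio.
+const L1_SUBGRAPH_URL = "https://gateway.example.com/api/<api-key>/subgraphs/id/<l1-id>";
+const L2_SUBGRAPH_URL = "https://arbitrum-gateway.example.com/api/<api-key>/subgraphs/id/<l2-id>";
+
+// Point queries at L2 as soon as the subgraph is live there; the L1 URL keeps
+// working for a grace period but adds latency once the gateway starts forwarding.
+const SUBGRAPH_URL = L2_SUBGRAPH_URL;
+
+export async function querySubgraph<T>(
+  query: string,
+  variables?: Record<string, unknown>
+): Promise<T> {
+  const res = await fetch(SUBGRAPH_URL, {
+    method: "POST",
+    headers: { "Content-Type": "application/json" },
+    body: JSON.stringify({ query, variables }),
+  });
+  const { data, errors } = await res.json();
+  if (errors) throw new Error(JSON.stringify(errors));
+  return data as T;
+}
+```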
## Step 1: Starting the transfer @@ -68,7 +68,7 @@ After opening the Transfer Tool, you will be able to input the L2 wallet address If you execute this step, **make sure you proceed until completing step 3 in less than 7 days, or the subgraph and your signal GRT will be lost.** This is due to how L1-L2 messaging works on Arbitrum: messages that are sent through the bridge are "retry-able tickets" that must be executed within 7 days, and the initial execution might need a retry if there are spikes in the gas price on Arbitrum. -![Start the trnasfer to L2](/img/startTransferL2.png) +![Bắt đầu chuyển sang L2](/img/startTransferL2.png) ## Step 2: Waiting for the subgraph to get to L2 @@ -76,7 +76,7 @@ After you start the transfer, the message that sends your L1 subgraph to L2 must Once this wait time is over, Arbitrum will attempt to auto-execute the transfer on the L2 contracts. -![Wait screen](/img/screenshotOfWaitScreenL2.png) +![Màn hình chờ](/img/screenshotOfWaitScreenL2.png) ## Step 3: Confirming the transfer @@ -84,7 +84,7 @@ In most cases, this step will auto-execute as the L2 gas included in step 1 shou If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. -![Confirm the transfer to L2](/img/confirmTransferToL2.png) +![Xác nhận chuyển sang L2](/img/confirmTransferToL2.png) ## Step 4: Finishing the transfer on L2 @@ -96,7 +96,7 @@ At this point, your subgraph and GRT have been received on Arbitrum, but the sub This will publish the subgraph so that Indexers that are operating on Arbitrum can start serving it. It will also mint curation signal using the GRT that were transferred from L1. -## Step 5: Updating the query URL +## Bước 5: Cập nhật URL truy vấn Your subgraph has been successfully transferred to Arbitrum! To query the subgraph, the new URL will be: @@ -116,13 +116,13 @@ A fraction of these GRT corresponding to the subgraph owner is sent to L2 togeth At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be held indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. -## Choosing your L2 wallet +## Chọn ví L2 của bạn If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same Curator address as in L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. +Nếu bạn đang sử dụng ví hợp đồng thông minh, chẳng hạn như multisig (ví dụ: Safe), thì việc chọn địa chỉ ví L2 khác là bắt buộc, vì rất có thể tài khoản này chỉ tồn tại trên mạng chính và bạn sẽ không thể thực hiện giao dịch trên Arbitrum bằng ví này.
Nếu bạn muốn tiếp tục sử dụng ví hợp đồng thông minh hoặc multisig, hãy tạo một ví mới trên Arbitrum và sử dụng địa chỉ của nó làm địa chỉ ví nhận L2. **It is very important to use a wallet address that you control, and that can make transactions on Arbitrum, as otherwise the curation will be lost and cannot be recovered.** @@ -144,7 +144,7 @@ If you execute this step, **make sure you proceed until completing step 3 in les Starting the transfer: -![Send signal to L2](/img/sendingCurationToL2Step2First.png) +![Gửi tín hiệu tới L2](/img/sendingCurationToL2Step2First.png) After you start the transfer, the message that sends your L1 curation to L2 must propagate through the Arbitrum bridge. This takes approximately 20 minutes (the bridge waits for the mainnet block containing the transaction to be "safe" from potential chain reorgs). @@ -158,7 +158,7 @@ In most cases, this step will auto-execute as the L2 gas included in step 1 shou If this is the case, you will need to connect using an L2 wallet that has some ETH on Arbitrum, switch your wallet network to Arbitrum, and click on "Confirm Transfer" to retry the transaction. -![Send signal to L2](/img/L2TransferToolsFinalCurationImage.png) +![Gửi tín hiệu tới L2](/img/L2TransferToolsFinalCurationImage.png) ## Withdrawing your curation on L1 diff --git a/website/pages/vi/billing.mdx b/website/pages/vi/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/vi/billing.mdx +++ b/website/pages/vi/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. 
Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/vi/chain-integration-overview.mdx b/website/pages/vi/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/vi/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). 
+- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. 
+ +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/vi/cookbook/arweave.mdx b/website/pages/vi/cookbook/arweave.mdx index 15aaf1a38831..11b83ed840e8 100644 --- a/website/pages/vi/cookbook/arweave.mdx +++ b/website/pages/vi/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -168,7 +168,7 @@ Here is an example subgraph for reference: - [Example subgraph for Arweave](https://github.com/graphprotocol/graph-tooling/tree/main/examples/arweave-blocks-transactions) -## FAQ +## CÂU HỎI THƯỜNG GẶP ### Can a subgraph index Arweave and other chains? diff --git a/website/pages/vi/cookbook/base-testnet.mdx b/website/pages/vi/cookbook/base-testnet.mdx index b1e3a4fc7c6d..9b890255f26e 100644 --- a/website/pages/vi/cookbook/base-testnet.mdx +++ b/website/pages/vi/cookbook/base-testnet.mdx @@ -11,7 +11,7 @@ What you'll need: ## Subgraph Studio -### 1. 
Install the Graph CLI +### 1. Cài đặt Graph CLI The Graph CLI (>=v0.41.0) is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. @@ -41,7 +41,7 @@ Make sure that the graph-cli is updated to latest (above 0.41.0) graph --version ``` -Initialize your subgraph from an existing contract. +Khởi tạo subgraph của bạn từ một hợp đồng hiện có. ```sh graph init --studio @@ -57,21 +57,21 @@ Your subgraph slug is an identifier for your subgraph. The CLI tool will walk yo - Contract name: `` - Yes/no to indexing events (yes means your subgraph will be bootstrapped with entities in the schema and simple mappings for emitted events) -### 3. Write your Subgraph +### 3. Viết Subgraph của bạn > If emitted events are the only thing you want to index, then no additional work is required, and you can skip to the next step. The previous command creates a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: - Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. Make sure to add `base-testnet` as the network name in manifest file to deploy your subgraph on Base testnet. -- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retreive from the subgraph. -- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. +- Lược đồ (schema.graphql) - Lược đồ GraphQL xác định dữ liệu nào bạn muốn lấy từ subgraph. +- Ánh xạ AssemblyScript (mapping.ts) - Đây là mã dịch dữ liệu từ các nguồn dữ liệu của bạn sang các thực thể được xác định trong lược đồ. If you want to index additional data, you will need extend the manifest, schema and mappings. For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). -### 4. Deploy to the Subgraph Studio +### 4. Triển khai đến Subgraph Studio Before you can deploy your subgraph, you will need to authenticate with the Subgraph Studio. You can do this by running the following command: diff --git a/website/pages/vi/cookbook/cosmos.mdx b/website/pages/vi/cookbook/cosmos.mdx index ef21e4bc0855..39c05c76221c 100644 --- a/website/pages/vi/cookbook/cosmos.mdx +++ b/website/pages/vi/cookbook/cosmos.mdx @@ -198,7 +198,7 @@ $ graph build Once your subgraph has been created, you can deploy your subgraph by using the `graph deploy` CLI command after running the `graph create` CLI command: -**Hosted Service** +**Dịch vụ được lưu trữ** ```bash graph create account/subgraph-name --product hosted-service @@ -230,7 +230,7 @@ The GraphQL endpoint for Cosmos subgraphs is determined by the schema definition The [Cosmos Hub blockchain](https://hub.cosmos.network/) is the first blockchain in the [Cosmos](https://cosmos.network/) ecosystem. You can visit the [official documentation](https://docs.cosmos.network/) for more information. -#### Networks +#### Các Mạng Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testnet-001`.
    Other Cosmos Hub networks, i.e. `cosmoshub-3`, are halted, therefore no data is provided for them. @@ -242,7 +242,7 @@ Cosmos Hub mainnet is `cosmoshub-4`. Cosmos Hub current testnet is `theta-testne [Osmosis](https://osmosis.zone/) is a decentralized, cross-chain automated market maker (AMM) protocol built on top of the Cosmos SDK. It allows users to create custom liquidity pools and trade IBC-enabled tokens. You can visit the [official documentation](https://docs.osmosis.zone/) for more information. -#### Networks +#### Các Mạng Osmosis mainnet is `osmosis-1`. Osmosis current testnet is `osmo-test-4`. diff --git a/website/pages/vi/cookbook/grafting.mdx b/website/pages/vi/cookbook/grafting.mdx index 54ad7a0eaff8..342e468b5aac 100644 --- a/website/pages/vi/cookbook/grafting.mdx +++ b/website/pages/vi/cookbook/grafting.mdx @@ -10,20 +10,36 @@ Grafting reuses the data from an existing subgraph and starts indexing it at a l The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Nó thêm hoặc xóa các loại thực thể +- Nó loại bỏ các thuộc tính khỏi các loại thực thể +- Nó thêm các thuộc tính nullable vào các loại thực thể +- Nó biến các thuộc tính không thể nullable thành các thuộc tính nullable +- Nó thêm giá trị vào enums +- Nó thêm hoặc xóa các giao diện +- Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai For more information, you can check: -- [Grafting](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) +- [Ghép](https://thegraph.com/docs/en/developing/creating-a-subgraph#grafting-onto-existing-subgraphs) In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. 
+ +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/vi/cookbook/near.mdx index 879e8e5c15aa..efdef2a007c7 100644 --- a/website/pages/vi/cookbook/near.mdx +++ b/website/pages/vi/cookbook/near.mdx @@ -193,7 +193,7 @@ $ graph deploy --node --ipfs https://api.thegraph.com/ipfs/ # u The node configuration will depend on where the subgraph is being deployed. -### Hosted Service +### Dịch vụ được lưu trữ ```sh graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token @@ -239,7 +239,7 @@ Here are some example subgraphs for reference: [NEAR Receipts](https://github.com/graphprotocol/graph-tooling/tree/main/examples/near-receipts) -## FAQ +## CÂU HỎI THƯỜNG GẶP ### How does the beta work? @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. ## References diff --git a/website/pages/vi/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..a83363d0fc61 100644 --- a/website/pages/vi/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/vi/cookbook/upgrading-a-subgraph.mdx @@ -2,16 +2,16 @@ title: Upgrading an Existing Subgraph to The Graph Network --- -## Introduction +## Giới thiệu This is a guide on how to upgrade your subgraph from the hosted service to The Graph's decentralized network. Over 1,000 subgraphs have successfully upgraded to The Graph Network, including projects like Snapshot, Loopring, Audius, Premia, Livepeer, Uma, Curve, Lido, and many more! The process of upgrading is quick, and your subgraphs will forever benefit from the reliability and performance that you can only get on The Graph Network. -### Prerequisites +### Điều kiện tiên quyết - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it.
diff --git a/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d97ed9412312 100644 --- a/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/vi/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: @@ -15,13 +15,13 @@ These are the steps to deploy your subgraph to the Subgraph Studio: We are using the same CLI to deploy subgraphs to our [hosted service](https://thegraph.com/hosted-service/) and to the [Subgraph Studio](https://thegraph.com/studio/). Here are the commands to install graph-cli. This can be done using npm or yarn. -**Install with yarn:** +**Cài đặt bằng yarn:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**Cài đặt bằng npm:** ```bash npm install -g @graphprotocol/graph-cli diff --git a/website/pages/vi/deploying/hosted-service.mdx index 2e6093531110..5c052c198b7b 100644 --- a/website/pages/vi/deploying/hosted-service.mdx +++ b/website/pages/vi/deploying/hosted-service.mdx @@ -10,7 +10,7 @@ If you don't have an account on the hosted service, you can sign up with your Gi For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). -## Create a Subgraph +## Tạo một Subgraph First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` @@ -38,7 +38,7 @@ The `` in this case is your GitHub user or organization name, `/ [] @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the contract address in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service \ + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [here](/developing/supported-networks).
diff --git a/website/pages/vi/deploying/subgraph-studio.mdx b/website/pages/vi/deploying/subgraph-studio.mdx index 1406065463d4..2434e731d2cc 100644 --- a/website/pages/vi/deploying/subgraph-studio.mdx +++ b/website/pages/vi/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network @@ -47,8 +41,8 @@ The Graph Network is not yet able to support all of the data-sources & features - Index a [supported network](/developing/supported-networks) - Must not use any of the following features: - ipfs.cat & ipfs.map - - Non-fatal errors - - Grafting + - Lỗi không nghiêm trọng + - Ghép More features & networks will be added to The Graph Network incrementally. diff --git a/website/pages/vi/developing/creating-a-subgraph.mdx b/website/pages/vi/developing/creating-a-subgraph.mdx index 1fc288833c35..3169fec69eb8 100644 --- a/website/pages/vi/developing/creating-a-subgraph.mdx +++ b/website/pages/vi/developing/creating-a-subgraph.mdx @@ -6,31 +6,31 @@ A subgraph extracts data from a blockchain, processing it and storing it so that ![Defining a Subgraph](/img/defining-a-subgraph.png) -The subgraph definition consists of a few files: +Định nghĩa subgraph bao gồm một số tệp: -- `subgraph.yaml`: a YAML file containing the subgraph manifest +- `subgraph.yaml`: một tệp YAML chứa tệp kê khai subgraph -- `schema.graphql`: a GraphQL schema that defines what data is stored for your subgraph, and how to query it via GraphQL +- `schema.graphql`: một lược đồ GraphQL xác định dữ liệu nào được lưu trữ cho subgraph của bạn và cách truy vấn nó qua GraphQL -- `AssemblyScript Mappings`: [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) code that translates from the event data to the entities defined in your schema (e.g. `mapping.ts` in this tutorial) +- `Ánh xạ AssemblyScript`: Mã [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) dịch từ dữ liệu sự kiện sang các thực thể được xác định trong lược đồ của bạn (ví dụ: `mapping.ts` trong hướng dẫn này) > In order to use your subgraph on The Graph's decentralized network, you will need to [create an API key](/deploying/subgraph-studio-faqs/#2-how-do-i-create-an-api-key). It is recommended that you [add signal](/network/curating/#how-to-signal) to your subgraph with at least [10,000 GRT](/network-transition-faq/#how-can-i-ensure-that-my-subgraph-will-be-picked-up-by-indexer-on-the-graph-network). -Before you go into detail about the contents of the manifest file, you need to install the [Graph CLI](https://github.com/graphprotocol/graph-cli) which you will need to build and deploy a subgraph. +Trước khi đi vào chi tiết về nội dung của tệp kê khai, bạn cần cài đặt [Graph CLI](https://github.com/graphprotocol/graph-cli) mà bạn sẽ cần để xây dựng và triển khai một subgraph. 
-## Install the Graph CLI +## Cài đặt Graph CLI -The Graph CLI is written in JavaScript, and you will need to install either `yarn` or `npm` to use it; it is assumed that you have yarn in what follows. +Graph CLI được viết bằng JavaScript và bạn sẽ cần cài đặt `yarn` hoặc `npm` để dùng nó; Chúng ta sẽ giả định rằng bạn đã có yarn trong những các bước sau. -Once you have `yarn`, install the Graph CLI by running +Một khi bạn có `yarn`, cài đặt Graph CLI bằng cách chạy -**Install with yarn:** +**Cài đặt bằng yarn:** ```bash yarn global add @graphprotocol/graph-cli ``` -**Install with npm:** +**Cài đặt bằng npm:** ```bash npm install -g @graphprotocol/graph-cli @@ -38,9 +38,9 @@ npm install -g @graphprotocol/graph-cli Once installed, the `graph init` command can be used to set up a new subgraph project, either from an existing contract or from an example subgraph. This command can be used to create a subgraph on the Subgraph Studio by passing in `graph init --product subgraph-studio`. If you already have a smart contract deployed to your preferred network, bootstrapping a new subgraph from that contract can be a good way to get started. -## From An Existing Contract +## Từ Một Hợp đồng Hiện có -The following command creates a subgraph that indexes all events of an existing contract. It attempts to fetch the contract ABI from Etherscan and falls back to requesting a local file path. If any of the optional arguments are missing, it takes you through an interactive form. +Lệnh sau tạo một subgraph lập chỉ mục tất cả các sự kiện của một hợp đồng hiện có. Nó cố gắng lấy ABI hợp đồng từ Etherscan và quay trở lại yêu cầu đường dẫn tệp cục bộ. Nếu thiếu bất kỳ đối số tùy chọn nào, nó sẽ đưa bạn đến một biểu mẫu tương tác. ```sh graph init \ @@ -51,17 +51,17 @@ graph init \ [] ``` -The `` is the ID of your subgraph in Subgraph Studio, it can be found on your subgraph details page. +`` là ID của subgraph của bạn trong Subgraph Studio, bạn có thể tìm thấy mã này trên trang chi tiết subgraph của mình. -## From An Example Subgraph +## Từ một Subgraph mẫu -The second mode `graph init` supports is creating a new project from an example subgraph. The following command does this: +Chế độ thứ hai mà `graph init` hỗ trợ là tạo một dự án mới từ một subgraph mẫu. Lệnh sau thực hiện điều này: ```sh graph init --studio ``` -The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. The following sections will go over the files that make up the subgraph manifest for this example. +Subgraph mẫu dựa trên hợp đồng Gravity của Dani Grant quản lý hình đại diện của người dùng và phát ra các sự kiện `NewGravatar` hoặc `UpdateGravatar` bất cứ khi nào hình đại diện được tạo hoặc cập nhật. Subgraph xử lý các sự kiện này bằng cách ghi các thực thể `Gravatar` vào kho lưu trữ Graph Node và đảm bảo chúng được cập nhật theo sự kiện. Các phần sau sẽ xem xét các tệp tạo nên subgraph cho ví dụ này. ## Add New dataSources To An Existing Subgraph @@ -89,11 +89,11 @@ The contract `address` will be written to the `networks.json` for the relevant n > **Note:** When using the interactive cli, after successfully running `graph init`, you'll be prompted to add a new `dataSource`. 
-## The Subgraph Manifest +## Tệp kê khai Subgraph -The subgraph manifest `subgraph.yaml` defines the smart contracts your subgraph indexes, which events from these contracts to pay attention to, and how to map event data to entities that Graph Node stores and allows to query. The full specification for subgraph manifests can be found [here](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). +Tệp kê khai subgraph `subgraph.yaml` xác định các hợp đồng thông minh lập chỉ mục subgraph của bạn, các sự kiện từ các hợp đồng này cần chú ý đến và cách ánh xạ dữ liệu sự kiện tới các thực thể mà Graph Node lưu trữ và cho phép truy vấn. Bạn có thể tìm thấy thông số kỹ thuật đầy đủ cho các tệp kê khai subgraph [tại đây](https://github.com/graphprotocol/graph-node/blob/master/docs/subgraph-manifest.md). -For the example subgraph, `subgraph.yaml` is: +Đối với subgraph mẫu, `subgraph.yaml` là: ```yaml specVersion: 0.0.4 @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -134,59 +142,63 @@ dataSources: file: ./src/mapping.ts ``` -The important entries to update for the manifest are: +Các mục nhập quan trọng cần cập nhật cho tệp kê khai là: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. -- `features`: a list of all used [feature](#experimental-features) names. +- `features`: một danh sách tất cả các tên [tính năng](#experimental-features) đã sử dụng. - `dataSources.source`: the address of the smart contract the subgraph sources, and the ABI of the smart contract to use. The address is optional; omitting it allows to index matching events from all contracts. - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. -- `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. +- `dataSources.mapping.abis`: một hoặc nhiều tệp ABI được đặt tên cho hợp đồng nguồn cũng như bất kỳ hợp đồng thông minh nào khác mà bạn tương tác từ bên trong ánh xạ. 
-- `dataSources.mapping.eventHandlers`: lists the smart contract events this subgraph reacts to and the handlers in the mapping—./src/mapping.ts in the example—that transform these events into entities in the store. +- `dataSources.mapping.eventHandlers`: liệt kê các sự kiện hợp đồng thông minh mà subgraph này phản ứng và các trình xử lý trong ánh xạ—./src/mapping.ts trong ví dụ — biến những sự kiện này thành các thực thể trong cửa hàng. -- `dataSources.mapping.callHandlers`: lists the smart contract functions this subgraph reacts to and handlers in the mapping that transform the inputs and outputs to function calls into entities in the store. +- `dataSources.mapping.callHandlers`: liệt kê các chức năng của hợp đồng thông minh mà subgraph này phản ứng và xử lý trong ánh xạ chuyển đổi đầu vào và đầu ra cho các lệnh gọi hàm thành các thực thể trong cửa hàng. - `dataSources.mapping.blockHandlers`: lists the blocks this subgraph reacts to and handlers in the mapping to run when a block is appended to the chain. Without a filter, the block handler will be run every block. An optional call-filter can be provided by adding a `filter` field with `kind: call` to the handler. This will only run the handler if the block contains at least one call to the data source contract. -A single subgraph can index data from multiple smart contracts. Add an entry for each contract from which data needs to be indexed to the `dataSources` array. +Một subgraph n có thể lập chỉ mục dữ liệu từ nhiều hợp đồng thông minh. Thêm mục nhập cho mỗi hợp đồng mà từ đó dữ liệu cần được lập chỉ mục vào mảng `dataSources`. -The triggers for a data source within a block are ordered using the following process: +Các trình kích hoạt cho nguồn dữ liệu trong một khối được sắp xếp theo quy trình sau: -1. Event and call triggers are first ordered by transaction index within the block. +1. Trình kích hoạt sự kiện và cuộc gọi được sắp xếp đầu tiên theo chỉ mục giao dịch trong khối. 2. Event and call triggers within the same transaction are ordered using a convention: event triggers first then call triggers, each type respecting the order they are defined in the manifest. -3. Block triggers are run after event and call triggers, in the order they are defined in the manifest. +3. Trình kích hoạt chặn được chạy sau trình kích hoạt sự kiện và cuộc gọi, theo thứ tự chúng được xác định trong tệp kê khai. These ordering rules are subject to change. -### Getting The ABIs +### Nhận các ABI -The ABI file(s) must match your contract(s). There are a few ways to obtain ABI files: +(Các) tệp ABI phải khớp với (các) hợp đồng của bạn. Có một số cách để lấy tệp ABI: -- If you are building your own project, you will likely have access to your most current ABIs. -- If you are building a subgraph for a public project, you can download that project to your computer and get the ABI by using [`truffle compile`](https://truffleframework.com/docs/truffle/overview) or using solc to compile. -- You can also find the ABI on [Etherscan](https://etherscan.io/), but this isn't always reliable, as the ABI that is uploaded there may be out of date. Make sure you have the right ABI, otherwise running your subgraph will fail. +- Nếu bạn đang xây dựng dự án của riêng mình, bạn có thể sẽ có quyền truy cập vào các ABI mới nhất của mình. 
+- Nếu bạn đang xây dựng một subgraph cho một dự án công cộng, bạn có thể tải dự án đó xuống máy tính của mình và lấy ABI bằng cách sử dụng [`truffle compile`](https://truffleframework.com/docs/truffle/overview) hoặc sử dụng solc để biên dịch. +- Bạn cũng có thể tìm thấy ABI trên [Etherscan](https://etherscan.io/), nhưng điều này không phải lúc nào cũng đáng tin cậy, vì ABI được tải lên có thể đã lỗi thời. Đảm bảo rằng bạn có ABI phù hợp, nếu không việc chạy subgraph của bạn sẽ không thành công. -## The GraphQL Schema +## Lược đồ GraphQL The schema for your subgraph is in the file `schema.graphql`. GraphQL schemas are defined using the GraphQL interface definition language. If you've never written a GraphQL schema, it is recommended that you check out this primer on the GraphQL type system. Reference documentation for GraphQL schemas can be found in the [GraphQL API](/querying/graphql-api) section. -## Defining Entities +## Xác định các Thực thể -Before defining entities, it is important to take a step back and think about how your data is structured and linked. All queries will be made against the data model defined in the subgraph schema and the entities indexed by the subgraph. Because of this, it is good to define the subgraph schema in a way that matches the needs of your dapp. It may be useful to imagine entities as "objects containing data", rather than as events or functions. +Trước khi xác định các thực thể (entities), điều quan trọng là phải lùi lại một bước và suy nghĩ về cách dữ liệu của bạn được cấu trúc và liên kết. Tất cả các truy vấn sẽ được thực hiện dựa trên mô hình dữ liệu được xác định trong lược đồ subgraph và các thực thể được lập chỉ mục bởi subgraph. Bởi vì điều này, rất tốt để xác định lược đồ subgraph theo cách phù hợp với nhu cầu của dapp của bạn. Có thể hữu ích khi hình dung các thực thể là "đối tượng chứa dữ liệu", chứ không phải là các sự kiện hoặc chức năng. With The Graph, you simply define entity types in `schema.graphql`, and Graph Node will generate top level fields for querying single instances and collections of that entity type. Each type that should be an entity is required to be annotated with an `@entity` directive. By default, entities are mutable, meaning that mappings can load existing entities, modify them and store a new version of that entity. Mutability comes at a price, and for entity types for which it is known that they will never be modified, for example, because they simply contain data extracted verbatim from the chain, it is recommended to mark them as immutable with `@entity(immutable: true)`. Mappings can make changes to immutable entities as long as those changes happen in the same block in which the entity was created. Immutable entities are much faster to write and to query, and should therefore be used whenever possible. -### Good Example +### Ví dụ tốt -The `Gravatar` entity below is structured around a Gravatar object and is a good example of how an entity could be defined. +Thực thể `Gravatar` bên dưới được cấu trúc xung quanh một đối tượng Gravatar và là một ví dụ điển hình về cách một thực thể có thể được xác định. ```graphql type Gravatar @entity(immutable: true) { @@ -198,7 +210,7 @@ type Gravatar @entity(immutable: true) { } ``` -### Bad Example +### Ví dụ tồi The example `GravatarAccepted` and `GravatarDeclined` entities below are based around events. It is not recommended to map events or function calls to entities 1:1. 
@@ -218,30 +230,31 @@ type GravatarDeclined @entity { } ``` -### Optional and Required Fields +### Các trường tùy chọn và bắt buộc -Entity fields can be defined as required or optional. Required fields are indicated by the `!` in the schema. If a required field is not set in the mapping, you will receive this error when querying the field: +Các trường thực thể có thể được xác định theo yêu cầu hoặc tùy chọn. Các trường bắt buộc được biểu thị bằng `!` trong lược đồ. Nếu trường bắt buộc không được đặt trong ánh xạ, bạn sẽ nhận được lỗi này khi truy vấn trường: ``` -Null value resolved for non-null field 'name' +Giá trị rỗng (null) được giải quyết cho trường không phải null 'name' ``` Each entity must have an `id` field, which must be of type `Bytes!` or `String!`. It is generally recommended to use `Bytes!`, unless the `id` contains human-readable text, since entities with `Bytes!` id's will be faster to write and query as those with a `String!` `id`. The `id` field serves as the primary key, and needs to be unique among all entities of the same type. For historical reasons, the type `ID!` is also accepted and is a synonym for `String!`. For some entity types the `id` is constructed from the id's of two other entities; that is possible using `concat`, e.g., `let id = left.id.concat(right.id)` to form the id from the id's of `left` and `right`. Similarly, to construct an id from the id of an existing entity and a counter `count`, `let id = left.id.concatI32(count)` can be used. The concatenation is guaranteed to produce unique id's as long as the length of `left` is the same for all such entities, for example, because `left.id` is an `Address`. -### Built-In Scalar Types +### Các loại vô hướng tích hợp -#### GraphQL Supported Scalars +#### GraphQL Vô hướng được hỗ trợ We support the following scalars in our GraphQL API: -| Type | Description | +| Loại | Miêu tả | | --- | --- | -| `Bytes` | Byte array, represented as a hexadecimal string. Commonly used for Ethereum hashes and addresses. | +| `Bytes` | Mảng byte, được biểu diễn dưới dạng chuỗi thập lục phân. Thường được sử dụng cho các mã băm và địa chỉ Ethereum. | | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. | | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -261,13 +274,13 @@ Once the enum is defined in the schema, you can use the string representation of More detail on writing enums can be found in the [GraphQL documentation](https://graphql.org/learn/schema/). -#### Entity Relationships +#### Mối quan hệ thực thể An entity may have a relationship to one or more other entities in your schema. These relationships may be traversed in your queries. Relationships in The Graph are unidirectional. It is possible to simulate bidirectional relationships by defining a unidirectional relationship on either "end" of the relationship. 
Relationships are defined on entities just like any other field except that the type specified is that of another entity. -#### One-To-One Relationships +#### Mối quan hệ một-một Define a `Transaction` entity type with an optional one-to-one relationship with a `TransactionReceipt` entity type: @@ -283,7 +296,7 @@ type TransactionReceipt @entity(immutable: true) { } ``` -#### One-To-Many Relationships +#### Mối quan hệ một-nhiều Define a `TokenBalance` entity type with a required one-to-many relationship with a Token entity type: @@ -299,13 +312,13 @@ type TokenBalance @entity { } ``` -#### Reverse Lookups +#### Tra cứu ngược Reverse lookups can be defined on an entity through the `@derivedFrom` field. This creates a virtual field on the entity that may be queried but cannot be set manually through the mappings API. Rather, it is derived from the relationship defined on the other entity. For such relationships, it rarely makes sense to store both sides of the relationship, and both indexing and query performance will be better when only one side is stored and the other is derived. For one-to-many relationships, the relationship should always be stored on the 'one' side, and the 'many' side should always be derived. Storing the relationship this way, rather than storing an array of entities on the 'many' side, will result in dramatically better performance for both indexing and querying the subgraph. In general, storing arrays of entities should be avoided as much as is practical. -#### Example +#### Ví dụ We can make the balances for a token accessible from the token by deriving a `tokenBalances` field: @@ -322,11 +335,11 @@ type TokenBalance @entity { } ``` -#### Many-To-Many Relationships +#### Mối quan hệ nhiều-nhiều For many-to-many relationships, such as users that each may belong to any number of organizations, the most straightforward, but generally not the most performant, way to model the relationship is as an array in each of the two entities involved. If the relationship is symmetric, only one side of the relationship needs to be stored and the other side can be derived. -#### Example +#### Ví dụ Define a reverse lookup from a `User` entity type to an `Organization` entity type. In the example below, this is achieved by looking up the `members` attribute from within the `Organization` entity. In queries, the `organizations` field on `User` will be resolved by finding all `Organization` entities that include the user's ID. @@ -395,7 +408,7 @@ type MyFirstEntity @entity { } ``` -## Defining Fulltext Search Fields +## Xác định các Trường Tìm kiếm toàn Văn bản Fulltext search queries filter and rank entities based on a text search input. Fulltext queries are able to return matches for similar words by processing the query text input into stems before comparing them to the indexed text data. @@ -437,43 +450,43 @@ query { } ``` -> **[Feature Management](#experimental-features):** From `specVersion` `0.0.4` and onwards, `fullTextSearch` must be declared under the `features` section in the subgraph manifest. +> **[Quản lý Tính năng](#experimental-features):** Từ `specVersion` `0.0.4` và trở đi, `fullTextSearch` phải được khai báo dưới phần `features` trong tệp kê khai subgraph. -### Languages supported +### Các ngôn ngữ được hỗ trợ Choosing a different language will have a definitive, though sometimes subtle, effect on the fulltext search API. 
Fields covered by a fulltext query field are examined in the context of the chosen language, so the lexemes produced by analysis and search queries vary from language to language. For example: when using the supported Turkish dictionary "token" is stemmed to "toke" while, of course, the English dictionary will stem it to "token". Supported language dictionaries: -| Code | Dictionary | -| ------ | ---------- | -| simple | General | -| da | Danish | -| nl | Dutch | -| en | English | -| fi | Finnish | -| fr | French | -| de | German | -| hu | Hungarian | -| it | Italian | -| no | Norwegian | -| pt | Portuguese | -| ro | Romanian | -| ru | Russian | -| es | Spanish | -| sv | Swedish | -| tr | Turkish | - -### Ranking Algorithms +| Mã | Từ điển | +| ------ | ----------------- | +| simple | Khái quát | +| da | Tiếng Đan Mạch | +| nl | Tiếng Hà Lan | +| en | Tiếng Anh | +| fi | Tiếng Phần Lan | +| fr | Tiếng Pháp | +| de | Tiếng Đức | +| hu | Tiếng Hungary | +| it | Tiếng Ý | +| no | Tiếng Na uy | +| pt | Portuguese | +| ro | Tiếng Rumani | +| ru | Tiếng Nga | +| es | Tiếng Tây Ban Nha | +| sv | Tiếng Thụy Điển | +| tr | Tiếng Thổ Nhĩ Kỳ | + +### Thuật toán Xếp hạng Supported algorithms for ordering results: -| Algorithm | Description | -| ------------- | ----------------------------------------------------------------------- | -| rank | Use the match quality (0-1) of the fulltext query to order the results. | -| proximityRank | Similar to rank but also includes the proximity of the matches. | +| Thuật toán | Miêu tả | +| ------------- | ------------------------------------------------------------------------------- | +| xếp hạng | Sử dụng chất lượng đối sánh (0-1) của truy vấn toàn văn bản để sắp xếp kết quả. | +| proximityRank | Tương tự như rank nhưng cũng bao gồm các kết quả tương tự gần giống. | -## Writing Mappings +## Viết Ánh xạ The mappings take data from a particular source and transform it into entities that are defined within your schema. Mappings are written in a subset of [TypeScript](https://www.typescriptlang.org/docs/handbook/typescript-in-5-minutes.html) called [AssemblyScript](https://github.com/AssemblyScript/assemblyscript/wiki) which can be compiled to WASM ([WebAssembly](https://webassembly.org/)). AssemblyScript is stricter than normal TypeScript, yet provides a familiar syntax. @@ -510,7 +523,7 @@ The first handler takes a `NewGravatar` event and creates a new `Gravatar` entit The second handler tries to load the existing `Gravatar` from the Graph Node store. If it does not exist yet, it is created on-demand. The entity is then updated to match the new event parameters before it is saved back to the store using `gravatar.save()`. -### Recommended IDs for Creating New Entities +### Các ID được Đề xuất để tạo các Thực thể Mới Every entity has to have an `id` that is unique among all entities of the same type. An entity's `id` value is set when the entity is created. Below are some recommended `id` values to consider when creating new entities. NOTE: The value of `id` must be a `string`. @@ -520,7 +533,7 @@ Every entity has to have an `id` that is unique among all entities of the same t We provide the [Graph Typescript Library](https://github.com/graphprotocol/graph-ts) which contains utilies for interacting with the Graph Node store and conveniences for handling smart contract data and entities. You can use this library in your mappings by importing `@graphprotocol/graph-ts` in `mapping.ts`. 
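As a rough sketch of how these pieces fit together, a pair of event handlers for the Gravatar example described above might look like the following. The event names, parameters, and entity fields follow the Gravity example contract and schema used on this page; your own generated classes will differ depending on your ABI and `schema.graphql`.

```typescript
import { NewGravatar, UpdatedGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleNewGravatar(event: NewGravatar): void {
  // Create a new entity keyed by the gravatar id emitted with the event
  let gravatar = new Gravatar(event.params.id.toHex())
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}

export function handleUpdatedGravatar(event: UpdatedGravatar): void {
  // Load the existing entity from the store, creating it on demand if missing
  let id = event.params.id.toHex()
  let gravatar = Gravatar.load(id)
  if (gravatar == null) {
    gravatar = new Gravatar(id)
  }
  gravatar.owner = event.params.owner
  gravatar.displayName = event.params.displayName
  gravatar.imageUrl = event.params.imageUrl
  gravatar.save()
}
```

The `load()` and `save()` calls on the generated entity classes are the store interactions provided on top of `@graphprotocol/graph-ts`.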
-## Code Generation +## Tạo mã In order to make it easy and type-safe to work with smart contracts, events and entities, the Graph CLI can generate AssemblyScript types from the subgraph's GraphQL schema and the contract ABIs included in the data sources. @@ -562,13 +575,13 @@ import { Gravatar } from '../generated/schema' Code generation does not check your mapping code in `src/mapping.ts`. If you want to check that before trying to deploy your subgraph to the Graph Explorer, you can run `yarn build` and fix any syntax errors that the TypeScript compiler might find. -## Data Source Templates +## Mẫu Nguồn Dữ liệu A common pattern in EVM-compatible smart contracts is the use of registry or factory contracts, where one contract creates, manages, or references an arbitrary number of other contracts that each have their own state and events. The addresses of these sub-contracts may or may not be known upfront and many of these contracts may be created and/or added over time. This is why, in such cases, defining a single data source or a fixed number of data sources is impossible and a more dynamic approach is needed: _data source templates_. -### Data Source for the Main Contract +### Nguồn Dữ liệu cho Hợp đồng Chính First, you define a regular data source for the main contract. The snippet below shows a simplified example data source for the [Uniswap](https://uniswap.org) exchange factory contract. Note the `NewExchange(address,address)` event handler. This is emitted when a new exchange contract is created on-chain by the factory contract. @@ -595,7 +608,7 @@ dataSources: handler: handleNewExchange ``` -### Data Source Templates for Dynamically Created Contracts +### Mẫu Nguồn Dữ liệu cho các Hợp đồng được Tạo Tự động Then, you add _data source templates_ to the manifest. These are identical to regular data sources, except that they lack a pre-defined contract address under `source`. Typically, you would define one template for each type of sub-contract managed or referenced by the parent contract. @@ -631,7 +644,7 @@ templates: handler: handleRemoveLiquidity ``` -### Instantiating a Data Source Template +### Khởi tạo một Mẫu Nguồn Dữ liệu In the final step, you update your main contract mapping to create a dynamic data source instance from one of the templates. In this example, you would change the main contract mapping to import the `Exchange` template and call the `Exchange.create(address)` method on it to start indexing the new exchange contract. @@ -645,11 +658,11 @@ export function handleNewExchange(event: NewExchange): void { } ``` -> **Note:** A new data source will only process the calls and events for the block in which it was created and all following blocks, but will not process historical data, i.e., data that is contained in prior blocks. +> **Lưu ý:** Nguồn dữ liệu mới sẽ chỉ xử lý các lệnh gọi và sự kiện cho khối mà nó được tạo và tất cả các khối tiếp theo, nhưng sẽ không xử lý dữ liệu lịch sử, tức là dữ liệu được chứa trong các khối trước đó. > -> If prior blocks contain data relevant to the new data source, it is best to index that data by reading the current state of the contract and creating entities representing that state at the time the new data source is created. +> Nếu các khối trước đó chứa dữ liệu có liên quan đến nguồn dữ liệu mới, tốt nhất là lập chỉ mục dữ liệu đó bằng cách đọc trạng thái hiện tại của hợp đồng và tạo các thực thể đại diện cho trạng thái đó tại thời điểm nguồn dữ liệu mới được tạo. 
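A rough sketch of that note in practice (not the documented example): the same `handleNewExchange` handler can both spawn the template and record the contract's current state at creation time. The `ExchangeInfo` entity, the generated `Exchange` contract binding path, and the `try_tokenAddress()` view call are assumptions made for illustration:

```typescript
import { NewExchange } from '../generated/Factory/Factory'
import { Exchange } from '../generated/templates'
import { Exchange as ExchangeContract } from '../generated/templates/Exchange/Exchange' // assumed ABI binding
import { ExchangeInfo } from '../generated/schema' // assumed entity

export function handleNewExchange(event: NewExchange): void {
  // Start indexing the newly created exchange from this block onwards
  Exchange.create(event.params.exchange)

  // Earlier blocks are never replayed for this data source, so capture the
  // contract's current state once, when the data source is created
  let info = new ExchangeInfo(event.params.exchange.toHex())
  let contract = ExchangeContract.bind(event.params.exchange)
  let token = contract.try_tokenAddress() // assumed read-only function on the exchange
  if (!token.reverted) {
    info.token = token.value
  }
  info.save()
}
```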
-### Data Source Context
+### Bối cảnh Nguồn Dữ liệu

Data source contexts allow passing extra configuration when instantiating a template. In our example, let's say exchanges are associated with a particular trading pair, which is included in the `NewExchange` event. That information can be passed into the instantiated data source, like so:

@@ -674,7 +687,7 @@ let tradingPair = context.getString('tradingPair')

There are setters and getters like `setString` and `getString` for all value types.

-## Start Blocks
+## Khối Bắt đầu

The `startBlock` is an optional setting that allows you to define from which block in the chain the data source will start indexing. Setting the start block allows the data source to skip potentially millions of blocks that are irrelevant. Typically, a subgraph developer will set `startBlock` to the block in which the smart contract of the data source was created.

@@ -702,13 +715,13 @@ dataSources:
handler: handleNewEvent
```

-> **Note:** The contract creation block can be quickly looked up on Etherscan:
+> **Lưu ý:** Khối tạo hợp đồng có thể được nhanh chóng tra cứu trên Etherscan:
>
-> 1. Search for the contract by entering its address in the search bar.
-> 2. Click on the creation transaction hash in the `Contract Creator` section.
-> 3. Load the transaction details page where you'll find the start block for that contract.
+> 1. Tìm kiếm hợp đồng bằng cách nhập địa chỉ của nó vào thanh tìm kiếm.
+> 2. Nhấp vào băm giao dịch tạo trong phần `Contract Creator`.
+> 3. Tải trang chi tiết giao dịch nơi bạn sẽ tìm thấy khối bắt đầu cho hợp đồng đó.

-## Call Handlers
+## Trình xử lý lệnh gọi

While events provide an effective way to collect relevant changes to the state of a contract, many contracts avoid generating logs to optimize gas costs. In these cases, a subgraph can subscribe to calls made to the data source contract. This is achieved by defining call handlers referencing the function signature and the mapping handler that will process calls to this function. To process these calls, the mapping handler will receive an `ethereum.Call` as an argument with the typed inputs to and outputs from the call. Calls made at any depth in a transaction's call chain will trigger the mapping, allowing activity with the data source contract through proxy contracts to be captured.

@@ -716,7 +729,7 @@ Call handlers will only trigger in one of two cases: when the function specified

> **Note:** Call handlers currently depend on the Parity tracing API. Certain networks, such as BNB Chain and Arbitrum, do not support this API. If a subgraph indexing one of these networks contains one or more call handlers, it will not start syncing. Subgraph developers should instead use event handlers. These are far more performant than call handlers, and are supported on every EVM network.

-### Defining a Call Handler
+### Xác định một Trình xử lý lệnh gọi

To define a call handler in your manifest, simply add a `callHandlers` array under the data source you would like to subscribe to.

@@ -745,7 +758,7 @@ dataSources:

The `function` is the normalized function signature to filter calls by. The `handler` property is the name of the function in your mapping you would like to execute when the target function is called in the data source contract.

-### Mapping Function
+### Chức năng Ánh xạ

Each call handler takes a single parameter that has a type corresponding to the name of the called function.
In the example subgraph above, the mapping contains a handler for when the `createGravatar` function is called and receives a `CreateGravatarCall` parameter as an argument: @@ -764,11 +777,13 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { The `handleCreateGravatar` function takes a new `CreateGravatarCall` which is a subclass of `ethereum.Call`, provided by `@graphprotocol/graph-ts`, that includes the typed inputs and outputs of the call. The `CreateGravatarCall` type is generated for you when you run `graph codegen`. -## Block Handlers +## Trình xử lý Khối In addition to subscribing to contract events or function calls, a subgraph may want to update its data as new blocks are appended to the chain. To achieve this a subgraph can run a function after every block or after blocks that match a pre-defined filter. -### Supported Filters +### Bộ lọc được hỗ trợ + +#### Call Filter ```yaml filter: @@ -806,7 +821,46 @@ dataSources: kind: call ``` -### Mapping Function +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + +### Chức năng Ánh xạ The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. 
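As a minimal sketch (assuming a `BlockSnapshot` entity exists in the schema), a handler paired with the polling filter above could record one snapshot every `n` blocks:

```typescript
import { ethereum } from '@graphprotocol/graph-ts'
import { BlockSnapshot } from '../generated/schema' // assumed entity

export function handleBlock(block: ethereum.Block): void {
  // With `filter: { kind: polling, every: 10 }`, this runs once every 10 blocks
  let snapshot = new BlockSnapshot(block.hash.toHex())
  snapshot.number = block.number
  snapshot.timestamp = block.timestamp
  snapshot.save()
}
```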
@@ -820,7 +874,7 @@ export function handleBlock(block: ethereum.Block): void { } ``` -## Anonymous Events +## Sự kiện Ẩn danh If you need to process anonymous events in Solidity, that can be achieved by providing the topic 0 of the event, as in the example: @@ -852,12 +906,12 @@ Inside the handler function, the receipt can be accessed in the `Event.receipt` Starting from `specVersion` `0.0.4`, subgraph features must be explicitly declared in the `features` section at the top level of the manifest file, using their `camelCase` name, as listed in the table below: -| Feature | Name | -| --------------------------------------------------------- | --------------------------------------------------- | -| [Non-fatal errors](#non-fatal-errors) | `nonFatalErrors` | -| [Full-text Search](#defining-fulltext-search-fields) | `fullTextSearch` | -| [Grafting](#grafting-onto-existing-subgraphs) | `grafting` | -| [IPFS on Ethereum Contracts](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | +| Tính năng | Tên | +| ---------------------------------------------------------- | --------------------------------------------------- | +| [Lỗi không nghiêm trọng](#non-fatal-errors) | `nonFatalErrors` | +| [Tìm kiếm toàn văn](#defining-fulltext-search-fields) | `fullTextSearch` | +| [Ghép](#grafting-onto-existing-subgraphs) | `grafting` | +| [IPFS trên hợp đồng Ethereum](#ipfs-on-ethereum-contracts) | `ipfsOnEthereumContracts` or `nonDeterministicIpfs` | For instance, if a subgraph uses the **Full-Text Search** and the **Non-fatal Errors** features, the `features` field in the manifest should be: @@ -872,23 +926,23 @@ dataSources: ... Note that using a feature without declaring it will incur a **validation error** during subgraph deployment, but no errors will occur if a feature is declared but not used. -### IPFS on Ethereum Contracts +### IPFS trên hợp đồng Ethereum A common use case for combining IPFS with Ethereum is to store data on IPFS that would be too expensive to maintain on-chain, and reference the IPFS hash in Ethereum contracts. Given such IPFS hashes, subgraphs can read the corresponding files from IPFS using `ipfs.cat` and `ipfs.map`. To do this reliably, it is required that these files are pinned to an IPFS node with high availability, so that the [hosted service](https://thegraph.com/hosted-service) IPFS node can find them during indexing. -> **Note:** The Graph Network does not yet support `ipfs.cat` and `ipfs.map`, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Lưu ý:** Mạng The Graph chưa hỗ trợ `ipfs.cat` and `ipfs.map`, và các nhà phát triển không nên triển khai các subgraphs sử dụng chức năng đó vào mạng thông qua Studio. > **[Feature Management](#experimental-features):** `ipfsOnEthereumContracts` must be declared under `features` in the subgraph manifest. For non EVM chains, the `nonDeterministicIpfs` alias can also be used for the same purpose. When running a local Graph Node, the `GRAPH_ALLOW_NON_DETERMINISTIC_IPFS` environment variable must be set in order to index subgraphs using this experimental functionality. -### Non-fatal errors +### Lỗi không nghiêm trọng Indexing errors on already synced subgraphs will, by default, cause the subgraph to fail and stop syncing. Subgraphs can alternatively be configured to continue syncing in the presence of errors, by ignoring the changes made by the handler which provoked the error. 
This gives subgraph authors time to correct their subgraphs while queries continue to be served against the latest block, though the results might be inconsistent due to the bug that caused the error. Note that some errors are still always fatal. To be non-fatal, the error must be known to be deterministic. -> **Note:** The Graph Network does not yet support non-fatal errors, and developers should not deploy subgraphs using that functionality to the network via the Studio. +> **Lưu ý:** Mạng The Graph chưa hỗ trợ các lỗi không nghiêm trọng và các nhà phát triển không nên triển khai các subgraph sử dụng chức năng đó vào mạng thông qua Studio. Enabling non-fatal errors requires setting the following feature flag on the subgraph manifest: @@ -932,7 +986,9 @@ If the subgraph encounters an error, that query will return both the data and a ] ``` -### Grafting onto Existing Subgraphs +### Ghép vào các Subgraph Hiện có + +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. @@ -951,23 +1007,23 @@ Because grafting copies rather than indexes base data, it is much quicker to get The grafted subgraph can use a GraphQL schema that is not identical to the one of the base subgraph, but merely compatible with it. It has to be a valid subgraph schema in its own right, but may deviate from the base subgraph's schema in the following ways: -- It adds or removes entity types -- It removes attributes from entity types -- It adds nullable attributes to entity types -- It turns non-nullable attributes into nullable attributes -- It adds values to enums -- It adds or removes interfaces -- It changes for which entity types an interface is implemented +- Nó thêm hoặc xóa các loại thực thể +- Nó loại bỏ các thuộc tính khỏi các loại thực thể +- Nó thêm các thuộc tính nullable vào các loại thực thể +- Nó biến các thuộc tính không thể nullable thành các thuộc tính nullable +- Nó thêm giá trị vào enums +- Nó thêm hoặc xóa các giao diện +- Nó thay đổi đối với loại thực thể nào mà một giao diện được triển khai -> **[Feature Management](#experimental-features):** `grafting` must be declared under `features` in the subgraph manifest. +> **[Quản lý Tính năng](#experimental-features):** `grafting` phải được khai báo dưới`features` trong tệp kê khai subgraph. ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. 
-### Overview +### Tổng quan Rather than fetching files "in line" during handler exectuion, this introduces templates which can be spawned as new data sources for a given file identifier. These new data sources fetch the files, retrying if they are unsuccessful, running a dedicated handler when the file is found. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/vi/developing/developer-faqs.mdx b/website/pages/vi/developing/developer-faqs.mdx index 0b925a79dce2..ad92cfc58a79 100644 --- a/website/pages/vi/developing/developer-faqs.mdx +++ b/website/pages/vi/developing/developer-faqs.mdx @@ -1,5 +1,5 @@ --- -title: Developer FAQs +title: Câu hỏi thường gặp dành cho nhà phát triển --- ## 1. What is a subgraph? @@ -8,15 +8,15 @@ A subgraph is a custom API built on blockchain data. Subgraphs are queried using ## 2. Can I delete my subgraph? -It is not possible to delete subgraphs once they are created. +Không thể xóa các subgraph sau khi chúng được tạo. ## 3. 
Can I change my subgraph name? -No. Once a subgraph is created, the name cannot be changed. Make sure to think of this carefully before you create your subgraph so it is easily searchable and identifiable by other dapps. +Không. Khi một subgraph được tạo, không thể thay đổi tên. Hãy đảm bảo suy nghĩ kỹ về điều này trước khi bạn tạo subgraph của mình để các dapp khác có thể dễ dàng tìm kiếm và nhận dạng được. ## 4. Can I change the GitHub account associated with my subgraph? -No. Once a subgraph is created, the associated GitHub account cannot be changed. Make sure to think of this carefully before you create your subgraph. +Không. Sau khi tạo subgraph, không thể thay đổi tài khoản GitHub được liên kết. Hãy đảm bảo suy nghĩ kỹ về điều này trước khi bạn tạo subgraph của mình. ## 5. Am I still able to create a subgraph if my smart contracts don't have events? @@ -36,13 +36,13 @@ Check out the "Instantiating a data source template" section on: [Data Source Te ## 8. How do I make sure I'm using the latest version of graph-node for my local deployments? -You can run the following command: +Bạn có thể chạy lệnh sau: ```sh docker pull graphprotocol/graph-node:latest ``` -**NOTE:** docker / docker-compose will always use whatever graph-node version was pulled the first time you ran it, so it is important to do this to make sure you are up to date with the latest version of graph-node. +**LƯU Ý:** docker / docker-compose sẽ luôn sử dụng bất kỳ phiên bản graph-node nào được kéo vào lần đầu tiên bạn chạy nó, vì vậy điều quan trọng là phải làm điều này để đảm bảo bạn được cập nhật phiên bản mới nhất của graph-node. ## 9. How do I call a contract function or access a public state variable from my subgraph mappings? @@ -60,15 +60,15 @@ Unfortunately, this is currently not possible. `graph init` is intended as a bas ## 12. What is the recommended way to build "autogenerated" ids for an entity when handling events? -If only one entity is created during the event and if there's nothing better available, then the transaction hash + log index would be unique. You can obfuscate these by converting that to Bytes and then piping it through `crypto.keccak256` but this won't make it more unique. +Nếu chỉ một thực thể được tạo trong sự kiện và nếu không có gì tốt hơn khả dụng, thì chỉ mục log + băm giao dịch sẽ là duy nhất. Bạn có thể làm xáo trộn chúng bằng cách chuyển đổi nó thành Byte và sau đó chuyển nó qua`crypto.keccak256` nhưng điều này sẽ không làm cho nó độc đáo hơn. ## 13. When listening to multiple contracts, is it possible to select the contract order to listen to events? -Within a subgraph, the events are always processed in the order they appear in the blocks, regardless of whether that is across multiple contracts or not. +Trong một subgraph, các sự kiện luôn được xử lý theo thứ tự chúng xuất hiện trong các khối, bất kể điều đó có qua nhiều hợp đồng hay không. ## 14. Is it possible to differentiate between networks (mainnet, Goerli, local) from within event handlers? -Yes. You can do this by importing `graph-ts` as per the example below: +Đúng. Bạn có thể thực hiện việc này bằng cách nhập `graph-ts` theo ví dụ bên dưới: ```javascript import { dataSource } from '@graphprotocol/graph-ts' @@ -83,7 +83,7 @@ Yes. Goerli supports block handlers, call handlers and event handlers. It should ## 16. Can I import ethers.js or other JS libraries into my subgraph mappings? -Not currently, as mappings are written in AssemblyScript. 
One possible alternative solution to this is to store raw data in entities and perform logic that requires JS libraries on the client. +Hiện tại thì không, vì các ánh xạ được viết bằng AssemblyScript. Một giải pháp thay thế khả thi cho điều này là lưu trữ dữ liệu thô trong các thực thể và thực hiện logic yêu cầu thư viện JS trên máy khách. ## 17. Is it possible to specify what block to start indexing on? @@ -95,7 +95,7 @@ Yes, you should take a look at the optional start block feature to start indexin ## 19. Is there a way to query the subgraph directly to determine the latest block number it has indexed? -Yes! Try the following command, substituting "organization/subgraphName" with the organization under it is published and the name of your subgraph: +Có! Hãy thử lệnh sau, thay thế "organization/subgraphName" bằng tổ chức dưới nó được xuất bản và tên của subgraph của bạn: ```sh curl -X POST -d '{ "query": "{indexingStatusForCurrentVersion(subgraphName: \"organization/subgraphName\") { chains { latestBlock { hash number }}}}"}' https://api.thegraph.com/index-node/graphql @@ -107,11 +107,11 @@ You can find the list of the supported networks [here](/developing/supported-net ## 21. Is it possible to duplicate a subgraph to another account or endpoint without redeploying? -You have to redeploy the subgraph, but if the subgraph ID (IPFS hash) doesn't change, it won't have to sync from the beginning. +Bạn phải triển khai lại subgraph, nhưng nếu ID subgraph (mã băm IPFS) không thay đổi, nó sẽ không phải đồng bộ hóa từ đầu. ## 22. Is this possible to use Apollo Federation on top of graph-node? -Federation is not supported yet, although we do want to support it in the future. At the moment, something you can do is use schema stitching, either on the client or via a proxy service. +Federation chưa được hỗ trợ, mặc dù chúng tôi muốn hỗ trợ nó trong tương lai. Hiện tại, điều bạn có thể làm là sử dụng tính năng ghép lược đồ, trên máy khách hoặc thông qua dịch vụ proxy. ## 23. Is there a limit to how many objects The Graph can return per query? @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). 
All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/vi/developing/graph-ts/api.mdx b/website/pages/vi/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..5a93ec9ee4b6 --- /dev/null +++ b/website/pages/vi/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## Tham chiếu API + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- Các primitives cấp thấp để dịch giữa các hệ thống kiểu khác nhau như Ethereum, JSON, GraphQL và AssemblyScript. + +### Các phiên bản + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Phiên bản | Ghi chú phát hành | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Các loại cài sẵn + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### API cửa hàng + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Tạo các thực thể + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Tải các thực thể từ cửa hàng + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Cập nhật các thực thể hiện có + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
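+
+// As noted in the surrounding text, changes are merged into an existing entity;
+// persisting them still requires an explicit save (added here for illustration):
+transfer.save()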
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Xóa các thực thể khỏi cửa hàng + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Hỗ trợ các loại Ethereum + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Sự kiện và dữ liệu Khối/Giao dịch + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Quyền truy cập vào Trạng thái Hợp đồng Thông minh + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Xử lý các lệnh gọi được hoàn nguyên + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
+ +#### Mã hóa / Giải mã ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging một hoặc nhiều giá trị + +##### Logging một giá trị duy nhất + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging một mục nhập từ một mảng hiện có + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging nhiều mục nhập từ một mảng hiện có + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+
+```typescript
+let myArray = ['A', 'B', 'C']
+
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My first value is: A, second value is: B, third value is: C"
+  log.info('My first value is: {}, second value is: {}, third value is: {}', myArray)
+}
+```
+
+##### Logging một mục cụ thể từ một mảng hiện có
+
+To display a specific value in the array, the indexed value must be provided.
+
+```typescript
+export function handleSomeEvent(event: SomeEvent): void {
+  // Displays : "My third value is C"
+  log.info('My third value is: {}', [myArray[2]])
+}
+```
+
+##### Logging thông tin sự kiện
+
+The example below logs the block number, block hash and transaction hash from an event:
+
+```typescript
+import { log } from '@graphprotocol/graph-ts'
+
+export function handleSomeEvent(event: SomeEvent): void {
+  log.debug('Block number: {}, block hash: {}, transaction hash: {}', [
+    event.block.number.toString(), // "47596000"
+    event.block.hash.toHexString(), // "0x..."
+    event.transaction.hash.toHexString(), // "0x..."
+  ])
+}
+```
+
+### IPFS API
+
+```typescript
+import { ipfs } from '@graphprotocol/graph-ts'
+```
+
+Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page.
+
+Given an IPFS hash or path, reading a file from IPFS is done as follows:
+
+```typescript
+// Put this inside an event handler in the mapping
+let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'
+let data = ipfs.cat(hash)
+
+// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile`
+// that include files in directories are also supported
+let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile'
+let dataFromPath = ipfs.cat(path)
+```
+
+**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`.
+
+It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior:
+
+```typescript
+import { JSONValue, Value } from '@graphprotocol/graph-ts'
+import { Item } from '../generated/schema'
+
+export function processItem(value: JSONValue, userData: Value): void {
+  // See the JSONValue documentation for details on dealing
+  // with JSON values
+  let obj = value.toObject()
+  let id = obj.get('id')
+  let title = obj.get('title')
+
+  if (!id || !title) {
+    return
+  }
+
+  // Callbacks can also create entities
+  let newItem = new Item(id.toString())
+  newItem.title = title.toString()
+  newItem.parent = userData.toString() // Set parent to "parentId"
+  newItem.save()
+}
+
+// Put this inside an event handler in the mapping
+ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json'])
+
+// Alternatively, use `ipfs.mapJSON`
+ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId'))
+```
+
+The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`.
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result<JSONValue, boolean>` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result<JSONValue, boolean>` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array<JSONValue>` - (and then convert `JSONValue` with one of the 5 methods above) + +### Tham chiếu Chuyển đổi Loại + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Siêu Dữ liệu Nguồn Dữ liệu + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity và DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/vi/developing/graph-ts/common-issues.mdx b/website/pages/vi/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/vi/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. 
variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/vi/developing/supported-networks.json b/website/pages/vi/developing/supported-networks.json index 5e12392b8c7d..ce2e98ccfe7c 100644 --- a/website/pages/vi/developing/supported-networks.json +++ b/website/pages/vi/developing/supported-networks.json @@ -1,5 +1,5 @@ { - "network": "Network", + "network": "Mạng lưới", "cliName": "CLI Name", "chainId": "Chain ID", "studioAndHostedService": "Studio and Hosted Service", diff --git a/website/pages/vi/developing/supported-networks.mdx b/website/pages/vi/developing/supported-networks.mdx index 58ce56345f7c..8cc8159f29f4 100644 --- a/website/pages/vi/developing/supported-networks.mdx +++ b/website/pages/vi/developing/supported-networks.mdx @@ -1,5 +1,5 @@ --- -title: Supported Networks +title: Các mạng được hỗ trợ --- export { getStaticPropsForSupportedNetworks as getStaticProps } from '@/src/buildGetStaticProps' @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. 
diff --git a/website/pages/vi/developing/unit-testing-framework.mdx b/website/pages/vi/developing/unit-testing-framework.mdx index 8ffc66465e3a..e0a3390bbb25 100644 --- a/website/pages/vi/developing/unit-testing-framework.mdx +++ b/website/pages/vi/developing/unit-testing-framework.mdx @@ -76,7 +76,7 @@ And finally, do not use `graph test` (which uses your global installation of gra } ``` -### Usage +### Sử dụng To use **Matchstick** in your subgraph project just open up a terminal, navigate to the root folder of your project and simply run `graph test [options] ` - it downloads the latest **Matchstick** binary and runs the specified test or all tests in a test folder (or all existing tests if no datasource flag is specified). @@ -223,7 +223,7 @@ test("handleNewGravatar() should create a new entity", () => { Runs a code block before any of the tests in the file. If `beforeAll` is declared inside of a `describe` block, it runs at the beginning of that `describe` block. -Examples: +Các ví dụ: Code inside `beforeAll` will execute once before _all_ tests in the file. @@ -407,7 +407,7 @@ describe('handleUpdatedGravatars', () => { Runs a code block after every test. If `afterEach` is declared inside of a `describe` block, it runs after each test in that `describe` block. -Examples: +Các ví dụ: Code inside `afterEach` will execute after every test. @@ -994,7 +994,7 @@ Using **Matchstick**, subgraph developers are able to run a script that will cal The test coverage tool takes the compiled test `wasm` binaries and converts them to `wat` files, which can then be easily inspected to see whether or not the handlers defined in `subgraph.yaml` have been called. Since code coverage (and testing as whole) is in very early stages in AssemblyScript and WebAssembly, **Matchstick** cannot check for branch coverage. Instead we rely on the assertion that if a given handler has been called, the event/function for it have been properly mocked. -### Prerequisites +### Điều kiện tiên quyết To run the test coverage functionality provided in **Matchstick**, there are a few things you need to prepare beforehand: @@ -1012,7 +1012,7 @@ In order for that function to be visible (for it to be included in the `wat` fil export { handleNewGravatar } ``` -### Usage +### Sử dụng Once that's all set up, to run the test coverage tool, simply run: diff --git a/website/pages/vi/firehose.mdx b/website/pages/vi/firehose.mdx index 5e2b37ee4bb6..02f0d63c72db 100644 --- a/website/pages/vi/firehose.mdx +++ b/website/pages/vi/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. 
Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/vi/global.json b/website/pages/vi/global.json index 6a3eb234bfce..85a32302db5f 100644 --- a/website/pages/vi/global.json +++ b/website/pages/vi/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", - "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", + "collapse": "Thu gọn", + "expand": "Mở rộng", + "previous": "Trước", + "next": "Tiếp", + "editPage": "Chỉnh sửa trang", + "pageSections": "Các Mục trang", + "linkToThisSection": "Liên kết đến mục này", + "technicalLevelRequired": "Yêu cầu Trình độ Kỹ thuật", + "notFoundTitle": "Ối! Trang này đã bị lạc mất trong không gian...", + "notFoundSubtitle": "Kiểm tra xem bạn có đang sử dụng đúng địa chỉ hay không hoặc khám phá trang web của chúng tôi bằng cách nhấp vào liên kết bên dưới.", + "goHome": "Về Trang chủ", "video": "Video" } diff --git a/website/pages/vi/glossary.mdx b/website/pages/vi/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/vi/glossary.mdx +++ b/website/pages/vi/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. 
It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. 
A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. 
As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/vi/graphcast.mdx b/website/pages/vi/graphcast.mdx index e397aad36e43..45b647730496 100644 --- a/website/pages/vi/graphcast.mdx +++ b/website/pages/vi/graphcast.mdx @@ -2,7 +2,7 @@ title: Graphcast --- -## Introduction +## Giới thiệu Is there something you'd like to learn from or share with your fellow Indexers in an automated manner, but it's too much hassle or costs too much gas? @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. 
diff --git a/website/pages/vi/index.json b/website/pages/vi/index.json index 9e28e13d5001..5d69c14b124b 100644 --- a/website/pages/vi/index.json +++ b/website/pages/vi/index.json @@ -1,77 +1,76 @@ { - "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "title": "Bắt đầu", + "intro": "Tìm hiểu về The Graph một giao thức phi tập trung để lập chỉ mục và truy vấn dữ liệu từ các blockchain.", "shortcuts": { "aboutTheGraph": { - "title": "About The Graph", - "description": "Learn more about The Graph" + "title": "Về The Graph", + "description": "Tìm hiểu thêm về The Graph" }, "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "title": "Bắt đầu nhanh", + "description": "Nhảy vào và bắt đầu với The Graph" }, "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "title": "Câu hỏi thường gặp dành cho nhà phát triển", + "description": "Các câu hỏi thường gặp" }, "queryFromAnApplication": { - "title": "Query from an Application", - "description": "Learn to query from an application" + "title": "Truy vấn từ một ứng dụng", + "description": "Học cách truy vấn từ một ứng dụng" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Tạo một Subgraph", + "description": "Sử dụng Studio để tạo các subgraph" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { - "title": "Network Roles", - "description": "Learn about The Graph’s network roles.", + "title": "Các vai trò trong mạng", + "description": "Tìm hiểu về các vai trò trong mạng The Graph.", "roles": { "developer": { - "title": "Developer", - "description": "Create a subgraph or use existing subgraphs in a dapp" + "title": "Nhà phát triển", + "description": "Tạo một subgraph hoặc sử dụng các subgraph hiện có trong một dapp" }, "indexer": { "title": "Indexer", - "description": "Operate a node to index data and serve queries" + "description": "Vận hành một nút để lập chỉ mục dữ liệu và phục vụ các truy vấn" }, "curator": { "title": "Curator", - "description": "Organize data by signaling on subgraphs" + "description": "Tổ chức dữ liệu bằng cách báo hiệu trên các subgraph" }, "delegator": { "title": "Delegator", - "description": "Secure the network by delegating GRT to Indexers" + "description": "Bảo mật mạng bằng cách ủy quyền GRT cho Indexers" } } }, - "readMore": "Read more", + "readMore": "Đọc thêm", "products": { - "title": "Products", + "title": "Các sản phẩm", "products": { "subgraphStudio": { "title": "Subgraph Studio", - "description": "Create, manage and publish subgraphs and API keys" + "description": "Tạo, quản lý và xuất bản các subgraph và khóa API" }, "graphExplorer": { - "title": "Graph Explorer", - "description": "Explore subgraphs and interact with the protocol" + "title": "Trình khám phá Graph", + "description": "Khám phá các subgraph và tương tác với giao thức" }, "hostedService": { - "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "title": "Dịch vụ được lưu trữ", + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { - "title": "Supported Networks", - "description": "The Graph supports the following 
networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "title": "Mạng lưới được hỗ trợ", + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/vi/managing/deprecating-a-subgraph.mdx b/website/pages/vi/managing/deprecating-a-subgraph.mdx index e6adfccad368..6a158f2e8c7b 100644 --- a/website/pages/vi/managing/deprecating-a-subgraph.mdx +++ b/website/pages/vi/managing/deprecating-a-subgraph.mdx @@ -1,10 +1,10 @@ --- -title: Deprecating a Subgraph +title: Ngừng một Subgraph --- -So you'd like to deprecate your subgraph on The Graph Explorer. You've come to the right place! Follow the steps below: +Vậy là, bạn muốn ngừng subgraph của mình trên The Graph Explorer. Bạn đã đến đúng nơi! Hãy làm theo các bước dưới đây: -1. Visit the contract address [here](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) +1. Ghé thăm địa chỉ hợp đồng [tại đây](https://etherscan.io/address/0xadca0dd4729c8ba3acf3e99f3a9f471ef37b6825#writeProxyContract) 2. Call `deprecateSubgraph` with your `SubgraphID` as your argument. 3. Voilà! Your subgraph will no longer show up on searches on The Graph Explorer. @@ -13,6 +13,6 @@ Please note the following: - The `deprecateSubgraph` function should be called by the owner's wallet. - Curators will not be able to signal on the subgraph anymore. - Curators that already signaled on the subgraph will be able to withdraw their signal at an average share price. -- Deprecated subgraphs will be indicated with an error message. +- Các subgraph đã bị ngừng sẽ được chỉ báo bằng một thông báo lỗi. If you interacted with the deprecated subgraph, you'll be able to find it in your user profile under the "Subgraphs", "Indexing", or "Curating" tab, respectively. diff --git a/website/pages/vi/mips-faqs.mdx b/website/pages/vi/mips-faqs.mdx index 73efe82662cb..89bcf6131bd7 100644 --- a/website/pages/vi/mips-faqs.mdx +++ b/website/pages/vi/mips-faqs.mdx @@ -2,7 +2,9 @@ title: MIPs FAQs --- -## Introduction +## Giới thiệu + +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. 
diff --git a/website/pages/vi/network/benefits.mdx b/website/pages/vi/network/benefits.mdx index 839a0a7b9cf7..b2fa328ed82f 100644 --- a/website/pages/vi/network/benefits.mdx +++ b/website/pages/vi/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -34,7 +34,7 @@ Query costs may vary; the quoted cost is the average at time of publication (Dec | Engineering time | $400 per month | None, built into the network with globally distributed Indexers | | Queries per month | Limited to infra capabilities | 30,000 (autoscaling) | | Cost per query | $0 | $0.0005 | -| Infrastructure | Centralized | Decentralized | +| Cơ sở hạ tầng | Centralized | Decentralized | | Geographic redundancy | $750+ per additional node | Included | | Uptime | Varies | 99.9%+ | | Total Monthly Costs | $750+ | ~$15 | @@ -48,7 +48,7 @@ Query costs may vary; the quoted cost is the average at time of publication (Dec | Engineering time | $800 per month | None, built into the network with globally distributed Indexers | | Queries per month | Limited to infra capabilities | 3,000,000+ | | Cost per query | $0 | $0.00025 | -| Infrastructure | Centralized | Decentralized | +| Cơ sở hạ tầng | Centralized | Decentralized | | Engineering expense | $200 per hour | Included | | Geographic redundancy | $1,200 in total costs per additional node | Included | | Uptime | Varies | 99.9%+ | @@ -64,7 +64,7 @@ Query costs may vary; the quoted cost is the average at time of publication (Dec | Engineering time | $6,000 or more per month | None, built into the network with globally distributed Indexers | | Queries per month | Limited to infra capabilities | 30,000,000+ | | Cost per query | $0 | $0.00015 | -| Infrastructure | Centralized | Decentralized | +| Cơ sở hạ tầng | Centralized | Decentralized | | Geographic redundancy | $1,200 in total costs per additional node | Included | | Uptime | Varies | 99.9%+ | | Total Monthly Costs | $11,000+ | $4,500 | @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/vi/network/curating.mdx b/website/pages/vi/network/curating.mdx index 797d9b9dd896..e26962471c0a 100644 --- a/website/pages/vi/network/curating.mdx +++ b/website/pages/vi/network/curating.mdx @@ -6,77 +6,77 @@ Curators are critical to the Graph decentralized economy. They use their knowled When signaling, curators can decide to signal on a specific version of the subgraph or to signal using auto-migrate. When signaling using auto-migrate, a Curator’s shares will always be migrated to the latest version published by the developer. 
If you decide to signal on a specific version instead, shares will always stay on this specific version. -Remember that curation is risky. Please do your diligence to make sure you curate on subgraphs you trust. Creating a subgraph is permissionless, so people can create subgraphs and call them any name they'd like. For more guidance on curation risks, check out [The Graph Academy's Curation Guide.](https://thegraph.academy/curators/) +Hãy nhớ rằng việc curate là rất rủi ro. Hãy thẩm định kỹ càng trong khả năng của bạn để đảm bảo rằng bạn đang curate các subgraph mà bạn tin tưởng. Việc tạo một subgraph là không cần cấp phép, vì vậy mọi người có thể tạo các subgraph và gọi chúng bằng bất kỳ tên nào họ muốn. Để biết thêm hướng dẫn về rủi ro khi curate, hãy xem [Hướng dẫn curate của Graph Academy.](https://thegraph.academy/curators/) -## Bonding Curve 101 +## Căn bản về Bonding Curve First, we take a step back. Each subgraph has a bonding curve on which curation shares are minted when a user adds signal **into** the curve. Each subgraph’s bonding curve is unique. The bonding curves are architected so that the price to mint a curation share on a subgraph increases linearly, over the number of shares minted. -![Price per shares](/img/price-per-share.png) +![Giá mỗi cổ phần](/img/price-per-share.png) -As a result, price increases linearly, meaning that it will get more expensive to purchase a share over time. Here’s an example of what we mean, see the bonding curve below: +Kết quả là, giá tăng tuyến tính, có nghĩa là giá mua cổ phần sẽ đắt hơn theo thời gian. Dưới đây là một ví dụ về ý của chúng tôi, hãy xem đường cong liên kết bên dưới: -![Bonding curve](/img/bonding-curve.png) +![Đường cong liên kết](/img/bonding-curve.png) -Consider we have two curators that mint shares for a subgraph: +Hãy xem xét chúng ta có hai người curator cùng đúc cổ phần của một subgraph: -- Curator A is the first to signal on the subgraph. By adding 120,000 GRT into the curve, they are able to mint 2000 shares. -- Curator B’s signal is on the subgraph at some point in time later. To receive the same amount of shares as Curator A, they would have to add 360,000 GRT into the curve. -- Since both curators hold half the total of curation shares, they would receive an equal amount of curator royalties. -- If any of the curators were now to burn their 2000 curation shares, they would receive 360,000 GRT. -- The remaining curator would now receive all the curator royalties for that subgraph. If they were to burn their shares to withdraw GRT, they would receive 120,000 GRT. +- Curator A là người đầu tiên phát tín hiệu trên subgraph này. Bằng cách thêm 120,000 GRT vào đường cong, anh ấy có thể kiếm được 2000 cổ phần. +- Tín hiệu của người curator B xuất hiện trên subgraph vào một thời điểm nào đó sau đó. Để nhận được cùng một lượng cổ phần như người curator A, thì B sẽ phải thêm 360,000 GRT vào đường cong. +- Vì cả hai người curator này đều nắm giữ một nửa tổng số cổ phần curate, họ sẽ nhận được một số tiền bản quyền của curator bằng nhau. +- Nếu bây giờ bất kỳ 1 trong 2 người đốt 2000 cổ phần curate của họ, họ sẽ nhận được 360,000 GRT. +- Người curator còn lại bây giờ sẽ nhận được tất cả tiền bản quyền của curator cho subgraph đó. Nếu đốt cổ phần để rút GRT, anh ấy sẽ nhận được 120,000 GRT. - **TLDR:** The GRT valuation of curation shares is determined by the bonding curve and can be volatile. There is potential to incur big losses. Signaling early means you put in less GRT for each share. 
By extension, this means you earn more curator royalties per GRT than later curators for the same subgraph. -In general, a bonding curve is a mathematical curve that defines the relationship between token supply and asset price. In the specific case of subgraph curation, **the price of each subgraph share increases with each token invested** and the **price of each share decreases with each token sold.** +Nói chung, đường cong liên kết là một đường cong toán học xác định mối quan hệ giữa lượng cung token và giá tài sản. Trong trường hợp cụ thể của việc curate subgraph, **giá của mỗi cổ phần subgraph tăng lên với mỗi token được đầu tư** và **giá của mỗi cổ phần giảm xuống với mỗi token được bán.** -In the case of The Graph, [Bancor’s implementation of a bonding curve formula](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) is leveraged. +Trong trường hợp của The Graph, [Triển khai của Bancor về công thức đường cong liên kết](https://drive.google.com/file/d/0B3HPNP-GDn7aRkVaV3dkVl9NS2M/view?resourcekey=0-mbIgrdd0B9H8dPNRaeB_TA) được sử dụng. -## How to Signal +## Làm thế nào để phát tín hiệu Now that we’ve covered the basics about how the bonding curve works, this is how you will proceed to signal on a subgraph. Within the Curator tab on the Graph Explorer, curators will be able to signal and unsignal on certain subgraphs based on network stats. For a step-by-step overview of how to do this in the Explorer, [click here.](/network/explorer) -A curator can choose to signal on a specific subgraph version, or they can choose to have their signal automatically migrate to the newest production build of that subgraph. Both are valid strategies and come with their own pros and cons. +Curator có thể chọn phát tín hiệu trên một phiên bản subgraph cụ thể hoặc họ có thể chọn để tín hiệu của họ tự động chuyển sang bản dựng sản xuất mới nhất của subgraph đó. Cả hai đều là những chiến lược hợp lệ và đi kèm với những ưu và nhược điểm của riêng chúng. Signaling on a specific version is especially useful when one subgraph is used by multiple dApps. One dApp might need to regularly update the subgraph with new features. Another dApp might prefer to use an older, well-tested subgraph version. Upon initial curation, a 1% standard tax is incurred. Having your signal automatically migrate to the newest production build can be valuable to ensure you keep accruing query fees. Every time you curate, a 1% curation tax is incurred. You will also pay a 0.5% curation tax on every migration. Subgraph developers are discouraged from frequently publishing new versions - they have to pay a 0.5% curation tax on all auto-migrated curation shares. -> **Note**: The first address to signal a particular subgraph is considered the first curator and will have to do much more gas-intensive work than the rest of the following curators because the first curator initializes the curation share tokens, initializes the bonding curve, and also transfers tokens into the Graph proxy. +> **Lưu ý**: Địa chỉ đầu tiên báo hiệu một subgraph cụ thể được coi là người curator đầu tiên và sẽ phải làm công việc tốn nhiều phí gas hơn nhiều so với phần còn lại của những người curator sau vì người curator đầu tiên phải khởi tạo token cổ phần curate, khởi tạo đường cong liên kết và cũng chuyển token vào proxy the Graph. -## What does Signaling mean for The Graph Network? +## Việc phát tín hiệu có nghĩa là gì đối với Mạng The Graph? 
-For end consumers to be able to query a subgraph, the subgraph must first be indexed. Indexing is a process where files, data, and metadata are looked at, cataloged, and then indexed so that results can be found faster. In order for a subgraph’s data to be searchable, it needs to be organized. +Để người tiêu dùng cuối có thể truy vấn một subgraph, subgraph đó trước tiên phải được lập chỉ mục. Indexing (Lập chỉ mục) là một quá trình mà các tệp, dữ liệu và siêu dữ liệu được xem xét, lập danh mục và sau đó được lập chỉ mục để có thể tìm thấy kết quả nhanh hơn. Để dữ liệu của một subgraph có thể tìm kiếm được, nó cần được sắp xếp. And so, if Indexers had to guess which subgraphs they should index, there would be a low chance that they would earn robust query fees because they’d have no way of validating which subgraphs are good quality. Enter curation. Curators make The Graph network efficient and signaling is the process that curators use to let Indexers know that a subgraph is good to index, where GRT is added to a bonding curve for a subgraph. Indexers can inherently trust the signal from a curator because upon signaling, curators mint a curation share for the subgraph, entitling them to a portion of future query fees that the subgraph drives. Curator signal is represented as ERC20 tokens called Graph Curation Shares (GCS). Curators that want to earn more query fees should signal their GRT to subgraphs that they predict will generate a strong flow of fees to the network. Curators cannot be slashed for bad behavior, but there is a deposit tax on Curators to disincentivize poor decision-making that could harm the integrity of the network. Curators also earn fewer query fees if they choose to curate on a low-quality Subgraph since there will be fewer queries to process or fewer Indexers to process those queries. See the diagram below! -![Signaling diagram](/img/curator-signaling.png) +![Sơ đồ báo hiệu](/img/curator-signaling.png) -Indexers can find subgraphs to index based on curation signals they see in The Graph Explorer (screenshot below). +Indexer có thể tìm các subgraph để lập chỉ mục dựa trên các tín hiệu sắp xếp mà họ thấy trong Graph Explorer (ảnh chụp màn hình bên dưới). -![Explorer subgraphs](/img/explorer-subgraphs.png) +![Trình khám phá subgraph](/img/explorer-subgraphs.png) -## Risks +## Những rủi ro 1. The query market is inherently young at The Graph and there is risk that your %APY may be lower than you expect due to nascent market dynamics. 2. Curation Fee - when a Curator signals GRT on a subgraph, they incur a 1% curation tax. This fee is burned and the rest is deposited into the reserve supply of the bonding curve. 3. When curators burn their shares to withdraw GRT, the GRT valuation of the remaining shares will be reduced. Be aware that in some cases, curators may decide to burn their shares **all at once**. This situation may be common if a dApp developer stops versioning/improving and querying their subgraph or if a subgraph fails. As a result, remaining curators might only be able to withdraw a fraction of their initial GRT. For a network role with a lower risk profile, see [Delegators](/network/delegating). -4. A subgraph can fail due to a bug. A failed subgraph does not accrue query fees. As a result, you’ll have to wait until the developer fixes the bug and deploys a new version. - - If you are subscribed to the newest version of a subgraph, your shares will auto-migrate to that new version. This will incur a 0.5% curation tax. +4. 
Một subgraph có thể thất bại do một lỗi. Một subgraph thất bại không tích lũy phí truy vấn. Do đó, bạn sẽ phải đợi cho đến khi nhà phát triển sửa lỗi và triển khai phiên bản mới. + - Nếu bạn đã đăng ký phiên bản mới nhất của một subgraph, các cổ phần của bạn sẽ tự động chuyển sang phiên bản mới đó. Điều này sẽ phát sinh một khoản thuế curation 0.5%. - If you have signaled on a specific subgraph version and it fails, you will have to manually burn your curation shares. Note that you may receive more or less GRT than you initially deposited into the curation curve, which is a risk associated with being a curator. You can then signal on the new subgraph version, thus incurring a 1% curation tax. -## Curation FAQs +## Câu hỏi thường gặp về Curation -### 1. What % of query fees do Curators earn? +### 1. Curator kiếm được bao nhiêu % phí truy vấn? By signalling on a subgraph, you will earn a share of all the query fees that this subgraph generates. 10% of all query fees goes to the Curators pro-rata to their curation shares. This 10% is subject to governance. -### 2. How do I decide which subgraphs are high quality to signal on? +### 2. Làm cách nào để tôi quyết định xem các subgraph nào có chất lượng cao để báo hiệu? Finding high-quality subgraphs is a complex task, but it can be approached in many different ways. As a Curator, you want to look for trustworthy subgraphs that are driving query volume. A trustworthy subgraph may be valuable if it is complete, accurate, and supports a dApp’s data needs. A poorly architected subgraph might need to be revised or re-published, and can also end up failing. It is critical for Curators to review a subgraph’s architecture or code in order to assess if a subgraph is valuable. As a result: -- Curators can use their understanding of a network to try and predict how an individual subgraph may generate a higher or lower query volume in the future +- Curator có thể sử dụng sự hiểu biết của họ về mạng để thử và dự đoán cách một subgraph riêng lẻ có thể tạo ra khối lượng truy vấn cao hơn hoặc thấp hơn trong tương lai - Curators should also understand the metrics that are available through The Graph Explorer. Metrics like past query volume and who the subgraph developer is can help determine whether or not a subgraph is worth signalling on. ### 3. What’s the cost of updating a subgraph? @@ -87,10 +87,10 @@ Migrating your curation shares to a new subgraph version incurs a curation tax o It’s suggested that you don’t update your subgraphs too frequently. See the question above for more details. -### 5. Can I sell my curation shares? +### 5. Tôi có thể bán cổ phần curation của mình không? Curation shares cannot be "bought" or "sold" like other ERC20 tokens that you may be familiar with. They can only be minted (created) or burned (destroyed) along the bonding curve for a particular subgraph. The amount of GRT needed to mint a new signal, and the amount of GRT you receive when you burn your existing signal are determined by that bonding curve. As a Curator, you need to know that when you burn your curation shares to withdraw GRT, you can end up with more or less GRT than you initially deposited. -Still confused? Check out our Curation video guide below: +Vẫn còn thắc mắc? 
Xem video hướng dẫn Curation của chúng tôi bên dưới: diff --git a/website/pages/vi/network/delegating.mdx b/website/pages/vi/network/delegating.mdx index 4a6d6e00b73e..9c289e941505 100644 --- a/website/pages/vi/network/delegating.mdx +++ b/website/pages/vi/network/delegating.mdx @@ -2,70 +2,70 @@ title: Delegating --- -Delegators are network participants who delegate (i.e., "stake") GRT to one or more Indexers. Delegators contribute to securing the network without running a Graph Node themselves. +Delegator (Người ủy quyền) không bị phạt cắt tài sản (slash) vì hành vi xấu, nhưng có một khoản thuế đặt cọc đối với Delegator để ngăn cản việc đưa ra quyết định kém có thể làm tổn hại đến tính toàn vẹn của mạng. -By delegating to an Indexer, Delegators earn a portion of the Indexer's query fees and rewards. The amount of queries an Indexer can process depends on the Indexer's own (and delegated) stake and the price the Indexer charges for each query, so the more stake that is allocated to an Indexer, the more potential queries they can process. +Hướng dẫn này sẽ giải thích cách trở thành delegator hiệu quả trong Mạng The Graph. Delegator chia sẻ thu nhập của giao thức cùng với tất cả những indexer trên cổ phần stake được ủy quyền của họ. Delegator phải sử dụng phán đoán tốt nhất của họ để chọn Indexer dựa trên nhiều yếu tố. Xin lưu ý rằng hướng dẫn này sẽ không bao gồm các bước như thiết lập Metamask đúng cách, vì thông tin đó có sẵn rộng rãi trên internet. Có ba phần trong hướng dẫn này: -## Delegator Guide +## Hướng dẫn Delegator This guide will explain how to be an effective Delegator in the Graph Network. Delegators share earnings of the protocol alongside all Indexers based on their delegated stake. A Delegator must use their best judgment to choose Indexers based on multiple factors. Please note this guide will not go over steps such as setting up Metamask properly, as that information is widely available on the internet. There are three sections in this guide: -- The risks of delegating tokens in The Graph Network -- How to calculate expected returns as a Delegator +- Rủi ro của việc ủy quyền token trong Mạng The Graph +- Cách tính lợi nhuận kỳ vọng với tư cách là delegator - A video guide showing the steps to delegate in the Graph Network UI -## Delegation Risks +## Rủi ro Ủy quyền -Listed below are the main risks of being a Delegator in the protocol. +Lưu ý quan trọng là mỗi lần bạn ủy quyền, bạn sẽ bị tính phí 0.5%. Nghĩa là nếu bạn đang ủy quyền 1000 GRT, bạn sẽ tự động đốt 5 GRT. -### The delegation tax +### Phí ủy quyền Delegators cannot be slashed for bad behavior, but there is a tax on Delegators to disincentivize poor decision-making that could harm the integrity of the network. -It is important to understand that every time you delegate, you will be charged 0.5%. This means if you are delegating 1000 GRT, you will automatically burn 5 GRT. +Bất cứ khi nào Delegator muốn hủy ủy quyền, token của họ phải chịu khoảng thời gian 28 ngày bỏ ràng buộc ủy quyền. Điều này có nghĩa là họ không thể chuyển token của mình hoặc kiếm bất kỳ phần thưởng nào trong 28 ngày. -This means that to be safe, a Delegator should calculate what their return will be by delegating to an Indexer. For example, a Delegator might calculate how many days it will take before they have earned back the 0.5% tax on their delegation. +Một điều nữa cũng cần xem xét là lựa chọn Indexer một cách khôn ngoan. 
Nếu bạn chọn một Indexer không đáng tin cậy hoặc không hoàn thành tốt công việc, bạn sẽ muốn hủy ủy quyền, khi đó bạn sẽ mất rất nhiều cơ hội kiếm được phần thưởng, cũng tệ như việc đốt đi GRT vậy. -### The delegation unbonding period +### Khoảng thời gian bỏ ràng buộc ủy quyền Whenever a Delegator wants to undelegate, their tokens are subject to a 28-day unbonding period. This means they cannot transfer their tokens, or earn any rewards for 28 days. One thing to consider as well is choosing an Indexer wisely. If you choose an Indexer who was not trustworthy, or not doing a good job, you will want to undelegate, which means you will be losing a lot of opportunities to earn rewards, which can be just as bad as burning GRT.
    - ![Delegation unbonding](/img/Delegation-Unbonding.png) _Note the 0.5% fee in the Delegation UI, as well as the 28 day - unbonding period._ + ![Delegation unbonding](/img/Delegation-Unbonding.png) Lưu ý khoản phí 0.5% trong Giao diện người dùng Ủy quyền, cũng + như khoảng thời gian 28 ngày bỏ ràng buộc ủy quyền.
    -### Choosing a trustworthy Indexer with a fair reward payout for Delegators +### Chọn một indexer đáng tin cậy với phần thưởng hợp lý cho delegator -This is an important part to understand. First let's discuss three very important values, which are the Delegation Parameters. +Như bạn có thể thấy, có rất nhiều suy nghĩ phải cân nhắc khi lựa chọn Indexer phù hợp. Đây là lý do tại sao chúng tôi thực sự khuyên bạn nên khám phá The Graph Discord để xác định ai là Indexers có danh tiếng xã hội và danh tiếng kỹ thuật tốt nhất, để thưởng cho delegator trên cơ sở nhất quán. Nhiều Indexers rất tích cực trong Discord và sẽ sẵn lòng trả lời câu hỏi của bạn. Nhiều người trong số họ đã Indexing trong nhiều tháng testnet và đang cố gắng hết sức để giúp những các delegator kiếm được lợi nhuận tốt, vì nó cải thiện sức khỏe và sự thành công của mạng. Indexing Reward Cut - The indexing reward cut is the portion of the rewards that the Indexer will keep for themselves. That means if it is set to 100%, as a Delegator you will get 0 indexing rewards. If you see 80% in the UI, that means as a Delegator, you will receive 20%. An important note - at the beginning of the network, Indexing Rewards will account for the majority of the rewards.
    - ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) *The top Indexer is giving Delegators 90% of the rewards. The - middle one is giving Delegators 20%. The bottom one is giving Delegators ~83%.* + ![Indexing Reward Cut](/img/Indexing-Reward-Cut.png) Indexer hàng đầu đang trao cho delegator 90% phần thưởng. Những + Indexer tầm trung đang trao cho delegator 20%. Những Indexer dưới cùng đang trao cho delegator khoản 83%.
    -- Query Fee Cut - This works exactly like the Indexing Reward Cut. However, this is specifically for returns on the query fees the Indexer collects. It should be noted that at the start of the network, returns from query fees will be very small compared to the indexing reward. It is recommended to pay attention to the network to determine when the query fees in the network will start to be more significant. +- Phần cắt Phí Truy vấn - cũng như Phần cắt Thưởng Indexing. Tuy nhiên, điều này đặc biệt dành cho lợi nhuận từ phí truy vấn mà Indexer thu thập. Cần lưu ý rằng khi bắt đầu mạng, lợi nhuận từ phí truy vấn sẽ rất nhỏ so với phần thưởng indexing. Bạn nên chú ý đến mạng lưới để xác định khi nào phí truy vấn trong mạng sẽ bắt đầu đáng kể hơn. As you can see, there is a lot of thought that must go into choosing the right Indexer. This is why we highly recommend you explore The Graph Discord to determine who the Indexers are with the best social reputation, and technical reputation, to reward Delegators consistently. Many of the Indexers are very active in Discord and will be happy to answer your questions. Many of them have been Indexing for months in the testnet, and are doing their best to help Delegators earn a good return, as it improves the health and success of the network. -### Calculating Delegators expected return +### Tính toán lợi nhuận dự kiến của Delegator -A Delegator has to consider a lot of factors when determining the return. These include: +Một Delegator phải xem xét rất nhiều yếu tố khi xác định lợi nhuận. Như là: -- A technical Delegator can also look at the Indexer's ability to use the Delegated tokens available to them. If an Indexer is not allocating all the tokens available, they are not earning the maximum profit they could be for themselves or their Delegators. -- Right now in the network an Indexer can choose to close an allocation and collect rewards anytime between 1 and 28 days. So it is possible that an Indexer has a lot of rewards they have not collected yet, and thus, their total rewards are low. This should be taken into consideration in the early days. +- Một Delegator có trình độ kỹ thuật cũng có thể xem xét cách mà Indexer sử dụng các token được Ủy quyền khả dụng cho họ. Nếu một indexer không phân bổ tất cả các token khả dụng, họ sẽ không kiếm được lợi nhuận tối đa mà họ có thể dành cho chính họ hoặc Delegator của họ. +- Ngay bây giờ trong mạng lưới, Indexer có thể chọn đóng phân bổ và nhận phần thưởng bất kỳ lúc nào trong khoảng thời gian từ 1 đến 28 ngày. Vì vậy, có thể một Indexer có rất nhiều phần thưởng mà họ chưa thu thập được, và do đó, tổng phần thưởng của họ thấp. Điều này cần được xem xét từ những ngày đầu. -### Considering the query fee cut and indexing fee cut +### Xem xét Phần cắt Phí Truy vấn và Phần cắt Phí indexing As described in the above sections, you should choose an Indexer that is transparent and honest about setting their Query Fee Cut and Indexing Fee Cuts. A Delegator should also look at the Parameters Cooldown time to see how much of a time buffer they have. After that is done, it is fairly simple to calculate the amount of rewards the Delegators are getting. The formula is: ![Delegation Image 3](/img/Delegation-Reward-Formula.png) -### Considering the Indexer's delegation pool +### Xem xét Delegation pool của Indexer Another thing a Delegator has to consider is what proportion of the Delegation Pool they own. 
All delegation rewards are shared evenly, with a simple rebalancing of the pool determined by the amount the Delegator has deposited into the pool. This gives the Delegator a share of the pool: @@ -75,13 +75,13 @@ Using this formula, we can see that it is actually possible for an Indexer who i A Delegator can therefore do the math to determine that the Indexer offering 20% to Delegators, is offering a better return. -### Considering the delegation capacity +### Xem xét Delegation Capacity (Năng lực Ủy quyền) Another thing to consider is the delegation capacity. Currently, the Delegation Ratio is set to 16. This means that if an Indexer has staked 1,000,000 GRT, their Delegation Capacity is 16,000,000 GRT of Delegated tokens that they can use in the protocol. Any delegated tokens over this amount will dilute all the Delegator rewards. Imagine an Indexer has 100,000,000 GRT delegated to them, and their capacity is only 16,000,000 GRT. This means effectively, 84,000,000 GRT tokens are not being used to earn tokens. And all the Delegators, and the Indexer, are earning way less rewards than they could be. -Therefore a Delegator should always consider the Delegation Capacity of an Indexer, and factor it into their decision making. +Do đó, Delegator phải luôn xem xét Năng lực Ủy quyền của Indexer và cân nhắc nó trong quá trình ra quyết định của họ. ## Delegator FAQs and Bugs diff --git a/website/pages/vi/network/explorer.mdx b/website/pages/vi/network/explorer.mdx index b3a549900b83..7ad4e0e0184c 100644 --- a/website/pages/vi/network/explorer.mdx +++ b/website/pages/vi/network/explorer.mdx @@ -1,8 +1,8 @@ --- -title: Graph Explorer +title: Trình khám phá Graph --- -Welcome to the Graph Explorer, or as we like to call it, your decentralized portal into the world of subgraphs and network data. 👩🏽‍🚀 The Graph Explorer consists of multiple parts where you can interact with other subgraph developers, dapp developers, Curators, Indexers, and Delegators. For a general overview of the Graph Explorer, check out the video below (or keep reading below): +Chào mừng bạn đến với Graph Explorer, hay như chúng tôi thường gọi, cổng thông tin phi tập trung của bạn vào thế giới subgraphs và dữ liệu mạng. 👩🏽‍🚀 Graph Explorer bao gồm nhiều phần để bạn có thể tương tác với các nhà phát triển subgraph khác, nhà phát triển dapp, Curators, Indexers, và Delegators. Để biết tổng quan chung về Graph Explorer, hãy xem video bên dưới (hoặc tiếp tục đọc bên dưới): @@ -12,24 +12,24 @@ First things first, if you just finished deploying and publishing your subgraph ![Explorer Image 1](/img/Subgraphs-Explorer-Landing.png) -When you click into a subgraph, you’ll be able to test queries in the playground and be able to leverage network details to make informed decisions. You’ll also be able to signal GRT on your own subgraph or the subgraphs of others to make indexers aware of its importance and quality. This is critical because signaling on a subgraph incentivizes it to be indexed, which means that it’ll surface on the network to eventually serve queries. +Khi bạn nhấp vào một subgraph, bạn sẽ có thể thử các truy vấn trong playground và có thể tận dụng chi tiết mạng để đưa ra quyết định sáng suốt. Bạn cũng sẽ có thể báo hiệu GRT trên subgraph của riêng bạn hoặc các subgraph của người khác để làm cho các indexer nhận thức được tầm quan trọng và chất lượng của nó. Điều này rất quan trọng vì việc báo hiệu trên một subgraph khuyến khích nó được lập chỉ mục, có nghĩa là nó sẽ xuất hiện trên mạng để cuối cùng phục vụ các truy vấn. 
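The kind of query you try in the playground can also be sent programmatically. Below is a minimal TypeScript sketch; the gateway URL placeholders and the `tokens` entity are illustrative assumptions, not taken from any particular subgraph.

```typescript
// Hypothetical endpoint and entity names -- replace with a real deployment.
const SUBGRAPH_URL = "https://gateway.thegraph.com/api/<api-key>/subgraphs/id/<subgraph-id>";

async function querySubgraph(): Promise<void> {
  const response = await fetch(SUBGRAPH_URL, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({
      // The same kind of query you would test in the playground
      query: "{ tokens(first: 5) { id symbol } }",
    }),
  });
  const { data } = await response.json();
  console.log(data);
}

querySubgraph().catch(console.error);
```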
![Explorer Image 2](/img/Subgraph-Details.png) -On each subgraph’s dedicated page, several details are surfaced. These include: +Trên trang chuyên dụng của mỗi subgraph, một số chi tiết được hiển thị. Bao gồm: -- Signal/Un-signal on subgraphs -- View more details such as charts, current deployment ID, and other metadata -- Switch versions to explore past iterations of the subgraph -- Query subgraphs via GraphQL -- Test subgraphs in the playground -- View the Indexers that are indexing on a certain subgraph -- Subgraph stats (allocations, Curators, etc) -- View the entity who published the subgraph +- Báo hiệu / Hủy báo hiệu trên subgraph +- Xem thêm chi tiết như biểu đồ, ID triển khai hiện tại và siêu dữ liệu khác +- Chuyển đổi giữa các phiên bản để khám phá các phiên bản trước đây của subgraph +- Truy vấn subgraph qua GraphQL +- Thử subgraph trong playground +- Xem các Indexers đang lập chỉ mục trên một subgraph nhất định +- Thống kê Subgraph (phân bổ, Curators, v.v.) +- Xem thực thể đã xuất bản subgraph ![Explorer Image 3](/img/Explorer-Signal-Unsignal.png) -## Participants +## Những người tham gia Within this tab, you’ll get a bird’s eye view of all the people that are participating in the network activities, such as Indexers, Delegators, and Curators. Below, we’ll go into an in-depth review of what each tab means for you. @@ -37,18 +37,18 @@ Within this tab, you’ll get a bird’s eye view of all the people that are par ![Explorer Image 4](/img/Indexer-Pane.png) -Let’s start with the Indexers. Indexers are the backbone of the protocol, being the ones that stake on subgraphs, index them, and serve queries to anyone consuming subgraphs. In the Indexers table, you’ll be able to see an Indexers’ delegation parameters, their stake, how much they have staked to each subgraph, and how much revenue they have made off of query fees and indexing rewards. Deep dives below: +Hãy bắt đầu với Indexers (Người lập chỉ mục). Các Indexers là xương sống của giao thức, là những người stake trên các subgraph, lập chỉ mục chúng và phục vụ các truy vấn cho bất kỳ ai sử dụng subgraph. Trong bảng Indexers, bạn sẽ có thể thấy các thông số ủy quyền của Indexer, lượng stake của họ, số lượng họ đã stake cho mỗi subgraph và doanh thu mà họ đã kiếm được từ phí truy vấn và phần thưởng indexing. Đi sâu hơn: -- Query Fee Cut - the % of the query fee rebates that the Indexer keeps when splitting with Delegators -- Effective Reward Cut - the indexing reward cut applied to the delegation pool. If it’s negative, it means that the Indexer is giving away part of their rewards. If it’s positive, it means that the Indexer is keeping some of their rewards -- Cooldown Remaining - the time remaining until the Indexer can change the above delegation parameters. Cooldown periods are set up by Indexers when they update their delegation parameters -- Owned - This is the Indexer’s deposited stake, which may be slashed for malicious or incorrect behavior -- Delegated - Stake from Delegators which can be allocated by the Indexer, but cannot be slashed -- Allocated - Stake that Indexers are actively allocating towards the subgraphs they are indexing +- Phần Cắt Phí Truy vấn - là % hoàn phí truy vấn mà Indexer giữ lại khi ăn chia với Delegators +- Phần Cắt Thưởng Hiệu quả - phần cắt thưởng indexing được áp dụng cho nhóm ủy quyền (delegation pool). Nếu là âm, điều đó có nghĩa là Indexer đang cho đi một phần phần thưởng của họ.
Nếu là dương, điều đó có nghĩa là Indexer đang giữ lại một số phần thưởng của họ +- Cooldown Remaining (Thời gian chờ còn lại) - thời gian còn lại cho đến khi Indexer có thể thay đổi các thông số ủy quyền ở trên. Thời gian chờ Cooldown được Indexers thiết lập khi họ cập nhật thông số ủy quyền của mình +- Được sở hữu - Đây là tiền stake Indexer đã nạp vào, có thể bị phạt cắt giảm (slashed) nếu có hành vi độc hại hoặc không chính xác +- Được ủy quyền - Lượng stake từ các Delegator có thể được Indexer phân bổ, nhưng không thể bị phạt cắt giảm +- Được phân bổ - phần stake mà Indexers đang tích cực phân bổ cho các subgraph mà họ đang lập chỉ mục - Available Delegation Capacity - the amount of delegated stake the Indexers can still receive before they become over-delegated - Max Delegation Capacity - the maximum amount of delegated stake the Indexer can productively accept. An excess delegated stake cannot be used for allocations or rewards calculations. -- Query Fees - this is the total fees that end users have paid for queries from an Indexer over all time -- Indexer Rewards - this is the total indexer rewards earned by the Indexer and their Delegators over all time. Indexer rewards are paid through GRT issuance. +- Phí Truy vấn - đây là tổng số phí mà người dùng cuối đã trả cho các truy vấn từ Indexer đến hiện tại +- Thưởng Indexer - đây là tổng phần thưởng indexer mà Indexer và các Delegator của họ kiếm được cho đến hiện tại. Phần thưởng Indexer được trả thông qua việc phát hành GRT. Indexers can earn both query fees and indexing rewards. Functionally, this happens when network participants delegate GRT to an Indexer. This enables Indexers to receive query fees and rewards depending on their Indexer parameters. Indexing parameters are set by clicking on the right-hand side of the table, or by going into an Indexer’s profile and clicking the “Delegate” button. @@ -60,11 +60,11 @@ To learn more about how to become an Indexer, you can take a look at the [offici Curators analyze subgraphs to identify which subgraphs are of the highest quality. Once a Curator has found a potentially attractive subgraph, they can curate it by signaling on its bonding curve. In doing so, Curators let Indexers know which subgraphs are high quality and should be indexed. -Curators can be community members, data consumers, or even subgraph developers who signal on their own subgraphs by depositing GRT tokens into a bonding curve. By depositing GRT, Curators mint curation shares of a subgraph. As a result, Curators are eligible to earn a portion of the query fees that the subgraph they have signaled on generates. The bonding curve incentivizes Curators to curate the highest quality data sources. The Curator table in this section will allow you to see: +Curators có thể là các thành viên cộng đồng, người tiêu dùng dữ liệu hoặc thậm chí là nhà phát triển subgraph, những người báo hiệu trên subgraph của chính họ bằng cách nạp token GRT vào một đường cong liên kết. Bằng cách nạp GRT, Curator đúc ra cổ phần curation của một subgraph. Kết quả là, Curators có đủ điều kiện để kiếm một phần phí truy vấn mà subgraph mà họ đã báo hiệu tạo ra. Đường cong liên kết khuyến khích Curators quản lý các nguồn dữ liệu chất lượng cao nhất. 
Bảng Curator trong phần này sẽ cho phép bạn xem: -- The date the Curator started curating -- The number of GRT that was deposited -- The number of shares a Curator owns +- Ngày Curator bắt đầu curate +- Số GRT đã được nạp +- Số cổ phần một Curator sở hữu ![Explorer Image 6](/img/Curation-Overview.png) @@ -72,42 +72,42 @@ If you want to learn more about the Curator role, you can do so by visiting the ### 3. Delegators -Delegators play a key role in maintaining the security and decentralization of The Graph Network. They participate in the network by delegating (i.e., “staking”) GRT tokens to one or multiple indexers. Without Delegators, Indexers are less likely to earn significant rewards and fees. Therefore, Indexers seek to attract Delegators by offering them a portion of the indexing rewards and query fees that they earn. +Delegators (Người Ủy quyền) đóng một vai trò quan trọng trong việc duy trì tính bảo mật và phân quyền của Mạng The Graph. Họ tham gia vào mạng bằng cách ủy quyền (tức là "staking") token GRT cho một hoặc nhiều indexer. Không có những Delegator, các Indexer ít có khả năng kiếm được phần thưởng và phí đáng kể. Do đó, Indexer tìm cách thu hút Delegator bằng cách cung cấp cho họ một phần của phần thưởng lập chỉ mục và phí truy vấn mà họ kiếm được. Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! ![Explorer Image 7](/img/Delegation-Overview.png) -The Delegators table will allow you to see the active Delegators in the community, as well as metrics such as: +Bảng Delegators sẽ cho phép bạn xem các Delegator đang hoạt động trong cộng đồng, cũng như các chỉ số như: -- The number of Indexers a Delegator is delegating towards -- A Delegator’s original delegation -- The rewards they have accumulated but have not withdrawn from the protocol -- The realized rewards they withdrew from the protocol -- Total amount of GRT they have currently in the protocol -- The date they last delegated at +- Số lượng Indexers mà một Delegator đang ủy quyền cho +- Ủy quyền ban đầu của Delegator +- Phần thưởng họ đã tích lũy nhưng chưa rút khỏi giao thức +- Phần thưởng đã ghi nhận ra mà họ rút khỏi giao thức +- Tổng lượng GRT mà họ hiện có trong giao thức +- Ngày họ ủy quyền lần cuối cùng If you want to learn more about how to become a Delegator, look no further! All you have to do is to head over to the [official documentation](/network/delegating) or [The Graph Academy](https://docs.thegraph.academy/official-docs/delegator/choosing-indexers). -## Network +## Mạng lưới -In the Network section, you will see global KPIs as well as the ability to switch to a per-epoch basis and analyze network metrics in more detail. These details will give you a sense of how the network is performing over time. +Trong phần Mạng lưới, bạn sẽ thấy các KPI toàn cầu cũng như khả năng chuyển sang cơ sở từng epoch và phân tích các chỉ số mạng chi tiết hơn. Những chi tiết này sẽ cho bạn biết mạng hoạt động như thế nào theo thời gian. -### Activity +### Hoạt động -The activity section has all the current network metrics as well as some cumulative metrics over time. Here you can see things like: +Phần hoạt động có tất cả các chỉ số mạng hiện tại cũng như một số chỉ số tích lũy theo thời gian. 
Ở đây bạn có thể thấy những thứ như: -- The current total network stake -- The stake split between the Indexers and their Delegators -- Total supply, minted, and burned GRT since the network inception -- Total Indexing rewards since the inception of the protocol -- Protocol parameters such as curation reward, inflation rate, and more -- Current epoch rewards and fees +- Tổng stake mạng hiện tại +- Phần chia stake giữa Indexer và các Delegator của họ +- Tổng cung GRT, lượng được đúc và đốt kể từ khi mạng lưới thành lập +- Tổng phần thưởng Indexing kể từ khi bắt đầu giao thức +- Các thông số giao thức như phần thưởng curation, tỷ lệ lạm phát,... +- Phần thưởng và phí của epoch hiện tại -A few key details that are worth mentioning: +Một vài chi tiết quan trọng đáng được đề cập: -- **Query fees represent the fees generated by the consumers**, and they can be claimed (or not) by the Indexers after a period of at least 7 epochs (see below) after their allocations towards the subgraphs have been closed and the data they served has been validated by the consumers. -- **Indexing rewards represent the amount of rewards the Indexers claimed from the network issuance during the epoch.** Although the protocol issuance is fixed, the rewards only get minted once the Indexers close their allocations towards the subgraphs they’ve been indexing. Thus the per-epoch number of rewards varies (ie. during some epochs, Indexers might’ve collectively closed allocations that have been open for many days). +- **Phí truy vấn đại diện cho phí do người tiêu dùng tạo ra**, và chúng có thể được Indexer yêu cầu (hoặc không) sau một khoảng thời gian ít nhất 7 epochs (xem bên dưới) sau khi việc phân bổ của họ cho các subgraph đã được đóng lại và dữ liệu mà chúng cung cấp đã được người tiêu dùng xác thực. +- **Phần thưởng Indexing đại diện cho số phần thưởng mà Indexer đã yêu cầu được từ việc phát hành mạng trong epoch đó.** Mặc dù việc phát hành giao thức đã được cố định, nhưng phần thưởng chỉ nhận được sau khi Indexer đóng phân bổ của họ cho các subgraph mà họ đã lập chỉ mục. Do đó, số lượng phần thưởng theo từng epoch khác nhau (nghĩa là trong một số epoch, Indexer có thể đã đóng chung các phân bổ đã mở trong nhiều ngày). ![Explorer Image 8](/img/Network-Stats.png) @@ -115,89 +115,89 @@ A few key details that are worth mentioning: In the Epochs section, you can analyze on a per-epoch basis, metrics such as: -- Epoch start or end block -- Query fees generated and indexing rewards collected during a specific epoch -- Epoch status, which refers to the query fee collection and distribution and can have different states: - - The active epoch is the one in which Indexers are currently allocating stake and collecting query fees - - The settling epochs are the ones in which the state channels are being settled. This means that the Indexers are subject to slashing if the consumers open disputes against them. - - The distributing epochs are the epochs in which the state channels for the epochs are being settled and Indexers can claim their query fee rebates. - - The finalized epochs are the epochs that have no query fee rebates left to claim by the Indexers, thus being finalized. 
+- Khối bắt đầu hoặc kết thúc của Epoch +- Phí truy vấn được tạo và phần thưởng indexing được thu thập trong một epoch cụ thể +- Trạng thái Epoch, đề cập đến việc thu và phân phối phí truy vấn và có thể có các trạng thái khác nhau: + - Epoch đang hoạt động là epoch mà Indexer hiện đang phân bổ cổ phần và thu phí truy vấn + - Epoch đang giải quyết là những epoch mà các kênh trạng thái đang được giải quyết. Điều này có nghĩa là Indexers có thể bị phạt cắt giảm nếu người tiêu dùng công khai tranh chấp chống lại họ. + - Epoch đang phân phối là epoch trong đó các kênh trạng thái cho các epoch đang được giải quyết và Indexer có thể yêu cầu hoàn phí truy vấn của họ. + - Epoch được hoàn tất là những epoch không còn khoản hoàn phí truy vấn nào để Indexer yêu cầu, do đó sẽ được hoàn thiện. ![Explorer Image 9](/img/Epoch-Stats.png) -## Your User Profile +## Hồ sơ Người dùng của bạn Now that we’ve talked about the network stats, let’s move on to your personal profile. Your personal profile is the place for you to see your network activity, no matter how you’re participating on the network. Your crypto wallet will act as your user profile, and with the User Dashboard, you’ll be able to see: -### Profile Overview +### Tổng quan Hồ sơ -This is where you can see any current actions you took. This is also where you can find your profile information, description, and website (if you added one). +Đây là nơi bạn có thể xem bất kỳ hành động hiện tại nào bạn đã thực hiện. Đây cũng là nơi bạn có thể tìm thấy thông tin hồ sơ, mô tả và trang web của mình (nếu bạn đã thêm). ![Explorer Image 10](/img/Profile-Overview.png) -### Subgraphs Tab +### Tab Subgraphs -If you click into the Subgraphs tab, you’ll see your published subgraphs. This will not include any subgraphs deployed with the CLI for testing purposes – subgraphs will only show up when they are published to the decentralized network. +Nếu bạn nhấp vào tab Subgraphs, bạn sẽ thấy các subgraph đã xuất bản của mình. Điều này sẽ không bao gồm bất kỳ subgraph nào được triển khai với CLI cho mục đích thử nghiệm - các subgraph sẽ chỉ hiển thị khi chúng được xuất bản lên mạng phi tập trung. ![Explorer Image 11](/img/Subgraphs-Overview.png) -### Indexing Tab +### Tab Indexing -If you click into the Indexing tab, you’ll find a table with all the active and historical allocations towards the subgraphs, as well as charts that you can analyze and see your past performance as an Indexer. +Nếu bạn nhấp vào tab Indexing, bạn sẽ tìm thấy một bảng với tất cả các phân bổ hiện hoạt và lịch sử cho các subgraph, cũng như các biểu đồ mà bạn có thể phân tích và xem hiệu suất trước đây của mình với tư cách là Indexer. -This section will also include details about your net Indexer rewards and net query fees. You’ll see the following metrics: +Phần này cũng sẽ bao gồm thông tin chi tiết về phần thưởng Indexer ròng của bạn và phí truy vấn ròng. 
Bạn sẽ thấy các số liệu sau: -- Delegated Stake - the stake from Delegators that can be allocated by you but cannot be slashed +- Stake được ủy quyền - phần stake từ Delegator có thể được bạn phân bổ nhưng không thể bị phạt cắt giảm (slashed) - Total Query Fees - the total fees that users have paid for queries served by you over time -- Indexer Rewards - the total amount of Indexer rewards you have received, in GRT -- Fee Cut - the % of query fee rebates that you will keep when you split with Delegators -- Rewards Cut - the % of Indexer rewards that you will keep when splitting with Delegators -- Owned - your deposited stake, which could be slashed for malicious or incorrect behavior +- Phần thưởng Indexer - tổng số phần thưởng Indexer bạn đã nhận được, tính bằng GRT +- Phần Cắt Phí - lượng % hoàn phí phí truy vấn mà bạn sẽ giữ lại khi ăn chia với Delegator +- Phần Cắt Thưởng - lượng % phần thưởng Indexer mà bạn sẽ giữ lại khi ăn chia với Delegator +- Được sở hữu - số stake đã nạp của bạn, có thể bị phạt cắt giảm (slashed) vì hành vi độc hại hoặc không chính xác ![Explorer Image 12](/img/Indexer-Stats.png) -### Delegating Tab +### Tab Delegating -Delegators are important to the Graph Network. A Delegator must use their knowledge to choose an Indexer that will provide a healthy return on rewards. Here you can find details of your active and historical delegations, along with the metrics of the Indexers that you delegated towards. +Delegator rất quan trọng đối với Mạng The Graph. Một Delegator phải sử dụng kiến thức của họ để chọn một Indexer sẽ mang lại lợi nhuận lành mạnh từ các phần thưởng. Tại đây, bạn có thể tìm thấy thông tin chi tiết về các ủy quyền đang hoạt động và trong lịch sử của mình, cùng với các chỉ số của Indexer mà bạn đã ủy quyền. -In the first half of the page, you can see your delegation chart, as well as the rewards-only chart. To the left, you can see the KPIs that reflect your current delegation metrics. +Trong nửa đầu của trang, bạn có thể thấy biểu đồ ủy quyền của mình, cũng như biểu đồ chỉ có phần thưởng. Ở bên trái, bạn có thể thấy các KPI phản ánh các chỉ số ủy quyền hiện tại của bạn. -The Delegator metrics you’ll see here in this tab include: +Các chỉ số Delegator mà bạn sẽ thấy ở đây trong tab này bao gồm: - Total delegation rewards -- Total unrealized rewards -- Total realized rewards +- Tổng số phần thưởng chưa ghi nhận +- Tổng số phần thưởng đã ghi được -In the second half of the page, you have the delegations table. Here you can see the Indexers that you delegated towards, as well as their details (such as rewards cuts, cooldown, etc). +Trong nửa sau của trang, bạn có bảng ủy quyền. Tại đây, bạn có thể thấy các Indexer mà bạn đã ủy quyền, cũng như thông tin chi tiết của chúng (chẳng hạn như phần cắt thưởng, thời gian chờ, v.v.). -With the buttons on the right side of the table, you can manage your delegation - delegate more, undelegate, or withdraw your delegation after the thawing period. +Với các nút ở bên phải của bảng, bạn có thể quản lý ủy quyền của mình - ủy quyền nhiều hơn, hủy bỏ hoặc rút lại ủy quyền của bạn sau khoảng thời gian rã đông (thawing period). -Keep in mind that this chart is horizontally scrollable, so if you scroll all the way to the right, you can also see the status of your delegation (delegating, undelegating, withdrawable). +Lưu ý rằng biểu đồ này có thể cuộn theo chiều ngang, vì vậy nếu bạn cuộn hết cỡ sang bên phải, bạn cũng có thể thấy trạng thái ủy quyền của mình (ủy quyền, hủy ủy quyền, có thể rút lại). 
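As a rough illustration of how the unrealized and realized figures above relate to the delegation pool, the following TypeScript sketch models a position as a proportional share of the pool. It is a simplification with invented numbers, not the protocol's exact accounting.

```typescript
// Simplified model of a delegation position -- illustrative only.
interface DelegationPosition {
  deposited: number; // GRT originally delegated
  shares: number; // pool shares held by this Delegator
  withdrawn: number; // GRT already withdrawn (realized rewards)
}

// Current value of the position = its proportion of the pool's GRT.
function positionValue(pos: DelegationPosition, poolShares: number, poolTokens: number): number {
  return (pos.shares / poolShares) * poolTokens;
}

// Unrealized rewards: value accrued in the pool beyond the original deposit.
function unrealizedRewards(pos: DelegationPosition, poolShares: number, poolTokens: number): number {
  return positionValue(pos, poolShares, poolTokens) - pos.deposited;
}

const pos: DelegationPosition = { deposited: 10_000, shares: 10_000, withdrawn: 500 };
const realized = pos.withdrawn; // rewards already pulled out of the protocol
console.log(realized, unrealizedRewards(pos, 200_000, 210_000)); // 500 realized, 500 still in the pool
```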
![Explorer Image 13](/img/Delegation-Stats.png) -### Curating Tab +### Tab Curating -In the Curation tab, you’ll find all the subgraphs you’re signaling on (thus enabling you to receive query fees). Signaling allows Curators to highlight to Indexers which subgraphs are valuable and trustworthy, thus signaling that they need to be indexed on. +Trong tab Curation, bạn sẽ tìm thấy tất cả các subgraph mà bạn đang báo hiệu (do đó cho phép bạn nhận phí truy vấn). Báo hiệu cho phép Curator đánh dấu cho Indexer biết những subgraph nào có giá trị và đáng tin cậy, do đó báo hiệu rằng chúng cần được lập chỉ mục. -Within this tab, you’ll find an overview of: +Trong tab này, bạn sẽ tìm thấy tổng quan về: -- All the subgraphs you're curating on with signal details -- Share totals per subgraph -- Query rewards per subgraph -- Updated at date details +- Tất cả các subgraph bạn đang quản lý với các chi tiết về tín hiệu +- Tổng cổ phần trên mỗi subgraph +- Phần thưởng truy vấn cho mỗi subgraph +- Chi tiết ngày được cập nhật ![Explorer Image 14](/img/Curation-Stats.png) -## Your Profile Settings +## Cài đặt Hồ sơ của bạn -Within your user profile, you’ll be able to manage your personal profile details (like setting up an ENS name). If you’re an Indexer, you have even more access to settings at your fingertips. In your user profile, you’ll be able to set up your delegation parameters and operators. +Trong hồ sơ người dùng của mình, bạn sẽ có thể quản lý chi tiết hồ sơ cá nhân của mình (như thiết lập tên ENS). Nếu bạn là Indexer, bạn thậm chí có nhiều quyền truy cập hơn vào các cài đặt trong tầm tay của mình. Trong hồ sơ người dùng của mình, bạn sẽ có thể thiết lập các tham số ủy quyền và operator của mình. -- Operators take limited actions in the protocol on the Indexer's behalf, such as opening and closing allocations. Operators are typically other Ethereum addresses, separate from their staking wallet, with gated access to the network that Indexers can personally set -- Delegation parameters allow you to control the distribution of GRT between you and your Delegators. +- Operators (Người vận hành) thực hiện các hành động được hạn chế trong giao thức thay mặt cho Indexer, chẳng hạn như mở và đóng phân bổ. Operators thường là các địa chỉ Ethereum khác, tách biệt với ví đặt staking của họ, với quyền truy cập được kiểm soát vào mạng mà Indexer có thể cài đặt cá nhân +- Tham số ủy quyền cho phép bạn kiểm soát việc phân phối GRT giữa bạn và các Delegator của bạn. ![Explorer Image 15](/img/Profile-Settings.png) -As your official portal into the world of decentralized data, The Graph Explorer allows you to take a variety of actions, no matter your role in the network. You can get to your profile settings by opening the dropdown menu next to your address, then clicking on the Settings button. +Là cổng thông tin chính thức của bạn vào thế giới dữ liệu phi tập trung, Graph Explorer cho phép bạn thực hiện nhiều hành động khác nhau, bất kể vai trò của bạn trong mạng. Bạn có thể truy cập cài đặt hồ sơ của mình bằng cách mở menu thả xuống bên cạnh địa chỉ của bạn, sau đó nhấp vào nút Cài đặt.
    ![Wallet details](/img/Wallet-Details.png)
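For Indexers, the delegation parameters shown throughout the Explorer ultimately live on-chain. The ethers.js sketch below is an assumption-laden illustration: it presumes the Staking contract exposes a `setDelegationParameters(indexingRewardCut, queryFeeCut, cooldownBlocks)` function taking parts-per-million values, so verify the function name, units, and address against the deployed contract ABI before relying on anything like it.

```typescript
import { Contract, Wallet, JsonRpcProvider } from "ethers";

// Assumed ABI fragment -- verify against the deployed Staking contract.
const stakingAbi = [
  "function setDelegationParameters(uint32 indexingRewardCut, uint32 queryFeeCut, uint32 cooldownBlocks)",
];

// Placeholder address: substitute the real Staking contract address.
const STAKING_ADDRESS = "0x0000000000000000000000000000000000000000";

async function updateDelegationParameters(): Promise<void> {
  const provider = new JsonRpcProvider(process.env.RPC_URL ?? "http://localhost:8545");
  const indexer = new Wallet(process.env.INDEXER_PRIVATE_KEY!, provider);
  const staking = new Contract(STAKING_ADDRESS, stakingAbi, indexer);

  // Cuts assumed to be parts-per-million: 800_000 = 80% kept by the Indexer.
  const tx = await staking.setDelegationParameters(800_000, 950_000, 500);
  await tx.wait();
}

updateDelegationParameters().catch(console.error);
```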
    diff --git a/website/pages/vi/network/indexing.mdx b/website/pages/vi/network/indexing.mdx index c40fd87a22fe..8365be2856b1 100644 --- a/website/pages/vi/network/indexing.mdx +++ b/website/pages/vi/network/indexing.mdx @@ -2,15 +2,15 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. -Indexers select subgraphs to index based on the subgraph’s curation signal, where Curators stake GRT in order to indicate which subgraphs are high-quality and should be prioritized. Consumers (eg. applications) can also set parameters for which Indexers process queries for their subgraphs and set preferences for query fee pricing. +Indexer chọn các subgraph để index dựa trên tín hiệu curation của subgraph, trong đó Curator stake GRT để chỉ ra subgraph nào có chất lượng cao và cần được ưu tiên. Bên tiêu dùng (ví dụ: ứng dụng) cũng có thể đặt các tham số (parameter) mà Indexer xử lý các truy vấn cho các subgraph của họ và đặt các tùy chọn cho việc định giá phí truy vấn. -## FAQ +## CÂU HỎI THƯỜNG GẶP ### What is the minimum stake required to be an Indexer on the network? @@ -28,21 +28,21 @@ Indexing rewards come from protocol inflation which is set to 3% annual issuance Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. -### What is a proof of indexing (POI)? +### Bằng chứng lập chỉ mục (proof of indexing - POI) là gì? POIs are used in the network to verify that an Indexer is indexing the subgraphs they have allocated on. A POI for the first block of the current epoch must be submitted when closing an allocation for that allocation to be eligible for indexing rewards. A POI for a block is a digest for all entity store transactions for a specific subgraph deployment up to and including that block. -### When are indexing rewards distributed? +### Khi nào Phần thưởng indexing được phân phối? Allocations are continuously accruing rewards while they're active and allocated within 28 epochs. Rewards are collected by the Indexers, and distributed whenever their allocations are closed. That happens either manually, whenever the Indexer wants to force close them, or after 28 epochs a Delegator can close the allocation for the Indexer, but this results in no rewards. 
28 epochs is the max allocation lifetime (right now, one epoch lasts for ~24h). ### Can pending indexing rewards be monitored? -The RewardsManager contract has a read-only [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) function that can be used to check the pending rewards for a specific allocation. +Hợp đồng RewardsManager có một chức năng [getRewards](https://github.com/graphprotocol/contracts/blob/master/contracts/rewards/RewardsManager.sol#L317) chỉ đọc, có thể được sử dụng để kiểm tra phần thưởng đang chờ xử lý cho một phân bổ cụ thể. -Many of the community-made dashboards include pending rewards values and they can be easily checked manually by following these steps: +Nhiều trang tổng quan (dashboard) do cộng đồng tạo bao gồm các giá trị phần thưởng đang chờ xử lý và bạn có thể dễ dàng kiểm tra chúng theo cách thủ công bằng cách làm theo các bước sau: -1. Query the [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) to get the IDs for all active allocations: +1. Truy vấn [mainnet subgraph](https://thegraph.com/hosted-service/subgraph/graphprotocol/graph-network-mainnet) để nhận ID cho tất cả phần phân bổ đang hoạt động: ```graphql query indexerAllocations { @@ -58,66 +58,66 @@ query indexerAllocations { } ``` -Use Etherscan to call `getRewards()`: +Sử dụng Etherscan để gọi `getRewards()`: -- Navigate to [Etherscan interface to Rewards contract](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) +- Điều hướng đến [giao diện Etherscan đến hợp đồng Rewards](https://etherscan.io/address/0x9Ac758AB77733b4150A901ebd659cbF8cB93ED66#readProxyContract) -* To call `getRewards()`: +* Để gọi `getRewards()`: - Expand the **10. getRewards** dropdown. - Enter the **allocationID** in the input. + - Nhập **allocationID** vào ô nhập liệu. - Click the **Query** button. -### What are disputes and where can I view them? +### Tranh chấp là gì và tôi có thể xem chúng ở đâu? -Indexer's queries and allocations can both be disputed on The Graph during the dispute period. The dispute period varies, depending on the type of dispute. Queries/attestations have 7 epochs dispute window, whereas allocations have 56 epochs. After these periods pass, disputes cannot be opened against either of allocations or queries. When a dispute is opened, a deposit of a minimum of 10,000 GRT is required by the Fishermen, which will be locked until the dispute is finalized and a resolution has been given. Fisherman are any network participants that open disputes. +Các truy vấn và phần phân bổ của Indexer đều có thể bị tranh chấp trên The Graph trong thời gian tranh chấp. Thời hạn tranh chấp khác nhau, tùy thuộc vào loại tranh chấp. Truy vấn / chứng thực có cửa sổ tranh chấp 7 epoch (kỷ nguyên), trong khi phần phân bổ có 56 epoch. Sau khi các giai đoạn này trôi qua, không thể mở các tranh chấp đối với phần phân bổ hoặc truy vấn. Khi một tranh chấp được mở ra, Fisherman phải nạp một khoản tiền đặt cọc tối thiểu là 10.000 GRT, khoản này sẽ bị khóa cho đến khi tranh chấp được hoàn tất và phán quyết được đưa ra. Fisherman là bất kỳ người tham gia mạng nào mở tranh chấp. -Disputes have **three** possible outcomes, so does the deposit of the Fishermen. +Tranh chấp có **ba** kết quả có thể xảy ra, và khoản tiền đặt cọc của Fisherman cũng vậy. -- If the dispute is rejected, the GRT deposited by the Fishermen will be burned, and the disputed Indexer will not be slashed.
-- If the dispute is settled as a draw, the Fishermen's deposit will be returned, and the disputed Indexer will not be slashed. -- If the dispute is accepted, the GRT deposited by the Fishermen will be returned, the disputed Indexer will be slashed and the Fishermen will earn 50% of the slashed GRT. +- Nếu tranh chấp bị từ chối, GRT do Fisherman gửi sẽ bị đốt, và Indexer tranh chấp sẽ không bị phạt cắt giảm (slashed). +- Nếu tranh chấp được giải quyết dưới dạng hòa, tiền gửi của Fisherman sẽ được trả lại, và Indexer bị tranh chấp sẽ không bị phạt cắt giảm (slashed). +- Nếu tranh chấp được chấp nhận, lượng GRT do Fisherman đã gửi sẽ được trả lại, Indexer bị tranh chấp sẽ bị cắt và Fisherman sẽ kiếm được 50% GRT đã bị phạt cắt giảm (slashed). -Disputes can be viewed in the UI in an Indexer's profile page under the `Disputes` tab. +Tranh chấp có thể được xem trong giao diện người dùng trong trang hồ sơ của Indexer trong mục `Tranh chấp`. -### What are query fee rebates and when are they distributed? +### Các khoản hoàn phí truy vấn là gì và chúng được phân phối khi nào? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. -### What is query fee cut and indexing reward cut? +### Cắt giảm phí truy vấn và cắt giảm phần thưởng indexing là gì? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. 
If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? -Indexers may differentiate themselves by applying advanced techniques for making subgraph indexing decisions but to give a general idea we'll discuss several key metrics used to evaluate subgraphs in the network: +Indexer có thể tự phân biệt bản thân bằng cách áp dụng các kỹ thuật nâng cao để đưa ra quyết định index subgraph nhưng để đưa ra ý tưởng chung, chúng ta sẽ thảo luận một số số liệu chính được sử dụng để đánh giá các subgraph trong mạng: -- **Curation signal** - The proportion of network curation signal applied to a particular subgraph is a good indicator of the interest in that subgraph, especially during the bootstrap phase when query voluming is ramping up. +- **Tín hiệu curation** - Tỷ lệ tín hiệu curation mạng được áp dụng cho một subgraph cụ thể là một chỉ báo tốt về mức độ quan tâm đến subgraph đó, đặc biệt là trong giai đoạn khởi động khi khối lượng truy vấn đang tăng lên. -- **Query fees collected** - The historical data for volume of query fees collected for a specific subgraph is a good indicator of future demand. +- **Phí truy vấn đã thu** - Dữ liệu lịch sử về khối lượng phí truy vấn được thu thập cho một subgraph cụ thể là một chỉ báo tốt về nhu cầu trong tương lai. - **Amount staked** - Monitoring the behavior of other Indexers or looking at proportions of total stake allocated towards specific subgraphs can allow an Indexer to monitor the supply side for subgraph queries to identify subgraphs that the network is showing confidence in or subgraphs that may show a need for more supply. -- **Subgraphs with no indexing rewards** - Some subgraphs do not generate indexing rewards mainly because they are using unsupported features like IPFS or because they are querying another network outside of mainnet. You will see a message on a subgraph if it is not generating indexing rewards. +- **Subgraph không có phần thưởng indexing** - Một số subgraph không tạo ra phần thưởng indexing chủ yếu vì chúng đang sử dụng các tính năng không được hỗ trợ như IPFS hoặc vì chúng đang truy vấn một mạng khác bên ngoài mainnet. Bạn sẽ thấy một thông báo trên một subgraph nếu nó không tạo ra phần thưởng indexing. -### What are the hardware requirements? +### Có các yêu cầu gì về phần cứng (hardware)? -- **Small** - Enough to get started indexing several subgraphs, will likely need to be expanded. -- **Standard** - Default setup, this is what is used in the example k8s/terraform deployment manifests. +- **Nhỏ** - Đủ để bắt đầu index một số subgraph, có thể sẽ cần được mở rộng. +- **Tiêu chuẩn** - Thiết lập mặc định, đây là những gì được sử dụng trong bản kê khai (manifest) triển khai mẫu k8s/terraform. - **Medium** - Production Indexer supporting 100 subgraphs and 200-500 requests per second. 
-- **Large** - Prepared to index all currently used subgraphs and serve requests for the related traffic.
+- **Lớn** - Được chuẩn bị để index tất cả các subgraph hiện đang được sử dụng và phục vụ các yêu cầu cho lưu lượng truy cập liên quan.
-| Setup | Postgres<br />(CPUs) | Postgres<br />(memory in GBs) | Postgres<br />(disk in TBs) | VMs<br />(CPUs) | VMs<br />(memory in GBs) |
+| Cài đặt | Postgres<br />(CPUs) | Postgres<br />(bộ nhớ tính bằng GB) | Postgres<br />(đĩa tính bằng TB) | VMs<br />(CPUs) | VMs<br />
    (bộ nhớ tính bằng GB) | | --- | :-: | :-: | :-: | :-: | :-: | -| Small | 4 | 8 | 1 | 4 | 16 | -| Standard | 8 | 30 | 1 | 12 | 48 | -| Medium | 16 | 64 | 2 | 32 | 64 | -| Large | 72 | 468 | 3.5 | 48 | 184 | +| Nhỏ | 4 | 8 | 1 | 4 | 16 | +| Tiêu chuẩn | 8 | 30 | 1 | 12 | 48 | +| Trung bình | 16 | 64 | 2 | 32 | 64 | +| Lớn | 72 | 468 | 3.5 | 48 | 184 | ### What are some basic security precautions an Indexer should take? @@ -125,7 +125,7 @@ Indexers may differentiate themselves by applying advanced techniques for making - **Firewall** - Only the Indexer service needs to be exposed publicly and particular attention should be paid to locking down admin ports and database access: the Graph Node JSON-RPC endpoint (default port: 8030), the Indexer management API endpoint (default port: 18000), and the Postgres database endpoint (default port: 5432) should not be exposed. -## Infrastructure +## Cơ sở hạ tầng At the center of an Indexer's infrastructure is the Graph Node which monitors the indexed networks, extracts and loads data per a subgraph definition and serves it as a [GraphQL API](/about/#how-the-graph-works). The Graph Node needs to be connected to an endpoint exposing data from each indexed network; an IPFS node for sourcing data; a PostgreSQL database for its store; and Indexer components which facilitate its interactions with the network. @@ -141,56 +141,56 @@ At the center of an Indexer's infrastructure is the Graph Node which monitors th - **Prometheus metrics server** - The Graph Node and Indexer components log their metrics to the metrics server. -Note: To support agile scaling, it is recommended that query and indexing concerns are separated between different sets of nodes: query nodes and index nodes. +Lưu ý: Để hỗ trợ mở rộng quy mô nhanh, bạn nên tách các mối quan tâm về truy vấn và indexing giữa các nhóm node khác nhau: node truy vấn và node index. -### Ports overview +### Tổng quan về các cổng > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC and the Indexer management endpoints detailed below. #### Graph Node -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
<br />(for subgraph queries) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
-| 8001 | GraphQL WS<br />(for subgraph subscriptions) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
-| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | Máy chủ GraphQL HTTP
<br />(cho các truy vấn subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --http-port | - |
+| 8001 | GraphQL WS<br />(cho các đăng ký subgraph) | /subgraphs/id/...<br />/subgraphs/name/.../... | --ws-port | - |
+| 8020 | JSON-RPC
    (để quản lý triển khai) | / | --admin-port | - | +| 8030 | API trạng thái lập chỉ mục Subgraph | /graphql | --index-node-port | - | +| 8040 | Số liệu Prometheus | /metrics | --metrics-port | - | -#### Indexer Service +#### Dịch vụ Indexer -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | | --- | --- | --- | --- | --- | -| 7600 | GraphQL HTTP server
<br />(for paid subgraph queries) | /subgraphs/id/...<br />/status<br />/channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` |
-| 7300 | Prometheus metrics | /metrics | --metrics-port | - |
+| 7600 | Máy chủ GraphQL HTTP<br />(cho các truy vấn subgraph có trả phí) | /subgraphs/id/...<br />/status<br />
    /channel-messages-inbox | --port | `INDEXER_SERVICE_PORT` | +| 7300 | Số liệu Prometheus | /metrics | --metrics-port | - | -#### Indexer Agent +#### Đại lý Indexer -| Port | Purpose | Routes | CLI Argument | Environment Variable | -| ---- | ---------------------- | ------ | ------------------------- | --------------------------------------- | -| 8000 | Indexer management API | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | +| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | +| ---- | ------------------- | ----- | ------------------------- | --------------------------------------- | +| 8000 | API quản lý Indexer | / | --indexer-management-port | `INDEXER_AGENT_INDEXER_MANAGEMENT_PORT` | -### Setup server infrastructure using Terraform on Google Cloud +### Thiết lập cơ sở hạ tầng máy chủ bằng Terraform trên Google Cloud > Note: Indexers can alternatively use AWS, Microsoft Azure, or Alibaba. -#### Install prerequisites +#### Cài đặt điều kiện tiên quyết - Google Cloud SDK -- Kubectl command line tool +- Công cụ dòng lệnh Kubectl - Terraform -#### Create a Google Cloud Project +#### Tạo một dự án Google Cloud - Clone or navigate to the Indexer repository. -- Navigate to the ./terraform directory, this is where all commands should be executed. +- Điều hướng đến thư mục ./terraform, đây là nơi tất cả các lệnh sẽ được thực thi. ```sh cd terraform ``` -- Authenticate with Google Cloud and create a new project. +- Xác thực với Google Cloud và tạo một dự án mới. ```sh gcloud auth login @@ -198,9 +198,9 @@ project= gcloud projects create --enable-cloud-apis $project ``` -- Use the Google Cloud Console's billing page to enable billing for the new project. +- Sử dụng \[billing page\](billing page) của Google Cloud Consolde để cho phép thanh toán cho dự án mới. -- Create a Google Cloud configuration. +- Tạo một cấu hình Google Cloud. ```sh proj_id=$(gcloud projects list --format='get(project_id)' --filter="name=$project") @@ -210,7 +210,7 @@ gcloud config set compute/region us-central1 gcloud config set compute/zone us-central1-a ``` -- Enable required Google Cloud APIs. +- Bật các API Google Cloud được yêu cầu. ```sh gcloud services enable compute.googleapis.com @@ -219,7 +219,7 @@ gcloud services enable servicenetworking.googleapis.com gcloud services enable sqladmin.googleapis.com ``` -- Create a service account. +- Tạo một tài khoản dịch vụ. ```sh svc_name= @@ -237,7 +237,7 @@ gcloud projects add-iam-policy-binding $proj_id \ --role roles/editor ``` -- Enable peering between database and Kubernetes cluster that will be created in the next step. +- Bật tính năng ngang hàng (peering) giữa cơ sở dữ liệu và cụm Kubernetes sẽ được tạo trong bước tiếp theo. ```sh gcloud compute addresses create google-managed-services-default \ @@ -251,7 +251,7 @@ gcloud services vpc-peerings connect \ --ranges=google-managed-services-default ``` -- Create minimal terraform configuration file (update as needed). +- Tạo tệp cấu hình terraform tối thiểu (cập nhật nếu cần). ```sh indexer= @@ -262,24 +262,24 @@ database_password = "" EOF ``` -#### Use Terraform to create infrastructure +#### Sử dụng Terraform để tạo cơ sở hạ tầng -Before running any commands, read through [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) and create a file `terraform.tfvars` in this directory (or modify the one we created in the last step). 
For each variable where you want to override the default, or where you need to set a value, enter a setting into `terraform.tfvars`. +Trước khi chạy bất kỳ lệnh nào, hãy đọc qua [variables.tf](https://github.com/graphprotocol/indexer/blob/main/terraform/variables.tf) và tạo một tệp `terraform.tfvars` trong thư mục này (hoặc sửa đổi thư mục chúng ta đã tạo ở bước vừa rồi). Đối với mỗi biến mà bạn muốn ghi đè mặc định hoặc nơi bạn cần đặt giá trị, hãy nhập cài đặt vào `terraform.tfvars`. -- Run the following commands to create the infrastructure. +- Chạy các lệnh sau để tạo cơ sở hạ tầng. ```sh -# Install required plugins +# Cài đặt các Plugins được yêu cầu terraform init -# View plan for resources to be created +# Xem kế hoạch cho các tài nguyên sẽ được tạo terraform plan -# Create the resources (expect it to take up to 30 minutes) +# Tạo tài nguyên (dự kiến mất đến 30 phút) terraform apply ``` -Download credentials for the new cluster into `~/.kube/config` and set it as your default context. +Tải xuống thông tin đăng nhập cho cụm mới vào `~/.kube/config` và đặt nó làm ngữ cảnh mặc định của bạn. ```sh gcloud container clusters get-credentials $indexer @@ -289,19 +289,19 @@ kubectl config use-context $(kubectl config get-contexts --output='name' #### Creating the Kubernetes components for the Indexer -- Copy the directory `k8s/overlays` to a new directory `$dir,` and adjust the `bases` entry in `$dir/kustomization.yaml` so that it points to the directory `k8s/base`. +- Sao chép thư mục `k8s/overlays` đến một thư mục mới `$dir,` và điều chỉnh `bases` vào trong `$dir/kustomization.yaml` để nó chỉ đến thư mục `k8s/base`. - Read through all the files in `$dir` and adjust any values as indicated in the comments. -Deploy all resources with `kubectl apply -k $dir`. +Triển khai tất cả các tài nguyên với `kubectl apply -k $dir`. ### Graph Node [Graph Node](https://github.com/graphprotocol/graph-node) is an open source Rust implementation that event sources the Ethereum blockchain to deterministically update a data store that can be queried via the GraphQL endpoint. Developers use subgraphs to define their schema, and a set of mappings for transforming the data sourced from the block chain and the Graph Node handles syncing the entire chain, monitoring for new blocks, and serving it via a GraphQL endpoint. -#### Getting started from source +#### Bắt đầu từ nguồn -#### Install prerequisites +#### Cài đặt điều kiện tiên quyết - **Rust** @@ -309,15 +309,15 @@ Deploy all resources with `kubectl apply -k $dir`. - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Yêu cầu bổ sung cho người dùng Ubuntu** - Để chạy Graph Node trên Ubuntu, có thể cần một số gói bổ sung. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Cài đặt -1. Start a PostgreSQL database server +1. Khởi động máy chủ cơ sở dữ liệu PostgreSQL ```sh initdb -D .postgres @@ -325,9 +325,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Nhân bản [Graph Node](https://github.com/graphprotocol/graph-node) repo và xây dựng nguồn bằng cách chạy `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. 
Bây giờ tất cả các phụ thuộc đã được thiết lập, hãy khởi động Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -336,48 +336,48 @@ cargo run -p graph-node --release -- \ --ipfs https://ipfs.network.thegraph.com ``` -#### Getting started using Docker +#### Bắt đầu sử dụng Docker -#### Prerequisites +#### Điều kiện tiên quyết -- **Ethereum node** - By default, the docker compose setup will use mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) to connect to the Ethereum node on your host machine. You can replace this network name and url by updating `docker-compose.yaml`. +- **Ethereum node** - Theo mặc định, thiết lập soạn thư docker sẽ sử dụng mainnet: [http://host.docker.internal:8545](http://host.docker.internal:8545) để kết nối với node Ethereum trên máy chủ của bạn. Bạn có thể thay thế tên và url mạng này bằng cách cập nhật `docker-compose.yaml`. -#### Setup +#### Cài đặt -1. Clone Graph Node and navigate to the Docker directory: +1. Nhân bản Graph Node và điều hướng đến thư mục Docker: ```sh git clone https://github.com/graphprotocol/graph-node cd graph-node/docker ``` -2. For linux users only - Use the host IP address instead of `host.docker.internal` in the `docker-compose.yaml`using the included script: +2. Chỉ dành cho người dùng linux - Sử dụng địa chỉ IP máy chủ thay vì `host.docker.internal` trong `docker-compose.yaml` bằng cách sử dụng tập lệnh bao gồm: ```sh ./setup.sh ``` -3. Start a local Graph Node that will connect to your Ethereum endpoint: +3. Bắt đầu một Graph Node cục bộ sẽ kết nối với điểm cuối Ethereum của bạn: ```sh docker-compose up ``` -### Indexer components +### Các thành phần của Indexer To successfully participate in the network requires almost constant monitoring and interaction, so we've built a suite of Typescript applications for facilitating an Indexers network participation. There are three Indexer components: - **Indexer agent** - The agent monitors the network and the Indexer's own infrastructure and manages which subgraph deployments are indexed and allocated towards on chain and how much is allocated towards each. -- **Indexer service** - The only component that needs to be exposed externally, the service passes on subgraph queries to the graph node, manages state channels for query payments, shares important decision making information to clients like the gateways. +- **Dịch vụ Indexer** - Thành phần duy nhất cần được hiển thị bên ngoài, dịch vụ chuyển các truy vấn subgraph đến graph node, quản lý các kênh trạng thái cho các khoản thanh toán truy vấn, chia sẻ thông tin ra quyết định quan trọng cho máy khách như các cổng. - **Indexer CLI** - The command line interface for managing the Indexer agent. It allows Indexers to manage cost models, manual allocations, actions queue, and indexing rules. -#### Getting started +#### Bắt đầu The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! 
-#### From NPM packages +#### Từ các gói NPM ```sh npm install -g @graphprotocol/indexer-service @@ -400,17 +400,17 @@ graph indexer connect http://localhost:18000/ graph indexer ... ``` -#### From source +#### Từ nguồn ```sh -# From Repo root directory +# Từ Repo root directory yarn -# Indexer Service +# Dịch vụ Indexer cd packages/indexer-service ./bin/graph-indexer-service start ... -# Indexer agent +# Đại lý Indexer cd packages/indexer-agent ./bin/graph-indexer-service start ... @@ -420,31 +420,31 @@ cd packages/indexer-cli ./bin/graph-indexer-cli indexer ... ``` -#### Using docker +#### Sử dụng docker -- Pull images from the registry +- Kéo hình ảnh từ sổ đăng ký ```sh docker pull ghcr.io/graphprotocol/indexer-service:latest docker pull ghcr.io/graphprotocol/indexer-agent:latest ``` -Or build images locally from source +Hoặc xây dựng hình ảnh cục bộ từ nguồn ```sh -# Indexer service +# Dịch vụ Indexer docker build \ --build-arg NPM_TOKEN= \ -f Dockerfile.indexer-service \ -t indexer-service:latest \ -# Indexer agent +# Đại lý Indexer docker build \ --build-arg NPM_TOKEN= \ -f Dockerfile.indexer-agent \ -t indexer-agent:latest \ ``` -- Run the components +- Chạy các thành phần ```sh docker run -p 7600:7600 -it indexer-service:latest ... @@ -453,15 +453,15 @@ docker run -p 18000:8000 -it indexer-agent:latest ... **NOTE**: After starting the containers, the Indexer service should be accessible at [http://localhost:7600](http://localhost:7600) and the Indexer agent should be exposing the Indexer management API at [http://localhost:18000/](http://localhost:18000/). -#### Using K8s and Terraform +#### Sử dụng K8s and Terraform See the [Setup Server Infrastructure Using Terraform on Google Cloud](/network/indexing#setup-server-infrastructure-using-terraform-on-google-cloud) section -#### Usage +#### Sử dụng -> **NOTE**: All runtime configuration variables may be applied either as parameters to the command on startup or using environment variables of the format `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). +> **LƯU Ý**: Tất cả các biến cấu hình thời gian chạy có thể được áp dụng dưới dạng tham số cho lệnh khi khởi động hoặc sử dụng các biến môi trường của định dạng `COMPONENT_NAME_VARIABLE_NAME`(ex. `INDEXER_AGENT_ETHEREUM`). -#### Indexer agent +#### Đại lý Indexer ```sh graph-indexer-agent start \ @@ -490,7 +490,7 @@ graph-indexer-agent start \ | pino-pretty ``` -#### Indexer service +#### Dịch vụ Indexer ```sh SERVER_HOST=localhost \ @@ -518,7 +518,7 @@ graph-indexer-service start \ #### Indexer CLI -The Indexer CLI is a plugin for [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) accessible in the terminal at `graph indexer`. +Indexer CLI là một plugin dành cho [`@graphprotocol/graph-cli`](https://www.npmjs.com/package/@graphprotocol/graph-cli) có thể truy cập trong terminal tại `graph indexer`. ```sh graph indexer connect http://localhost:18000 @@ -529,7 +529,7 @@ graph indexer status The suggested tool for interacting with the **Indexer Management API** is the **Indexer CLI**, an extension to the **Graph CLI**. The Indexer agent needs input from an Indexer in order to autonomously interact with the network on the behalf of the Indexer. The mechanism for defining Indexer agent behavior are **allocation management** mode and **indexing rules**. Under auto mode, an Indexer can use **indexing rules** to apply their specific strategy for picking subgraphs to index and serve queries for. 
Rules are managed via a GraphQL API served by the agent and known as the Indexer Management API. Under manual mode, an Indexer can create allocation actions using **actions queue** and explicitly approve them before they get executed. Under oversight mode, **indexing rules** are used to populate **actions queue** and also require explicit approval for execution. -#### Usage +#### Sử dụng The **Indexer CLI** connects to the Indexer agent, typically through port-forwarding, so the CLI does not need to run on the same server or cluster. To help you get started, and to provide some context, the CLI will briefly be described here. @@ -537,7 +537,7 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer rules get [options] [ ...]` - Get one or more indexing rules using `all` as the `` to get all rules, or `global` to get the global defaults. An additional argument `--merged` can be used to specify that deployment specific rules are merged with the global rule. This is how they are applied in the Indexer agent. -- `graph indexer rules set [options] ...` - Set one or more indexing rules. +- `graph indexer rules set [options] ...` - Đặt một hoặc nhiều quy tắc indexing. - `graph indexer rules start [options] ` - Start indexing a subgraph deployment if available and set its `decisionBasis` to `always`, so the Indexer agent will always choose to index it. If the global rule is set to always then all available subgraphs on the network will be indexed. @@ -559,15 +559,15 @@ The **Indexer CLI** connects to the Indexer agent, typically through port-forwar - `graph indexer actions execute approve` - Force the worker to execute approved actions immediately -All commands which display rules in the output can choose between the supported output formats (`table`, `yaml`, and `json`) using the `-output` argument. +Tất cả các lệnh hiển thị quy tắc trong đầu ra có thể chọn giữa các định dạng đầu ra được hỗ trợ (`table`, `yaml`, and `json`) bằng việc sử dụng đối số `-output`. -#### Indexing rules +#### Các quy tắc indexing Indexing rules can either be applied as global defaults or for specific subgraph deployments using their IDs. The `deployment` and `decisionBasis` fields are mandatory, while all other fields are optional. When an indexing rule has `rules` as the `decisionBasis`, then the Indexer agent will compare non-null threshold values on that rule with values fetched from the network for the corresponding deployment. If the subgraph deployment has values above (or below) any of the thresholds it will be chosen for indexing. -For example, if the global rule has a `minStake` of **5** (GRT), any subgraph deployment which has more than 5 (GRT) of stake allocated to it will be indexed. Threshold rules include `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, and `minAverageQueryFees`. +Ví dụ: nếu quy tắc chung có `minStake` của **5** (GRT), bất kỳ triển khai subgraph nào có hơn 5 (GRT) stake được phân bổ cho nó sẽ được index. Các quy tắc ngưỡng bao gồm `maxAllocationPercentage`, `minSignal`, `maxSignal`, `minStake`, và `minAverageQueryFees`. -Data model: +Mô hình dữ liệu: ```graphql type IndexingRule { @@ -626,7 +626,7 @@ The action execution worker will only grab items from the queue to execute if th - If an action is successful the worker will ensure that there is an indexing rule present that tells the agent how to manage the allocation moving forward, useful when taking manual actions while the agent is in `auto` or `oversight` mode. 
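To make the rules workflow above concrete, here is a small, hypothetical sketch of how these CLI commands can be combined: it sets a `minStake` threshold on the global rule, forces one deployment to always be indexed, and reads the merged result back. The deployment ID reuses the example ID from this guide, and the `5000` GRT threshold is an arbitrary placeholder, not a recommendation.

```sh
# Sketch only: set a global threshold rule, pin one deployment, then inspect it.
graph indexer rules set global minStake 5000

# Always index this deployment regardless of thresholds (sets its decisionBasis to always)
graph indexer rules start QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6

# Show the deployment-specific rule merged with the global defaults
graph indexer rules get QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 --merged
```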
- The indexer can monitor the action queue to see a history of action execution and if needed re-approve and update action items if they failed execution. The action queue provides a history of all actions queued and taken. -Data model: +Mô hình dữ liệu: ```graphql Type ActionInput { @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -706,38 +706,38 @@ Note that supported action types for allocation management have different input - poi - force (forces using the provided POI even if it doesn’t match what the graph-node provides) -#### Cost models +#### Các mô hình chi phí Cost models provide dynamic pricing for queries based on market and query attributes. The Indexer Service shares a cost model with the gateways for each subgraph for which they intend to respond to queries. The gateways, in turn, use the cost model to make Indexer selection decisions per query and to negotiate payment with chosen Indexers. #### Agora -The Agora language provides a flexible format for declaring cost models for queries. An Agora price model is a sequence of statements that execute in order for each top-level query in a GraphQL query. For each top-level query, the first statement which matches it determines the price for that query. +Ngôn ngữ Agora cung cấp một định dạng linh hoạt để khai báo các mô hình chi phí cho các truy vấn. Mô hình giá Agora là một chuỗi các câu lệnh thực thi theo thứ tự cho mỗi truy vấn cấp cao nhất trong một truy vấn GraphQL. Đối với mỗi truy vấn cấp cao nhất, câu lệnh đầu tiên phù hợp với nó xác định giá cho truy vấn đó. -A statement is comprised of a predicate, which is used for matching GraphQL queries, and a cost expression which when evaluated outputs a cost in decimal GRT. Values in the named argument position of a query may be captured in the predicate and used in the expression. Globals may also be set and substituted in for placeholders in an expression. +Một câu lệnh bao gồm một vị từ (predicate), được sử dụng để đối sánh các truy vấn GraphQL và một biểu thức chi phí mà khi được đánh giá sẽ xuất ra chi phí ở dạng GRT thập phân. Các giá trị ở vị trí đối số được đặt tên của một truy vấn có thể được ghi lại trong vị từ và được sử dụng trong biểu thức. Các Globals có thể được đặt và thay thế cho các phần giữ chỗ trong một biểu thức. 
-Example cost model: +Mô hình chi phí mẫu: ``` -# This statement captures the skip value, -# uses a boolean expression in the predicate to match specific queries that use `skip` -# and a cost expression to calculate the cost based on the `skip` value and the SYSTEM_LOAD global +# Câu lệnh này ghi lại giá trị bỏ qua (skip), +# sử dụng biểu thức boolean trong vị từ để khớp với các truy vấn cụ thể sử dụng `skip` +# và một biểu thức chi phí để tính toán chi phí dựa trên giá trị `skip` và SYSTEM_LOAD global query { pairs(skip: $skip) { id } } when $skip > 2000 => 0.0001 * $skip * $SYSTEM_LOAD; -# This default will match any GraphQL expression. -# It uses a Global substituted into the expression to calculate cost +# Mặc định này sẽ khớp với bất kỳ biểu thức GraphQL nào. +# Nó sử dụng một Global được thay thế vào biểu thức để tính toán chi phí default => 0.1 * $SYSTEM_LOAD; ``` Example query costing using the above model: -| Query | Price | +| Truy vấn | Giá | | ---------------------------------------------------------------------------- | ------- | | { pairs(skip: 5000) { id } } | 0.5 GRT | | { tokens { symbol } } | 0.1 GRT | | { pairs(skip: 5000) { id { tokens } symbol } } | 0.6 GRT | -#### Applying the cost model +#### Áp dụng mô hình chi phí Cost models are applied via the Indexer CLI, which passes them to the Indexer Management API of the Indexer agent for storing in the database. The Indexer Service will then pick them up and serve the cost models to gateways whenever they ask for them. @@ -746,41 +746,41 @@ indexer cost set variables '{ "SYSTEM_LOAD": 1.4 }' indexer cost set model my_model.agora ``` -## Interacting with the network +## Tương tác với mạng -### Stake in the protocol +### Stake trong giao thức -The first steps to participating in the network as an Indexer are to approve the protocol, stake funds, and (optionally) set up an operator address for day-to-day protocol interactions. _ **Note**: For the purposes of these instructions Remix will be used for contract interaction, but feel free to use your tool of choice ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), and [MyCrypto](https://www.mycrypto.com/account) are a few other known tools)._ +Các bước đầu tiên để tham gia vào mạng với tư cách là Indexer là phê duyệt giao thức, stake tiền và (tùy chọn) thiết lập địa chỉ operator cho các tương tác giao thức hàng ngày. _ **Lưu ý**: Đối với các mục đích của các hướng dẫn này, Remix sẽ được sử dụng để tương tác hợp đồng, nhưng hãy thoải mái sử dụng công cụ bạn chọn ([OneClickDapp](https://oneclickdapp.com/), [ABItopic](https://abitopic.io/), và [MyCrypto](https://www.mycrypto.com/account) là một vài công cụ được biết đến khác)._ Once an Indexer has staked GRT in the protocol, the [Indexer components](/network/indexing#indexer-components) can be started up and begin their interactions with the network. -#### Approve tokens +#### Phê duyệt các token -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Mở [Remix app](https://remix.ethereum.org/) trong một trình duyệt -2. In the `File Explorer` create a file named **GraphToken.abi** with the [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). +2. Trong `File Explorer` tạo một tệp tên **GraphToken.abi** với [token ABI](https://raw.githubusercontent.com/graphprotocol/contracts/mainnet-deploy-build/build/abis/GraphToken.json). -3. 
With `GraphToken.abi` selected and open in the editor, switch to the Deploy and `Run Transactions` section in the Remix interface. +3. Với `GraphToken.abi` đã chọn và mở trong trình chỉnh sửa, chuyển sang Deploy (Triển khai) và `Run Transactions` trong giao diện Remix. 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Set the GraphToken contract address - Paste the GraphToken contract address (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) next to `At Address` and click the `At address` button to apply. +5. Đặt địa chỉ hợp đồng GraphToken - Dán địa chỉ hợp đồng GraphToken (`0xc944E90C64B2c07662A292be6244BDf05Cda44a7`) kế bên `At Address` và nhấp vào nút `At address` để áp dụng. -6. Call the `approve(spender, amount)` function to approve the Staking contract. Fill in `spender` with the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) and `amount` with the tokens to stake (in wei). +6. Gọi chức năng `approve(spender, amount)` để phê duyệt hợp đồng Staking. Điền phần `spender` bằng địa chỉ hợp đồng Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) và điền `amount` bằng số token để stake (tính bằng wei). -#### Stake tokens +#### Stake các token -1. Open the [Remix app](https://remix.ethereum.org/) in a browser +1. Mở [Remix app](https://remix.ethereum.org/) trong một trình duyệt 2. In the `File Explorer` create a file named **Staking.abi** with the staking ABI. -3. With `Staking.abi` selected and open in the editor, switch to the `Deploy` and `Run Transactions` section in the Remix interface. +3. Với `Staking.abi` đã chọn và mở trong trình chỉnh sửa, chuyển sang `Deploy` và `Run Transactions` trong giao diện Remix. 4. Under environment select `Injected Web3` and under `Account` select your Indexer address. -5. Set the Staking contract address - Paste the Staking contract address (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) next to `At Address` and click the `At address` button to apply. +5. Đặt địa chỉ hợp đồng Staking - Dán địa chỉ hợp đồng Staking (`0xF55041E37E12cD407ad00CE2910B8269B01263b9`) kế bên `At Address` và nhấp vào nút `At address` để áp dụng. -6. Call `stake()` to stake GRT in the protocol. +6. Gọi lệnh `stake()` để stake GRT vào giao thức. 7. (Optional) Indexers may approve another address to be the operator for their Indexer infrastructure in order to separate the keys that control the funds from those that are performing day to day actions such as allocating on subgraphs and serving (paid) queries. In order to set the operator call `setOperator()` with the operator address. @@ -790,7 +790,7 @@ Once an Indexer has staked GRT in the protocol, the [Indexer components](/networ setDelegationParameters(950000, 600000, 500) ``` -### The life of an allocation +### Vòng đời của một phân bổ After being created by an Indexer a healthy allocation goes through four states. @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. - **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more).
-- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/vi/network/overview.mdx b/website/pages/vi/network/overview.mdx index bee546908372..bcf11c552377 100644 --- a/website/pages/vi/network/overview.mdx +++ b/website/pages/vi/network/overview.mdx @@ -2,11 +2,11 @@ title: Network Overview --- -The Graph Network is a decentralized indexing protocol for organizing blockchain data. Applications use GraphQL to query open APIs called subgraphs, to retrieve data that is indexed on the network. With The Graph, developers can build serverless applications that run entirely on public infrastructure. +Mạng The Graph là một giao thức lập chỉ mục phi tập trung để tổ chức dữ liệu blockchain. Các ứng dụng sử dụng GraphQL để truy vấn các API mở được gọi là subgraph, để truy xuất dữ liệu được lập chỉ mục trên mạng. Với The Graph, các nhà phát triển có thể xây dựng các ứng dụng không máy chủ chạy hoàn toàn trên cơ sở hạ tầng công cộng. -## Overview +## Tổng quan -The Graph Network consists of Indexers, Curators and Delegators that provide services to the network, and serve data to Web3 applications. Consumers use the applications and consume the data. +Mạng The Graph bao gồm Indexers, Curators và Delegators cung cấp các dịch vụ cho mạng và cung cấp dữ liệu cho các ứng dụng Web3. Người tiêu dùng sử dụng các ứng dụng và tiêu thụ dữ liệu. ![Token Economics](/img/Network-roles@2x.png) diff --git a/website/pages/vi/new-chain-integration.mdx b/website/pages/vi/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/vi/new-chain-integration.mdx +++ b/website/pages/vi/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is developing a new Firehose for the chain in question and then integrating that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2.
Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/vi/operating-graph-node.mdx b/website/pages/vi/operating-graph-node.mdx index 832b6cccf347..7112da586680 100644 --- a/website/pages/vi/operating-graph-node.mdx +++ b/website/pages/vi/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes @@ -32,9 +32,9 @@ Subgraph deployment metadata is stored on the IPFS network. The Graph Node prima To enable monitoring and reporting, Graph Node can optionally log metrics to a Prometheus metrics server. 
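As a quick sanity check that metrics are actually being exposed, the following sketch assumes a Graph Node running locally with its default ports (the metrics endpoint and port are listed in the port table below):

```sh
# Prometheus scrapes this endpoint; with default ports a local Graph Node
# serves it on 8040 at /metrics.
curl -s http://localhost:8040/metrics | head -n 20
```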
-### Getting started from source +### Bắt đầu từ nguồn -#### Install prerequisites +#### Cài đặt điều kiện tiên quyết - **Rust** @@ -42,15 +42,15 @@ To enable monitoring and reporting, Graph Node can optionally log metrics to a P - **IPFS** -- **Additional Requirements for Ubuntu users** - To run a Graph Node on Ubuntu a few additional packages may be needed. +- **Yêu cầu bổ sung cho người dùng Ubuntu** - Để chạy Graph Node trên Ubuntu, có thể cần một số gói bổ sung. ```sh sudo apt-get install -y clang libpg-dev libssl-dev pkg-config ``` -#### Setup +#### Cài đặt -1. Start a PostgreSQL database server +1. Khởi động máy chủ cơ sở dữ liệu PostgreSQL ```sh initdb -D .postgres @@ -58,9 +58,9 @@ pg_ctl -D .postgres -l logfile start createdb graph-node ``` -2. Clone [Graph Node](https://github.com/graphprotocol/graph-node) repo and build the source by running `cargo build` +2. Nhân bản [Graph Node](https://github.com/graphprotocol/graph-node) repo và xây dựng nguồn bằng cách chạy `cargo build` -3. Now that all the dependencies are setup, start the Graph Node: +3. Bây giờ tất cả các phụ thuộc đã được thiết lập, hãy khởi động Graph Node: ```sh cargo run -p graph-node --release -- \ @@ -77,13 +77,13 @@ A complete Kubernetes example configuration can be found in the [indexer reposit When it is running Graph Node exposes the following ports: -| Port | Purpose | Routes | CLI Argument | Environment Variable | +| Cổng | Mục đích | Tuyến | Đối số CLI | Biến môi trường | | --- | --- | --- | --- | --- | -| 8000 | GraphQL HTTP server
    (for subgraph queries) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | -| 8001 | GraphQL WS
    (for subgraph subscriptions) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | -| 8020 | JSON-RPC
    (for managing deployments) | / | --admin-port | - | -| 8030 | Subgraph indexing status API | /graphql | --index-node-port | - | -| 8040 | Prometheus metrics | /metrics | --metrics-port | - | +| 8000 | Máy chủ GraphQL HTTP
    (cho các truy vấn subgraph) | /subgraphs/id/...
    /subgraphs/name/.../... | --http-port | - | +| 8001 | GraphQL WS
    (cho các đăng ký subgraph) | /subgraphs/id/...
    /subgraphs/name/.../... | --ws-port | - | +| 8020 | JSON-RPC
    (để quản lý triển khai) | / | --admin-port | - | +| 8030 | API trạng thái lập chỉ mục Subgraph | /graphql | --index-node-port | - | +| 8040 | Số liệu Prometheus | /metrics | --metrics-port | - | > **Important**: Be careful about exposing ports publicly - **administration ports** should be kept locked down. This includes the the Graph Node JSON-RPC endpoint. diff --git a/website/pages/vi/publishing/publishing-a-subgraph.mdx b/website/pages/vi/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..546991c84816 100644 --- a/website/pages/vi/publishing/publishing-a-subgraph.mdx +++ b/website/pages/vi/publishing/publishing-a-subgraph.mdx @@ -6,19 +6,19 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). ## Publishing a subgraph -Subgraphs can be published to the decentralized network directly from the Subgraph Studio dashboard by clicking on the **Publish** button. Once a subgraph is published, it will be available to view in the [Graph Explorer](https://thegraph.com/explorer/). +Các Subgraph có thể được xuất bản lên mạng phi tập trung trực tiếp từ bảng điều khiển Subgraph Studio bằng cách nhấp vào nút **Xuất bản** (Publish). Khi một subgraph được xuất bản, nó sẽ khả dụng để xem trong [Graph Explorer](https://thegraph.com/explorer/). - Subgraphs can be published to Goerli, Arbitrum goerli, Arbitrum One, or Ethereum mainnet. - Regardless of the network the subgraph was published on, it can index data on any of the [supported networks](/developing/supported-networks). -- When publishing a new version for an existing subgraph the same rules apply as above. +- Khi xuất bản một phiên bản mới cho một subgraph hiện có, các quy tắc tương tự sẽ được áp dụng như trên. ## Curating your subgraph diff --git a/website/pages/vi/querying/distributed-systems.mdx b/website/pages/vi/querying/distributed-systems.mdx index 85337206bfd3..d65f3457982a 100644 --- a/website/pages/vi/querying/distributed-systems.mdx +++ b/website/pages/vi/querying/distributed-systems.mdx @@ -1,37 +1,37 @@ --- -title: Distributed Systems +title: Hệ thống phân tán --- -The Graph is a protocol implemented as a distributed system. +The Graph là một giao thức được thực hiện như một hệ thống phân tán. -Connections fail. Requests arrive out of order. Different computers with out-of-sync clocks and states process related requests. Servers restart. Re-orgs happen between requests. These problems are inherent to all distributed systems but are exacerbated in systems operating at a global scale. +Kết nối không thành công. Yêu cầu đến không theo thứ tự. Các máy tính khác nhau có đồng hồ và trạng thái không đồng bộ sẽ xử lý các yêu cầu liên quan. Máy chủ khởi động lại. Tổ chức lại xảy ra giữa các yêu cầu. Những vấn đề này vốn có đối với tất cả các hệ thống phân tán nhưng ngày càng trầm trọng hơn trong các hệ thống hoạt động ở quy mô toàn cầu. -Consider this example of what may occur if a client polls an Indexer for the latest data during a re-org. +Hãy xem xét ví dụ này về những gì có thể xảy ra nếu một máy khách thăm dò một Indexer để biết dữ liệu mới nhất trong một lần tổ chức lại (re-org). -1. 
Indexer ingests block 8 -2. Request served to the client for block 8 -3. Indexer ingests block 9 -4. Indexer ingests block 10A -5. Request served to the client for block 10A -6. Indexer detects reorg to 10B and rolls back 10A -7. Request served to the client for block 9 -8. Indexer ingests block 10B -9. Indexer ingests block 11 -10. Request served to the client for block 11 +1. Indexer nhập khối 8 +2. Yêu cầu được cung cấp cho máy khách đối với khối 8 +3. Indexer nhập khối 9 +4. Indexer nhập khối 10A +5. Yêu cầu được cung cấp cho máy khách đối với khối 10A +6. Indexer phát hiện reorg đến 10B và cuộn lại 10A +7. Yêu cầu được cung cấp cho máy khách đối với khối 9 +8. Indexer nhập khối 10B +9. Indexer nhập khối 11 +10. Yêu cầu được cung cấp cho máy khách đối với khối 11 -From the point of view of the Indexer, things are progressing forward logically. Time is moving forward, though we did have to roll back an uncle block and play the block under consensus forward on top of it. Along the way, the Indexer serves requests using the latest state it knows about at that time. +Theo quan điểm của Indexer, mọi thứ đang tiến triển về phía trước một cách hợp lý. Thời gian đang trôi về phía trước, mặc dù chúng tôi đã phải quay lại một khối chú (uncle block) và phát lại khối được đồng thuận lên trên nó. Trên đường đi, Indexer phục vụ các yêu cầu bằng cách sử dụng trạng thái mới nhất mà nó biết tại thời điểm đó. -From the point of view of the client, however, things appear chaotic. The client observes that the responses were for blocks 8, 10, 9, and 11 in that order. We call this the "block wobble" problem. When a client experiences block wobble, data may appear to contradict itself over time. The situation worsens when we consider that Indexers do not all ingest the latest blocks simultaneously, and your requests may be routed to multiple Indexers. +Tuy nhiên, từ quan điểm của máy khách, mọi thứ có vẻ hỗn loạn. Máy khách nhận thấy rằng các câu trả lời dành cho các khối 8, 10, 9 và 11 theo thứ tự đó. Chúng ta gọi đây là vấn đề "khối chao đảo" (block wobble). Khi máy khách gặp phải hiện tượng khối chao đảo, dữ liệu có thể mâu thuẫn với chính nó theo thời gian. Tình hình trở nên tồi tệ hơn khi chúng ta nhận thấy rằng các Indexer không nhập các khối mới nhất đồng thời và các yêu cầu của bạn có thể được chuyển đến nhiều Indexer. -It is the responsibility of the client and server to work together to provide consistent data to the user. Different approaches must be used depending on the desired consistency as there is no one right program for every problem. +Máy khách và máy chủ có trách nhiệm làm việc cùng nhau để cung cấp dữ liệu nhất quán cho người dùng. Các cách tiếp cận khác nhau phải được sử dụng tùy thuộc vào tính nhất quán mong muốn vì không có một chương trình phù hợp cho mọi vấn đề. -Reasoning through the implications of distributed systems is hard, but the fix may not be! We've established APIs and patterns to help you navigate some common use-cases. The following examples illustrate those patterns but still elide details required by production code (like error handling and cancellation) to not obfuscate the main ideas. +Việc lý luận thông qua các tác động của hệ thống phân tán là khó, nhưng cách khắc phục thì có thể không khó! Chúng tôi đã thiết lập các API và mẫu để giúp bạn điều hướng một số trường hợp sử dụng phổ biến. Các ví dụ sau minh họa các mẫu đó nhưng vẫn lược bỏ các chi tiết mà mã sản xuất yêu cầu (như xử lý lỗi và hủy bỏ) để không làm xáo trộn các ý chính.
-## Polling for updated data +## Thăm dò dữ liệu cập nhật -The Graph provides the `block: { number_gte: $minBlock }` API, which ensures that the response is for a single block equal or higher to `$minBlock`. If the request is made to a `graph-node` instance and the min block is not yet synced, `graph-node` will return an error. If `graph-node` has synced min block, it will run the response for the latest block. If the request is made to an Edge & Node Gateway, the Gateway will filter out any Indexers that have not yet synced min block and make the request for the latest block the Indexer has synced. +The Graph cung cấp API `block: { number_gte: $minBlock }` đảm bảo rằng phản hồi dành cho một khối duy nhất bằng hoặc cao hơn `$minBlock`. Nếu yêu cầu được thực hiện tới một cá thể `graph-node` và khối min chưa được đồng bộ hóa, thì `graph-node` sẽ trả về lỗi. Nếu `graph-node` đã đồng bộ hóa khối min, nó sẽ chạy phản hồi cho khối mới nhất. Nếu yêu cầu được thực hiện với Edge & Node Gateway, Gateway sẽ lọc ra bất kỳ Indexer nào chưa đồng bộ hóa khối min và đưa ra yêu cầu đối với khối mới nhất mà Indexer đã đồng bộ hóa. -We can use `number_gte` to ensure that time never travels backward when polling for data in a loop. Here is an example: +Chúng tôi có thể sử dụng `number_gte` để đảm bảo rằng thời gian không bao giờ quay ngược lại khi thăm dò dữ liệu trong một vòng lặp. Đây là một ví dụ: ```javascript /// Updates the protocol.paused variable to the latest @@ -74,11 +74,11 @@ async function updateProtocolPaused() { } ``` -## Fetching a set of related items +## Tìm nạp một tập hợp các mục liên quan -Another use-case is retrieving a large set or, more generally, retrieving related items across multiple requests. Unlike the polling case (where the desired consistency was to move forward in time), the desired consistency is for a single point in time. +Một ca sử dụng khác là truy xuất một tập hợp lớn hay nói chung là truy xuất các mục có liên quan qua nhiều yêu cầu. Không giống như trường hợp thăm dò dữ liệu (trong đó tính nhất quán mong muốn là tiến về phía trước theo thời gian), tính nhất quán mong muốn dành cho một thời điểm duy nhất. -Here we will use the `block: { hash: $blockHash }` argument to pin all of our results to the same block. +Ở đây chúng ta sẽ sử dụng đối số `block: { hash: $blockHash }` để ghim tất cả các kết quả của chúng ta vào cùng một khối. ```javascript /// Gets a list of domain names from a single block using pagination @@ -131,4 +131,4 @@ async function getDomainNames() { } ``` -Note that in case of a re-org, the client will need to retry from the first request to update the block hash to a non-uncle block. +Lưu ý rằng trong trường hợp tổ chức lại (re-org), máy khách sẽ cần thử lại từ yêu cầu đầu tiên để cập nhật băm khối thành một khối không phải là khối chú (non-uncle block). diff --git a/website/pages/vi/querying/graphql-api.mdx b/website/pages/vi/querying/graphql-api.mdx index 89cda460d58f..d15b7659d204 100644 --- a/website/pages/vi/querying/graphql-api.mdx +++ b/website/pages/vi/querying/graphql-api.mdx @@ -2,15 +2,15 @@ title: GraphQL API --- -This guide explains the GraphQL Query API that is used for the Graph Protocol. +Hướng dẫn này giải thích API truy vấn GraphQL được sử dụng cho Giao thức The Graph. -## Queries +## Các truy vấn -In your subgraph schema you define types called `Entities`. For each `Entity` type, an `entity` and `entities` field will be generated on the top-level `Query` type. Note that `query` does not need to be included at the top of the `graphql` query when using The Graph.
+Trong lược đồ subgraph của bạn, bạn xác định các loại được gọi là `Entities`. Với mỗi loại `Entity`, một trường `entity` và `entities` sẽ được tạo ở loại `Query` cấp cao nhất. Lưu ý là `query` không cần phải được bao gồm ở đầu truy vấn `graphql` khi sử dụng The Graph. ### Examples -Query for a single `Token` entity defined in your schema: +Truy vấn cho một thực thể `Token` được xác định trong lược đồ của bạn: ```graphql { @@ -38,7 +38,7 @@ Query all `Token` entities: When querying a collection, the `orderBy` parameter may be used to sort by a specific attribute. Additionally, the `orderDirection` can be used to specify the sort direction, `asc` for ascending or `desc` for descending. -#### Example +#### Ví dụ ```graphql { @@ -282,7 +282,7 @@ The result of such a query will not change over time, i.e., querying at a certai Note that the current implementation is still subject to certain limitations that might violate these gurantees. The implementation can not always tell that a given block hash is not on the main chain at all, or that the result of a query by block hash for a block that can not be considered final yet might be influenced by a block reorganization running concurrently with the query. They do not affect the results of queries by block hash when the block is final and known to be on the main chain. [This issue](https://github.com/graphprotocol/graph-node/issues/1405) explains what these limitations are in detail. -#### Example +#### Ví dụ ```graphql { @@ -298,7 +298,7 @@ Note that the current implementation is still subject to certain limitations tha This query will return `Challenge` entities, and their associated `Application` entities, as they existed directly after processing block number 8,000,000. -#### Example +#### Ví dụ ```graphql { @@ -322,12 +322,12 @@ Fulltext search queries have one required field, `text`, for supplying search te Fulltext search operators: -| Symbol | Operator | Description | +| Biểu tượng | Toán tử | Miêu tả | | --- | --- | --- | -| `&` | `And` | For combining multiple search terms into a filter for entities that include all of the provided terms | -| | | `Or` | Queries with multiple search terms separated by the or operator will return all entities with a match from any of the provided terms | -| `<->` | `Follow by` | Specify the distance between two words. | -| `:*` | `Prefix` | Use the prefix search term to find words whose prefix match (2 characters required.) | +| `&` | `And` | Để kết hợp nhiều cụm từ tìm kiếm thành một bộ lọc cho các thực thể bao gồm tất cả các cụm từ được cung cấp | +| | | `Or` | Các truy vấn có nhiều cụm từ tìm kiếm được phân tách bằng toán tử hoặc sẽ trả về tất cả các thực thể có kết quả khớp với bất kỳ cụm từ nào được cung cấp | +| `<->` | `Follow by` | Chỉ định khoảng cách giữa hai từ. | +| `:*` | `Prefix` | Sử dụng cụm từ tìm kiếm tiền tố để tìm các từ có tiền tố khớp với nhau (yêu cầu 2 ký tự.) | #### Examples @@ -374,7 +374,7 @@ Combine fulltext operators to make more complex filters. With a pretext search o Graph Node implements [specification-based](https://spec.graphql.org/October2021/#sec-Validation) validation of the GraphQL queries it receives using [graphql-tools-rs](https://github.com/dotansimha/graphql-tools-rs#validation-rules), which is based on the [graphql-js reference implementation](https://github.com/graphql/graphql-js/tree/main/src/validation). 
Queries which fail a validation rule do so with a standard error - visit the [GraphQL spec](https://spec.graphql.org/October2021/#sec-Validation) to learn more. -## Schema +## Lược đồ The schema of your data source--that is, the entity types, values, and relationships that are available to query--are defined through the [GraphQL Interface Definition Langauge (IDL)](https://facebook.github.io/graphql/draft/#sec-Type-System). diff --git a/website/pages/vi/querying/querying-from-an-application.mdx b/website/pages/vi/querying/querying-from-an-application.mdx index 30b6c2264d64..a9be2793e8b8 100644 --- a/website/pages/vi/querying/querying-from-an-application.mdx +++ b/website/pages/vi/querying/querying-from-an-application.mdx @@ -1,26 +1,26 @@ --- -title: Querying from an Application +title: Truy vấn từ một ứng dụng --- Once a subgraph is deployed to the Subgraph Studio or to The Graph Explorer, you will be given the endpoint for your GraphQL API that should look something like this: -**Subgraph Studio (testing endpoint)** +**Subgraph Studio (điểm cuối thử nghiệm)** ```sh Queries (HTTP) https://api.studio.thegraph.com/query/// ``` -**Graph Explorer** +**Trình khám phá Graph** ```sh Queries (HTTP) https://gateway.thegraph.com/api//subgraphs/id/ ``` -Using the GraphQL endpoint, you can use various GraphQL Client libraries to query the subgraph and populate your app with the data indexed by the subgraph. +Sử dụng điểm cuối GraphQL, bạn có thể sử dụng các thư viện GraphQL Client khác nhau để truy vấn subgraph và điền vào ứng dụng của bạn với dữ liệu được lập chỉ mục bởi subgraph. -Here are a couple of the more popular GraphQL clients in the ecosystem and how to use them: +Dưới đây là một số ứng dụng khách GraphQL phổ biến hơn trong hệ sinh thái và cách sử dụng chúng: ## GraphQL clients @@ -160,7 +160,7 @@ First, install `@apollo/client` and `graphql`: npm install @apollo/client graphql ``` -Then you can query the API with the following code: +Sau đó, bạn có thể truy vấn API bằng mã sau: ```javascript import { ApolloClient, InMemoryCache, gql } from '@apollo/client' @@ -241,7 +241,7 @@ First, install `urql` and `graphql`: npm install urql graphql ``` -Then you can query the API with the following code: +Sau đó, bạn có thể truy vấn API bằng mã sau: ```javascript import { createClient } from 'urql' diff --git a/website/pages/vi/querying/querying-the-graph.mdx b/website/pages/vi/querying/querying-the-graph.mdx index af9dcaaf2477..58472767a645 100644 --- a/website/pages/vi/querying/querying-the-graph.mdx +++ b/website/pages/vi/querying/querying-the-graph.mdx @@ -2,13 +2,13 @@ title: Querying The Graph --- -With the subgraph deployed, visit the [Graph Explorer](https://thegraph.com/explorer) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +Với subgraph được triển khai, hãy truy cập [Graph Explorer](https://thegraph.com/explorer) để mở ra một [GraphiQL](https://github.com/graphql/graphiql) giao diện nơi bạn có thể khám phá API GraphQL được triển khai cho subgraph bằng cách đưa ra các truy vấn và xem lược đồ. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. -## Example +## Ví dụ -This query lists all the counters our mapping has created. 
Since we only create one, the result will only contain our one `default-counter`: +Truy vấn này liệt kê tất cả các bộ đếm mà ánh xạ của chúng tôi đã tạo. Vì chúng tôi chỉ tạo một, kết quả sẽ chỉ chứa một `default-counter`: ```graphql { @@ -21,7 +21,7 @@ This query lists all the counters our mapping has created. Since we only create ## Using The Graph Explorer -Each subgraph published to the decentralized Graph Explorer has a unique query URL that you can find by navigating to the subgraph details page and clicking on the "Query" button on the top right corner. This will open a side pane that will give you the unique query URL of the subgraph as well as some instructions about how to query it. +Mỗi subgraph được xuất bản đến Trình khám phá Graph phi tập trung có một URL truy vấn duy nhất mà bạn có thể tìm thấy bằng cách điều hướng đến trang chi tiết subgraph và nhấp vào nút "Truy vấn" ở góc trên cùng bên phải. Thao tác này sẽ mở ra một ngăn bên cung cấp cho bạn URL truy vấn duy nhất của subgraph cũng như một số hướng dẫn về cách truy vấn nó. ![Query Subgraph Pane](/img/query-subgraph-pane.png) @@ -29,4 +29,4 @@ As you can notice, this query URL must use a unique API key. You can create and Querying subgraphs using your API keys will generate query fees that will be paid in GRT. You can learn more about billing [here](/billing). -You can also use the GraphQL playground in the "Playground" tab to query a subgraph within The Graph Explorer. +Bạn cũng có thể sử dụng sân chơi GraphQL trong tab "Sân chơi" (Playground) để truy vấn một subgraph trong The Graph Explorer. diff --git a/website/pages/vi/querying/querying-the-hosted-service.mdx b/website/pages/vi/querying/querying-the-hosted-service.mdx index 14777da41247..779cefc0028b 100644 --- a/website/pages/vi/querying/querying-the-hosted-service.mdx +++ b/website/pages/vi/querying/querying-the-hosted-service.mdx @@ -2,13 +2,13 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. -## Example +## Ví dụ -This query lists all the counters our mapping has created. Since we only create one, the result will only contain our one `default-counter`: +Truy vấn này liệt kê tất cả các bộ đếm mà ánh xạ của chúng tôi đã tạo. Vì chúng tôi chỉ tạo một, kết quả sẽ chỉ chứa một `default-counter`: ```graphql { @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 
Some of the main features are detailed below: diff --git a/website/pages/vi/querying/querying-with-python.mdx b/website/pages/vi/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/vi/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. diff --git a/website/pages/vi/quick-start.mdx b/website/pages/vi/quick-start.mdx new file mode 100644 index 000000000000..e806caadf0b4 --- /dev/null +++ b/website/pages/vi/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Bắt đầu nhanh +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). 
+ +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. +- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Tệp kê khai (subgraph.yaml) - Tệp kê khai xác định nguồn dữ liệu mà các subgraph của bạn sẽ lập chỉ mục. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- Ánh xạ AssemblyScript (mapping.ts) - Đây là mã dịch dữ liệu từ các nguồn dữ liệu của bạn sang các thực thể được xác định trong lược đồ. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Xác thực và triển khai subgraph của bạn. Bạn có thể tìm thấy khóa triển khai trên trang Subgraph trong Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. 
That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction that you published it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +You can query from your dapp if you don't have your API key via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/vi/release-notes/assemblyscript-migration-guide.mdx b/website/pages/vi/release-notes/assemblyscript-migration-guide.mdx index 85f6903a6c69..69c36218d8af 100644 --- a/website/pages/vi/release-notes/assemblyscript-migration-guide.mdx +++ b/website/pages/vi/release-notes/assemblyscript-migration-guide.mdx @@ -1,50 +1,50 @@ --- -title: AssemblyScript Migration Guide +title: Hướng dẫn Di chuyển AssemblyScript --- -Up until now, subgraphs have been using one of the [first versions of AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Finally we've added support for the [newest one available](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 
🎉 +Cho đến nay, các subgraph đang sử dụng một trong các [phiên bản đầu tiên của AssemblyScript](https://github.com/AssemblyScript/assemblyscript/tree/v0.6) (v0.6). Cuối cùng, chúng tôi đã thêm hỗ trợ cho [bản mới nhất hiện có](https://github.com/AssemblyScript/assemblyscript/tree/v0.19.10) (v0.19.10)! 🎉 -That will enable subgraph developers to use newer features of the AS language and standard library. +Điều đó sẽ cho phép các nhà phát triển subgrap sử dụng các tính năng mới hơn của ngôn ngữ AS và thư viện chuẩn. -This guide is applicable for anyone using `graph-cli`/`graph-ts` below version `0.22.0`. If you're already at a higher than (or equal) version to that, you've already been using version `0.19.10` of AssemblyScript 🙂 +Hướng dẫn này có thể áp dụng cho bất kỳ ai sử dụng `graph-cli`/`graph-ts` dưới phiên bản `0.22.0`. Nếu bạn đã ở phiên bản cao hơn (hoặc bằng) với phiên bản đó, bạn đã sử dụng phiên bản`0.19.10` của AssemblyScript 🙂 -> Note: As of `0.24.0`, `graph-node` can support both versions, depending on the `apiVersion` specified in the subgraph manifest. +> Lưu ý: Kể từ `0.24.0`, `graph-node` có thể hỗ trợ cả hai phiên bản, tùy thuộc vào `apiVersion` được chỉ định trong tệp kê khai subgraph. -## Features +## Các đặc điểm -### New functionality +### Chức năng mới -- `TypedArray`s can now be built from `ArrayBuffer`s by using the [new `wrap` static method](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) -- New standard library functions: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Added support for x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) -- Added `StaticArray`, a more efficient array variant ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) -- Added `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Implemented `radix` argument on `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) -- Added support for separators in floating point literals ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) -- Added support for first class functions ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) +- `TypedArray`s bây giờ có thể được xây dựng từ `ArrayBuffer`s bằng cách sử dụng [phương pháp tĩnh `wrap` mới](https://www.assemblyscript.org/stdlib/typedarray.html#static-members) ([v0.8.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.8.1)) +- Các chức năng thư viện tiêu chuẩn mới: `String#toUpperCase`, `String#toLowerCase`, `String#localeCompare`and `TypedArray#set` ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Đã thêm hỗ trợ cho x instanceof GenericClass ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Đã thêm `StaticArray`, một biến thể mảng hiệu quả hơn ([v0.9.3](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.3)) +- Đã thêm `Array#flat` ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Đã thực hiện đối số `radix` trên `Number#toString` ([v0.10.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.1)) +- Đã thêm hỗ trợ cho dấu phân cách trong các ký tự 
dấu phẩy động ([v0.13.7](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.13.7)) +- Đã thêm hỗ trợ cho các chức năng hạng nhất ([v0.14.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.0)) - Add builtins: `i32/i64/f32/f64.add/sub/mul` ([v0.14.13](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.13)) -- Implement `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -- Added support for template literal strings ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) -- Add `encodeURI(Component)` and `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) -- Add `toString`, `toDateString` and `toTimeString` to `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) -- Add `toUTCString` for `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) +- Thực hiện `Array/TypedArray/String#at` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Đã thêm hỗ trợ cho chuỗi ký tự mẫu ([v0.18.17](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.17)) +- Thêm `encodeURI(Component)` và `decodeURI(Component)` ([v0.18.27](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.27)) +- Thêm `toString`, `toDateString` và `toTimeString` vào `Date` ([v0.18.29](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.29)) +- Thêm `toUTCString` cho `Date` ([v0.18.30](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.30)) - Add `nonnull/NonNullable` builtin type ([v0.19.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.19.2)) -### Optimizations +### Tối ưu hóa -- `Math` functions such as `exp`, `exp2`, `log`, `log2` and `pow` have been replaced by faster variants ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Slightly optimize `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) -- Cache more field accesses in std Map and Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) -- Optimize for powers of two in `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) +- Các chức năng `Math` như `exp`, `exp2`, `log`, `log2` và `pow` đã được thay thế bằng các biến thể nhanh hơn ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Tối ưu hóa một chút `Math.mod` ([v0.17.1](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.1)) +- Lưu vào bộ nhớ cache các truy cập trường khác trong std Map và Set ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Tối ưu hóa cho sức mạnh của hai trong `ipow32/64` ([v0.18.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.18.2)) -### Other +### Khác -- The type of an array literal can now be inferred from its contents ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) -- Updated stdlib to Unicode 13.0.0 ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Kiểu của một ký tự mảng bây giờ có thể được suy ra từ nội dung của nó ([v0.9.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.0)) +- Đã cập nhật stdlib thành Unicode 13.0.0 
([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -## How to upgrade? +## Làm thế nào để nâng cấp? -1. Change your mappings `apiVersion` in `subgraph.yaml` to `0.0.6`: +1. Thay đổi `apiVersion` ánh xạ của bạn trong `subgraph.yaml` thành `0.0.6`: ```yaml ... @@ -56,30 +56,30 @@ dataSources: ... ``` -2. Update the `graph-cli` you're using to the `latest` version by running: +2. Cập nhật `graph-cli` bạn đang sử dụng thành phiên bản `latest` bằng cách chạy: ```bash -# if you have it globally installed +# nếu bạn đã cài đặt nó trên toàn cầu npm install --global @graphprotocol/graph-cli@latest -# or in your subgraph if you have it as a dev dependency +# hoặc trong subgraph của bạn nếu bạn có nó như một phụ thuộc của nhà phát triển npm install --save-dev @graphprotocol/graph-cli@latest ``` -3. Do the same for `graph-ts`, but instead of installing globally, save it in your main dependencies: +3. Làm tương tự đối với `graph-ts`, nhưng thay vì cài đặt trên toàn cầu, hãy lưu nó trong các phần phụ thuộc chính của bạn: ```bash npm install --save @graphprotocol/graph-ts@latest ``` -4. Follow the rest of the guide to fix the language breaking changes. -5. Run `codegen` and `deploy` again. +4. Làm theo phần còn lại của hướng dẫn để sửa các thay đổi về lỗi ngôn ngữ. +5. Chạy `codegen` và `deploy` lại. -## Breaking changes +## Thay đổi đột phá -### Nullability +### Vô hiệu -On the older version of AssemblyScript, you could create code like this: +Trên phiên bản AssemblyScript cũ hơn, bạn có thể tạo mã như sau: ```typescript function load(): Value | null { ... } @@ -88,7 +88,7 @@ let maybeValue = load(); maybeValue.aMethod(); ``` -However on the newer version, because the value is nullable, it requires you to check, like this: +Tuy nhiên, trên phiên bản mới hơn, vì giá trị là nullable, nó yêu cầu bạn kiểm tra, như sau: ```typescript let maybeValue = load() @@ -98,7 +98,7 @@ if (maybeValue) { } ``` -Or force it like this: +Hoặc buộc nó như thế này: ```typescript let maybeValue = load()! // breaks in runtime if value is null @@ -106,11 +106,11 @@ let maybeValue = load()! // breaks in runtime if value is null maybeValue.aMethod() ``` -If you are unsure which to choose, we recommend always using the safe version. If the value doesn't exist you might want to just do an early if statement with a return in you subgraph handler. +Nếu bạn không chắc nên chọn cái nào, chúng tôi khuyên bạn nên luôn sử dụng phiên bản an toàn. Nếu giá trị không tồn tại, bạn có thể chỉ muốn thực hiện câu lệnh if sớm với trả về trong trình xử lý subgraph của bạn. -### Variable Shadowing +### Variable Shadowing (Che khuất Biến) -Before you could do [variable shadowing](https://en.wikipedia.org/wiki/Variable_shadowing) and code like this would work: +Trước khi bạn có thể thực hiện [che biến](https://en.wikipedia.org/wiki/Variable_shadowing) và mã như thế này sẽ hoạt động: ```typescript let a = 10 @@ -118,7 +118,7 @@ let b = 20 let a = a + b ``` -However now this isn't possible anymore, and the compiler returns this error: +Tuy nhiên, bây giờ điều này không còn khả thi nữa và trình biên dịch trả về lỗi này: ```typescript ERROR TS2451: Cannot redeclare block-scoped variable 'a' @@ -128,11 +128,11 @@ ERROR TS2451: Cannot redeclare block-scoped variable 'a' in assembly/index.ts(4,3) ``` -You'll need to rename your duplicate variables if you had variable shadowing. +Bạn sẽ cần đổi tên các biến trùng lặp của mình nếu bạn có che biến. 
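For illustration only (this sketch is not part of the upstream guide or its translation): the fix described above is simply to give the second binding its own name, so the compiler no longer reports TS2451.

```typescript
// Hypothetical rename of the shadowed variable from the example above
let a = 10
let b = 20
let sum = a + b // no redeclaration of `a`, so this compiles
```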
-### Null Comparisons +### So sánh Null -By doing the upgrade on your subgraph, sometimes you might get errors like these: +Bằng cách thực hiện nâng cấp trên subgraph của bạn, đôi khi bạn có thể gặp các lỗi như sau: ```typescript ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' is not assignable to type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt'. @@ -141,7 +141,7 @@ ERROR TS2322: Type '~lib/@graphprotocol/graph-ts/common/numbers/BigInt | null' i in src/mappings/file.ts(41,21) ``` -To solve you can simply change the `if` statement to something like this: +Để giải quyết, bạn có thể chỉ cần thay đổi câu lệnh `if` thành một cái gì đó như sau: ```typescript if (!decimals) { @@ -151,26 +151,26 @@ To solve you can simply change the `if` statement to something like this: if (decimals === null) { ``` -The same applies if you're doing != instead of ==. +Điều tương tự cũng áp dụng nếu bạn đang làm != thay vì ==. -### Casting +### Ép kiểu (Casting) -The common way to do casting before was to just use the `as` keyword, like this: +Cách phổ biến để thực hiện ép kiểu trước đây là chỉ sử dụng từ khóa `as`, như sau: ```typescript let byteArray = new ByteArray(10) let uint8Array = byteArray as Uint8Array // equivalent to: byteArray ``` -However this only works in two scenarios: +Tuy nhiên, điều này chỉ hoạt động trong hai trường hợp: -- Primitive casting (between types such as `u8`, `i32`, `bool`; eg: `let b: isize = 10; b as usize`); -- Upcasting on class inheritance (subclass → superclass) +- Ép kiểu nguyên bản (giữa các kiểu như `u8`, `i32`, `bool`; ví dụ: `let b: isize = 10; b as usize`); +- Upcasting về kế thừa lớp (lớp con → lớp cha) (subclass → superclass) -Examples: +Các ví dụ: ```typescript -// primitive casting +// primitive casting (ép kiểu nguyên bản) let a: usize = 10 let b: isize = 5 let c: usize = a + (b as usize) @@ -184,10 +184,10 @@ let bytes = new Bytes(2) // bytes // same as: bytes as Uint8Array ``` -There are two scenarios where you may want to cast, but using `as`/`var` **isn't safe**: +Có hai trường hợp mà bạn có thể muốn ép kiểu, nhưng việc sử dụng `as`/`var` **không an toàn**: -- Downcasting on class inheritance (superclass → subclass) -- Between two types that share a superclass +- Downcasting về kế thừa lớp (lớp cha → lớp con) (superclass → subclass) +- Giữa hai loại chia sẻ lớp cha ```typescript // downcasting on class inheritance @@ -206,10 +206,10 @@ let bytes = new Bytes(2) // bytes // breaks in runtime :( ``` -For those cases, you can use the `changetype` function: +Đối với những trường hợp đó, bạn có thể sử dụng hàm `changetype`: ```typescript -// downcasting on class inheritance +// downcasting về kế thừa lớp class Bytes extends Uint8Array {} let uint8Array = new Uint8Array(2) @@ -225,10 +225,10 @@ let bytes = new Bytes(2) changetype(bytes) // works :) ``` -If you just want to remove nullability, you can keep using the `as` operator (or `variable`), but make sure you know that value can't be null, otherwise it will break. +Nếu bạn chỉ muốn loại bỏ khả năng vô hiệu, bạn có thể tiếp tục sử dụng toán tử `as` (hoặc `variable`), nhưng hãy đảm bảo rằng bạn biết rằng giá trị không được rỗng (null), nếu không nó sẽ bị vỡ.
```typescript -// remove nullability +// loại bỏ khả năng vô hiệu let previousBalance = AccountBalance.load(balanceId) // AccountBalance | null if (previousBalance != null) { @@ -238,25 +238,25 @@ if (previousBalance != null) { let newBalance = new AccountBalance(balanceId) ``` -For the nullability case we recommend taking a look at the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks), it will make your code cleaner 🙂 +Đối với trường hợp vô hiệu, chúng tôi khuyên bạn nên xem xét [tính năng kiểm tra khả năng vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), nó sẽ giúp mã của bạn sạch hơn 🙂 -Also we've added a few more static methods in some types to ease casting, they are: +Ngoài ra, chúng tôi đã thêm một vài phương thức tĩnh trong một số kiểu để dễ dàng ép kiểu, chúng là: - Bytes.fromByteArray - Bytes.fromUint8Array - BigInt.fromByteArray - ByteArray.fromBigInt -### Nullability check with property access +### Kiểm tra tính vô hiệu với quyền truy cập thuộc tính -To use the [nullability check feature](https://www.assemblyscript.org/basics.html#nullability-checks) you can use either `if` statements or the ternary operator (`?` and `:`) like this: +Để sử dụng [tính năng kiểm tra tính vô hiệu](https://www.assemblyscript.org/basics.html#nullability-checks), bạn có thể sử dụng câu lệnh `if` hoặc câu lệnh ba toán tử (`?` and `:`) như thế này: ```typescript let something: string | null = 'data' let somethingOrElse = something ? something : 'else' -// or +// hoặc let somethingOrElse @@ -267,7 +267,7 @@ if (something) { } ``` -However that only works when you're doing the `if` / ternary on a variable, not on a property access, like this: +Tuy nhiên, điều đó chỉ hoạt động khi bạn đang thực hiện `if` / ternary trên một biến, không phải trên quyền truy cập thuộc tính, như sau: ```typescript class Container { @@ -277,10 +277,10 @@ class Container { let container = new Container() container.data = 'data' -let somethingOrElse: string = container.data ? container.data : 'else' // doesn't compile +let somethingOrElse: string = container.data ? container.data : 'else' // không biên dịch ``` -Which outputs this error: +Đầu ra lỗi này: ```typescript ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/string/String'. @@ -289,7 +289,7 @@ ERROR TS2322: Type '~lib/string/String | null' is not assignable to type '~lib/s ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ``` -To fix this issue, you can create a variable for that property access so that the compiler can do the nullability check magic: +Để khắc phục sự cố này, bạn có thể tạo một biến cho quyền truy cập thuộc tính đó để trình biên dịch có thể thực hiện phép thuật kiểm tra tính nullability: ```typescript class Container { @@ -301,12 +301,12 @@ container.data = 'data' let data = container.data -let somethingOrElse: string = data ? data : 'else' // compiles just fine :) +let somethingOrElse: string = data ? data : 'else' // biên dịch tốt :) ``` -### Operator overloading with property access +### Quá tải toán tử với quyền truy cập thuộc tính -If you try to sum (for example) a nullable type (from a property access) with a non nullable one, the AssemblyScript compiler instead of giving a compile time error warning that one of the values is nullable, it just compiles silently, giving chance for the code to break at runtime. 
+Nếu bạn cố gắng tính tổng (ví dụ) một kiểu nullable (từ quyền truy cập thuộc tính) với một kiểu không thể nullable, trình biên dịch AssemblyScript thay vì đưa ra cảnh báo lỗi thời gian biên dịch rằng một trong các giá trị là nullable, nó chỉ biên dịch âm thầm, tạo cơ hội để mã bị phá vỡ trong thời gian chạy. ```typescript class BigInt extends Uint8Array { @@ -330,7 +330,7 @@ let wrapper = new Wrapper(y) wrapper.n = wrapper.n + x // doesn't give compile time errors as it should ``` -We've opened a issue on the AssemblyScript compiler for this, but for now if you do these kind of operations in your subgraph mappings, you should change them to do a null check before it. +Chúng tôi đã mở một vấn đề (issue) trên trình biên dịch AssemblyScript về việc này, nhưng hiện tại nếu bạn thực hiện các loại hoạt động này trong ánh xạ subgraph của mình, bạn nên thay đổi chúng để thực hiện kiểm tra rỗng trước nó. ```typescript let wrapper = new Wrapper(y) @@ -342,9 +342,9 @@ if (!wrapper.n) { wrapper.n = wrapper.n + x // now `n` is guaranteed to be a BigInt ``` -### Value initialization +### Khởi tạo giá trị -If you have any code like this: +Nếu bạn có bất kỳ mã nào như thế này: ```typescript var value: Type // null @@ -352,7 +352,7 @@ value.x = 10 value.y = 'content' ``` -It will compile but break at runtime, that happens because the value hasn't been initialized, so make sure your subgraph has initialized their values, like this: +Nó sẽ biên dịch nhưng bị hỏng trong thời gian chạy, điều đó xảy ra vì giá trị chưa được khởi tạo, vì vậy hãy đảm bảo rằng subgraph của bạn đã khởi tạo các giá trị của chúng, như sau: ```typescript var value = new Type() // initialized @@ -360,7 +360,7 @@ value.x = 10 value.y = 'content' ``` -Also if you have nullable properties in a GraphQL entity, like this: +Ngoài ra, nếu bạn có thuộc tính nullable trong thực thể GraphQL, như sau: ```graphql type Total @entity { @@ -369,7 +369,7 @@ type Total @entity { } ``` -And you have code similar to this: +Và bạn có mã tương tự như sau: ```typescript let total = Total.load('latest') @@ -381,7 +381,7 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -You'll need to make sure to initialize the `total.amount` value, because if you try to access like in the last line for the sum, it will crash. +Bạn cần đảm bảo khởi tạo giá trị `total.amount`, bởi vì nếu bạn cố gắng truy cập như ở dòng cuối cùng cho tổng, nó sẽ bị lỗi.
Vì vậy, bạn có thể khởi tạo nó trước: ```typescript let total = Total.load('latest') @@ -394,7 +394,7 @@ if (total === null) { total.tokens = total.tokens + BigInt.fromI32(1) ``` -Or you can just change your GraphQL schema to not use a nullable type for this property, then we'll initialize it as zero on the `codegen` step 😉 +Hoặc bạn chỉ có thể thay đổi lược đồ GraphQL của mình để không sử dụng kiểu nullable cho thuộc tính này, sau đó chúng tôi sẽ khởi tạo nó bằng 0 ở bước `codegen` 😉 ```graphql type Total @entity { @@ -413,9 +413,9 @@ if (total === null) { total.amount = total.amount + BigInt.fromI32(1) ``` -### Class property initialization +### Khởi tạo thuộc tính lớp -If you export any classes with properties that are other classes (declared by you or by the standard library) like this: +Nếu bạn xuất bất kỳ lớp nào có thuộc tính là các lớp khác (do bạn hoặc thư viện chuẩn khai báo) như thế này: ```typescript class Thing {} @@ -425,7 +425,7 @@ export class Something { } ``` -The compiler will error because you either need to add an initializer for the properties that are classes, or add the `!` operator: +Trình biên dịch sẽ bị lỗi vì bạn cần thêm bộ khởi tạo cho các thuộc tính là các lớp hoặc thêm toán tử `!`: ```typescript export class Something { @@ -513,12 +513,12 @@ type MyEntity @entity { This changed because of nullability differences between AssemblyScript versions, and it's related to the `src/generated/schema.ts` file (default path, you might have changed this). -### Other +### Khác -- Aligned `Map#set` and `Set#add` with the spec, returning `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) +- Căn chỉnh `Map#set` và `Set#add` với thông số kỹ thuật, trả về `this` ([v0.9.2](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.9.2)) - Arrays no longer inherit from ArrayBufferView, but are now distinct ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- Classes initialized from object literals can no longer define a constructor ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) -- The result of a `**` binary operation is now the common denominator integer if both operands are integers. Previously, the result was a float as if calling `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) -- Coerce `NaN` to `false` when casting to `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) -- When shifting a small integer value of type `i8`/`u8` or `i16`/`u16`, only the 3 respectively 4 least significant bits of the RHS value affect the result, analogous to the result of an `i32.shl` only being affected by the 5 least significant bits of the RHS value. Example: `someI8 << 8` previously produced the value `0`, but now produces `someI8` due to masking the RHS as `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) -- Bug fix of relational string comparisons when sizes differ ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) +- Các lớp được khởi tạo từ các ký tự đối tượng không còn có thể xác định một phương thức khởi tạo nữa ([v0.10.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.10.0)) +- Kết quả của phép toán nhị phân `**` bây giờ là số nguyên mẫu số chung nếu cả hai toán hạng đều là số nguyên. 
Trước đây, kết quả là một float như thể đang gọi `Math/f.pow` ([v0.11.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.11.0)) +- Ép buộc `NaN` thành `false` khi ép kiểu thành `bool` ([v0.14.9](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.14.9)) +- Khi dịch chuyển một giá trị số nguyên nhỏ của kiểu `i8`/`u8` hoặc `i16`/`u16`, chỉ 3 bit tương ứng 4 bit ít quan trọng nhất của giá trị RHS ảnh hưởng đến kết quả, tương tự như kết quả của một `i32.shl` chỉ bị ảnh hưởng bởi 5 bit ít quan trọng nhất của giá trị RHS. Ví dụ: `someI8 << 8` trước đây đã tạo ra giá trị `0`, nhưng bây giờ tạo ra`someI8` do che dấu RHS là `8 & 7 = 0` (3 bits) ([v0.17.0](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.0)) +- Sửa lỗi so sánh chuỗi quan hệ khi kích thước khác nhau ([v0.17.8](https://github.com/AssemblyScript/assemblyscript/releases/tag/v0.17.8)) diff --git a/website/pages/vi/substreams.mdx b/website/pages/vi/substreams.mdx index d0354f06bab1..2a06de8ac868 100644 --- a/website/pages/vi/substreams.mdx +++ b/website/pages/vi/substreams.mdx @@ -2,8 +2,43 @@ title: Substreams --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -Visit the [substreams documentation](https://substreams.streamingfast.io/) to learn more and to get started building substreams. +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send to data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). 
+- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/vi/sunrise.mdx b/website/pages/vi/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/vi/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. 
The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). 
+ +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/vi/tokenomics.mdx b/website/pages/vi/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/vi/tokenomics.mdx +++ b/website/pages/vi/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. 
Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/yo/arbitrum/arbitrum-faq.mdx b/website/pages/yo/arbitrum/arbitrum-faq.mdx index 849d08c92b93..41f7ca653d30 100644 --- a/website/pages/yo/arbitrum/arbitrum-faq.mdx +++ b/website/pages/yo/arbitrum/arbitrum-faq.mdx @@ -2,77 +2,77 @@ title: Arbitrum FAQ --- -Click [here](#billing-on-arbitrum-faqs) if you would like to skip to the Arbitrum Billing FAQs. +Te [ibi yìí]\(#ìwé ọ̀rọ̀ lórí àwọn Ìbéèrè loorekoore tí Arbitrum) Tí ó ba fe jásí Àwọn ìbéèrè tí àwọn ènìyàn sáábà máa ń Béèrè Lórí Arbitrum. -## Why is The Graph implementing an L2 Solution? +## Kíni ìdí tí The Graph ń ṣé ìmúṣẹ ojútùú L2? -By scaling The Graph on L2, network participants can expect: +Nípa wí wọn The Graph lórí L2, àwọn olukopa nẹtiwọọki lè nírètí: -- Upwards of 26x savings on gas fees +- Ìṣirò ọ̀nà mẹ́rìndínlọ́gbọ̀n itulara lórí owo gaasi -- Faster transaction speed +- Iyara idunadura -- Security inherited from Ethereum +- Ààbò jíjógún lati ọdọ Ethereum -Scaling the protocol smart contracts onto L2 allows network participants to interact more frequently at a reduced cost in gas fees. For example, Indexers could open and close allocations to index a greater number of subgraphs with greater frequency, developers could deploy and update subgraphs with greater ease, Delegators could delegate GRT with increased frequency, and Curators could add or remove signal to a larger number of subgraphs–actions previously considered too cost-prohibitive to perform frequently due to gas. +Gidiwọn àwọn àdéhùn jíjáfáfá ìlànà lórí L2 ngbanilaaye àwọn olukopa nẹtiwọọki làti ṣé ajọṣepọ nígbà gbogbo ní ìdíyelé idinku nínú àwọn ìdíyelé gaasi. 
Fún àpẹẹrẹ, Àwọn alatọka lè kopa ninu ṣíṣí ati títí awọn ipin si atọ́ka nọmba ti o tóbi ju ti àwọn Subgrafu pẹlu igbohunsafẹfẹ nla, àwọn olupilẹṣẹ lè ràn lọ àti imudojuiwọn àwọn Subgrafu pẹ̀lú irọrun ńlá, Àwọn aṣojú lè ṣé aṣojú GRT pẹ̀lú igbohunsafẹfẹ ti o pọ si, ati Awọn olutọpa le ṣafikun tabi yọ ami ifihan kuro si nọmba nla ti awọn Subgrafu-awọn iṣe ti a ti ro tẹlẹ ju iye owo idinamọ lati ṣe nigbagbogbo nitori gaasi. -The Graph community decided to move forward with Arbitrum last year after the outcome of the [GIP-0031](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) discussion. +Agbegbe The Graph pinnu lati lọ siwaju pẹlu Arbitrum ni ọdun to kọja lẹhin abajade ti ijiroro [GIP-0031] \(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) -## What do I need to do to use The Graph on L2? +## Kini mo nilo lati se lati lo The Graph lori L2? -Users bridge their GRT and ETH  using one of the following methods: +Àwọn olumulo ṣé afárá GRT wọn àti ETH ní lilo ọkàn nínú àwọn ọnà wọ̀nyí: -- [The Graph Bridge on Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) -- [TransferTo](https://transferto.xyz/swap) -- [Connext Bridge](https://bridge.connext.network/) +- [Afara The Graph Lori Arbitrum](https://bridge.arbitrum.io/?l2ChainId=42161) +- [Gbe lọ si](https://transferto.xyz/swap) +- [Afara Connext](https://bridge.connext.network/) - [Hop Exchange](https://app.hop.exchange/#/send?token=ETH) -To take advantage of using The Graph on L2, use this dropdown switcher to toggle between chains. +Lati lo anfani ti lilo Bola lori L2, lo switcher dropdown yii lati yi laarin awọn ẹwọn. -![Dropdown switcher to toggle Arbitrum](/img/arbitrum-screenshot-toggle.png) +![Faa silẹ switcher lati yi Arbitrum](/img/arbitrum-screenshot-toggle.png) -## As a subgraph developer, data consumer, Indexer, Curator, or Delegator, what do I need to do now? +## Gẹgẹbi olupilẹṣẹ Subgrafu, Olumulo data, Atọka, Curator, tabi Aṣoju, kini Mo nilo lati ṣe ni bayi? -There is no immediate action required, however, network participants are encouraged to begin moving to Arbitrum to take advantage of the benefits of L2. +Ko si igbese lẹsẹkẹsẹ ti o nílò láti ṣe, sibẹsibẹ, awọn olukopa nẹtiwọọki ni iwuri lati bẹrẹ gbigbe si Arbitrum lati lo awọn anfani ti L2 -Core developer teams are working to create L2 transfer tools that will make it significantly easier to move delegation, curation, and subgraphs to Arbitrum. Network participants can expect L2 transfer tools to be available by summer of 2023. +Awọn ẹgbẹ olupilẹṣẹ imojuto n ṣiṣẹ lati ṣẹda awọn irinṣẹ gbigbe L2 ti yoo jẹ ki o rọrun pupọ lati gbe aṣoju, itọju, ati awọn ipin si Arbitrum. Awọn olukopa nẹtiwọki le nireti awọn irinṣẹ gbigbe L2 lati wa nipasẹ ooru ti 2023 -As of April 10th, 2023, 5% of all indexing rewards are being minted on Arbitrum. As network participation increases, and as the Council approves it, indexing rewards will gradually shift from Ethereum to Arbitrum, eventually moving entirely to Arbitrum. +Ni Oṣu Kẹrin Ọjọ Kẹ̀wá tí Ọdun 2023, 5% ti gbogbo awọn ere alatoka ni a nṣe lori Arbitrum. Bi ikopa nẹtiwọọki ti n pọ si, ati bi Igbimọ ṣe fọwọsi rẹ, awọn ere itọka yoo yipada ni kutukutu lati Ethereum si Arbitrum, nikẹhin gbigbe patapata si Arbitrum. -## If I would like to participate in the network on L2, what should I do? +## Ti èmi yóò fẹ́ láti kópa nínú nẹtiwọki lórí L2, kíni ó yẹ kí ń ṣé? 
-Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). +Jọwọ ṣe iranlọwọ [ṣe idanwo netiwọki](https://testnet.thegraph.com/explorer) lori L2 ki o jabo esi nipa iriri rẹ ni [Discord](https://discord.gg/graphprotocol). -## Are there any risks associated with scaling the network to L2? +## Ṣe awọn ewu eyikeyi wa ni nkan ṣe pẹlu iwọn nẹtiwọọki si L2? -All smart contracts have been thoroughly [audited](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). +Gbogbo awọn adehun ọlọgbọn ti jẹ daradara [ayewo pipe](https://github.com/graphprotocol/contracts/blob/dev/audits/OpenZeppelin/2022-07-graph-arbitrum-bridge-audit.pdf). -Everything has been tested thoroughly, and a contingency plan is in place to ensure a safe and seamless transition. Details can be found [here](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). +Ohùn gbogbo tí ní ìdánwò dáradára, àti pé èrò airotẹlẹ kàn wá ní ayé láti ríi dájú wípé ìyípadà ailewu àti ailẹgbẹ. Àwọn àlàyé lè ṣé rí [níbi yìí](https://forum.thegraph.com/t/gip-0037-the-graph-arbitrum-deployment-with-linear-rewards-minted-in-l2/3551#risks-and-security-considerations-20). -## Will existing subgraphs on Ethereum continue to work? +## Ǹjẹ́ àwọn Subgrafu tí o wà tẹlẹ lórí Ethereum yóò tẹsiwaju láti ṣiṣẹ́? -Yes, The Graph Network contracts will operate in parallel on both Ethereum and Arbitrum until moving fully to Arbitrum at a later date. +Bẹẹni, Àwọn àdéhùn Nẹtiwọọki lórí The Graph yóò ṣiṣẹ́ ní afiwe lórí méjèèjì Ethereum àti Arbitrum títí gbígbé ní kíkún sì Arbitrum ni ọjọ mìíràn. -## Will GRT have a new smart contract deployed on Arbitrum? +## Se GRT a ni awọn adehun ọlọgbọn ransogun lori Arbitrum? -Yes, GRT has an additional [smart contract on Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). However, the Ethereum mainnet [GRT contract](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) will remain operational. +Bẹẹni, GRT ni afikun [adehun ọlọgbọn lori Arbitrum](https://arbiscan.io/address/0x9623063377ad1b27544c965ccd7342f7ea7e88c7). Sibẹsibẹ, mainnet ti Ethereum[adehun GRT](https://etherscan.io/token/0xc944e90c64b2c07662a292be6244bdf05cda44a7) yoo wa ni ṣiṣiṣẹ. -## Billing on Arbitrum FAQs +## Awọn ibeere Nigbagbogbo nípa Ìdíyelé Lórí Arbitrum -## What do I need to do about the GRT in my billing balance? +## Kini mo nilo lati ṣe nipa GRT ni iwọntunwọnsi ìdíyelé mi? -Nothing! Your GRT has been securely migrated to Arbitrum and is being used to pay for queries as you read this. +Ko si nkankan! GRT rẹ ti lọ ni aabo si Arbitrum ati pe o nlo lati sanwo fun awọn ibeere bi o ṣe n ka eyi. -## How do I know my funds have migrated securely to Arbitrum? +## Báwo ní mo ṣé má mọ̀ pe àwọn owó mí tí lọ ní ààbò sì Arbitrum? -All GRT billing balances have already been successfully migrated to Arbitrum. You can view the billing contract on Arbitrum [here](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). +Gbogbo àwọn iwọntunwọnsi ìdíyelé GRT tí ṣàṣeyọrí tẹ́lẹ̀ lọ sí Arbitrum. Ó lè wò ìwé àdéhùn ìdíyelé lórí Arbitrum [níbi yìí](https://arbiscan.io/address/0x1B07D3344188908Fb6DEcEac381f3eE63C48477a). -## How do I know the Arbitrum bridge is secure? +## Báwo ní mó ṣé má mọ pé Afárá Arbitrum wá ní ààbò? 
-The bridge has been [heavily audited](https://code4rena.com/contests/2022-10-the-graph-l2-bridge-contest) to ensure safety and security for all users. +Afárá náà tí [ṣe ayẹwo pupọ](https://code4rena.com/contest/2022-10-the-graph-l2-bridge-contest) láti ríi dájú wípé ààbò tó péye tí wá fún gbogbo àwọn olumulo. -## What do I need to do if I'm adding fresh GRT from my Ethereum mainnet wallet? +## Kíni mo nílò láti ṣé tí mo bá n ṣafikun GRT tuntun lati apamọwọ mainnet Ethereum? -Adding GRT to your Arbitrum billing balance can be done with a one-click experience in [Subgraph Studio](https://thegraph.com/studio/). You'll be able to easily bridge your GRT to Arbitrum and fill your API keys in one transaction. +Ṣafikun GRT si iwọntunwọnsi ìdíyelé Arbitrum le ṣee ṣe pẹlu iriri titẹ ẹyọkan ni [Studio Subgrafu](https://thegraph.com/studio/). Iwọ yoo ni irọrun di GRT rẹ si Arbitrum ki o kun awọn bọtini API rẹ ni iṣowo kan. -Visit the [Billing page](https://thegraph.com/docs/en/billing/) for more detailed instructions on adding, withdrawing, or acquiring GRT. +Ṣabẹwo [oju-iwe ìdíyelé](https://thegraph.com/docs/en/billing/) fún àwọn ìlànà àlàyé díẹ̀ síi lórí fífi kún, yíyọ kúrò, tàbí gbígbà GRT. diff --git a/website/pages/yo/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/yo/arbitrum/l2-transfer-tools-faq.mdx index 356ce0ff6c47..c882f7db0bd8 100644 --- a/website/pages/yo/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/yo/arbitrum/l2-transfer-tools-faq.mdx @@ -2,314 +2,410 @@ title: L2 Transfer Tools FAQ --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +## General -## What are L2 Transfer Tools? +### Kini Awọn irinṣẹ Gbigbe L2? -The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. For each protocol participant, a set of transfer helpers will be shared to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## Can I use the same wallet I use on Ethereum mainnet? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. -If you are using an [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account) wallet you can use the same address. If your Ethereum mainnet wallet is a contract (e.g. a multisig) then you must specify an [Arbitrum wallet address](/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the-graph-on-l2) where your transfer will be sent. Please check the address carefully as any transfers to an incorrect address can result in permanent loss. If you'd like to use a multisig on L2, make sure you deploy a multisig contract on Arbitrum One. +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. -## Subgraph Transfer +### Ṣe Mo le lo apamọwọ kanna ti Mo lo lori mainnet Ethereum? -## How do I transfer my subgraph? 
+Ti o ba nlo apamọwọ [EOA] \(https://ethereum.org/en/developers/docs/accounts/#types-of-account) o le lo adirẹsi kanna. Ti apamọwọ mainnet Ethereum jẹ adehun (fun apẹẹrẹ multisig) lẹhinna o gbọdọ pato [adirẹsi apamọwọ Arbitrum] \(/arbitrum/arbitrum-faq/#what-do-i-need-to-do-to-use-the- graph-on-l2) nibiti gbigbe rẹ yoo ti firanṣẹ. Jọwọ ṣayẹwo adirẹsi naa ni pẹkipẹki nitori eyikeyi gbigbe si adirẹsi ti ko tọ le ja si isonu ayeraye. Ti o ba fẹ lati lo multisig lori L2, rii daju pe o ran adehun multisig kan lori Arbitrum Ọkan. -To transfer your subgraph, you will need to complete the following steps: +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. -1. Initiate the transfer on Ethereum mainnet +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. -2. Wait 20 minutes for confirmation +### Kini yoo ṣẹlẹ ti Emi ko ba pari gbigbe mi ni awọn ọjọ meje? -3. Confirm subgraph transfer on Arbitrum\* +Awọn Irinṣẹ Gbigbe L2 lo ẹrọ abinibi Arbitrum lati firanṣẹ awọn ifiranṣẹ lati L1 si L2. Ilana yii ni a pe ni “tiketi ti a tun le gbiyanju” ati pe gbogbo awọn afara ami abinibi lo, pẹlu Afara Arbitrum GRT. O le ka diẹ sii nipa awọn tikẹti atungbiyanju ninu [Arbitrum docs] \(https://docs.arbitrum.io/arbos/l1-to-l2-messaging). -4. Finish publishing subgraph on Arbitrum +Nigbati o ba gbe awọn ohun-ini rẹ (ipin-ilẹ, igi, aṣoju tabi itọju) si L2, ifiranṣẹ kan ni a firanṣẹ nipasẹ Afara Arbitrum GRT eyiti o ṣẹda tikẹti atunda ni L2. Ọpa gbigbe pẹlu diẹ ninu iye ETH ninu idunadura naa, ti o lo lati okan) sanwo lati ṣẹda tikẹti ati meji) sanwo fun gaasi lati ṣiṣẹ tikẹti ni L2. Bibẹẹkọ, nitori awọn idiyele gaasi le yatọ ni akoko titi tikẹti naa yoo ṣetan lati ṣiṣẹ ni L2, o ṣee ṣe pe igbiyanju ipaniyan adaṣe kuna. Nigbati iyẹn ba ṣẹlẹ, Afara Arbitrum yoo jẹ ki tikẹti atunwi laaye fun awọn ọjọ meje, ati pe ẹnikẹni le tun gbiyanju “rapada” tikẹti naa (eyiti o nilo apamọwọ pẹlu diẹ ninu ETH bridged si Arbitrum). -5. Update Query URL (recommended) +Eyi ni ohun ti a pe ni igbesẹ “Jẹrisi” ni gbogbo awọn irinṣẹ gbigbe - yoo ṣiṣẹ laifọwọyi ni ọpọlọpọ awọn ọran, nitori ipaniyan adaṣe nigbagbogbo ṣaṣeyọri, ṣugbọn o ṣe pataki pe ki o ṣayẹwo pada lati rii daju pe o kọja. Ti ko ba ṣaṣeyọri ati pe ko si awọn igbiyanju aṣeyọri ni awọn ọjọ meje, Afara Arbitrum yoo sọ tikẹti naa silẹ, ati pe awọn ohun-ini rẹ (alapaya, igi, aṣoju tabi itọju) yoo sọnu ati pe ko le gba pada. Awọn devs core Graph ni eto ibojuwo ni aye lati ṣawari awọn ipo wọnyi ati gbiyanju lati ra awọn tikẹti ṣaaju ki o to pẹ, ṣugbọn o jẹ ojuṣe rẹ nikẹhin lati rii daju pe gbigbe rẹ ti pari ni akoko. Ti o ba ni wahala lati jẹrisi idunadura rẹ, jọwọ de ọdọ ni lilo [fọọmu yii](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms) ati core devs yoo wa nibẹ ran o. + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? 
+ +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + +## Gbigbe Subgraph + +### Bawo ni Mo ṣe gbe ipin ipin mi? + + + +Lati gbe subgraph rẹ, iwọ yoo nilo lati pari awọn igbesẹ wọnyi: + +1. Bẹrẹ gbigbe lori mainnet Ethereum + +2. Duro ogun iṣẹju fun ìmúdájú + +3. Jẹrisi gbigbe subgraph lori Arbitrum\* + +4. Pari titẹjade ipin lori Arbitrum + +5. Ṣe imudojuiwọn URL ibeere (a ṣeduro) \*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Where should I initiate my transfer from? +### Nibo ni Mo yẹ ki n bẹrẹ gbigbe mi lati? -You can initiate your transfer from the [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) or any subgraph details page. Click the "Transfer Subgraph" button in the subgraph details page to start the transfer. +O le pilẹṣẹ gbigbe rẹ lati [Subgraph Studio](https://thegraph.com/studio/), [Explorer,](https://thegraph.com/explorer) tabi eyikeyi oju-iwe awọn alaye ipin. Tẹ bọtini “Gbigbe lọ si ibomii” ni oju-iwe awọn alaye ipin lati bẹrẹ gbigbe. -## How long do I need to wait until my subgraph is transferred +### Bawo ni Mo nilo lati duro titi ti ipin mi yoo fi gbe lọ -The transfer time takes approximately 20 minutes. The Arbitrum bridge is working in the background to complete the bridge transfer automatically. In some cases, gas costs may spike and you will need to confirm the transaction again. +Akoko gbigbe gba to iṣẹju 20. Afara Arbitrum n ṣiṣẹ ni abẹlẹ lati pari gbigbe afara laifọwọyi. Ni awọn igba miiran, awọn idiyele gaasi le pọ si ati pe iwọ yoo nilo lati jẹrisi idunadura naa lẹẹkansi. -## Will my subgraph still be discoverable after I transfer it to L2? +### Njẹ ipin-ipin mi yoo tun jẹ awari lẹhin ti Mo gbe lọ si L2? -Your subgraph will only be discoverable on the network it is published to. For example, if your subgraph is on Arbitrum One, then you can only find it in Explorer on Arbitrum One and will not be able to find it on Ethereum. Please ensure that you have Arbitrum One selected in the network switcher at the top of the page to ensure you are on the correct network.  After the transfer, the L1 subgraph will appear as deprecated. +Ipin-ipin rẹ yoo jẹ awari nikan lori nẹtiwọki ti o ti gbejade si. Fun apẹẹrẹ, ti ipin rẹ ba wa lori Arbitrum Ọkan, lẹhinna o le rii nikan ni Explorer lori Arbitrum Ọkan ati pe kii yoo ni anfani lati rii lori Ethereum. Jọwọ rii daju pe o ni Arbitrum Ọkan ti a yan ni switcher nẹtiwọki ni oke oju-iwe lati rii daju pe o wa lori nẹtiwọki to pe. Lẹhin gbigbe, ipin L1 yoo han bi a ti parẹ. 
-## Does my subgraph need to be published to transfer it? +### Ṣe ipin mi nilo lati ṣe atẹjade lati gbe lọ bi? -To take advantage of the subgraph transfer tool, your subgraph must be already published to Ethereum mainnet and must have some curation signal owned by the wallet that owns the subgraph. If your subgraph is not published, it is recommended you simply publish directly on Arbitrum One - the associated gas fees will be considerably lower. If you want to transfer a published subgraph but the owner account hasn't curated any signal on it, you can signal a small amount (e.g. 1 GRT) from that account; make sure to choose "auto-migrating" signal. +Lati lo anfani ohun elo gbigbe subgraph, ipin rẹ gbọdọ wa ni atẹjade tẹlẹ si mainnet Ethereum ati pe o gbọdọ ni ifihan agbara mimu diẹ ninu ohun ini nipasẹ apamọwọ ti o ni ipin. Ti a ko ba ṣe atẹjade ipin-iwe rẹ, o gba ọ niyanju pe o kan tẹjade taara lori Arbitrum Ọkan - awọn idiyele gaasi ti o somọ yoo dinku pupọ. Ti o ba fẹ gbe ipin ti a tẹjade ṣugbọn akọọlẹ oniwun ko ti ṣe ami ifihan eyikeyi lori rẹ, o le ṣe ifihan agbara kekere kan (fun apẹẹrẹ Okan GRT) lati akọọlẹ yẹn; rii daju pe o yan ifihan “iṣilọ-laifọwọyi”. -## What happens to the Ethereum mainnet version of my subgraph after I transfer to Arbitrum? +### Kini yoo ṣẹlẹ si ẹya mainnet Ethereum ti subgraph mi lẹhin gbigbe si Arbitrum? -After transferring your subgraph to Arbitrum, the Ethereum mainnet version will be deprecated. We recommend you update your query URL within 48 hours. However, there is a grace period in place that keeps your mainnet URL functioning so that any third-party dapp support can be updated. +Ifipo ipin ipin rẹ lọ si Arbitrum, ẹya mainnet Ethereum yoo jẹ idinku. A ṣeduro pe ki o ṣe URL ibeere rẹ laarin awọn wakati ogoji mejidinlogobon. Sibẹsibẹ, akoko oore-ọfẹ wa ni aaye ti o jẹ ki URL mainnet rẹ iru ki eyikeyi atilẹyin agbara le ni irú. -## After I transfer, do I also need to re-publish on Arbitrum? +### Lẹhin gbigbe, ṣe Mo tun nilo lati tun gbejade lori Arbitrum? -After the 20 minute transfer window, you will need to confirm the transfer with a transaction in the UI to finish the transfer, but the transfer tool will guide you through this. Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +Lẹhin window gbigbe iṣẹju Ogun, iwọ yoo nilo lati jẹrisi gbigbe pẹlu idunadura kan ninu UI lati pari gbigbe, ṣugbọn ọpa gbigbe yoo ṣe itọsọna fun ọ nipasẹ eyi. Ipari ipari L1 rẹ yoo tẹsiwaju lati ni atilẹyin lakoko window gbigbe ati akoko oore lẹhin. A gba ọ niyanju pe ki o ṣe imudojuiwọn aaye ipari rẹ nigbati o rọrun fun ọ. -## Will there be a down-time to my endpoint while re-publishing? +### Will my endpoint experience downtime while re-publishing? -There should be no down time when using the transfer tool to move your subgraph to L2.Your L1 endpoint will continue to be supported during the transfer window and a grace period after. It is encouraged that you update your endpoint when convenient for you. +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## Is publishing and versioning the same on L2 as Ethereum Ethereum mainnet? +### Njẹ titẹjade ati ikede jẹ kanna lori L2 bi Ethereum Ethereum mainnet? -Yes. Be sure to select Arbitrum One as your published network when publishing in Subgraph Studio. 
In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## Will my subgraph's curation move with my subgraph? +### Njẹ arosọ ipin-ipin mi yoo gbe pẹlu ipin-ipin mi bi? -If you've chosen auto-migrating signal, 100% of your own curation will move with your subgraph to Arbitrum One. All of the subgraph's curation signal will be converted to GRT at the time of the transfer, and the GRT corresponding to your curation signal will be used to mint signal on the L2 subgraph. +Ti o ba ti yan ifihan agbara-iṣiwa-laifọwọyi, ogorun daji ti itọju tirẹ yoo gbe pẹlu ipin rẹ si Arbitrum Ọkan. Gbogbo ifihan agbara isọdi-ipin naa yoo yipada si GRT ni akoko gbigbe, ati pe GRT ti o baamu si ifihan agbara itọju rẹ yoo ṣee lo lati ifihan mint lori ipin L2. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. +Awọn olutọju miiran le yan boya lati yọkuro ida wọn ti GRT, tabi tun gbe lọ si L2 si ifihan agbara mint lori ipin kanna. -## Can I move my subgraph back to Ethereum mainnet after I transfer? +### Ṣe Mo le gbe ipin mi pada si mainnet Ethereum lẹhin gbigbe? -Once transferred, your Ethereum mainnet version of this subgraph will be deprecated. If you would like to move back to mainnet, you will need to redeploy and publish back to mainnet. However, transferring back to Ethereum mainnet is strongly discouraged as indexing rewards will eventually be distributed entirely on Arbitrum One. +Ni kete ti o ba ti gbe lọ, ẹya mainnet Ethereum rẹ ti ipin-ipin yii yoo jẹ idinku. Ti o ba fẹ lati pada si mainnet, iwọ yoo nilo lati tun gbejade ati gbejade pada si mainnet. Bibẹẹkọ, gbigbe pada si mainnet Ethereum jẹ irẹwẹsi pupọ bi awọn ere itọka yoo bajẹ pin kaakiri patapata lori Arbitrum Ọkan. -## Why do I need bridged ETH to complete my transfer? +### Kini idi ti MO nilo ETH afara lati pari gbigbe mi? -Gas fees on Arbitrum One are paid using bridged ETH (i.e. ETH that has been bridged to Arbitrum One). However, the gas fees are significantly lower when compared to Ethereum mainnet. +Awọn owo gaasi lori Arbitrum Ọkan ni a san ni lilo ETH ti o ni afara (ie ETH ti o ti di afara si Arbitrum Ọkan). Sibẹsibẹ, awọn idiyele gaasi dinku pupọ nigbati a bawe si mainnet Ethereum. -## Curation Signal +## Aṣoju -## How do I transfer my curation? +### Bawo ni Mo ṣe gbe aṣoju mi lọ? -To transfer your curation, you will need to complete the following steps: + -1. Initiate signal transfer on Ethereum mainnet +Lati gbe aṣoju rẹ, iwọ yoo nilo lati pari awọn igbesẹ wọnyi: -2. Specify an L2 Curator address\* +1. Pilẹṣẹ gbigbe aṣoju lori mainnet Ethereum +2. Duro ogun iṣẹju fun ìmúdájú +3. Jẹrisi gbigbe aṣoju lori Arbitrum -3. Wait 20 minutes for confirmation +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*If necessary - i.e. you are using a contract address. 
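
Since gas on Arbitrum One is paid in bridged ETH, as noted above, it can help to confirm that the wallet you will use to confirm the transfer actually holds some ETH on L2. A minimal sketch against the public Arbitrum One JSON-RPC endpoint (any Arbitrum One RPC will do; the address is a placeholder):

```bash
# Sketch: check an address's ETH balance on Arbitrum One.
curl -s https://arb1.arbitrum.io/rpc \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"eth_getBalance","params":["<your-address>","latest"]}'
# The result is a hex-encoded wei value, e.g. 0x2386f26fc10000 is 0.01 ETH.
```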
+### Kini yoo ṣẹlẹ si awọn ere mi ti Mo ba bẹrẹ gbigbe pẹlu ipin ṣiṣi lori mainnet Ethereum? -## How will I know if the subgraph I curated has moved to L2? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -When viewing the subgraph details page, a banner will notify you that this subgraph has been transferred. You can follow the prompt to transfer your curation. You can also find this information on the subgraph details page of any subgraph that has moved. +### Kini yoo ṣẹlẹ ti Atọka ti Mo ṣe aṣoju lọwọlọwọ si ko si lori Arbitrum Ọkan? -## What if I do not wish to move my curation to L2? +Ohun elo gbigbe L2 yoo ṣiṣẹ nikan ti Atọka ti o ti fi ranṣẹ si ti gbe igi tiwọn lọ si Arbitrum. -When a subgraph is deprecated you have the option to withdraw your signal. Similarly, if a subgraph has moved to L2, you can choose to withdraw your signal in Ethereum mainnet or send the signal to L2. +### Njẹ awọn aṣoju ni aṣayan lati ṣe aṣoju si Atọka miiran bi? -## How do I know my curation successfully transferred? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -Signal details will be accessible via Explorer approximately 20 minutes after the L2 transfer tool is initiated. +### Kini ti Emi ko ba le rii Atọka ti Mo n ṣe aṣoju si L2? -## Can I transfer my curation on more than one subgraph at a time? +Ohun elo gbigbe L2 yoo ṣe awari Atọka ti o ti fi ranṣẹ tẹlẹ si. -There is no bulk transfer option at this time. +### Ṣe Emi yoo ni anfani lati dapọ ati baramu tabi 'tan' awọn aṣoju mi kọja tuntun tabi pupọ Awọn atọka dipo Atọka iṣaaju bi? -## Indexer Stake +Ọpa gbigbe L2 yoo ma gbe aṣoju rẹ nigbagbogbo si Atọka kanna ti o fi ranṣẹ si iṣaaju. Ni kete ti o ba ti lọ si L2, o le yọkuro, duro fun akoko thawing, ki o pinnu boya o fẹ lati pin awọn aṣoju rẹ. -## How do I transfer my stake to Arbitrum? +### Ṣe Mo wa labẹ akoko itutu agbaiye tabi ṣe Mo le yọkuro lẹsẹkẹsẹ lẹhin lilo ohun elo gbigbe aṣoju L2? -To transfer your stake, you will need to complete the following steps: +Ọpa gbigbe gba ọ laaye lati gbe lọ si L2 lẹsẹkẹsẹ. Ti o ba fẹ lati yọkuro iwọ yoo ni lati duro fun akoko thawing naa. Bibẹẹkọ, ti Atọka ba ti gbe gbogbo igi wọn si L2, o le yọkuro lori mainnet Ethereum lẹsẹkẹsẹ. -1. Initiate stake transfer on Ethereum mainnet +### Njẹ awọn ere mi le ni ipa ni odi ti Emi ko ba gbe aṣoju mi lọ? -2. Wait 20 minutes for confirmation +O ti wa ni ifojusọna pe gbogbo ikopa nẹtiwọki yoo gbe lọ si Arbitrum Ọkan ni ojo iwaju. -3. Confirm stake transfer on Arbitrum +### Igba melo ni o gba lati pari gbigbe ti aṣoju mi si L2? -\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. 
If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Will all of my stake transfer? +### Ṣe MO le gbe awọn aṣoju mi ti MO ba nlo iwe adehun igbẹkẹle GRT/apamọwọ titiipa ami? -You can choose how much of your stake to transfer. If you choose to transfer all of your stake at once, you will need to close any open allocations first. +Bẹẹni! Ilana naa yatọ si diẹ nitori awọn iwe adehun gbigbe ko le firanṣẹ ETH ti o nilo lati sanwo fun gaasi L2, nitorinaa o nilo lati fi sii tẹlẹ. Ti o ko ba ni iwe adehun ni kikun, iwọ yoo tun ni lati kọkọ kọkọ iwe adehun ifasilẹ ẹlẹgbẹ kan lori L2 ati pe yoo ni anfani lati gbe aṣoju lọ si iwe adehun isọdọtun L2 yii. UI lori Explorer le ṣe amọna rẹ nipasẹ ilana yii nigbati o ba ti sopọ si Explorer nipa lilo apamọwọ titiipa vesting. -If you plan on transferring parts of your stake over multiple transactions, you must always specify the same beneficiary address. +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? -Note: You must meet the minimum stake requirements on L2 the first time you use the transfer tool. Indexers must send the minimum 100k GRT (when calling this function the first time). If leaving a portion of stake on L1, it must also be over the 100k GRT minimum and be sufficient (together with your delegations) to cover your open allocations. +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## How much time do I have to confirm my stake transfer to Arbitrum? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\* You must confirm your transaction to complete the stake transfer on Arbitrum. This step must be completed within 7 days or stake could be lost. +### Ṣe owo-ori aṣoju eyikeyi wa? -## What if I have open allocations? +Rara Awọn ami-ami ti o gba lori L2 ni a fi ranṣẹ si Atọka pàtó kan fun aṣoju aṣoju ti a ti sọ tẹlẹ laisi gbigba owo-ori aṣoju kan. -If you are not sending all of your stake, the L2 transfer tool will validate that at least the minimum 100k GRT remains in Ethereum mainnet and your remaining stake and delegation is enough to cover any open allocations. You may need to close open allocations if your GRT balance does not cover the minimums + open allocations. +### Will my unrealized rewards be transferred when I transfer my delegation? 
-## Using the transfer tools, is it necessary to wait 28 days to unstake on Ethereum mainnet before transferring? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -No, you can transfer your stake to L2 immediately, there's no need to unstake and wait before using the transfer tool. The 28-day wait only applies if you'd like to withdraw the stake back to your wallet, on Ethereum mainnet or L2. +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## How long will it take to transfer my stake? +### Is moving delegations to L2 mandatory? Is there a deadline? -It will take approximately 20 minutes for the L2 transfer tool to complete transferring your stake. +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## Do I have to index on Arbitrum before I transfer my stake? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -You can effectively transfer your stake first before setting up indexing, but you will not be able to claim any rewards on L2 until you allocate to subgraphs on L2, index them, and present POIs. +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## Can Delegators move their delegation before I move my indexing stake? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -No, in order for Delegators to transfer their delegated GRT to Arbitrum, the Indexer they are delegating to must be active on L2. +### I don't see a button to transfer my delegation. Why is that? -## Can I transfer my stake if I'm using a GRT vesting contract / token lock wallet? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. -Yes! The process is a bit different, because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the stake to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## Delegation +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -## How do I transfer my delegation? 
+​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -To transfer your delegation, you will need to complete the following steps: +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -1. Initiate delegation transfer on Ethereum mainnet +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. -2. Wait 20 minutes for confirmation +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. -3. Confirm delegation transfer on Arbitrum +## Ifihan agbara Curation -\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +### Bawo ni Mo ṣe gbe itọju mi ​​lọ? -## What happens to my rewards if I initiate a transfer with an open allocation on Ethereum mainnet? +Lati gbe itọju rẹ, iwọ yoo nilo lati pari awọn igbesẹ wọnyi: -If the indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your indexer(s), consider discussing with them to find the best time to do your transfer. +1. Bẹrẹ gbigbe ifihan agbara lori mainnet Ethereum -## What happens if the Indexer I currently delegate to isn't on Arbitrum One? +2. Pato adirẹsi L2 Curator kan \* -The L2 transfer tool will only be enabled if the Indexer you have delegated to has transferred their own stake to Arbitrum. +3. Duro ogun iṣẹju fun ìmúdájú -## Do Delegators have the option to delegate to another Indexer? +\*Ti o ba jẹ dandan - ti o je o nlo adirẹsi adehun kan. -If you wish to delegate to another Indexer, you can transfer to the same indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. +### Bawo ni Mo ṣe mọ boya ipin ti mo ti ṣabọ ti gbe lọ si L2? -## What if I can't find the Indexer I'm delegating to on L2? +Nigbati o ba n wo oju-iwe awọn alaye ipin, asia kan yoo sọ fun ọ pe a ti gbe ipin-ipin yii lọ. O le tẹle itọka naa lati gbe itọju rẹ. O tun le wa alaye yii lori oju-iwe awọn alaye ipin-iwe ti eyikeyi ipin ti o ti gbe. -The L2 transfer tool will automatically detect the Indexer you previously delegated to. +### Kini ti Emi ko ba fẹ gbe itọju mi si L2? -## Will I be able to mix and match or 'spread' my delegation across new or several Indexers instead of the prior Indexer? +Nigbati subgraph ti wa ni idinku o ni aṣayan lati yọọ ifihan agbara rẹ kuro. Bakanna, ti ipin kan ba ti lọ si L2, o le yan lati yọ ami ifihan rẹ kuro ni mainnet Ethereum tabi fi ami naa ranṣẹ si L2. 
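
As with the delegation check shown earlier, you can double-check a curation position from the command line once the transfer tool reports success. This is only a sketch: the `curator` and `nameSignals` entity and field names are assumptions taken from the public network subgraph schema, so confirm them in the Explorer playground first.

```bash
# Sketch: list the curation (name signal) positions of an address on Arbitrum One.
curl -s -X POST \
  -H 'Content-Type: application/json' \
  -d '{"query": "{ curator(id: \"<your-address-lowercase>\") { nameSignals { signalledTokens subgraph { id } } } }"}' \
  https://api.thegraph.com/subgraphs/name/graphprotocol/graph-network-arbitrum
```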
-The L2 transfer tool will always move your delegation to the same Indexer you delegated to previously. Once you have moved to L2, you can undelegate, wait for the thawing period, and decide if you'd like to split up your delegation. +### Bawo ni Mo ṣe mọ pe itọju mi ​​ni gbigbe ni aṣeyọri? -## Am I subject to the cooldown period or can I withdraw immediately after using the L2 delegation transfer tool? +Awọn alaye ifihan yoo wa nipasẹ Explorer ni isunmọ awọn iṣẹju ogun lẹhin ti o ti bẹrẹ irinṣẹ gbigbe L2. -The transfer tool allows you to immediately move to L2. If you would like to undelegate you will have to wait for the thawing period. However, if an Indexer has transferred all of their stake to L2, you can withdraw on Ethereum mainnet immediately. +### Ṣe Mo le gbe iṣojukọ mi lori diẹ ẹ sii ju ipin-ipin kan ni akoko kan? -## Can my rewards be negatively impacted if I do not transfer my delegation? +Ko si aṣayan gbigbe lọpọlọpọ ni akoko yii. -It is anticipated that all network participation will move to Arbitrum One in the future. +## Atọka Indexer -## How long does it take to complete the transfer of my delegation to L2? +### Bawo ni Mo ṣe gbe igi mi si Arbitrum? -A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). +> AlAIgBA: Ti o ba n ṣipaya eyikeyi apakan ti GRT rẹ lọwọlọwọ lori Atọka rẹ, iwọ kii yoo ni anfani lati lo Awọn irinṣẹ Gbigbe L2. + + + +Lati gbe igi rẹ, iwọ yoo nilo lati pari awọn igbesẹ wọnyi: + +1. Bẹrẹ gbigbe igi lori mainnet Ethereum + +2. Duro ogun iṣẹju fun ìmúdájú + +3. Jẹrisi gbigbe igi lori Arbitrum + +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). + +### Ṣe gbogbo igi mi yoo gbe lọ? + +O le yan iye owo-ori rẹ lati gbe lọ. Ti o ba yan lati gbe gbogbo igi rẹ lọ ni ẹẹkan, iwọ yoo nilo lati tii eyikeyi awọn ipin ṣiṣi silẹ ni akọkọ. + +Ti o ba gbero lori gbigbe awọn apakan ti igi rẹ lori awọn iṣowo lọpọlọpọ, o gbọdọ pato adirẹsi alanfani kanna nigbagbogbo. -## Can I transfer my delegation if I'm using a GRT vesting contract/token lock wallet? +Akiyesi: O gbọdọ pade awọn ibeere igi to kere julọ lori L2 ni igba akọkọ ti o lo ọpa gbigbe. Awọn atọka gbọdọ firanṣẹ GRT ẹgbẹrun lọna ọgọrun ti o kere ju (nigbati o ba pe iṣẹ yii ni igba akọkọ). Ti o ba lọ kuro ni ipin ti igi lori L1, o tun gbọdọ jẹ lori ẹgbẹrun lọna ọgọrun GRT o kere ju ati pe o to (paapọ pẹlu awọn aṣoju rẹ) lati bo awọn ipin ṣiṣi rẹ. -Yes! The process is a bit different because vesting contracts can't forward the ETH needed to pay for the L2 gas, so you need to deposit it beforehand. 
If your vesting contract is not fully vested, you will also have to first initialize a counterpart vesting contract on L2 and will only be able to transfer the delegation to this L2 vesting contract. The UI on Explorer can guide you through this process when you've connected to Explorer using the vesting lock wallet. +### Elo akoko ni Mo ni lati jẹrisi gbigbe igi mi si Arbitrum? -## Is there any delegation tax? +\*\*\* O gbọdọ jẹrisi iṣowo rẹ lati pari gbigbe gbigbe lori Arbitrum. Igbesẹ yii gbọdọ pari laarin awọn ọjọ ogun tabi igi le padanu. -No. Received tokens on L2 are delegated to the specified Indexer on behalf of the specified Delegator without charging a delegation tax. +### Ti Mo ba ni awọn ipin ṣiṣi? -## Vesting Contract Transfer +Ti o ko ba firanṣẹ gbogbo awọn igi rẹ, ohun elo gbigbe L2 yoo fọwọsi pe o kere ju ẹgbẹrun lọna ọgọrun GRT ti o wa ninu mainnet Ethereum ati igi ti o ku ati aṣoju rẹ ti to lati bo eyikeyi awọn ipin ṣiṣi. O le nilo lati tii awọn ipin ṣiṣi silẹ ti iwọntunwọnsi GRT rẹ ko ba bo awọn ipin to kere julọ + awọn ipin ṣiṣi. -## How do I transfer my vesting contract? +### Lilo awọn irinṣẹ gbigbe, ṣe o jẹ dandan lati duro awọn ọjọ I lati yọkuro lori mainnet Ethereum ṣaaju gbigbe? -To transfer your vesting, you will need to complete the following steps: +Rara, o le gbe igi rẹ lọ si L2 lẹsẹkẹsẹ, ko si iwulo lati yọkuro ati duro ṣaaju lilo irinṣẹ gbigbe. Idaduro ọjọ kejidinlọgbọn kan nikan ti o ba fẹ lati yọkuro igi pada si apamọwọ rẹ, lori mainnet Ethereum tabi L2. -1. Initiate the vesting transfer on Ethereum mainnet +### Igba melo ni yoo gba lati gbe igi mi lọ? -2. Wait 20 minutes for confirmation +Yoo gba to iṣẹju ogun fun ohun elo gbigbe L2 lati pari gbigbe igi rẹ. -3. Confirm vesting transfer on Arbitrum +### Ṣe Mo ni lati ṣe atọka lori Arbitrum ṣaaju ki Mo to gbe igi mi? -## How do I transfer my vesting contract if I am only partially vested? +O le gbe igi rẹ ni imunadoko ṣaaju ki o to ṣeto titọka, ṣugbọn iwọ kii yoo ni anfani lati beere eyikeyi awọn ere lori L2 titi iwọ o fi pin si awọn ipin-ipin lori L2, atọka wọn, ati POI lọwọlọwọ. -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### Njẹ awọn aṣoju le gbe aṣoju wọn ṣaaju ki Mo to gbe igi atọka mi bi? -2. Send some locked GRT through the transfer tool contract, to L2 to initialize the L2 vesting lock. This will also set their L2 beneficiary address. +Rara, ni ibere fun Awọn aṣoju lati gbe GRT ti o jẹ aṣoju wọn lọ si Arbitrum, Atọka ti wọn nfiranṣẹ gbọdọ jẹ lọwọ lori L2. -3. Send their stake/delegation to L2 through the "locked" transfer tool functions in the L1Staking contract. +### Ṣe MO le gbe igi mi ti MO ba nlo iwe adehun GRT kan / apamọwọ titiipa ami? -4. Withdraw any remaining ETH from the transfer tool contract +Bẹẹni! Ilana naa yatọ si diẹ, nitori awọn iwe adehun gbigbe ko le firanṣẹ ETH ti o nilo lati sanwo fun gaasi L2, nitorinaa o nilo lati fi sii tẹlẹ. Ti o ko ba ni iwe adehun ni kikun, iwọ yoo tun ni lati kọkọ kọkọ iwe adehun ifasilẹ ẹlẹgbẹ kan lori L2 ati pe yoo ni anfani lati gbe igi naa si iwe adehun isọdọtun L2 yii. UI lori Explorer le ṣe amọna rẹ nipasẹ ilana yii nigbati o ba ti sopọ si Explorer nipa lilo apamọwọ titiipa vesting. -## How do I transfer my vesting contract if I am fully vested? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -For those that are fully vested, the process is similar: +​Yes. 
The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ -1. Deposit some ETH into the transfer tool contract (UI can help estimate a reasonable amount) +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? -2. Set your L2 address with a call to the transfer tool contract +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. -3. Send your stake/delegation to L2 through the "locked" transfer tool functions in the L1 Staking contract. +## Vesting Adehun Gbigbe -4. Withdraw any remaining ETH from the transfer tool contract +### Bawo ni Mo ṣe gbe iwe adehun gbigbe mi? -## Can I transfer my vesting contract to Arbitrum? +Lati gbe gbigbe rẹ, iwọ yoo nilo lati pari awọn igbesẹ wọnyi: -You can transfer your vesting contract's GRT balance to a vesting contract in L2. This is a prerequisite for transferring stake or delegation from your vesting contract to L2. The vesting contract must hold a nonzero amount of GRT (you can transfer a small amount like 1 GRT to it if needed). +1. Bẹrẹ gbigbe gbigbe lori ipilẹ Ethereum -When you transfer GRT from your L1 vesting contract to L2, you can choose the amount to send and you can do this as many times as you like. The L2 vesting contract will be initialized the first time you transfer GRT. +2. Duro ogun iṣẹju fun ìmúdájú -The transfers are done using a Transfer Tool that will be visible on your Explorer profile when you connect with the vesting contract account. +3. Jẹrisi gbigbe gbigbe lori Arbitrum -Please note that you will not be able to release/withdraw GRT from the L2 vesting contract until the end of your vesting timeline when your contract is fully vested. If you need to release GRT before then, you can transfer the GRT back to the L1 vesting contract using another transfer tool that is available for that purpose. +### Bawo ni Mo ṣe gbe iwe adehun gbigbe mi ti o ba ni ẹtọ ni apakan nikan? -If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. + -## I'm using my vesting contract to stake on mainnet. Can I transfer my stake to Arbitrum? +1. Fi diẹ ninu ETH sinu adehun ohun elo gbigbe (UI le ṣe iranlọwọ ṣe iṣiro iye to tọ) -Yes, but if your contract is still vesting, you can only transfer the stake so that it is owned by your L2 vesting contract. You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your stake to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +2. Firanṣẹ diẹ ninu awọn GRT titiipa nipasẹ iwe adehun irinṣẹ gbigbe, si L2 lati ṣe ipilẹṣẹ titiipa aṣọ-ikele L2. Eyi yoo tun ṣeto adirẹsi alanfani L2 wọn. -## I'm using my vesting contract to delegate on mainnet. Can I transfer my delegations to Arbitrum? +3. Firanṣẹ igi/aṣoju wọn si L2 nipasẹ awọn iṣẹ irinṣẹ gbigbe “titiipa” ni adehun L1Staking. -Yes, but if your contract is still vesting, you can only transfer the delegation so that it is owned by your L2 vesting contract. 
You must first initialize this L2 contract by transferring some GRT balance using the vesting contract transfer tool on Explorer. If your contract is fully vested, you can transfer your delegation to any address in L2, but you must set it beforehand and deposit some ETH for the L2 transfer tool to pay for L2 gas. +4. Yọọ kuro eyikeyi ETH ti o ku lati inu adehun irinṣẹ gbigbe -## Can I specify a different beneficiary for my vesting contract on L2? +### Bawo ni Mo ṣe gbe iwe adehun gbigbe mi ti o ba ni ẹtọ ni kikun? -Yes, the first time you transfer a balance and set up your L2 vesting contract, you can specify an L2 beneficiary. Make sure this beneficiary is a wallet that can perform transactions on Arbitrum One, i.e. it must be an EOA or a multisig deployed to Arbitrum One. + -If your contract is fully vested, you will not set up a vesting contract on L2; instead, you will set an L2 wallet address and this will be the receiving wallet for your stake or delegation on Arbitrum. +Fun awọn ti o ni ẹtọ ni kikun, ilana naa jẹ iru: -## My contract is fully vested. Can I transfer my stake or delegation to another address that is not an L2 vesting contract? +1. Fi diẹ ninu ETH sinu adehun ohun elo gbigbe (UI le ṣe iranlọwọ ṣe iṣiro iye to tọ) -Yes. If you haven't transferred any vesting contract balance to L2, and your vesting contract is fully vested, you should not transfer your vesting contract to L2. Instead, you can use the transfer tools to set an L2 wallet address, and directly transfer your stake or delegation to this regular wallet on L2. +2. Ṣeto adirẹsi L2 rẹ pẹlu ipe si adehun ohun elo gbigbe -This allows you to transfer your stake or delegation to any L2 address. +3. Firanṣẹ igi/aṣoju rẹ si L2 nipasẹ awọn iṣẹ irinṣẹ gbigbe “titiipa” ni adehun L1 Staking. -## My vesting contract is still vesting. How do I transfer my vesting contract balance to L2? +4. Yọọ kuro eyikeyi ETH ti o ku lati inu adehun irinṣẹ gbigbe -These steps only apply if your contract is still vesting, or if you've used this process before when your contract was still vesting. +### Ṣe Mo le gbe iwe adehun ifọwọsi mi si Arbitrum? -To transfer your vesting contract to L2, you will send any GRT balance to L2 using the transfer tools, which will initialize your L2 vesting contract: +O le gbe iwọntunwọnsi GRT iwe adehun gbigbe rẹ si iwe adehun ifọwọyi ni L2. Eyi jẹ ohun pataki ṣaaju fun gbigbe igi tabi aṣoju lati iwe adehun ifọwọyi si L2. Adehun iwe adehun gbọdọ mu iye ti kii ṣe odo ti GRT (o le gbe iye kekere kan bi okan GRT si ti o ba nilo). -1. Deposit some ETH into the transfer tool contract (this will be used to pay for L2 gas) +Nigbati o ba gbe GRT lati inu iwe adehun gbigbe L1 rẹ si L2, o le yan iye lati firanṣẹ ati pe o le ṣe eyi ni ọpọlọpọ igba bi o ṣe fẹ. Iwe adehun gbigbe L2 yoo wa ni ipilẹṣẹ ni igba akọkọ ti o ba gbe GRT. -2. Revoke protocol access to the vesting contract (needed for the next step) +Awọn gbigbe naa ni lilo Ọpa Gbigbe kan ti yoo han lori profaili Explorer rẹ nigbati o ba sopọ pẹlu iwe adehun iwe adehun. -3. Give protocol access to the vesting contract (will allow your contract to interact with the transfer tool) +Jọwọ ṣakiyesi pe iwọ kii yoo ni anfani lati tu silẹ/yọkuro GRT lati inu iwe adehun ifọwọyi L2 titi di opin akoko akoko isọwo rẹ nigbati adehun rẹ ti ni kikun. Ti o ba nilo lati tu GRT silẹ ṣaaju lẹhinna, o le gbe GRT pada si iwe adehun vesting L1 nipa lilo ohun elo gbigbe miiran ti o wa fun idi yẹn. -4. 
Specify an L2 beneficiary address\* and initiate the balance transfer on Ethereum mainnet +Ti o ko ba ti gbe iwọntunwọnsi iwe adehun gbigbe eyikeyi si L2, ati pe iwe adehun gbigbe rẹ ti ni kikun, o yẹ ki o ko gbe iwe adehun gbigbe si L2. Dipo, o le lo awọn irinṣẹ gbigbe lati ṣeto adirẹsi apamọwọ L2 kan, ati gbe igi tabi aṣoju rẹ taara si apamọwọ deede yii lori L2. -5. Wait 20 minutes for confirmation +### Mo n lo iwe adehun ifọwọsi mi lati ṣe igi lori mainnet. Ṣe Mo le gbe igi mi si Arbitrum? -6. Confirm the balance transfer on L2 +Bẹẹni, ṣugbọn ti iwe adehun rẹ ba tun jẹ ifasilẹ, o le gbe igi naa nikan ki o jẹ ohun ini nipasẹ iwe adehun gbigbe L2 rẹ. O gbọdọ kọkọ kọ iwe adehun L2 yii nipa gbigbe iwọntunwọnsi GRT diẹ nipa lilo ohun elo gbigbe adehun vesting lori Explorer. Ti adehun rẹ ba ni kikun, o le gbe igi rẹ si eyikeyi adirẹsi ni L2, ṣugbọn o gbọdọ ṣeto tẹlẹ ki o fi diẹ ninu ETH fun ohun elo gbigbe L2 lati sanwo fun gaasi L2. -\*If necessary - i.e. you are using a contract address. +### Mo n lo iwe adehun ifọwọsi mi lati ṣe aṣoju lori mainnet. Ṣe Mo le gbe awọn aṣoju mi lọ si Arbitrum? + +Bẹẹni, ṣugbọn ti iwe adehun rẹ ba tun jẹ ifasilẹ, o le gbe aṣoju naa nikan ki o jẹ ohun ini nipasẹ iwe adehun vesting L2 rẹ. O gbọdọ kọkọ kọ iwe adehun L2 yii nipa gbigbe iwọntunwọnsi GRT diẹ nipa lilo ohun elo gbigbe adehun vesting lori Explorer. Ti adehun rẹ ba ni kikun, o le gbe aṣoju rẹ si eyikeyi adirẹsi ni L2, ṣugbọn o gbọdọ ṣeto tẹlẹ ki o fi diẹ ninu ETH fun ohun elo gbigbe L2 lati sanwo fun gaasi L2. + +### Ṣe Mo le ṣe pato alanfani ti o yatọ fun iwe adehun iṣojukọ mi lori L2? + +Bẹẹni, ni igba akọkọ ti o ba gbe iwọntunwọnsi kan ati ṣeto iwe adehun isọdọtun L2 rẹ, o le pato alanfani L2 kan. Rii daju pe alanfani yii jẹ apamọwọ ti o le ṣe awọn iṣowo lori Arbitrum Ọkan, ie o gbọdọ jẹ EOA tabi multisig ti a fi ranṣẹ si Arbitrum Ọkan. + +Ti o ba jẹ pe adehun rẹ ni kikun, iwọ kii yoo ṣeto adehun gbigbe lori L2; dipo, iwọ yoo ṣeto adirẹsi apamọwọ L2 kan ati pe eyi yoo jẹ apamọwọ gbigba fun igi tabi aṣoju rẹ lori Arbitrum. + +### Iwe adehun mi ni kikun. Ṣe Mo le gbe igi mi tabi aṣoju si adirẹsi miiran ti kii ṣe iwe adehun gbigbe L2? + +Bẹẹni. Ti o ko ba ti gbe iwọntunwọnsi iwe adehun gbigbe eyikeyi si L2, ati pe iwe adehun gbigbe rẹ ti ni kikun, o yẹ ki o ko gbe iwe adehun gbigbe si L2. Dipo, o le lo awọn irinṣẹ gbigbe lati ṣeto adirẹsi apamọwọ L2 kan, ati gbe igi tabi aṣoju rẹ taara si apamọwọ deede yii lori L2. + +Eyi n gba ọ laaye lati gbe igi tabi aṣoju rẹ si eyikeyi adirẹsi L2. + +### Iwe adehun ibọwọ mi tun n gbe. Bawo ni Mo ṣe gbe iwọntunwọnsi adehun vesting mi si L2? + +Awọn igbesẹ wọnyi kan nikan ti iwe adehun rẹ ba tun jẹ idawọle, tabi ti o ba ti lo ilana yii ṣaaju nigba ti adehun rẹ tun n gbe. + +Lati gbe iwe adehun gbigbe rẹ lọ si L2, iwọ yoo fi iwọntunwọnsi GRT eyikeyi ranṣẹ si L2 ni lilo awọn irinṣẹ gbigbe, eyiti yoo ṣe ipilẹṣẹ adehun isọdọtun L2 rẹ: + +1. Fi diẹ ninu ETH sinu adehun irinṣẹ gbigbe (eyi yoo ṣee lo lati sanwo fun gaasi L2) + +2. Fagilee iraye si ilana si iwe adehun ifọwọyi (nilo fun igbesẹ ti nbọ) + +3. Fun ni iraye si ilana si iwe adehun gbigbe (yoo gba adehun rẹ laaye lati ṣe ajọṣepọ pẹlu ohun elo gbigbe) + +4. Pato adirẹsi alanfani L2 kan \* ki o bẹrẹ gbigbe iwọntunwọnsi lori mainnet Ethereum + +5. Duro ogun iṣẹju fun ìmúdájú + +6. Jẹrisi gbigbe iwọntunwọnsi lori L2 + +\*Ti o ba jẹ dandan - ti o je o nlo adirẹsi adehun kan. \*\*\*\*You must confirm your transaction to complete the balance transfer on Arbitrum. 
This step must be completed within 7 days or the balance could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## Can I move my vesting contract back to L1? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### Ṣe Mo le gbe iwe adehun igbẹ mi pada si L1? -There is no need to do so because your vesting contract is still in L1. When you use the transfer tools, you just create a new contract in L2 that is connected with your L1 vesting contract, and you can send GRT back and forth between the two. +Ko si iwulo lati ṣe bẹ nitori pe iwe adehun gbigbe rẹ tun wa ni L1. Nigbati o ba lo awọn irinṣẹ gbigbe, ti o kan ṣẹda titun kan guide ni L2 ti o ti wa ni ti sopọ pẹlu rẹ L1 vesting guide, ati awọn ti o le fi GRT pada ati siwaju laarin awọn meji. -## Why do I need to move my vesting contract to begin with? +### Kini idi ti Mo nilo lati gbe iwe adehun isọdọtun mi lati bẹrẹ pẹlu? -You need to set up an L2 vesting contract so that this account can own your stake or delegation on L2. Otherwise, there'd be no way for you to transfer the stake/delegation to L2 without "escaping" the vesting contract. +O nilo lati ṣeto iwe adehun gbigbe L2 kan ki akọọlẹ yii le ni igi tabi aṣoju rẹ lori L2. Bibẹẹkọ, ko si ọna fun ọ lati gbe igi/aṣoju si L2 laisi “sapade” iwe adehun gbigbe. -## What happens if I try to cash out my contract when it is only partially vested? Is this possible? +### Kini yoo ṣẹlẹ ti Mo ba gbiyanju lati san owo adehun mi nigba ti o jẹ ẹtọ ni apakan nikan? Ṣe eyi ṣee ṣe? -This is not a possibility. You can move funds back to L1 and withdraw them there. +Eyi kii ṣe ṣeeṣe. 
O le gbe awọn owo pada si L1 ki o yọ wọn kuro nibẹ. -## What if I don't want to move my vesting contract to L2? +### Ohun ti o ba ti Emi ko fẹ lati gbe mi vesting guide to L2? -You can keep staking/delegating on L1. Over time, you may want to consider moving to L2 to enable rewards there as the protocol scales on Arbitrum. Note that these transfer tools are for vesting contracts that are allowed to stake and delegate in the protocol. If your contract does not allow staking or delegating, or is revocable, then there is no transfer tool available. You will still be able to withdraw your GRT from L1 when available. +O le pa staking / asoju on L1. Ni akoko pupọ, o le fẹ lati ronu gbigbe si L2 lati mu awọn ere ṣiṣẹ nibẹ bi awọn iwọn ilana lori Arbitrum. Ṣe akiyesi pe awọn irinṣẹ gbigbe wọnyi wa fun awọn iwe adehun gbigbe ti o gba laaye lati gbe ati ṣe aṣoju ninu ilana naa. Ti iwe adehun rẹ ko ba gba laaye staking tabi yiyan, tabi jẹ yiyọ kuro, lẹhinna ko si ohun elo gbigbe ti o wa. Iwọ yoo tun ni anfani lati yọ GRT rẹ kuro ni L1 nigbati o wa. diff --git a/website/pages/yo/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/yo/arbitrum/l2-transfer-tools-guide.mdx index 28c6b7fc277e..be08610a2bc0 100644 --- a/website/pages/yo/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/yo/arbitrum/l2-transfer-tools-guide.mdx @@ -2,47 +2,47 @@ title: L2 Transfer Tools Guide --- -> L2 Transfer Tools have not been released yet. They are expected to be available in the summer of 2023. +Aworan naa ti jẹ ki o rọrun lati gbe lọ si L2 lori Arbitrum Ọkan. Fun alabaṣe ilana kọọkan, ṣeto ti Awọn irinṣẹ Gbigbe L2 wa lati ṣe gbigbe si L2 lainidi fun gbogbo awọn olukopa nẹtiwọọki. Awọn irinṣẹ wọnyi yoo nilo ki o tẹle awọn igbesẹ kan pato ti o da lori ohun ti o n gbe. -The Graph has made it easy to move to L2 on Arbitrum One. For each protocol participant, there are a set of L2 Transfer Tools to make transferring to L2 seamless for all network participants. These tools will require you to follow a specific set of steps depending on what you are transferring. +Diẹ ninu awọn ibeere loorekoore nipa awọn irinṣẹ wọnyi ni a dahun ni [Awọn irinṣẹ Gbigbe L2 FAQ] \(/arbitrum/l2-transfer-tools-faq). Awọn FAQ ni awọn alaye ti o jinlẹ ti bi o ṣe le lo awọn irinṣẹ, bawo ni wọn ṣe n ṣiṣẹ, ati awọn nkan lati tọju ni lokan nigba lilo wọn. -Some frequent questions about these tools are answered in the [L2 Transfer Tools FAQ](/arbitrum/l2-transfer-tools-faq). The FAQs contain in-depth explanations of how to use the tools, how they work, and things to keep in mind when using them. +## Bii o ṣe le gbe ipin rẹ lọ si Arbitrum (L2) -## How to transfer your subgraph to Arbitrum (L2) + -## Benefits of transferring your subgraphs +## Awọn anfani ti gbigbe awọn ipin rẹ -The Graph's community and core devs have [been preparing](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) to move to Arbitrum over the past year. Arbitrum, a layer 2 or "L2" blockchain, inherits the security from Ethereum but provides drastically lower gas fees. +Agbegbe Graph ati awọn devs mojuto ti [ti n murasilẹ](https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) lati lọ si Arbitrum ni ọdun to kọja. Arbitrum, Layer 2 tabi “L2” blockchain, jogun aabo lati ọdọ Ethereum ṣugbọn pese awọn idiyele gaasi ti o dinku pupọ. When you publish or upgrade your subgraph to The Graph Network, you're interacting with smart contracts on the protocol and this requires paying for gas using ETH. 
By moving your subgraphs to Arbitrum, any future updates to your subgraph will require much lower gas fees. The lower fees, and the fact that curation bonding curves on L2 are flat, also make it easier for other Curators to curate on your subgraph, increasing the rewards for Indexers on your subgraph. This lower-cost environment also makes it cheaper for Indexers to index and serve your subgraph. Indexing rewards will be increasing on Arbitrum and decreasing on Ethereum mainnet over the coming months, so more and more Indexers will be transferring their stake and setting up their operations on L2. -## Understanding what happens with signal, your L1 subgraph and query URLs +## Loye ohun ti o ṣẹlẹ pẹlu ifihan agbara, ipin L1 rẹ ati awọn URL ibeere -Transferring a subgraph to Arbitrum uses the Arbitrum GRT bridge, which in turn uses the native Arbitrum bridge, to send the subgraph to L2. The "transfer" will deprecate the subgraph on mainnet and send the information to re-create the subgraph on L2 using the bridge. It will also include the subgraph owner's signaled GRT, which must be more than zero for the bridge to accept the transfer. +Gbigbe ipin kan lọ si Arbitrum nlo Afara Arbitrum GRT, eyiti o nlo afara Arbitrum abinibi, lati firanṣẹ ipin si L2. “Gbigbe lọ” yoo sọ ipin-ipin naa kuro lori mainnet ki o fi alaye ranṣẹ lati tun-ṣeda ipin lori L2 ni lilo afara. Yoo tun pẹlu aami GRT oniwun ipin, eyiti o gbọdọ jẹ diẹ sii ju odo fun afara lati gba gbigbe naa. -When you choose to transfer the subgraph, this will convert all of the subgraph's curation signal to GRT. This is equivalent to "deprecating" the subgraph on mainnet. The GRT corresponding to your curation will be sent to L2 together with the subgraph, where they will be used to mint signal on your behalf. +Nigbati o ba yan lati gbe ipin-ipin naa lọ, eyi yoo yi gbogbo ami ijẹẹmu ipin-ipin naa pada si GRT. Eyi jẹ deede si “fifipalẹ” ipin-ipin lori mainnet. GRT ti o baamu si itọju rẹ ni yoo firanṣẹ si L2 papọ pẹlu ipin-ipin naa, nibiti wọn yoo ti lo lati ṣe ifihan agbara mint fun ọ. -Other Curators can choose whether to withdraw their fraction of GRT, or also transfer it to L2 to mint signal on the same subgraph. If a subgraph owner does not transfer their subgraph to L2 and manually deprecates it via a contract call, then Curators will be notified and will be able to withdraw their curation. +Awọn olutọju miiran le yan boya lati yọkuro ida wọn ti GRT, tabi tun gbe lọ si L2 si ifihan agbara mint lori ipin kanna. Ti oniwun ipin kan ko ba gbe ipin-ipin wọn si L2 ati pe o fi ọwọ silẹ nipasẹ ipe adehun, lẹhinna Awọn olutọpa yoo jẹ iwifunni ati pe yoo ni anfani lati yọkuro itọju wọn. -As soon as the subgraph is transferred, since all curation is converted to GRT, Indexers will no longer receive rewards for indexing the subgraph. However, there will be Indexers that will 1) keep serving transferred subgraphs for 24 hours, and 2) immediately start indexing the subgraph on L2. Since these Indexers already have the subgraph indexed, there should be no need to wait for the subgraph to sync, and it will be possible to query the L2 subgraph almost immediately. +Ni kete ti a ti gbe ipin-ipin naa, niwọn igba ti gbogbo arosọ ti yipada si GRT, Awọn atọka kii yoo gba awọn ere mọ fun titọka ipin-ipin naa. Bí ó ti wù kí ó rí, àwọn Atọ́ka yóò wà tí yóò 1) máa ń sìn àwọn ìpínrọ̀ tí a gbé lọ́wọ́ fún wákàtí 24, àti 2) kíákíá ni yóò bẹ̀rẹ̀ sítọ́ka ìpínrọ̀ náà lórí L2. 
Níwọ̀n bí àwọn Atọ́ka wọ̀nyí ti ti ní atọ́ka abẹ́ka náà, kò yẹ kí a dúró de ìpínrọ̀ náà láti múṣiṣẹ́pọ̀, yóò sì ṣeé ṣe láti béèrè lọ́wọ́ L2 ní kíákíá. -Queries to the L2 subgraph will need to be done to a different URL (on `arbitrum-gateway.thegraph.com`), but the L1 URL will continue working for at least 48 hours. After that, the L1 gateway will forward queries to the L2 gateway (for some time), but this will add latency so it is recommended to switch all your queries to the new URL as soon as possible. +Awọn ibeere si ipin L2 yoo nilo lati ṣe si URL ti o yatọ (lori `arbitrum-gateway.thegraph.com`), ṣugbọn URL L1 yoo tẹsiwaju ṣiṣẹ fun o kere ju wakati 48. Lẹhin iyẹn, ẹnu-ọna L1 yoo firanṣẹ awọn ibeere si ẹnu-ọna L2 (fun igba diẹ), ṣugbọn eyi yoo ṣafikun lairi nitori naa o gba ọ niyanju lati yi gbogbo awọn ibeere rẹ pada si URL tuntun ni kete bi o ti ṣee. -## Choosing your L2 wallet +## Yiyan apamọwọ L2 rẹ When you published your subgraph on mainnet, you used a connected wallet to create the subgraph, and this wallet owns the NFT that represents this subgraph and allows you to publish updates. -When transferring the subgraph to Arbitrum, you can choose a different wallet that will own this subgraph NFT on L2. +Nigbati o ba n gbe ipin si Arbitrum, o le yan apamọwọ ti o yatọ ti yoo ni ipin NFT yii lori L2. -If you're using a "regular" wallet like MetaMask (an Externally Owned Account or EOA, i.e. a wallet that is not a smart contract), then this is optional and it is recommended to keep the same owner address as in L1. +Ti o ba nlo apamọwọ “deede” bii MetaMask (Akọọlẹ Ohun-ini Ti ita tabi EOA, ie apamọwọ ti kii ṣe adehun ọlọgbọn), lẹhinna eyi jẹ aṣayan ati pe o gba ọ niyanju lati tọju adirẹsi oniwun kanna bi ni L1. -If you're using a smart contract wallet, like a multisig (e.g. a Safe), then choosing a different L2 wallet address is mandatory, as it is most likely that this account only exists on mainnet and you will not be able to make transactions on Arbitrum using this wallet. If you want to keep using a smart contract wallet or multisig, create a new wallet on Arbitrum and use its address as the L2 owner of your subgraph. +Ti o ba nlo apamọwọ adehun ọlọgbọn kan, bii multisig (fun apẹẹrẹ Aabo), lẹhinna yiyan adirẹsi apamọwọ L2 ti o yatọ jẹ dandan, nitori o ṣee ṣe pe akọọlẹ yii wa lori mainnet nikan ati pe iwọ kii yoo ni anfani lati ṣe awọn iṣowo lori Arbitrum lilo apamọwọ yii. Ti o ba fẹ tọju lilo apamọwọ adehun ti o gbọn tabi multisig, ṣẹda apamọwọ tuntun lori Arbitrum ki o lo adirẹsi rẹ bi oniwun L2 ti ipin rẹ. -**It is very important to use a wallet address that you control, and that can make transactions on Arbitrum. Otherwise, the subgraph will be lost and cannot be recovered.** +** O ṣe pataki pupọ lati lo adirẹsi apamọwọ ti o ṣakoso, ati pe o le ṣe awọn iṣowo lori Arbitrum. Bibẹẹkọ, ipin-ipin naa yoo sọnu ati pe ko le gba pada.** -## Preparing for the transfer: bridging some ETH +## Ngbaradi fun gbigbe: Nsopọ diẹ ninu ETH -Transferring the subgraph involves sending a transaction through the bridge, and then executing another transaction on Arbitrum. The first transaction uses ETH on mainnet, and includes some ETH to pay for gas when the message is received on L2. However, if this gas is insufficient, you will have to retry the transaction and pay for the gas directly on L2 (this is "Step 3: Confirming the transfer" below). This step **must be executed within 7 days of starting the transfer**. 
Moreover, the second transaction ("Step 4: Finishing the transfer on L2") will be done directly on Arbitrum. For these reasons, you will need some ETH on an Arbitrum wallet. If you're using a multisig or smart contract account, the ETH will need to be in the regular (EOA) wallet that you are using to execute the transactions, not on the multisig wallet itself. +Gbigbe ipin-ipin naa jẹ fifiranṣẹ idunadura kan nipasẹ afara, ati lẹhinna ṣiṣe iṣowo miiran lori Arbitrum. Idunadura akọkọ nlo ETH lori mainnet, ati pẹlu diẹ ninu ETH lati sanwo fun gaasi nigbati ifiranṣẹ ba gba lori L2. Sibẹsibẹ, ti gaasi yii ko ba to, iwọ yoo ni lati tun idunadura naa gbiyanju ati sanwo fun gaasi taara lori L2 (eyi ni “Igbese 3: Imudaduro gbigbe” ni isalẹ). Igbesẹ yii ** gbọdọ ṣiṣẹ laarin awọn ọjọ 7 ti o bẹrẹ gbigbe \***. Pẹlupẹlu, idunadura keji ("Igbese 4: Ipari gbigbe lori L2") yoo ṣee ṣe taara lori Arbitrum. Fun awọn idi wọnyi, iwọ yoo nilo diẹ ninu ETH lori apamọwọ Arbitrum kan. Ti o ba nlo multisig tabi iwe adehun adehun ọlọgbọn, ETH yoo nilo lati wa ninu apamọwọ deede (EOA) ti o nlo lati ṣe awọn iṣowo, kii ṣe lori apamọwọ multisig funrararẹ. You can buy ETH on some exchanges and withdraw it directly to Arbitrum, or you can use the Arbitrum bridge to send ETH from a mainnet wallet to L2: [bridge.arbitrum.io](http://bridge.arbitrum.io). Since gas fees on Arbitrum are lower, you should only need a small amount. It is recommended that you start at a low threshold (0.e.g. 01 ETH) for your transaction to be approved. @@ -116,7 +116,7 @@ A fraction of these GRT corresponding to the subgraph owner is sent to L2 togeth At this point, the curated GRT will not accrue any more query fees, so Curators can choose to withdraw their GRT or transfer it to the same subgraph on L2, where it can be used to mint new curation signal. There is no rush to do this as the GRT can be help indefinitely and everybody gets an amount proportional to their shares, irrespective of when they do it. -## Choosing your L2 wallet +## Yiyan apamọwọ L2 rẹ If you decide to transfer your curated GRT to L2, you can choose a different wallet that will own the curation signal on L2. diff --git a/website/pages/yo/billing.mdx b/website/pages/yo/billing.mdx index 3c21e5de1cdc..34a1ed7a8ce0 100644 --- a/website/pages/yo/billing.mdx +++ b/website/pages/yo/billing.mdx @@ -37,8 +37,12 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a crypto wallet + + > This section is written assuming you already have GRT in your crypto wallet, and you're on Ethereum mainnet. If you don't have GRT, you can learn how to get GRT [here](#getting-grt). +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. You'll be redirected to the wallet selection page. Select your wallet and click on "Connect". @@ -71,6 +75,8 @@ While The Graph protocol operates on Ethereum Mainnet, [the billing contract](ht ### Adding GRT using a multisig wallet + + 1. Go to the [Subgraph Studio Billing page](https://thegraph.com/studio/billing/). 2. Click on the "Connect Wallet" button on the top right corner of the page. Select your wallet and click on "Connect". If you're using [Gnosis-Safe](https://gnosis-safe.io/), you'll be able to connect your multisig as well as your signing wallet. 
Then, sign the associated message. This will not cost any gas. @@ -153,6 +159,50 @@ This is how you can purchase GRT on Uniswap. You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. Go to [Coinbase](https://www.coinbase.com/) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. Go to [Binance](https://www.binance.com/en) and create an account. +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - Click "Continue" and confirm your transaction. + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). + ## Arbitrum Bridge The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). 
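
Before connecting to the Billing page, you may want to confirm that the GRT you purchased actually landed in the wallet you intend to use. The sketch below reads the GRT balance straight from the token contract over JSON-RPC; the token address is written from memory and the RPC endpoint is just one public option, so verify both (for example on Etherscan) before relying on the output.

```bash
# Sketch: read an address's GRT balance on Ethereum mainnet with balanceOf(address).
# 0x70a08231 is the balanceOf(address) selector; the argument is the address
# left-padded to 32 bytes. Double-check the GRT token address before use.
GRT=0xc944E90C64B2c07662A292be6244BDf05Cda44a7
ME='000000000000000000000000<your-address-without-0x>'
curl -s https://cloudflare-eth.com \
  -H 'Content-Type: application/json' \
  -d "{\"jsonrpc\":\"2.0\",\"id\":1,\"method\":\"eth_call\",\"params\":[{\"to\":\"$GRT\",\"data\":\"0x70a08231$ME\"},\"latest\"]}"
# The result is a hex-encoded balance with 18 decimals.
```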
diff --git a/website/pages/yo/chain-integration-overview.mdx b/website/pages/yo/chain-integration-overview.mdx
new file mode 100644
index 000000000000..2fe6c2580909
--- /dev/null
+++ b/website/pages/yo/chain-integration-overview.mdx
@@ -0,0 +1,49 @@
+---
+title: Chain Integration Process Overview
+---
+
+A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-stage process, as summarised below.
+
+## Stage 1. Technical Integration
+
+- Teams work on a Graph Node integration and, for non-EVM-based chains, a Firehose implementation. [Here's how](/new-chain-integration/).
+- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory.
+
+## Stage 2. Integration Validation
+
+- Teams collaborate with core developers, the Graph Foundation, and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON-RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with.
+- Graph Indexers test the integration on The Graph's testnet.
+- Core developers and Indexers monitor stability, performance, and data determinism.
+
+## Stage 3. Mainnet Integration
+
+- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details at the link).
+- The Graph Council reviews the request and approves mainnet support, provided Stage 2 has been completed successfully and community feedback is positive.
+
+---
+
+If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro from The Graph Foundation, IndexerDAO, or other core developers).
+
+Ready to shape the future of The Graph Network? [Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution!
+
+---
+
+## Frequently Asked Questions
+
+### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)?
+
+This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`.
+
+### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet?
+
+This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP.
Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/yo/cookbook/arweave.mdx b/website/pages/yo/cookbook/arweave.mdx index 15aaf1a38831..f6fb3a8b2ce3 100644 --- a/website/pages/yo/cookbook/arweave.mdx +++ b/website/pages/yo/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: Building Subgraphs on Arweave --- -> Arweave support in Graph Node and on the Hosted Service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! In this guide, you will learn how to build and deploy Subgraphs to index the Arweave blockchain. @@ -83,7 +83,7 @@ dataSources: ``` - Arweave subgraphs introduce a new kind of data source (`arweave`) -- The network should correspond to a network on the hosting Graph Node. On the Hosted Service, Arweave's mainnet is `arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave data sources introduce an optional source.owner field, which is the public key of an Arweave wallet Arweave data sources support two types of handlers: @@ -150,9 +150,9 @@ Block handlers receive a `Block`, while transactions receive a `Transaction`. Writing the mappings of an Arweave Subgraph is very similar to writing the mappings of an Ethereum Subgraph. For more information, click [here](/developing/creating-a-subgraph/#writing-mappings). -## Deploying an Arweave Subgraph on the Hosted Service +## Deploying an Arweave Subgraph on the hosted service -Once your subgraph has been created on the Hosed Service dashboard, you can deploy by using the `graph deploy` CLI command. +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. 
```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/yo/cookbook/grafting.mdx b/website/pages/yo/cookbook/grafting.mdx index 54ad7a0eaff8..6d781a5f7e06 100644 --- a/website/pages/yo/cookbook/grafting.mdx +++ b/website/pages/yo/cookbook/grafting.mdx @@ -24,6 +24,22 @@ For more information, you can check: In this tutorial, we will be covering a basic usecase. We will replace an existing contract with an identical contract (with a new address, but the same code). Then, graft the existing subgraph onto the "base" subgraph that tracks the new contract. +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## Building an Existing Subgraph Building subgraphs is an essential part of The Graph, described more in depth [here](http://localhost:3000/en/cookbook/quick-start/). To be able to build and deploy the existing subgraph used in this tutorial, the following repo is provided: diff --git a/website/pages/yo/cookbook/near.mdx b/website/pages/yo/cookbook/near.mdx index 879e8e5c15aa..304e1202e278 100644 --- a/website/pages/yo/cookbook/near.mdx +++ b/website/pages/yo/cookbook/near.mdx @@ -277,7 +277,7 @@ Pending functionality is not yet supported for NEAR subgraphs. In the interim, y ### My question hasn't been answered, where can I get more help building NEAR subgraphs? -If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/cookbook/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
## References diff --git a/website/pages/yo/cookbook/upgrading-a-subgraph.mdx b/website/pages/yo/cookbook/upgrading-a-subgraph.mdx index 247a275db08f..bd3b739199d6 100644 --- a/website/pages/yo/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/yo/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ The process of upgrading is quick and your subgraphs will forever benefit from t ### Prerequisites - You have already deployed a subgraph on the hosted service. -- The subgraph is indexing a chain available (or available in beta) on The Graph Network. +- The subgraph is indexing a chain available on The Graph Network. - You have a wallet with ETH to publish your subgraph on-chain. - You have ~10,000 GRT to curate your subgraph so Indexers can begin indexing it. diff --git a/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx index 8cfa32b036f0..d6f0f891c6cc 100644 --- a/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/yo/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: Deploying a Subgraph to the Subgraph Studio --- -> Ensure the network your subgraph is indexing data from is [supported](/developing/supported-chains) on the decentralized network. +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). These are the steps to deploy your subgraph to the Subgraph Studio: diff --git a/website/pages/yo/deploying/hosted-service.mdx b/website/pages/yo/deploying/hosted-service.mdx index 2e6093531110..71a7fb23bdf5 100644 --- a/website/pages/yo/deploying/hosted-service.mdx +++ b/website/pages/yo/deploying/hosted-service.mdx @@ -10,7 +10,7 @@ If you don't have an account on the hosted service, you can sign up with your Gi For a comprehensive list, see [Supported Networks](/developing/supported-networks/#hosted-service). -## Create a Subgraph +## Ṣẹda Subgraph kan First follow the instructions [here](/developing/defining-a-subgraph) to install the Graph CLI. Create a subgraph by passing in `graph init --product hosted-service` @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / The example subgraph is based on the Gravity contract by Dani Grant that manages user avatars and emits `NewGravatar` or `UpdateGravatar` events whenever avatars are created or updated. The subgraph handles these events by writing `Gravatar` entities to the Graph Node store and ensuring these are updated according to the events. Continue on to the [subgraph manifest](/developing/creating-a-subgraph#the-subgraph-manifest) to better understand which events from your smart contracts to pay attention to, mappings, and more. +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## Supported Networks on the hosted service You can find the list of the supported networks [Here](/developing/supported-networks). 
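For the "From a Proxy Contract" initialization described above, the command snippet appears to have lost its placeholder arguments. Below is a minimal sketch of the same command with illustrative placeholders — `<IMPLEMENTATION_ADDRESS>`, `<GITHUB_USER>/<SUBGRAPH_NAME>`, and `[<DIRECTORY>]` are assumptions added for readability, not part of the original snippet. After initialization, remember to update the address in `subgraph.yaml` to the Proxy contract's address, as described in that section.

```sh
# Initialize from the implementation contract so its ABI and events are picked up;
# afterwards, point subgraph.yaml at the Proxy contract's address.
graph init \
  --product hosted-service \
  --from-contract <IMPLEMENTATION_ADDRESS> \
  <GITHUB_USER>/<SUBGRAPH_NAME> [<DIRECTORY>]
```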
diff --git a/website/pages/yo/deploying/subgraph-studio.mdx b/website/pages/yo/deploying/subgraph-studio.mdx index 1406065463d4..a6ff02e41188 100644 --- a/website/pages/yo/deploying/subgraph-studio.mdx +++ b/website/pages/yo/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ Querying subgraphs generates query fees, used to reward [Indexers](/network/inde 1. Sign in with your wallet - you can do this via MetaMask or WalletConnect 1. Once you sign in, you will see your unique deploy key on your account home page. This will allow you to either publish your subgraphs or manage your API keys + billing. You will have a unique deploy key that can be re-generated if you think it has been compromised. -## How to Create your Subgraph in Subgraph Studio +## How to Create a Subgraph in Subgraph Studio -The best part! When you first create a subgraph, you’ll be directed to fill out: - -- Your Subgraph Name -- Image -- Description -- Categories (e.g. `DeFi`, `NFTs`, `Governance`) -- Website + ## Subgraph Compatibility with The Graph Network diff --git a/website/pages/yo/developing/creating-a-subgraph.mdx b/website/pages/yo/developing/creating-a-subgraph.mdx index 1fc288833c35..ace69dd1ac7d 100644 --- a/website/pages/yo/developing/creating-a-subgraph.mdx +++ b/website/pages/yo/developing/creating-a-subgraph.mdx @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: The important entries to update for the manifest are: -- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the Hosted Service. +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`: the URL of the repository where the subgraph manifest can be found. This is also displayed by The Graph Explorer. @@ -146,6 +154,10 @@ The important entries to update for the manifest are: - `dataSources.source.startBlock`: the optional number of the block that the data source starts indexing from. In most cases, we suggest using the block in which the contract was created. +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`: the entities that the data source writes to the store. The schema for each entity is defined in the schema.graphql file. - `dataSources.mapping.abis`: one or more named ABI files for the source contract as well as any other smart contracts that you interact with from within the mappings. @@ -242,6 +254,7 @@ We support the following scalars in our GraphQL API: | `String` | Scalar for `string` values. Null characters are not supported and are automatically removed. | | `Boolean` | Scalar for `boolean` values. 
| | `Int` | The GraphQL spec defines `Int` to have a size of 32 bytes. | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | Large integers. Used for Ethereum's `uint32`, `int64`, `uint64`, ..., `uint256` types. Note: Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. | | `BigDecimal` | `BigDecimal` High precision decimals represented as a significand and an exponent. The exponent range is from −6143 to +6144. Rounded to 34 significant digits. | @@ -770,6 +783,8 @@ In addition to subscribing to contract events or function calls, a subgraph may ### Supported Filters +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### Mapping Function The mapping function will receive an `ethereum.Block` as its only argument. Like mapping functions for events, this function can access existing subgraph entities in the store, call smart contracts and create or update entities. @@ -934,6 +988,8 @@ If the subgraph encounters an error, that query will return both the data and a ### Grafting onto Existing Subgraphs +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + When a subgraph is first deployed, it starts indexing events at the genesis block of the corresponding chain (or at the `startBlock` defined with each data source) In some circumstances; it is beneficial to reuse the data from an existing subgraph and start indexing at a much later block. This mode of indexing is called _Grafting_. Grafting is, for example, useful during development to get past simple errors in the mappings quickly or to temporarily get an existing subgraph working again after it has failed. A subgraph is grafted onto a base subgraph when the subgraph manifest in `subgraph.yaml` contains a `graft` block at the top-level: @@ -963,7 +1019,7 @@ The grafted subgraph can use a GraphQL schema that is not identical to the one o ## File Data Sources -File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way, starting with IPFS. 
+File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > This also lays the groundwork for deterministic indexing of off-chain data, as well as the potential introduction of arbitrary HTTP-sourced data. @@ -1030,7 +1086,7 @@ If the relationship is 1:1 between the parent entity and the resulting file data > You can use [nested filters](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering) to filter parent entities on the basis of these nested entities. -#### Add a new templated data source with `kind: file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` This is the data source which will be spawned when a file of interest is identified. @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { You can now create file data sources during execution of chain-based handlers: - Import the template from the auto-generated `templates` -- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid IPFS content identifier +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> Currently Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). Example: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -This will create a new file data source, which will poll Graph Node's configured IPFS endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. This example is using the CID as the lookup between the parent `Token` entity and the resulting `TokenMetadata` entity. diff --git a/website/pages/yo/developing/developer-faqs.mdx b/website/pages/yo/developing/developer-faqs.mdx index 0b925a79dce2..fe1317fe2548 100644 --- a/website/pages/yo/developing/developer-faqs.mdx +++ b/website/pages/yo/developing/developer-faqs.mdx @@ -1,5 +1,5 @@ --- -title: Developer FAQs +title: Olùgbéejáde FAQs --- ## 1. What is a subgraph? @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } Currently, the recommended approach for a dapp is to add the key to the frontend and expose it to end users. That said, you can limit that key to a hostname, like _yourdapp.io_ and subgraph. 
The gateway is currently being run by Edge & Node. Part of the responsibility of a gateway is to monitor for abusive behavior and block traffic from malicious clients. -## 25. Where do I go to find my current subgraph on the Hosted Service? +## 25. Where do I go to find my current subgraph on the hosted service? Head over to the hosted service in order to find subgraphs that you or others deployed to the hosted service. You can find it [here](https://thegraph.com/hosted-service). -## 26. Will the Hosted Service start charging query fees? +## 26. Will the hosted service start charging query fees? The Graph will never charge for the hosted service. The Graph is a decentralized protocol, and charging for a centralized service is not aligned with The Graph’s values. The hosted service was always a temporary step to help get to the decentralized network. Developers will have a sufficient amount of time to upgrade to the decentralized network as they are comfortable. -## 27. When will the Hosted Service be shut down? - -The hosted service will shut down in 2023. Read the announcement blog post [here](https://thegraph.com/blog/sunsetting-hosted-service). All dapps using the hosted service are encouraged to upgrade to the decentralized network. Network Grants are available for developers to help upgrade their subgraph to The Graph Network. If your dapp is upgrading a subgraph you can apply [here](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com). - -## 28. How do I update a subgraph on mainnet? +## 27. How do I update a subgraph on mainnet? If you’re a subgraph developer, you can deploy a new version of your subgraph to the Subgraph Studio using the CLI. It’ll be private at that point, but if you’re happy with it, you can publish to the decentralized Graph Explorer. This will create a new version of your subgraph that Curators can start signaling on. diff --git a/website/pages/yo/developing/graph-ts/api.mdx b/website/pages/yo/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..45bfad8f7bfb --- /dev/null +++ b/website/pages/yo/developing/graph-ts/api.mdx @@ -0,0 +1,854 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +This page documents what built-in APIs can be used when writing subgraph mappings. Two kinds of APIs are available out of the box: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API Reference + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. 
+- Low-level primitives to translate between different type systems such as Ethereum, JSON, GraphQL and AssemblyScript. + +### Versions + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| Version | Release notes | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### Built-in Types + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### ByteArray + +```typescript +import { ByteArray } from '@graphprotocol/graph-ts' +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +import { BigDecimal } from '@graphprotocol/graph-ts' +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +import { BigInt } from '@graphprotocol/graph-ts' +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### TypedMap + +```typescript +import { TypedMap } from '@graphprotocol/graph-ts' +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### Bytes + +```typescript +import { Bytes } from '@graphprotocol/graph-ts' +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### Address + +```typescript +import { Address } from '@graphprotocol/graph-ts' +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### Store API + +```typescript +import { store } from '@graphprotocol/graph-ts' +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### Creating entities + +The following is a common pattern for creating entities from Ethereum events. + +```typescript +// Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. + +Each entity must have a unique ID to avoid collisions with other entities. 
It is fairly common for event parameters to include a unique identifier that can be used. Note: Using the transaction hash as the ID assumes that no other events in the same transaction create entities with this hash as the ID. + +#### Loading entities from the store + +If an entity already exists, it can be loaded from the store with the following: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### Looking up entities created withing a block + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +The store API facilitates the retrieval of entities that were created or updated in the current block. A typical situation for this is that one handler creates a Transaction from some on-chain event, and a later handler wants to access this transaction if it exists. In the case where the transaction does not exist, the subgraph will have to go to the database just to find out that the entity does not exist; if the subgraph author already knows that the entity must have been created in the same block, using loadInBlock avoids this database roundtrip. For some subgraphs, these missed lookups can contribute significantly to the indexing time. + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### Looking up derived entities + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +This enables loading derived entity fields from within an event handler. For example, given the following schema: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### Updating existing entities + +There are two ways to update an existing entity: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +Changing properties is straight forward in most cases, thanks to the generated property setters: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... 
+``` + +It is also possible to unset properties with one of the following two instructions: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### Removing entities from the store + +There is currently no way to remove an entity via the generated types. Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### Ethereum API + +The Ethereum API provides access to smart contracts, public state variables, contract functions, events, transactions, blocks and the encoding/decoding Ethereum data. + +#### Support for Ethereum Types + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +The following example illustrates this. Given a subgraph schema like + +```graphql +type Transfer @entity { + id: Bytes! + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### Events and Block/Transaction Data + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. 
The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### Access to Smart Contract State + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. + +A common pattern is to access the contract from which an event originates. This is achieved with the following code: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +Any other contract that is part of the subgraph can be imported from the generated code and can be bound to a valid address. + +#### Handling Reverted Calls + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +Note that a Graph node connected to a Geth or Infura client may not detect all reverts, if you rely on this we recommend using a Graph node connected to a Parity client. 
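Putting the two previous patterns together, here is a minimal sketch of an event handler that binds a contract, reads its state with a `try_` call, falls back to a default value when the call reverts, and saves an entity. The generated `Gravity`/`NewGravatar` classes, their import paths, and a `Gravatar` entity with an `owner: Bytes` field are assumptions for illustration — adapt the names to your own subgraph.

```typescript
// Sketch only: assumes generated Gravity contract and NewGravatar event classes,
// plus a Gravatar entity (id: ID!, owner: Bytes) in the schema.
import { Address, log } from '@graphprotocol/graph-ts'
import { Gravity, NewGravatar } from '../generated/Gravity/Gravity'
import { Gravatar } from '../generated/schema'

export function handleNewGravatar(event: NewGravatar): void {
  // Create the entity, using the gravatar id as the entity ID
  let gravatar = new Gravatar(event.params.id.toHex())

  // Bind the contract to the address that emitted the event
  let gravity = Gravity.bind(event.address)

  // Use the try_ variant so a reverted call does not fail the subgraph
  let ownerResult = gravity.try_gravatarToOwner(event.params.id)
  if (ownerResult.reverted) {
    log.warning('gravatarToOwner reverted for gravatar {}', [event.params.id.toString()])
    gravatar.owner = Address.fromString('0x0000000000000000000000000000000000000000')
  } else {
    gravatar.owner = ownerResult.value
  }

  gravatar.save()
}
```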
+ +#### Encoding/Decoding ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +For more information: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### Logging API + +```typescript +import { log } from '@graphprotocol/graph-ts' +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### Logging one or more values + +##### Logging a single value + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### Logging a single entry from an existing array + +In the example below, only the first value of the argument array is logged, despite the array containing three values. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### Logging multiple entries from an existing array + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. 
+ +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### Logging a specific entry from an existing array + +To display a specific value in the array, the indexed value must be provided. + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### Logging event information + +The example below logs the block number, block hash and transaction hash from an event: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +import { ipfs } from '@graphprotocol/graph-ts' +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +Given an IPFS hash or path, reading a file from IPFS is done as follows: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. 
Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +import { crypto } from '@graphprotocol/graph-ts' +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +import { json, JSONValueKind } from '@graphprotocol/graph-ts' +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... +} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### Type Conversions Reference + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() or s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() or s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| 
String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### Data Source Metadata + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity and DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. + +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/yo/developing/graph-ts/common-issues.mdx b/website/pages/yo/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..5b99efa8f493 --- /dev/null +++ b/website/pages/yo/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: Common AssemblyScript Issues +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. 
Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/yo/developing/supported-networks.mdx b/website/pages/yo/developing/supported-networks.mdx index 58ce56345f7c..cd82305bfce2 100644 --- a/website/pages/yo/developing/supported-networks.mdx +++ b/website/pages/yo/developing/supported-networks.mdx @@ -9,7 +9,7 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. -Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the Hosted Service. Goerli will be maintained by client developers post-merge, and is also supported by the Hosted Service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. @@ -19,6 +19,6 @@ Substreams-powered subgraphs indexing `mainnet` Ethereum are supported on the Su ## Graph Node -If your preferred network isn't supported on The Graph's decentralized network, you can run your own Graph Node to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph Node can also index other protocols, via a Firehose integration. Firehose integrations have been created for NEAR, Arweave and Cosmos-based networks. diff --git a/website/pages/yo/firehose.mdx b/website/pages/yo/firehose.mdx new file mode 100644 index 000000000000..02f0d63c72db --- /dev/null +++ b/website/pages/yo/firehose.mdx @@ -0,0 +1,22 @@ +--- +title: Firehose +--- + +![Firehose Logo](/img/firehose-logo.png) + +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. + +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. 
+ +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### Getting Started + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. + +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/yo/global.json b/website/pages/yo/global.json index 6a3eb234bfce..32766db6c057 100644 --- a/website/pages/yo/global.json +++ b/website/pages/yo/global.json @@ -1,14 +1,14 @@ { - "collapse": "Collapse", - "expand": "Expand", - "previous": "Previous", - "next": "Next", + "collapse": "Subu", + "expand": "Subu", + "previous": "Ti tele", + "next": "Itele", "editPage": "Edit page", - "pageSections": "Page Sections", - "linkToThisSection": "Link to this section", - "technicalLevelRequired": "Technical Level Required", - "notFoundTitle": "Oops! This page was lost in space...", - "notFoundSubtitle": "Check if you’re using the right address or explore our website by clicking on the link below.", - "goHome": "Go Home", - "video": "Video" + "pageSections": "Awọn apakan Oju-iwe", + "linkToThisSection": "Ọna asopọ si apakan yii", + "technicalLevelRequired": "Ipele Imọ-ẹrọ Ti a beere", + "notFoundTitle": "Ops! Oju-iwe yii ti sọnu ni aaye...", + "notFoundSubtitle": "Ṣayẹwo boya o nlo adirẹsi ti o tọ tabi ṣawari oju opo wẹẹbu wa nipa titẹ si ọna asopọ ni isalẹ.", + "goHome": "Lọ si ile", + "video": "Fidio" } diff --git a/website/pages/yo/glossary.mdx b/website/pages/yo/glossary.mdx index 2e840513f1ea..ef24dc0178e0 100644 --- a/website/pages/yo/glossary.mdx +++ b/website/pages/yo/glossary.mdx @@ -12,7 +12,7 @@ title: Glossary - **Subgraph**: A custom API built on blockchain data that can be queried using [GraphQL](https://graphql.org/). Developers can build, deploy and publish subgraphs to The Graph's decentralized network. Then, Indexers can begin indexing subgraphs to make them available to be queried by subgraph consumers. -- **Hosted Service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**: Network participants that run indexing nodes to index data from blockchains and serve GraphQL queries. @@ -24,6 +24,8 @@ title: Glossary - **Indexer's Self Stake**: The amount of GRT that Indexers stake to participate in the decentralized network. The minimum is 100,000 GRT, and there is no upper limit. +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: Network participants who own GRT and delegate their GRT to Indexers. 
This allows Indexers to increase their stake in subgraphs on the network. In return, Delegators receive a portion of the Indexing Rewards that Indexers receive for processing subgraphs. - **Delegation Tax**: A 0.5% fee paid by Delegators when they delegate GRT to Indexers. The GRT used to pay the fee is burned. @@ -38,27 +40,21 @@ title: Glossary - **Subgraph Manifest**: A JSON file that describes the subgraph's GraphQL schema, data sources, and other metadata. [Here](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf) is an example. -- **Rebate Pool**: An economic security measure that holds query fees paid by subgraph consumers until they may be claimed by Indexers as query fee rebates. Residual GRT is burned. - -- **Epoch**: A unit of time that in network. One epoch is currently 6,646 blocks or approximately 1 day. +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: An Indexer can allocate their total GRT stake (including Delegators' stake) towards subgraphs that have been published on The Graph's decentralized network. Allocations exist in one of four phases. 1. **Active**: An allocation is considered active when it is created on-chain. This is called opening an allocation, and indicates to the network that the Indexer is actively indexing and serving queries for a particular subgraph. Active allocations accrue indexing rewards proportional to the signal on the subgraph, and the amount of GRT allocated. - 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a fisherman can still open a dispute to challenge an Indexer for serving false data. - - 3. **Finalized**: The dispute period has ended, and query fee rebates are available to be claimed by Indexers. - - 4. **Claimed**: The final phase of an allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: A powerful dapp for building, deploying, and publishing subgraphs. -- **Fishermen**: Network participants may dispute Indexers' query responses and POIs. This is called being a Fisherman. A dispute resolved in the Fisherman’s favor results in a financial penalty for the Indexer, along with an award to the Fisherman, thus incentivizing the integrity of the indexing and query work performed by Indexers in the network. The penalty (slashing) is currently set at 2.5% of an Indexer's self stake, with 50% of the slashed GRT going to the Fisherman, and the other 50% being burned. 
+- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: Arbitrators are network participants set via govarnence. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: Indexers can have their staked GRT slashed for providing an incorrect proof of indexing (POI) or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: The rewards that Indexers receive for indexing subgraphs. Indexing rewards are distributed in GRT. @@ -66,7 +62,7 @@ title: Glossary - **GRT**: The Graph's work utility token. GRT provides economic incentives to network participants for contributing to the network. -- **POI or Proof of Indexing**: When an Indexer close their allocation and wants to claim their accrued indexer rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph Node is the component which indexes subgraphs, and makes the resulting data available to query via a GraphQL API. As such it is central to the indexer stack, and correct operation of Graph Node is crucial to running a successful indexer. @@ -80,10 +76,10 @@ title: Glossary - **Cooldown Period**: The time remaining until an Indexer who changed their delegation parameters can do so again. -- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer from Ethereum mainnet to Arbitrum One. 
Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_Upgrading_ a subgraph to The Graph Network**: The process of moving a subgraph from the hosted service to The Graph Network. - **_Updating_ a subgraph**: The process of releasing a new subgraph version with updates to the subgraph's manifest, schema, or mappings. -- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (i.e., curation shares move to the latest version when v0.0.1 is updated to v0.0.2). +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/yo/graphcast.mdx b/website/pages/yo/graphcast.mdx index e397aad36e43..28a374637e81 100644 --- a/website/pages/yo/graphcast.mdx +++ b/website/pages/yo/graphcast.mdx @@ -10,7 +10,7 @@ Currently, the cost to broadcast information to other network participants is de The Graphcast SDK (Software Development Kit) allows developers to build Radios, which are gossip-powered applications that Indexers can run to serve a given purpose. We also intend to create a few Radios (or provide support to other developers/teams that wish to build Radios) for the following use cases: -- Real-time cross-checking of subgraph data integrity ([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). - Conducting auctions and coordination for warp syncing subgraphs, substreams, and Firehose data from other Indexers. - Self-reporting on active query analytics, including subgraph request volumes, fee volumes, etc. - Self-reporting on indexing analytics, including subgraph indexing time, handler gas costs, indexing errors encountered, etc. 
diff --git a/website/pages/yo/index.json b/website/pages/yo/index.json index 9e28e13d5001..7ca98feb4487 100644 --- a/website/pages/yo/index.json +++ b/website/pages/yo/index.json @@ -1,30 +1,30 @@ { "title": "Get Started", - "intro": "Learn about The Graph, a decentralized protocol for indexing and querying data from blockchains.", + "intro": "Kọ ẹkọ nipa Aworan naa, ilana isọdasilẹ fun titọka ati ibeere data lati awọn blockchains.", "shortcuts": { "aboutTheGraph": { "title": "About The Graph", - "description": "Learn more about The Graph" + "description": "Kọ ẹkọ diẹ sii nipa The Graph" }, "quickStart": { - "title": "Quick Start", - "description": "Jump in and start with The Graph" + "title": "Ibẹrẹ kiakia", + "description": "Lọ sinu ki o bẹrẹ pẹlu The Graph" }, "developerFaqs": { - "title": "Developer FAQs", - "description": "Frequently asked questions" + "title": "Olùgbéejáde FAQs", + "description": "Awon ibeere ti awon eniyan saaba ma n beere" }, "queryFromAnApplication": { - "title": "Query from an Application", + "title": "Ibeere lati Ohun elo kan", "description": "Learn to query from an application" }, "createASubgraph": { - "title": "Create a Subgraph", - "description": "Use Studio to create subgraphs" + "title": "Ṣẹda Subgraph kan", + "description": "Lo Studio lati ṣẹda awọn subgraphs" }, "migrateFromHostedService": { - "title": "Migrate from the Hosted Service", - "description": "Migrating subgraphs to The Graph Network" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "Hosted Service", - "description": "Create and explore subgraphs on the Hosted Service" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "Supported Networks", - "description": "The Graph supports the following networks on The Graph Network and the Hosted Service.", - "graphNetworkAndHostedService": "The Graph Network & Hosted Service", - "hostedService": "Hosted Service", - "betaWarning": "In beta." + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/yo/mips-faqs.mdx b/website/pages/yo/mips-faqs.mdx index 73efe82662cb..ae460989f96e 100644 --- a/website/pages/yo/mips-faqs.mdx +++ b/website/pages/yo/mips-faqs.mdx @@ -4,6 +4,8 @@ title: MIPs FAQs ## Introduction +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! + It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). 
diff --git a/website/pages/yo/network/benefits.mdx b/website/pages/yo/network/benefits.mdx index 839a0a7b9cf7..864672b16515 100644 --- a/website/pages/yo/network/benefits.mdx +++ b/website/pages/yo/network/benefits.mdx @@ -14,7 +14,7 @@ Here is an analysis: - 60-98% lower monthly cost - $0 infrastructure setup costs - Superior uptime -- Access to 438 Indexers (and counting) +- Access to hundreds of independent Indexers around the world - 24/7 technical support by global community ## The Benefits Explained @@ -89,7 +89,7 @@ Zero setup fees. Get started immediately with no setup or overhead costs. No har ## Reliability & Resiliency -The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by 168 Indexers (and counting) securing the network globally. +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. Bottom line: The Graph Network is less expensive, easier to use, and produces superior results compared to running a `graph-node` locally. diff --git a/website/pages/yo/network/indexing.mdx b/website/pages/yo/network/indexing.mdx index c40fd87a22fe..9bdc2fb2eb7e 100644 --- a/website/pages/yo/network/indexing.mdx +++ b/website/pages/yo/network/indexing.mdx @@ -2,7 +2,7 @@ title: Indexing --- -Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn from a Rebate Pool that is shared with all network contributors proportional to their work, following the Cobb-Douglas Rebate Function. +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. GRT that is staked in the protocol is subject to a thawing period and can be slashed if Indexers are malicious and serve incorrect data to applications or if they index incorrectly. Indexers also earn rewards for delegated stake from Delegators, to contribute to the network. @@ -81,17 +81,17 @@ Disputes can be viewed in the UI in an Indexer's profile page under the `Dispute ### What are query fee rebates and when are they distributed? -Query fees are collected by the gateway whenever an allocation is closed and accumulated in the subgraph's query fee rebate pool. The rebate pool is designed to encourage Indexers to allocate stake in rough proportion to the amount of query fees they earn for the network. The portion of query fees in the pool that are allocated to a particular Indexer is calculated using the Cobb-Douglas Production Function; the distributed amount per Indexer is a function of their contributions to the pool and their allocation of stake on the subgraph. +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. 
It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -Once an allocation has been closed and the dispute period has passed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the delegation pool proportions. +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### What is query fee cut and indexing reward cut? The `queryFeeCut` and `indexingRewardCut` values are delegation parameters that the Indexer may set along with cooldownBlocks to control the distribution of GRT between the Indexer and their Delegators. See the last steps in [Staking in the Protocol](/network/indexing#stake-in-the-protocol) for instructions on setting the delegation parameters. -- **queryFeeCut** - the % of query fee rebates accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fee rebate pool when an allocation is claimed with the other 5% going to the Delegators. +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. -- **indexingRewardCut** - the % of indexing rewards accumulated on a subgraph that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards pool when an allocation is closed and the Delegators will split the other 5%. +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### How do Indexers know which subgraphs to index? @@ -662,21 +662,21 @@ ActionType { Example usage from source: ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` Note that supported action types for allocation management have different input requirements: @@ -798,8 +798,4 @@ After being created by an Indexer a healthy allocation goes through four states. 
- **Closed** - An Indexer is free to close an allocation once 1 epoch has passed ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) or their Indexer agent will automatically close the allocation after the **maxAllocationEpochs** (currently 28 days). When an allocation is closed with a valid proof of indexing (POI) their indexing rewards are distributed to the Indexer and its Delegators (see "how are rewards distributed?" below to learn more). -- **Finalized** - Once an allocation has been closed there is a dispute period after which the allocation is considered **finalized** and it's query fee rebates are available to be claimed (claim()). The Indexer agent monitors the network to detect **finalized** allocations and claims them if they are above a configurable (and optional) threshold, **—-allocation-claim-threshold**. - -- **Claimed** - The final state of an allocation; it has run its course as an active allocation, all eligible rewards have been distributed and its query fee rebates have been claimed. - Indexers are recommended to utilize offchain syncing functionality to sync subgraph deployments to chainhead before creating the allocation on-chain. This feature is especially useful for subgraphs that may take longer than 28 epochs to sync or have some chances of failing undeterministically. diff --git a/website/pages/yo/new-chain-integration.mdx b/website/pages/yo/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/yo/new-chain-integration.mdx +++ b/website/pages/yo/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is to develop a new Firehose for the chain in question and then integrate that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. 
## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/yo/operating-graph-node.mdx b/website/pages/yo/operating-graph-node.mdx index 832b6cccf347..4f0f856db111 100644 --- a/website/pages/yo/operating-graph-node.mdx +++ b/website/pages/yo/operating-graph-node.mdx @@ -22,7 +22,7 @@ In order to index a network, Graph Node needs access to a network client via an While some subgraphs may just require a full node, some may have indexing features which require additional RPC functionality. Specifically subgraphs which make `eth_calls` as part of indexing will require an archive node which supports [EIP-1898](https://eips.ethereum.org/EIPS/eip-1898), and subgraphs with `callHandlers`, or `blockHandlers` with a `call` filter, require `trace_filter` support ([see trace module documentation here](https://openethereum.github.io/JSONRPC-trace-module)). -**Upcoming: Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). ### IPFS Nodes diff --git a/website/pages/yo/publishing/publishing-a-subgraph.mdx b/website/pages/yo/publishing/publishing-a-subgraph.mdx index 1d284dc63af8..63ec80a57e88 100644 --- a/website/pages/yo/publishing/publishing-a-subgraph.mdx +++ b/website/pages/yo/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ Once your subgraph has been [deployed to the Subgraph Studio](/deploying/deployi Publishing a Subgraph to the decentralized network makes it available for [Curators](/network/curating) to begin curating on it, and [Indexers](/network/indexing) to begin indexing it. -For a walkthrough of how to publish a subgraph to the decentralized network, see [this video](https://youtu.be/HfDgC2oNnwo?t=580). + You can find the list of the supported networks [Here](/developing/supported-networks). 
diff --git a/website/pages/yo/querying/querying-the-hosted-service.mdx b/website/pages/yo/querying/querying-the-hosted-service.mdx index 14777da41247..f00ff226ce09 100644 --- a/website/pages/yo/querying/querying-the-hosted-service.mdx +++ b/website/pages/yo/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: Querying the Hosted Service --- -With the subgraph deployed, visit the [Hosted Service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. An example is provided below, but please see the [Query API](/querying/graphql-api) for a complete reference on how to query the subgraph's entities. @@ -19,9 +19,9 @@ This query lists all the counters our mapping has created. Since we only create } ``` -## Using The Hosted Service +## Using the hosted service -The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the Hosted Service. +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. Some of the main features are detailed below: diff --git a/website/pages/yo/querying/querying-with-python.mdx b/website/pages/yo/querying/querying-with-python.mdx new file mode 100644 index 000000000000..c6f59d476141 --- /dev/null +++ b/website/pages/yo/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## Getting Started + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). 
+ +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( +    "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( +    orderBy=aave_v2.Market.totalValueLockedUSD, +    orderDirection='desc', +    first=5, +) +# Return query to a dataframe +sg.query_df([ +    latest_markets.name, +    latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) +  - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) +  - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) +  - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) +  - A quick article on how to seamlessly save your data as CSVs for further analysis. diff --git a/website/pages/yo/quick-start.mdx b/website/pages/yo/quick-start.mdx new file mode 100644 index 000000000000..6b784203174a --- /dev/null +++ b/website/pages/yo/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: Ibẹrẹ kiakia +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +This guide is written assuming that you have: + +- A smart contract address on the network of your choice +- GRT to curate your subgraph +- A crypto wallet + +## 1. Create a subgraph on Subgraph Studio + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +Once connected, you can begin by clicking “create a subgraph.” Select the network of your choice and click continue. + +## 2. Install the Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +On your local machine, run one of the following commands: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. Initialize your Subgraph + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +When you initialize your subgraph, the CLI tool will ask you for the following information: + +- Protocol: choose the protocol your subgraph will be indexing data from +- Subgraph slug: create a name for your subgraph. Your subgraph slug is an identifier for your subgraph. 
+- Directory to create the subgraph in: choose your local directory +- Ethereum network(optional): you may need to specify which EVM-compatible network your subgraph will be indexing data from +- Contract address: Locate the smart contract address you’d like to query data from +- ABI: If the ABI is not autopopulated, you will need to input it manually as a JSON file +- Start Block: it is suggested that you input the start block to save time while your subgraph indexes blockchain data. You can locate the start block by finding the block where your contract was deployed. +- Contract Name: input the name of your contract +- Index contract events as entities: it is suggested that you set this to true as it will automatically add mappings to your subgraph for every emitted event +- Add another contract(optional): you can add another contract + +Initialize your subgraph from an existing contract by running the following command: + +```sh +graph init --studio +``` + +See the following screenshot for an example for what to expect when initializing your subgraph: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. Write your Subgraph + +The previous commands create a scaffold subgraph that you can use as a starting point for building your subgraph. When making changes to the subgraph, you will mainly work with three files: + +- Manifest (subgraph.yaml) - The manifest defines what datasources your subgraphs will index. +- Schema (schema.graphql) - The GraphQL schema defines what data you wish to retrieve from the subgraph. +- AssemblyScript Mappings (mapping.ts) - This is the code that translates data from your datasources to the entities defined in the schema. + +For more information on how to write your subgraph, see [Creating a Subgraph](/developing/creating-a-subgraph). + +## 5. Deploy to the Subgraph Studio + +Once your subgraph is written, run the following commands: + +```sh +$ graph codegen +$ graph build +``` + +- Authenticate and deploy your subgraph. The deploy key can be found on the Subgraph page in Subgraph Studio. + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. Test your subgraph + +You can test your subgraph by making a sample query in the playground section. + +The logs will tell you if there are any errors with your subgraph. The logs of an operational subgraph will look like this: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 
Publish Your Subgraph to The Graph’s Decentralized Network + +Once your subgraph has been deployed to the Subgraph Studio, you have tested it out, and are ready to put it into production, you can then publish it to the decentralized network. + +In the Subgraph Studio, click on your subgraph. On the subgraph’s page, you will be able to click the publish button on the top right. + +Select the network you would like to publish your subgraph to. It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +Before you can query your subgraph, Indexers need to begin serving queries on it. In order to streamline this process, you can curate your own subgraph using GRT. + +At the time of writing, it is recommended that you curate your own subgraph with 10,000 GRT to ensure that it is indexed and available for querying as soon as possible. + +To save on gas costs, you can curate your subgraph in the same transaction in which you publish it by selecting this button when you publish your subgraph to The Graph’s decentralized network: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. Query your Subgraph + +Now, you can query your subgraph by sending GraphQL queries to your subgraph’s Query URL, which you can find by clicking on the query button. + +If you don't have your API key, you can query from your dapp via the free, rate-limited temporary query URL that can be used for development and staging. + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/yo/substreams.mdx b/website/pages/yo/substreams.mdx new file mode 100644 index 000000000000..2a06de8ac868 --- /dev/null +++ b/website/pages/yo/substreams.mdx @@ -0,0 +1,44 @@ +--- +title: Substreams +--- + +![Substreams Logo](/img/substreams-logo.png) + +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. + +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { +    let header = blk.header.as_ref().unwrap(); + +    Ok(MyBlock { +        number: blk.number, +        hash: Hex::encode(&blk.hash), +        parent_hash: Hex::encode(&header.parent_hash), +    }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). 
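+
+As a rough sketch of steps 2 and 3 above, assuming a standard Rust toolchain and the StreamingFast `substreams` CLI (the manifest path `substreams.yaml` and the module name `map_my_block` are illustrative placeholders):
+
+```bash
+# Step 2: compile the Rust module to WebAssembly (standard Cargo target used by Substreams modules)
+cargo build --target wasm32-unknown-unknown --release
+
+# Step 3: stream the module's output from a public Substreams endpoint
+# (an API token may be required, depending on the endpoint)
+substreams run -e mainnet.eth.streamingfast.io:443 substreams.yaml map_my_block
+```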
+ +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### Getting Started + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. diff --git a/website/pages/yo/sunrise.mdx b/website/pages/yo/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/yo/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. 
+ +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. + +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. 
+ +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. 
+ +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/yo/tokenomics.mdx b/website/pages/yo/tokenomics.mdx index 949796a99983..b87200dc6b04 100644 --- a/website/pages/yo/tokenomics.mdx +++ b/website/pages/yo/tokenomics.mdx @@ -11,7 +11,7 @@ The Graph is a decentralized protocol that enables easy access to blockchain dat It's similar to a B2B2C model, except it is powered by a decentralized network of participants. Network participants work together to provide data to end users in exchange for GRT rewards. GRT is the work utility token that coordinates data providers and consumers. GRT serves as a utility for coordinating data providers and consumers within the network and incentivizes protocol participants to organize data effectively. -By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular applications](https://thegraph.com/explorer) in the web3 ecosystem today. +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. The Graph indexes blockchain data similarly to how Google indexes the web. In fact, you may already be using The Graph without realizing it. If you've viewed the front end of a dapp that gets its data from a subgraph, you queried data from a subgraph! @@ -75,7 +75,7 @@ Indexers are the backbone of The Graph. They operate independent hardware and so Indexers can earn GRT rewards in two ways: -1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are deposited into a rebate pool and distributed to Indexers. +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. Indexing rewards: the 3% annual issuance is distributed to Indexers based on the number of subgraphs they are indexing. These rewards incentivize Indexers to index subgraphs, occasionally before the query fees begin, to accrue and submit Proofs of Indexing (POIs) verifying that they have indexed data accurately. diff --git a/website/pages/yo/translations.ts b/website/pages/yo/translations.ts new file mode 100644 index 000000000000..340f7eeea339 --- /dev/null +++ b/website/pages/yo/translations.ts @@ -0,0 +1,13 @@ +import supportedNetworks from './developing/supported-networks.json' +import docsearch from './docsearch.json' +import global from './global.json' +import index from './index.json' + +const translations = { + global, + index, + docsearch, + supportedNetworks, +} + +export default translations diff --git a/website/pages/zh/arbitrum/arbitrum-faq.mdx b/website/pages/zh/arbitrum/arbitrum-faq.mdx index 5383646f3a32..345141d64f8e 100644 --- a/website/pages/zh/arbitrum/arbitrum-faq.mdx +++ b/website/pages/zh/arbitrum/arbitrum-faq.mdx @@ -41,7 +41,7 @@ There is no immediate action required, however, network participants are encoura ## 如果我想参加L2上的Graph网络,该怎么办? 
-请在L2上帮助测试网络[test the network](https://testnet.thegraph.com/explorer),并在[Discord](https://discord.gg/graphprotocol)上报告您的体验反馈。 +Please help [test the network](https://testnet.thegraph.com/explorer) on L2 and report feedback about your experience in [Discord](https://discord.gg/graphprotocol). ## 将网络扩展到L2是否存在任何风险? diff --git a/website/pages/zh/arbitrum/l2-transfer-tools-faq.mdx b/website/pages/zh/arbitrum/l2-transfer-tools-faq.mdx index f36a1fec4e1a..2d71141c48e8 100644 --- a/website/pages/zh/arbitrum/l2-transfer-tools-faq.mdx +++ b/website/pages/zh/arbitrum/l2-transfer-tools-faq.mdx @@ -2,19 +2,43 @@ title: L2转移工具常见问题解答 --- -> L2转移工具尚未发布。预计它们将在2023年夏季提供。 +## 通用 -## 什么是L2转移工具? +### 什么是L2转移工具? -The Graph将协议部署到Arbitrum One上,大大降低了参与网络的成本,使贡献者的参与费用降低了26倍。L2转移工具由核心开发人员创建,旨在使迁移到L2变得简单。对于每个协议参与者,将共享一组转移助手,以在迁移到L2时提供无缝的体验,避免解冻期或手动提取和桥接GRT。这些工具将要求您根据您在The Graph中的角色和要转移的内容遵循一套特定的步骤。 +The Graph has made it 26x cheaper for contributors to participate in the network by deploying the protocol to Arbitrum One. The L2 Transfer Tools were created by core devs to make it easy to move to L2. -## 我可以在以太坊主网上使用相同的钱包吗? +For each network participant, a set of L2 Transfer Tools are available to make the experience seamless when moving to L2, avoiding thawing periods or having to manually withdraw and bridge GRT. + +These tools will require you to follow a specific set of steps depending on what your role is within The Graph and what you are transferring to L2. + +### 我可以在以太坊主网上使用相同的钱包吗? 如果您使用的是 [EOA](https://ethereum.org/en/developers/docs/accounts/#types-of-account)钱包,则可以使用相同的地址。如果您在以太坊主网上的钱包是智能合约钱包(例如多签钱包),那么您必须指定一个Arbitrum钱包地址,用于接收您的转账。请仔细检查地址,因为发送到错误地址的任何转账都可能导致永久丢失。如果您希望在L2上使用多签钱包,请确保在Arbitrum One上部署一个多签合约。 +Wallets on EVM blockchains like Ethereum and Arbitrum are a pair of keys (public and private), that you create without any need to interact with the blockchain. So any wallet that was created for Ethereum will also work on Arbitrum without having to do anything else. + +The exception is with smart contract wallets like multisigs: these are smart contracts that are deployed separately on each chain, and get their address when they are deployed. If a multisig was deployed to Ethereum, it won't exist with the same address on Arbitrum. A new multisig must be created first on Arbitrum, and may get a different address. + +### 如果我没有在 7 天内完成转账,会发生什么情况? + +L2 传输工具使用 Arbitrum 的原生机制将信息从 L1 发送至 L2。这种机制被称为 "retryable ticket,所有本地令牌网桥都使用这种机制,包括Arbitrum GRT网桥。您可以在[Arbitrum文档](https://docs.arbitrum.io/arbos/l1-to-l2-messaging)中阅读更多关于retryable ticket的信息。 + +当您将您的资产(子图、股权、委托)转移到 L2 时,会通过 Arbitrum GRT 桥接器发送一条信息,该桥接器会在 L2 中创建一个可retryable ticket。转移工具在交易中包含一些 ETH ,用于:1)支付创建票据的费用;2)支付在 L2 中执行票据的气体费用。但是,在票据准备好在 L2 中执行之前,gas价格可能会发生变化,因此自动执行尝试可能会失败。当这种情况发生时,Arbitrum 桥接器会将retryable ticket保留最多 7 天,任何人都可以重试 "赎回 "票据(这需要一个与 Arbitrum 桥接了一些 ETH 的钱包)。 + +这就是我们在所有传输工具中所说的 "确认 "步骤--在大多数情况下,它会自动运行,因为自动执行通常都会成功,但重要的是,您要回过头来检查,以确保它成功了。如果没有成功,并且在 7 天内没有成功的重试,Arbitrum 桥接器将丢弃该票据,您的资产(子图、股权、委托或管理)将丢失且无法恢复。The Graph核心开发人员有一个监控系统来检测这些情况,并尝试在为时已晚之前赎回门票,但确保您的转让及时完成最终还是您的责任。如果您在确认交易时遇到困难,请使用[此表单](https://noteforms.com/forms/notionform-l2-transfer-tooling-issues-0ogqfu?notionforms=1&utm_source=notionforms)联系我们,核心开发人员将为您提供帮助。 + +### I started my delegation/stake/curation transfer and I'm not sure if it made it through to L2, how can I confirm that it was transferred correctly? + +If you don't see a banner on your profile asking you to finish the transfer, then it's likely the transaction made it safely to L2 and no more action is needed. 
If in doubt, you can check if Explorer shows your delegation, stake or curation on Arbitrum One. + +If you have the L1 transaction hash (which you can find by looking at the recent transactions in your wallet), you can also confirm if the "retryable ticket" that carried the message to L2 was redeemed here: https://retryable-dashboard.arbitrum.io/ - if the auto-redeem failed, you can also connect your wallet there and redeem it. Rest assured that core devs are also monitoring for messages that get stuck, and will attempt to redeem them before they expire. + ## 子图转移 -## 如何转移我的子图? +### 如何转移我的子图? + + 要转移您的子图,您需要完成以下步骤: @@ -28,191 +52,241 @@ The Graph将协议部署到Arbitrum One上,大大降低了参与网络的成 5. 更新查询URL(推荐) -\*请注意,您必须在7天内确认转移,否则您的子图可能会丢失。在大多数情况下,此步骤将自动运行,但如果Arbitrum的燃气价格飙升,则可能需要手动确认。如果在此过程中遇到任何问题,我们将提供帮助:请通过support@thegraph.com或[Discord](https://discord.gg/graphprotocol)与我们联系。 +\*Note that you must confirm the transfer within 7 days otherwise your subgraph may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## 我应该从哪里发起转移? +### 我应该从哪里发起转移? 您可以从[Subgraph Studio](https://thegraph.com/studio/), [Explorer](https://thegraph.com/explorer) 或任何子图详细信息页面发起转移。在子图详细信息页面中,点击“Transfer Subgraph”按钮开始转移。 -## 我需要等多久才能完成子图转移? +### 我需要等多久才能完成子图转移? 转移时间大约需要20分钟。Arbitrum跨链桥在后台工作,自动完成桥接转移。在某些情况下,燃气费用可能会上涨,您需要再次确认交易。 -## 在我将子图转移到L2之后,它还能被发现吗? +### 在我将子图转移到L2之后,它还能被发现吗? 您的子图只能在其发布所在的网络上被发现。例如,如果您的子图在Arbitrum One上,那么您只能在Arbitrum One的Explorer中找到它,而无法在以太坊上找到它。请确保您已在页面顶部的网络切换器中选择了Arbitrum One,以确保您位于正确的网络上。转移后,L1子图将显示为已弃用。 -## 我的子图是否需要发布才能转移? +### 我的子图是否需要发布才能转移? 要使用子图转移工具,您的子图必须已经发布到以太坊主网上,并且拥有子图的钱包必须拥有一定的策划信号。如果您的子图尚未发布,建议您直接在Arbitrum One上进行发布-相关的燃气费用将大大降低。如果您想转移已发布的子图,但拥有该子图的所有者账户尚未对其进行任何策划信号的策展,您可以从该账户中发送一小笔金额(例如1 GRT)进行信号,确保选择“自动迁移”信号。 -## 我将我的子图转移到Arbitrum后,以太坊主网版本的子图会发生什么? +### 我将我的子图转移到Arbitrum后,以太坊主网版本的子图会发生什么? 将子图转移到Arbitrum后,以太坊主网版本的子图将被弃用。我们建议您在48小时内更新查询URL。但是,我们已经设置了一个宽限期使您的主网URL继续可用,以便更新任何第三方dapp的支持。 -## 转移后,我还需要在Arbitrum上重新发布吗? +### 转移后,我还需要在Arbitrum上重新发布吗? 在20分钟的转移窗口之后,您需要通过UI中的交易确认来完成转移,但转移工具将指导您完成此过程。在转移窗口和宽限期期间,您的L1端点将继续受到支持。鼓励您在方便的时候更新您的端点。 -## 在重新发布过程中,我的端点会出现停机时间吗? +### Will my endpoint experience downtime while re-publishing? -在使用转移工具将子图转移到L2时,不应出现停机时间。在转移窗口和宽限期期间,您的L1端点将继续受到支持。鼓励您在方便的时候更新您的端点。 +It is unlikely, but possible to experience a brief downtime depending on which Indexers are supporting the subgraph on L1 and whether they keep indexing it until the subgraph is fully supported on L2. -## L2上的发布和版本控制与以太坊主网上相同吗? +### L2上的发布和版本控制与以太坊主网上相同吗? -是的。请确保在Subgraph Studio中选择Arbitrum One作为您的发布网络。在Studio中,将提供最新的端点,指向子图的最新更新版本。 +Yes. Select Arbitrum One as your published network when publishing in Subgraph Studio. In the Studio, the latest endpoint will be available which points to the latest updated version of the subgraph. -## 我的子图的策划是否会随着子图一起转移? +### 我的子图的策展是否会随着子图一起转移? 如果您选择了自动迁移信号,您自己的全部策展将与子图一起转移到Arbitrum One。在转移时,所有子图的策展信号将转换为GRT,并且与您的策展信号相对应的GRT将用于在L2子图上生成信号。 其他策展人可以选择是否撤回他们的一部分GRT,或者将其转移到L2上,在同一子图上生成信号。 -## 在将子图转移到Arbitrum后,我能否将其转回以太坊主网? +### 在将子图转移到Arbitrum后,我能否将其转回以太坊主网? 一旦转移,您的以太坊主网版本的子图将被弃用。如果您想转回主网,您需要重新部署并发布到主网。然而,强烈不建议再次转移到以太坊主网,因为索引奖励最终将完全在Arbitrum One上分发。 -## 为什么在完成转移时需要桥接ETH? +### 为什么在完成转移时需要桥接ETH? 在Arbitrum One上支付的燃气费用使用桥接ETH(即已桥跨链到Arbitrum One的ETH)支付。但是,与以太坊主网相比,燃气费用要低得多。 -## 策展信号 +## 委托 -## 如何转移我的策展信号? +### 如何转移我的委托? 
-要转移您的策展信号,您需要完成以下步骤: + -1. 在以太坊主网上启动信号转移 +要转移你的委托,你需要完成以下步骤: -2. 指定L2策展者地址\* +1. 在以太坊主网上启动委托转移 +2. 等待20分钟进行确认 +3. 在Arbitrum上确认委托转移 -3. 等待20分钟进行确认 +\*\*\*\*You must confirm the transaction to complete the delegation transfer on Arbitrum. This step must be completed within 7 days or the delegation could be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -\*如果需要-即您正在使用合约地址。 +### 如果我在以太坊主网上有未完成的分配,我的奖励会怎样? -## 我如何知道我策展的子图已经转移到L2? +If the Indexer to whom you're delegating is still operating on L1, when you transfer to Arbitrum you will forfeit any delegation rewards from open allocations on Ethereum mainnet. This means that you will lose the rewards from, at most, the last 28-day period. If you time the transfer right after the Indexer has closed allocations you can make sure this is the least amount possible. If you have a communication channel with your Indexer(s), consider discussing with them to find the best time to do your transfer. -在查看子图详细信息页面时,将显示一个横幅,通知您该子图已转移。您可以按照提示进行策展转移。您还可以在已转移的任何子图的子图详细信息页面上找到此信息。 +### 如果我当前委托的索引人不在 Arbitrum One 上怎么办? -## 如果我不希望将我的策展转移到L2怎么办? +只有你委托的索引人将自己的质押转移到 Arbitrum 后,L2 转移工具才会启用。 -当子图被弃用时,您可以选择撤回您的信号。同样,如果子图转移到L2,您可以选择在以太坊主网上撤回信号,或将信号发送到L2。 +### 委托人是否可以选择委托给其他索引人? -## 如何知道我的策展是否成功转移? +If you wish to delegate to another Indexer, you can transfer to the same Indexer on Arbitrum, then undelegate and wait for the thawing period. After this, you can select another active Indexer to delegate to. -信号详细信息将在大约20分钟后通过Exploer可访问。 +### 如果我在 L2 上找不到要委派的索引人怎么办? -## 我可以一次在多个子图上转移我的策展吗? +L2 转移工具将自动检测您先前委派的索引人。 -目前没有批量转移选项。 +### 我是否可以将委派分散在新的或多个索引人上,而不是之前的索引人? -## 索引器质押 +L2 转移工具将始终将您的委托转移到您先前委托的同一索引人。一旦您转移到 L2,您可以取消委托,等待解冻期,并决定是否要分散您的委托。 -## 如何将我的质押转移到Arbitrum? +### 在使用 L2 委托转移工具后,我是否需要等待冷却期才能提款,还是可以立即提款? -要转移您的质押,您需要完成以下步骤: +转移工具允许您立即转移到 L2。如果您想取消委托,您将需要等待解冻期。但是,如果一个指标者已经将他们的全部股份转移到 L2 上,您可以立即在以太坊主网上提款。 -1. 在以太坊主网上启动转移过程 +### 如果我不转移委托是否会对我的奖励产生负面影响? -2. 等待20分钟进行确认 +预计所有网络参与者将来都会转移到 Arbitrum One。 -3. 在Arbitrum上确认质押转移 +### 完成委托转移到 L2 需要多长时间? -\*请注意,您必须在7天内确认转移,否则您的子图可能会丢失。在大多数情况下,此步骤将自动运行,但如果Arbitrum的燃气价格飙升,则可能需要手动确认。如果在此过程中遇到任何问题,我们将提供帮助:请通过support@thegraph.com或[Discord](https://discord.gg/graphprotocol)与我们联系。 +A 20-minute confirmation is required for delegation transfer. Please note that after the 20-minute period, you must come back and complete step 3 of the transfer process within 7 days. If you fail to do this, then your delegation may be lost. Note that in most cases the transfer tool will complete this step for you automatically. In case of a failed auto-attempt, you will need to complete it manually. If any issues arise during this process, don't worry, we'll be here to help: contact us at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## 我所有的质押都会转移吗 +### 如果我使用的是 GRT 质押合约/代币锁定钱包,我可以转移委托吗? -您可以选择转移多少质押。如果您选择一次性转移所有质押,您必须先关闭任何未完成的分配。 +可以!由于解锁合约(vesting contracts)无法转发用于支付 L2 交易费用的 ETH,所以流程略有不同,你需要事先存入所需的 ETH。如果你的解锁合约尚未完全解锁,你还需要在 L2 上先初始化一个对应的解锁合约,并且只能将质押转移到此 L2 解锁合约。Explorer 上的用户界面可以指导你在使用解锁钱包(vesting lock wallet)连接到 Explorer 时完成这个过程。 -如果您计划在多个交易中转移部分质押,您必须始终指定相同的受益人地址。 +### Does my Arbitrum vesting contract allow releasing GRT just like on mainnet? 
-注意:首次使用转移工具时,您必须满足L2的最低质押要求。索引器在首次调用此函数时必须发送最低的100,000个GRT(当调用此函数时)。如果将部分质押留在L1上,它也必须超过100,000个GRT的最低要求,并足以(与您的委托一起)覆盖您的未完成分配。 +No, the vesting contract that is created on Arbitrum will not allow releasing any GRT until the end of the vesting timeline, i.e. until your contract is fully vested. This is to prevent double spending, as otherwise it would be possible to release the same amounts on both layers. -## 确认质押转移至Arbitrum的时间限制是多久? +If you'd like to release GRT from the vesting contract, you can transfer them back to the L1 vesting contract using Explorer: in your Arbitrum One profile, you will see a banner saying you can transfer GRT back to the mainnet vesting contract. This requires a transaction on Arbitrum One, waiting 7 days, and a final transaction on mainnet, as it uses the same native bridging mechanism from the GRT bridge. -\*\*\*您必须确认交易以完成Arbitrum上的质押转移。此步骤必须在7天内完成,否则可能会丢失质押。 +### 是否需要支付任何委托税? -## 如果我有未完成的分配会怎么样? +不需要。在 L2 上收到的代币将以指定的委托人名义委托给指定的索引人,而无需收取委托税。 -如果您不发送全部质押,L2转移工具将验证以太坊主网上至少保留了最低的100,000个GRT,并且您的剩余质押和委托足以覆盖任何未完成的分配。如果您的GRT余额不足以支付最低要求+未完成的分配,您可能需要关闭未完成的分配。 +### Will my unrealized rewards be transferred when I transfer my delegation? -## 使用转移工具,在以太坊主网上进行转移之前,是否需要等待28天解除质押? +​Yes! The only rewards that can't be transferred are the ones for open allocations, as those won't exist until the Indexer closes the allocations (usually every 28 days). If you've been delegating for a while, this is likely only a small fraction of rewards. -不需要,在使用转移工具之前,你可以立即将你的质押转移到 L2(即 Arbitrum),无需解除质押并等待。28天的等待期仅适用于如果你想要将质押提取回你的钱包,无论是在以太坊主网还是 L2 上。 +At the smart contract level, unrealized rewards are already part of your delegation balance, so they will be transferred when you transfer your delegation to L2. ​ -## 转移质押需要多长时间? +### Is moving delegations to L2 mandatory? Is there a deadline? -转移质押的过程大约需要20分钟,L2 转移工具将完成质押的转移。 +​Moving delegation to L2 is not mandatory, but indexing rewards are increasing on L2 following the timeline described in [GIP-0052](https://forum.thegraph.com/t/gip-0052-timeline-and-requirements-to-increase-rewards-in-l2/4193). Eventually, if the Council keeps approving the increases, all rewards will be distributed in L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -## 在转移质押之前,我是否需要在 Arbitrum 上进行索引(indexing)? +### If I am delegating to an Indexer that has already transferred stake to L2, do I stop receiving rewards on L1? -你可以在转移质押之前先有效地进行转移,但在你在 L2 上分配和索引子图之前,你将无法领取 L2 上的任何奖励。要在 L2 上领取奖励,你需要在 L2 上分配子图、对其进行索引并提供 POI。 +​Many Indexers are transferring stake gradually so Indexers on L1 will still be earning rewards and fees on L1, which are then shared with Delegators. Once an Indexer has transferred all of their stake, then they will stop operating on L1, so Delegators will not receive any more rewards unless they transfer to L2. -## 在我转移索引质押之前,委托人是否可以转移他们的委托? +Eventually, if the Council keeps approving the indexing rewards increases in L2, all rewards will be distributed on L2 and there will be no indexing rewards for Indexers and Delegators on L1. ​ -否,为了使委托人能够将他们的委托 GRT 转移到 Arbitrum,他们委托的索引人必须在 L2 上活动。 +### I don't see a button to transfer my delegation. Why is that? -## 如果我使用 GRT 解锁合约(GRT vesting contract)/令牌锁定钱包,我可以转移质押吗? +​Your Indexer has probably not used the L2 transfer tools to transfer stake yet. 
-可以!由于解锁合约(vesting contracts)无法转发用于支付 L2 交易费用的 ETH,所以流程略有不同,你需要事先存入所需的 ETH。如果你的解锁合约尚未完全解锁,你还需要在 L2 上先初始化一个对应的解锁合约,并且只能将质押转移到此 L2 解锁合约。Explorer 上的用户界面可以指导你在使用解锁钱包(vesting lock wallet)连接到 Explorer 时完成这个过程。 +If you can contact the Indexer, you can encourage them to use the L2 Transfer Tools so that Delegators can transfer delegations to their L2 Indexer address. ​ -## 委托 +### My Indexer is also on Arbitrum, but I don't see a button to transfer the delegation in my profile. Why is that? -## 如何转移我的委托? +​It is possible that the Indexer has set up operations on L2, but hasn't used the L2 transfer tools to transfer stake. The L1 smart contracts will therefore not know about the Indexer's L2 address. If you can contact the Indexer, you can encourage them to use the transfer tool so that Delegators can transfer delegations to their L2 Indexer address. ​ -要转移你的委托,你需要完成以下步骤: +### Can I transfer my delegation to L2 if I have started the undelegating process and haven't withdrawn it yet? -1. 在以太坊主网上启动委托转移 +​No. If your delegation is thawing, you have to wait the 28 days and withdraw it. + +The tokens that are being undelegated are "locked" and therefore cannot be transferred to L2. + +## 策展信号 + +### 如何转移我的策展信号? + +要转移您的策展信号,您需要完成以下步骤: + +1. 在以太坊主网上启动信号转移 + +2. 指定L2策展者地址\* + +3. 等待20分钟进行确认 + +\*如果需要-即您正在使用合约地址。 + +### 我如何知道我策展的子图已经转移到L2? + +在查看子图详细信息页面时,将显示一个横幅,通知您该子图已转移。您可以按照提示进行策展转移。您还可以在已转移的任何子图的子图详细信息页面上找到此信息。 + +### 如果我不希望将我的策展转移到L2怎么办? + +当子图被弃用时,您可以选择撤回您的信号。同样,如果子图转移到L2,您可以选择在以太坊主网上撤回信号,或将信号发送到L2。 + +### 如何知道我的策展是否成功转移? + +信号详细信息将在大约20分钟后通过Exploer可访问。 + +### 我可以一次在多个子图上转移我的策展吗? + +目前没有批量转移选项。 + +## 索引人质押 + +### 如何将我的质押转移到Arbitrum? + +> 免责声明:如果您目前正在取消索引人上的任何 GRT 部分,则无法使用 L2 转移工具。 + + + +要转移您的质押,您需要完成以下步骤: + +1. 在以太坊主网上启动转移过程 2. 等待20分钟进行确认 -3. 在Arbitrum上确认委托转移 +3. 在Arbitrum上确认质押转移 -\*\*\*\*请注意,您必须在7天内确认转移,否则您的子图可能会丢失。在大多数情况下,此步骤将自动运行,但如果Arbitrum的燃气价格飙升,则可能需要手动确认。如果在此过程中遇到任何问题,我们将提供帮助:请通过support@thegraph.com或[Discord](https://discord.gg/graphprotocol)与我们联系。 +\*Note that you must confirm the transfer within 7 days otherwise your stake may be lost. In most cases, this step will run automatically, but a manual confirmation may be needed if there is a gas price spike on Arbitrum. If there are any issues during this process, there will be resources to help: contact support at support@thegraph.com or on [Discord](https://discord.gg/graphprotocol). -## 如果我在以太坊主网上有未完成的分配,我的奖励会怎样? +### 我所有的质押都会转移吗 -如果你将委托转移到 Arbitrum 时,你将放弃来自以太坊主网上的未完成分配的委托奖励。这意味着你将失去最多最近28天的奖励。如果你在索引人关闭分配后的合适时间转移,你可以确保损失的奖励最少。如果你与你的索引人保持沟通,可以考虑与他们讨论以找到最佳转移时间。 +您可以选择转移多少质押。如果您选择一次性转移所有质押,您必须先关闭任何未完成的分配。 -## 如果我当前委托的索引人不在 Arbitrum One 上怎么办? +如果您计划在多个交易中转移部分质押,您必须始终指定相同的受益人地址。 -只有你委托的索引人将自己的质押转移到 Arbitrum 后,L2 转移工具才会启用。 +注意:首次使用转移工具时,您必须满足L2的最低质押要求。索引器在首次调用此函数时必须发送最低的100,000个GRT(当调用此函数时)。如果将部分质押留在L1上,它也必须超过100,000个GRT的最低要求,并足以(与您的委托一起)覆盖您的未完成分配。 -## 委托人是否可以选择委托给其他索引人? +### 确认质押转移至Arbitrum的时间限制是多久? -如果您希望将委派权转移给另一个索引人,您可以将其转移到 Arbitrum 上的同一索引人,然后取消委派并等待解冻期。在此之后,您可以选择另一个活跃的索引人进行委派。 +\*\*\*您必须确认交易以完成Arbitrum上的质押转移。此步骤必须在7天内完成,否则可能会丢失质押。 -## 如果我在 L2 上找不到要委派的索引人怎么办? +### 如果我有未完成的分配会怎么样? -L2 转移工具将自动检测您先前委派的索引人。 +如果您不发送全部质押,L2转移工具将验证以太坊主网上至少保留了最低的100,000个GRT,并且您的剩余质押和委托足以覆盖任何未完成的分配。如果您的GRT余额不足以支付最低要求+未完成的分配,您可能需要关闭未完成的分配。 -## 我是否可以将委派分散在新的或多个索引人上,而不是之前的索引人? +### 使用转移工具,在以太坊主网上进行转移之前,是否需要等待28天解除质押? -L2 转移工具将始终将您的委托转移到您先前委托的同一索引人。一旦您转移到 L2,您可以取消委托,等待解冻期,并决定是否要分散您的委托。 +不需要,在使用转移工具之前,你可以立即将你的质押转移到 L2(即 Arbitrum),无需解除质押并等待。28天的等待期仅适用于如果你想要将质押提取回你的钱包,无论是在以太坊主网还是 L2 上。 -## 在使用 L2 委托转移工具后,我是否需要等待冷却期才能提款,还是可以立即提款? +### 转移质押需要多长时间? 
-转移工具允许您立即转移到 L2。如果您想取消委托,您将需要等待解冻期。但是,如果一个指标者已经将他们的全部股份转移到 L2 上,您可以立即在以太坊主网上提款。 +转移质押的过程大约需要20分钟,L2 转移工具将完成质押的转移。 -## 如果我不转移委托是否会对我的奖励产生负面影响? +### 在转移质押之前,我是否需要在 Arbitrum 上进行索引(indexing)? -预计所有网络参与者将来都会转移到 Arbitrum One。 +你可以在转移质押之前先有效地进行转移,但在你在 L2 上分配和索引子图之前,你将无法领取 L2 上的任何奖励。要在 L2 上领取奖励,你需要在 L2 上分配子图、对其进行索引并提供 POI。 -## 完成委托转移到 L2 需要多长时间? +### 在我转移索引质押之前,委托人是否可以转移他们的委托? -委托转移需要 20 分钟的确认时间。请注意,在 20 分钟的时间结束后,您必须返回并在 7 天内完成转移过程的第三步。如果未能这样做,您的委托可能会丢失。大多数情况下,转移工具将自动完成此步骤。如果自动尝试失败,您将需要手动完成。如果在此过程中遇到任何问题,不要担心,我们会在这里提供帮助:通过电子邮件 (support email address) 或 [Discord](https://discord.gg/graphprotocol)上的 (channel) 渠道与我们联系。 +否,为了使委托人能够将他们的委托 GRT 转移到 Arbitrum,他们委托的索引人必须在 L2 上活动。 -## 如果我使用的是 GRT 质押合约/代币锁定钱包,我可以转移委托吗? +### 如果我使用 GRT 解锁合约(GRT vesting contract)/令牌锁定钱包,我可以转移质押吗? 可以!由于解锁合约(vesting contracts)无法转发用于支付 L2 交易费用的 ETH,所以流程略有不同,你需要事先存入所需的 ETH。如果你的解锁合约尚未完全解锁,你还需要在 L2 上先初始化一个对应的解锁合约,并且只能将质押转移到此 L2 解锁合约。Explorer 上的用户界面可以指导你在使用解锁钱包(vesting lock wallet)连接到 Explorer 时完成这个过程。 -## 是否需要支付任何委托税? +### I already have stake on L2. Do I still need to send 100k GRT when I use the transfer tools the first time? -不需要。在 L2 上收到的代币将以指定的委托人名义委托给指定的索引人,而无需收取委托税。 +​Yes. The L1 smart contracts will not be aware of your L2 stake, so they will require you to transfer at least 100k GRT when you transfer for the first time. ​ + +### Can I transfer my stake to L2 if I am in the process of unstaking GRT? + +​No. If any fraction of your stake is thawing, you have to wait the 28 days and withdraw it before you can transfer stake. The tokens that are being staked are "locked" and will prevent any transfers or stake to L2. ## 解锁合约转移 -## 如何转移我的解锁合约? +### 如何转移我的解锁合约? 要转移您的解锁合约,您需要完成以下步骤: @@ -222,7 +296,9 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 3. 在 Arbitrum 上确认解锁合约转移 -## 如果我只有部分解锁完成,如何转移我的解锁合约? +### 如果我只有部分解锁完成,如何转移我的解锁合约? + + 1. 向转移工具合约存入一些以太币(用户界面可以帮助估算合理的金额) @@ -232,7 +308,9 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 4. 从转移工具合约中提取剩余的以太币 -## 如果我的份额已经全部解锁,如何转移我的解锁合约? +### 如果我的份额已经全部解锁,如何转移我的解锁合约? + + 对于已完全完成解锁的用户,流程类似: @@ -244,7 +322,7 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 4. 从转移工具合约中提取剩余的以太币 -## 我可以将我的解锁合约转移到 Arbitrum 吗? +### 我可以将我的解锁合约转移到 Arbitrum 吗? 您可以将解锁合约的 GRT 余额转移到 L2 上的解锁合约。这是将您的解锁合约的质押或委托转移到 L2 的先决条件。解锁合约必须持有非零数量的 GRT(如果需要,您可以向其转移少量的 GRT,如 1 GRT)。 @@ -256,27 +334,27 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 如果您尚未将任何解锁合约余额转移到 L2 上,并且您的解锁合约已完全完成,您不应将解锁合约转移到 L2。相反,您可以使用转移工具设置一个 L2 钱包地址,并直接将您的质押或委托转移到 L2 上的常规钱包。 -## 我正在使用解锁合约在主网上质押。我可以将我的质押转移到 Arbitrum 吗? +### 我正在使用解锁合约在主网上质押。我可以将我的质押转移到 Arbitrum 吗? 是的,但如果您的合约仍在解锁中,您只能将质押转移到由您的 L2 解锁合约拥有的地址。您必须先通过 Explorer 上的解锁合约转移工具将一些 GRT 余额转移到 L2 来初始化此 L2 合约。如果您的合约已完全解锁,您可以将质押转移到 L2 中的任何地址,但您必须事先设置好,并存入一些 ETH 以支付 L2 转移工具的 L2 gas 费用。 -## 我在使用解锁合约在主网上委托。我可以将我的委托转移到 Arbitrum 吗? +### 我在使用解锁合约在主网上委托。我可以将我的委托转移到 Arbitrum 吗? 是的,但如果您的合约仍在解锁中,您只能将委托转移到由您的 L2 解锁合约拥有的地址。您必须先通过 Explorer 上的解锁合约转移工具将一些 GRT 余额转移到 L2 来初始化此 L2 合约。如果您的合约已完全解锁,您可以将委托转移到 L2 中的任何地址,但您必须事先设置好,并存入一些 ETH 以支付 L2 转移工具的 L2 gas 费用 -## 我可以为我的解锁合约在 L2 上指定不同的受益人吗? +### 我可以为我的解锁合约在 L2 上指定不同的受益人吗? 是的,在首次转移余额并设置您的 L2 解锁合约时,您可以指定一个 L2 受益人。请确保该受益人是一个可以在 Arbitrum One 上执行交易的钱包,即它必须是一个 EOA(外部所有者账户)或一个在 Arbitrum One 上部署的多签钱包。 如果您的合约已完全解锁,您将不会在 L2 上设置解锁合约;相反,您将设置一个 L2 钱包地址,该地址将接收您在 Arbitrum 上的质押或委托。 -## 我的合约已完全解锁。我可以将我的质押或委托转移到非 L2 解锁合约的其他地址吗? +### 我的合约已完全解锁。我可以将我的质押或委托转移到非 L2 解锁合约的其他地址吗? 是的。如果您尚未将任何解锁合约余额转移到 L2 上,并且您的解锁合约已完全解锁,您不应将解锁合约转移到 L2。相反,您可以使用转移工具设置一个 L2 钱包地址,并直接将您的质押或委托转移到 L2 上的常规钱包。 这样可以使您将质押或委托转移到 L2 上的任何地址。 -## 我的解锁合约仍在解锁中。如何将解锁合约余额转移到 L2? +### 我的解锁合约仍在解锁中。如何将解锁合约余额转移到 L2? 
以下步骤仅适用于您的合约仍在解锁中的情况,或者在您的合约仍在解锁中时使用此过程。 @@ -298,18 +376,36 @@ L2 转移工具将始终将您的委托转移到您先前委托的同一索引 \*\*\*\*请注意,您必须在7天内确认转移,否则您的子图可能会丢失。在大多数情况下,此步骤将自动运行,但如果Arbitrum的燃气价格飙升,则可能需要手动确认。如果在此过程中遇到任何问题,我们将提供帮助:请通过support@thegraph.com或[Discord](https://discord.gg/graphprotocol)与我们联系。 -## 我可以将我的解锁合约转移到 L1 吗? +### My vesting contract shows 0 GRT so I cannot transfer it, why is this and how do I fix it? + +​To initialize your L2 vesting contract, you need to transfer a nonzero amount of GRT to L2. This is required by the Arbitrum GRT bridge that is used by the L2 Transfer Tools. The GRT must come from the vesting contract's balance, so it does not include staked or delegated GRT. + +If you've staked or delegated all your GRT from the vesting contract, you can manually send a small amount like 1 GRT to the vesting contract address from anywhere else (e.g. from another wallet, or an exchange). ​ + +### I am using a vesting contract to stake or delegate, but I don't see a button to transfer my stake or delegation to L2, what do I do? + +​If your vesting contract hasn't finished vesting, you need to first create an L2 vesting contract that will receive your stake or delegation on L2. This vesting contract will not allow releasing tokens in L2 until the end of the vesting timeline, but will allow you to transfer GRT back to the L1 vesting contract to be released there. + +When connected with the vesting contract on Explorer, you should see a button to initialize your L2 vesting contract. Follow that process first, and you will then see the buttons to transfer your stake or delegation in your profile. ​ + +### If I initialize my L2 vesting contract, will this also transfer my delegation to L2 automatically? + +​No, initializing your L2 vesting contract is a prerequisite for transferring stake or delegation from the vesting contract, but you still need to transfer these separately. + +You will see a banner on your profile prompting you to transfer your stake or delegation after you have initialized your L2 vesting contract. + +### 我可以将我的解锁合约转移到 L1 吗? 没有必要这样做,因为您的解锁合约仍在 L1 上。当您使用转移工具时,您只是在 L2 上创建一个与您的 L1 解锁合约连接的新合约,您可以在两者之间自由发送 GRT。 -## 为什么我需要转移我的解锁合约? +### 为什么我需要转移我的解锁合约? 您需要设置一个 L2 解锁合约,以便该账户可以在 L2 上拥有您的质押或委托。否则,您将无法在不“逃离”解锁合约的情况下将质押/委托转移到 L2。 -## 如果我尝试在解锁未完全完成时兑现我的合约会发生什么?这可能吗? +### 如果我尝试在解锁未完全完成时兑现我的合约会发生什么?这可能吗? 这是不可能的。您可以将资金转回 L1 并在那里提取。 -## 如果我不想将我的解锁合约转移到 L2 怎么办? +### 如果我不想将我的解锁合约转移到 L2 怎么办? 
您可以继续在 L1 上进行质押/委托。随着协议在 Arbitrum 上的扩展和时间的推移,您可能希望考虑转移到 L2 以启用那里的奖励。请注意,这些转移工具适用于允许在协议中进行质押或委托的解锁合约。如果您的合约不允许质押或委托,或者可以撤销,则没有可用的转移工具。您仍然可以在可用时从 L1 提取 GRT。 diff --git a/website/pages/zh/arbitrum/l2-transfer-tools-guide.mdx b/website/pages/zh/arbitrum/l2-transfer-tools-guide.mdx index 8f49fcb0696a..bfdbc9013821 100644 --- a/website/pages/zh/arbitrum/l2-transfer-tools-guide.mdx +++ b/website/pages/zh/arbitrum/l2-transfer-tools-guide.mdx @@ -2,19 +2,19 @@ title: L2转移工具指南 --- -> L2转移工具尚未发布。预计它们将在2023年夏季提供。 - The Graph 使迁移到 Arbitrum One(L2) 上变得非常容易。对于每个协议参与者,都有一组 L2 转账工具,使所有网络参与者无缝地迁移到 L2。根据你要转移的内容,这些工具会要求你按照特定的步骤操作。 关于这些工具的一些常见问题在 L2 Transfer Tools FAQ(/arbitrum/l2-transfer-tools-faq) 中有详细解答。FAQ 中深入解释了如何使用这些工具、它们的工作原理以及在使用过程中需要注意的事项。 ## 如何将你的子图转移到 Arbitrum(L2) + + ## 将子图转移到 Arbitrum 的好处 过去一年里,Graph社区和核心开发人员一直在为迁移到 Arbitrum 做准备(https://forum.thegraph.com/t/gip-0031-arbitrum-grt-bridge/3305) 。Arbitrum 是一种二层网络或“L2”区块链,继承了以太坊的安全性,但提供了大幅降低的燃气费用。 -当你将子图发布或升级到Graph网络时,你正在与协议中的智能合约进行交互,这需要使用 ETH 支付燃气费。通过将你的子图转移到 Arbitrum,未来对你的子图进行的任何升级都将只需要较低的燃气费。较低的费用以及 L2 上平坦的收益率曲线使得其他策展人更容易策展你的子图,从而增加你的子图上的索引人的收益。这种更低成本的环境也使得索引人更便宜地进行子图索引和服务。随着未来几个月内,Arbitrum 上的索引人奖励增加,以太坊主网上的奖励减少,越来越多的索引人将会转移他们的份额在 L2 上。 +当您将子图发布或升级到The Graph Network时,您将与协议上的智能合约进行交互,这需要使用以太币(ETH)支付燃气费用。通过将您的子图迁移到Arbitrum,将来对您的子图进行的任何更新将需要更低的燃气费用。较低的费用以及L2网络上平滑的曲线,使其他策展人更容易在您的子图上进行策展,从而增加了在您的子图上的索引人的奖励。这种较低成本的环境还使得索引器更便宜地对您的子图进行索引和服务。在接下来的几个月里,Arbitrum上的索引奖励将增加,而以太坊主网上的索引奖励将减少,因此越来越多的索引器将会将他们的质押迁移到L2网络并在该网络上设置运营。 ## 理解信号、你的 L1 子图和查询 URL 的变化 @@ -30,7 +30,7 @@ The Graph 使迁移到 Arbitrum One(L2) 上变得非常容易。对于每个协 ## 选择你的 L2 钱包 -当你在主网上发布子图时,你使用一个连接的钱包创建子图,这个钱包拥有代表这个子图的 NFT,并允许你发布升级。 +当你在主网上发布子图时,你使用一个连接的钱包创建了子图,这个钱包拥有代表这个子图的 NFT,并允许你发布升级。 当将子图转移到 Arbitrum 时,你可以选择一个不同的钱包在 L2 上持有这个子图 NFT 。 diff --git a/website/pages/zh/billing.mdx b/website/pages/zh/billing.mdx index 80b65779ae18..9a7e15b9a612 100644 --- a/website/pages/zh/billing.mdx +++ b/website/pages/zh/billing.mdx @@ -37,8 +37,12 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 ### 使用加密钱包添加GRT + + > 本节假设您的加密钱包中已经有GRT,并且您在以太坊主网上。如果你没有GRT,你可以在[这里](#getting-grt)学习如何获得GRT。 +For a video walkthrough of adding GRT to your billing balance using a crypto wallet, watch this [video](https://youtu.be/4Bw2sh0FxCg). + 1. 转到[Subgraph Studio计费页面](https://thegraph.com/studio/billing/)。 2. 单击页面右上角的“Connect Wallet”(连接钱包)按钮。您将被重定向到钱包选择页面。选择您的钱包,然后单击“Connect”(连接)。 @@ -71,6 +75,8 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 ### 使用多签钱包添加GRT + + 1. 转到[Subgraph Studio计费页面](https://thegraph.com/studio/billing/)。 2. 单击页面右上角的“Connect Wallet”(连接钱包)按钮。选择您的钱包,然后单击“Connect”(连接)。如果您正在使用 [Gnosis-Safe](https://gnosis-safe.io/),那么您将能够连接您的 multisig 和您的签名钱包。然后,在相关消息上签名。这不会花费任何gas。 @@ -97,11 +103,11 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 ## 获取GRT -本节将向您展示如何让GRT支付查询费用。 +This section will show you how to get GRT to pay for query fees. ### Coinbase -这将是在Coinbase上购买GRT的分步指南。 +This will be a step by step guide for purchasing GRT on Coinbase. 1. 转到[Coinbase](https://www.coinbase.com/)并创建帐户。 2. 创建账户后,您需要通过KYC(或了解您的客户)流程验证您的身份。这是所有中心化或托管加密交易所的标准程序。 @@ -117,11 +123,11 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 - 输入您要发送的GRT金额和您要发送到的钱包地址。 - 单击“继续”并确认您的交易。-请注意,对于较大的购买金额,Coinbase可能需要您等待7-10天,然后才能将全部金额转移到加密钱包。 -您可以在[这里](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency)了解更多关于在Coinbase上获取GRT的信息。 +You can learn more about getting GRT on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). 
### Binance -这将是在Binance上购买GRT的分步指南。 +This will be a step by step guide for purchasing GRT on Binance. 1. 转到[Binance](https://www.binance.com/en)并创建帐户。 2. 创建账户后,您需要通过KYC(或了解您的客户)流程验证您的身份。这是所有中心化或托管加密交易所的标准程序。 @@ -137,11 +143,11 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 - 输入您要发送的GRT金额和您要发送到的白名单钱包地址。 - 单击“继续”并确认您的交易。 -您可以在[这里](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582)了解更多关于在Binance上获取GRT的信息。 +You can learn more about getting GRT on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ### Uniswap -这是您在Uniswap上购买GRT的方式。 +This is how you can purchase GRT on Uniswap. 1. 转到[Uniswap](https://app.uniswap.org/#/swap)并连接您的钱包。 2. 选择要从中交换的代币。选择ETH。 @@ -151,8 +157,52 @@ Banxa使您能够绕过兑换的需要,使用您选择的法定货币支付查 5. 单击“交换”。 6. 确认钱包中的交易,然后等待交易处理。 -您可以在[这里](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-)了解有关在Uniswap上获取GRT的更多信息。 +You can learn more about getting GRT on Uniswap [here](https://support.uniswap.org/hc/en-us/articles/8370549680909-How-to-Swap-Tokens-). + +## Getting Ethereum + +This section will show you how to get Ethereum (ETH) to pay for transaction fees or gas costs. ETH is necessary to execute operations on the Ethereum network such as transferring tokens or interacting with contracts. + +### Coinbase + +This will be a step by step guide for purchasing ETH on Coinbase. + +1. 转到[Coinbase](https://www.coinbase.com/)并创建帐户。 +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy/Sell" button on the top right of the page. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will have successfully purchased ETH. +9. You can transfer the ETH from your Coinbase account to your crypto wallet such as [MetaMask](https://metamask.io/). + - To transfer the ETH to your crypto wallet, click on the "Accounts" button on the top right of the page. + - Click on the "Send" button next to the ETH account. + - Enter the amount of ETH you want to send and the wallet address you want to send it to. + - 单击“继续”并确认您的交易。 + +You can learn more about getting ETH on Coinbase [here](https://help.coinbase.com/en/coinbase/trading-and-funding/buying-selling-or-converting-crypto/how-do-i-buy-digital-currency). + +### Binance + +This will be a step by step guide for purchasing ETH on Binance. + +1. 转到[Binance](https://www.binance.com/en)并创建帐户。 +2. Once you have created an account, verify your identity through a process known as KYC (or Know Your Customer). This is a standard procedure for all centralized or custodial crypto exchanges. +3. Once you have verified your identity, purchase ETH by clicking on the "Buy Now" button on the homepage banner. +4. Select the currency you want to purchase. Select ETH. +5. Select your preferred payment method. +6. Enter the amount of ETH you want to purchase. +7. Review your purchase and click "Buy ETH". +8. Confirm your purchase and you will see your ETH in your Binance Spot Wallet. +9. You can withdraw the ETH from your account to your crypto wallet such as [MetaMask](https://metamask.io/). 
+ - To withdraw the ETH to your crypto wallet, add your crypto wallet's address to the withdrawal whitelist. + - Click on the "wallet" button, click withdraw, and select ETH. + - Enter the amount of ETH you want to send and the whitelisted wallet address you want to send it to. + - 单击“继续”并确认您的交易。 + +You can learn more about getting ETH on Binance [here](https://www.binance.com/en/support/faq/how-to-buy-cryptocurrency-on-binance-homepage-400c38f5e0cd4b46a1d0805c296b5582). ## Arbitrum 跨链桥 -计费合约仅用于将GRT从以太坊主网桥接到Arbitrum网络。如果您想将您的GRT从Arbitrum转移回以太坊主网,您需要使用[Arbitrum跨链桥](https://bridge.arbitrum.io/?l2ChainId=42161)。 +The billing contract is only designed to bridge GRT from Ethereum mainnet to the Arbitrum network. If you'd like to transfer your GRT from Arbitrum back to Ethereum mainnet, you'll need to use the [Arbitrum Bridge](https://bridge.arbitrum.io/?l2ChainId=42161). diff --git a/website/pages/zh/chain-integration-overview.mdx b/website/pages/zh/chain-integration-overview.mdx new file mode 100644 index 000000000000..2fe6c2580909 --- /dev/null +++ b/website/pages/zh/chain-integration-overview.mdx @@ -0,0 +1,49 @@ +--- +title: Chain Integration Process Overview +--- + +A transparent and governance-based integration process was designed for blockchain teams seeking [integration with The Graph protocol](https://forum.thegraph.com/t/gip-0057-chain-integration-process/4468). It is a 3-phase process, as summarised below. + +## Stage 1. Technical Integration + +- Teams work on a Graph Node integration and Firehose for non-EVM based chains. [Here's how](/new-chain-integration/). +- Teams initiate the protocol integration process by creating a Forum thread [here](https://forum.thegraph.com/c/governance-gips/new-chain-support/71) (New Data Sources sub-category under Governance & GIPs). Using the default Forum template is mandatory. + +## Stage 2. Integration Validation + +- Teams collaborate with core developers, Graph Foundation and operators of GUIs and network gateways, such as [Subgraph Studio](https://thegraph.com/studio/), to ensure a smooth integration process. This involves providing the necessary backend infrastructure, such as the integrating chain's JSON RPC or Firehose endpoints. Teams wanting to avoid self-hosting such infrastructure can leverage The Graph's community of node operators (Indexers) to do so, which the Foundation can help with. +- Graph Indexers test the integration on The Graph's testnet. +- Core developers and Indexers monitor stability, performance, and data determinism. + +## Stage 3. Mainnet Integration + +- Teams propose mainnet integration by submitting a Graph Improvement Proposal (GIP) and initiating a pull request (PR) on the [feature support matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) (more details on the link). +- The Graph Council reviews the request and approves mainnet support, providing a successful Stage 2 and positive community feedback. + +--- + +If the process looks daunting, don't worry! The Graph Foundation is committed to supporting integrators by fostering collaboration, offering essential information, and guiding them through various stages, including navigating governance processes such as Graph Improvement Proposals (GIPs) and pull requests. If you have questions, please reach out to [info@thegraph.foundation](mailto:info@thegraph.foundation) or through Discord (either Pedro, The Graph Foundation member, IndexerDAO, or other core developers). + +Ready to shape the future of The Graph Network? 
[Start your proposal](https://github.com/graphprotocol/graph-improvement-proposals/blob/main/gips/0057-chain-integration-process.md) now and be a part of the web3 revolution! + +--- + +## Frequently Asked Questions + +### 1. How does this relate to the [World of Data Services GIP](https://forum.thegraph.com/t/gip-0042-a-world-of-data-services/3761)? + +This process is related to the Subgraph Data Service, applicable only to new Subgraph `Data Sources`. + +### 2. What happens if Firehose & Substreams support comes after the network is supported on mainnet? + +This would only impact protocol support for indexing rewards on Substreams-powered subgraphs. The new Firehose implementation would need testing on testnet, following the methodology outlined for Stage 2 in this GIP. Similarly, assuming the implementation is performant and reliable, a PR on the [Feature Support Matrix](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md) would be required (`Substreams data sources` Subgraph Feature), as well as a new GIP for protocol support for indexing rewards. Anyone can create the PR and GIP; the Foundation would help with Council approval. + +### 3. How much time will this process take? + +The time to mainnet is expected to be several weeks, varying based on the time of integration development, whether additional research is required, testing and bug fixes, and, as always, the timing of the governance process that requires community feedback. + +Protocol support for indexing rewards depends on the stakeholders' bandwidth to proceed with testing, feedback gathering, and handling contributions to the core codebase, if applicable. This is directly tied to the integration's maturity and how responsive the integration team is (who may or may not be the team behind the RPC/Firehose implementation). The Foundation is here to help support throughout the whole process. + +### 4. How will priorities be handled? + +Similar to #3, it will depend on overall readiness and involved stakeholders' bandwidth. For example, a new chain with a brand new Firehose implementation may take longer than integrations that have already been battle-tested or are farther along in the governance process. This is especially true for chains previously supported on the [hosted service](https://thegraph.com/hosted-service) or those relying on already tested stacks. diff --git a/website/pages/zh/cookbook/arweave.mdx b/website/pages/zh/cookbook/arweave.mdx index 32df4448ed7e..24ba246385a5 100644 --- a/website/pages/zh/cookbook/arweave.mdx +++ b/website/pages/zh/cookbook/arweave.mdx @@ -2,7 +2,7 @@ title: 在 Arweave 上构建子图 --- -> Graph 节点和托管服务中对Arweave的支持目前处于测试阶段:任何有关构建 Arweave 子图的任何问题,请联系Arweave@https://discord.gg/graphprotocol! +> Arweave support in Graph Node and on the hosted service is in beta: please reach us on [Discord](https://discord.gg/graphprotocol) with any questions about building Arweave subgraphs! 在本指南中,您将学习如何构建和部署子图以索引Arweave区块链。 @@ -83,7 +83,7 @@ dataSources: ``` - Arweave子图引入了一种新的数据源(`arweave`) -- 网络应该对应于托管Graph节点上的网络。在托管服务上,Arweave 的主网是 `Arweave-mainnet` +- The network should correspond to a network on the hosting Graph Node. On the hosted service, Arweave's mainnet is `arweave-mainnet` - Arweave 数据源引入了一个可选的 source. 
owner 字段,它是 Arweave 钱包的公钥 Arweave 数据源支持两种类型的处理程序: @@ -150,9 +150,9 @@ class Transaction { 写 Arweave 子图的映射与写 Etherum 子图的映射非常相似。了解更多信息,请点击[这里](/developing/creating-a-subgraph/#writing-mappings)。 -## 在托管服务上部署 Arweave 子图 +## Deploying an Arweave Subgraph on the hosted service -一旦您的子图已经在托管服务仪表板上创建,您就可以通过使用`graph deploy` CLI 命令进行部署。 +Once your subgraph has been created on the hosted service dashboard, you can deploy by using the `graph deploy` CLI command. ```bash graph deploy --node https://api.thegraph.com/deploy/ --ipfs https://api.thegraph.com/ipfs/ --access-token diff --git a/website/pages/zh/cookbook/grafting.mdx b/website/pages/zh/cookbook/grafting.mdx index 1ff8a232a5fa..72b75587b4a4 100644 --- a/website/pages/zh/cookbook/grafting.mdx +++ b/website/pages/zh/cookbook/grafting.mdx @@ -24,6 +24,22 @@ title: 用嫁接替换合约并保持合约的历史 在本教程中,我们将介绍一个基本用例。我们将用一个相同的合约(用一个新的地址,但相同的代码) 替换现有的合约。然后,将现有的子图移植到跟踪新合约的基本子图上。 +## Important Note on Grafting When Upgrading to the Network + +> **Caution**: if you are upgrading your subgraph from Subgraph Studio or the hosted service to the decentralized network, it is strongly recommended to avoid using grafting during the upgrade process. + +### Why Is This Important? + +Grafting is a powerful feature that allows you to "graft" one subgraph onto another, effectively transferring historical data from the existing subgraph to a new version. While this is an effective way to preserve data and save time on indexing, grafting may introduce complexities and potential issues when migrating from a hosted environment to the decentralized network. It is not possible to graft a subgraph from The Graph Network back to the hosted service or Subgraph Studio. + +### Best Practices + +**Initial Migration**: when you first deploy your subgraph to the decentralized network, do so without grafting. Ensure that the subgraph is stable and functioning as expected. + +**Subsequent Updates**: once your subgraph is live and stable on the decentralized network, you may use grafting for future versions to make the transition smoother and to preserve historical data. + +By adhering to these guidelines, you minimize risks and ensure a smoother migration process. + ## 构建现有子图 构建子图是Graph的重要组成部分,我们在[此文](http://localhost:3000/en/cookbook/quick-start/)进行更深入的描述。为了能够构建和部署本教程中使用的现有子图,提供了以下存储库: diff --git a/website/pages/zh/cookbook/near.mdx b/website/pages/zh/cookbook/near.mdx index eec19fe3220d..b09482f12fb8 100644 --- a/website/pages/zh/cookbook/near.mdx +++ b/website/pages/zh/cookbook/near.mdx @@ -277,7 +277,7 @@ NEAR 子图尚不支持挂起的功能。 在此期间,您可以将新版本 ### 我的问题尚未得到解答,在哪里可以获得更多构建 NEAR 子图的帮助? -如果这是一个关于子图开发的一般性问题,那么在 [开发者文档](/cookbook/quick-start)的其余部分中会有更多的信息。否则,请加入[Graph 协议的Discord](https://discord.gg/graphprotocol),并在 # near 频道或发邮件到 near@thegraph. com 询问。 +If it is a general question about subgraph development, there is a lot more information in the rest of the [Developer documentation](/quick-start). Otherwise please join [The Graph Protocol Discord](https://discord.gg/graphprotocol) and ask in the #near channel or email near@thegraph.com. 
## 参考 diff --git a/website/pages/zh/cookbook/substreams-powered-subgraphs.mdx b/website/pages/zh/cookbook/substreams-powered-subgraphs.mdx index 29b227bd1bd3..5f6fc7ca5a45 100644 --- a/website/pages/zh/cookbook/substreams-powered-subgraphs.mdx +++ b/website/pages/zh/cookbook/substreams-powered-subgraphs.mdx @@ -2,7 +2,7 @@ title: 基于Substreams的子图 --- -[Substreams](/substreams) 是由StreamingFast为The Graph Network开发的一种处理区块链数据的新框架。Substreams模块可以输出与Subgraph实体兼容的实体变更。子图可以将这样的Substreams模块作为数据源,将Substreams的索引速度和附加数据带给子图开发者。 +[Substreams](/substreams) is a new framework for processing blockchain data, developed by StreamingFast for The Graph Network. A substreams modules can output entity changes, which are compatible with Subgraph entities. A subgraph can use such a Substreams module as a data source, bringing the indexing speed and additional data of Substreams to subgraph developers. ## 要求 @@ -22,7 +22,7 @@ graph init --from-example substreams-powered-subgraph ## 定义Substreams包 -Substreams(子流)包由类型(定义为 [Protocol Buffers](https://protobuf.dev/))、模块(用 Rust 编写)和一个 `substreams.yaml` 文件组成,该文件引用类型并指定模块的触发方式。如果您想了解更多关于 Substreams 开发的信息,请访问 [Substreams 文档](/substreams)。另外,您还可以查看 [awesome-substreams](https://github.com/pinax-network/awesome-substreams) 和 [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) 来获取更多示例。 +A Substreams package is composed of types (defined as [Protocol Buffers](https://protobuf.dev/)), modules (written in Rust), and a `substreams.yaml` file which references the types, and specifies how modules are triggered. [Visit the Substreams documentation to learn more about Substreams development](/substreams), and check out [awesome-substreams](https://github.com/pinax-network/awesome-substreams) and the [Substreams cookbook](https://github.com/pinax-network/substreams-cookbook) for more examples. 该Substreams包可以检测以太坊主网上的合约部署,并跟踪所有新部署合约的创建块和时间戳。为此,在/proto/example.proto中有一个专门的Contract类型(了解更多关于定义Protocol Buffers的信息[learn more about defining Protocol Buffers](https://protobuf.dev/programming-guides/proto3/#simple)): diff --git a/website/pages/zh/cookbook/upgrading-a-subgraph.mdx b/website/pages/zh/cookbook/upgrading-a-subgraph.mdx index 3d123bc507c7..425c485c29c1 100644 --- a/website/pages/zh/cookbook/upgrading-a-subgraph.mdx +++ b/website/pages/zh/cookbook/upgrading-a-subgraph.mdx @@ -11,7 +11,7 @@ title: 将现有子图升级到Graph网络 ### 先决条件 - 您已经在托管服务上部署了子图。 -- 该子图正在索引Graph网络上支持的链(或测试版中支持的链)。 +- The subgraph is indexing a chain available on The Graph Network. - 您钱包里有ETH去在链上发布子图 - 您有10,000GRT去策展你的子图使得索引人可以索引子图 @@ -80,7 +80,7 @@ graph deploy --studio 就是这样!在您完成发布之后,您将能够通过Graph浏览器(https://thegraph.com/explorer)在去中心化的网络上实时查看您的子图。 -敬请使用Discord 上的 #Curators 频道 (https://discord.gg/s5HfGMXmbW) 让策展人知道您的子图已准备好发出信号。 如果您与他们分享您的预期查询量,这也会很有帮助。 因此,他们可以估计他们应该在您的子图上发出多少 GRT。 +Feel free to leverage the [#Curators channel](https://discord.gg/s5HfGMXmbW) on Discord to let Curators know that your subgraph is ready to be signaled. It would also be helpful if you share your expected query volume with them. Therefore, they can estimate how much GRT they should signal on your subgraph. 
### 创建一个API密钥 @@ -116,7 +116,7 @@ graph deploy --studio ### 在去中心化网络上查询子图 -现在,您可以在Graph浏览器中检查网络上的索引人的索引状态(这里的示例)。顶部的绿线表示在发布8个索引人时,成功索引该子图。另外,在索引人选项卡中,您可以看到哪些索引人选择了您的子图。 +现在,您可以在Graph浏览器中检查网络上的索引人的索引状态(这里的示例)。顶部的绿线表示在发布8个索引人时,成功索引该子图。另外,在索引人选项卡中,您可以看到哪些索引人选择了您的子图。 ![Rocket Pool subgraph](/img/rocket-pool-subgraph.png) @@ -174,7 +174,7 @@ graph deploy --studio ### 维护子图的稳定版本 -如果要对子图进行大量更改,那么不断升级子图并预付升级费用就不是一个好主意。维护您子图版本的稳定和一致是至关重要的,不仅从成本的角度来看,而且这样索引人可以对他们的同步时间有把握。在计划升级时,应该标记索引人,以便索引人同步时间不受影响。请随意利用Discord上的#Indexers channel,以便您对您的子图进行版本控制时让索引人知悉。 +If you're making a lot of changes to your subgraph, it is not a good idea to continually update it and front the update costs. Maintaining a stable and consistent version of your subgraph is critical, not only from the cost perspective but also so that Indexers can feel confident in their syncing times. Indexers should be flagged when you plan for an update so that Indexer syncing times do not get impacted. Feel free to leverage the [#Indexers channel](https://discord.gg/JexvtHa7dq) on Discord to let Indexers know when you're versioning your subgraphs. 子图是外部开发人员正在利用的开放 API。 开放 API 需要遵循严格的标准,以免破坏外部开发人员的应用程序。 在 Graph网络中,子图开发人员必须考虑索引人以及同步新子图、使用子图的其他开发人员需要多长时间。 diff --git a/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx b/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx index 39a1a3e6dcb1..7b02c5b7534a 100644 --- a/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx +++ b/website/pages/zh/deploying/deploying-a-subgraph-to-studio.mdx @@ -2,7 +2,7 @@ title: 将子图部署到子图工作室 --- -> 确保您子图从中索引数据的网络在去中心化网络上受[支持](/developing/supported-chains)。 +> Learn how you can deploy non rate-limited subgraphs to Subgraph Studio [here](https://www.notion.so/edgeandnode/The-Graph-Subgraph-Studio-Non-Rate-Limited-Chain-Integration-889fe061ee6b4423a7f8e2c8070b9294). 以下是将子图部署到子图工作室的步骤: diff --git a/website/pages/zh/deploying/hosted-service.mdx b/website/pages/zh/deploying/hosted-service.mdx index 63fdfcb085c8..59ea9c75df0e 100644 --- a/website/pages/zh/deploying/hosted-service.mdx +++ b/website/pages/zh/deploying/hosted-service.mdx @@ -46,6 +46,17 @@ graph init --from-example --product hosted-service / 示例子图基于 Dani Grant 的 Gravity 合约,该合约管理用户头像,并在创建或更新头像时发出 `NewGravatar` 或 `UpdateGravatar` 事件。子图通过将 `Gravatar` 实体写入 Graph 节点存储并确保根据事件更新它们来处理这些事件。继续查看[子图清单](/developing/creating-a-subgraph#the-subgraph-manifest),以便更好地理解智能合约中需要关注的事件、映射等等。 +### From a Proxy Contract + +To build a subgraph tailored for monitoring a Proxy contract, initialize the subgraph by specifying the address of the implementation contract. Once the initialization process is concluded, the last step involves updating the network name in the subgraph.yaml file to the address of the Proxy contract. You can use the command below. + +```sh +graph init \ + --product hosted-service + --from-contract \ + / [] +``` + ## 托管服务支持的网络 您可以在[这里](/developing/supported-networks)找到支持的网络列表。 diff --git a/website/pages/zh/deploying/subgraph-studio.mdx b/website/pages/zh/deploying/subgraph-studio.mdx index 5c0f489bd57d..081239f7347c 100644 --- a/website/pages/zh/deploying/subgraph-studio.mdx +++ b/website/pages/zh/deploying/subgraph-studio.mdx @@ -30,15 +30,9 @@ title: 如何使用子图工作室 1. 使用您的钱包登录 - 您可以通过 MetaMask 或 WalletConnect 进行此操作 1. 一旦您登录,您将在您的账户主页上看到您唯一的部署密钥。这将允许您发布您的子图或管理您的 API 密钥 + 计费。您将拥有一个惟一的部署密钥,如果您认为该密钥已被破坏,则可以重新生成该密钥。 -## 如何在子图工作室中创建子图 +## How to Create a Subgraph in Subgraph Studio -最好的部分! 当您第一次创建子图时,您将被指示填写: - -- 您的子图名称 -- 图片 -- 描述 -- 类别 (e.g. 
`DeFi`, `NFTs`, `Governance`) -- 网站 + ## 子图与图形网络的兼容性 diff --git a/website/pages/zh/developing/creating-a-subgraph.mdx b/website/pages/zh/developing/creating-a-subgraph.mdx index c3667f09cdd3..7f005360b4d2 100644 --- a/website/pages/zh/developing/creating-a-subgraph.mdx +++ b/website/pages/zh/developing/creating-a-subgraph.mdx @@ -98,7 +98,7 @@ Options: ```yaml specVersion: 0.0.4 description: Gravatar for Ethereum -repository: https://github.com/graphprotocol/example-subgraphs +repository: https://github.com/graphprotocol/graph-tooling schema: file: ./schema.graphql dataSources: @@ -109,6 +109,14 @@ dataSources: address: '0x2E645469f354BB4F5c8a05B3b30A929361cf77eC' abi: Gravity startBlock: 6175244 + endBlock: 7175245 + context: + foo: + type: Bool + data: true + bar: + type: String + data: 'bar' mapping: kind: ethereum/events apiVersion: 0.0.6 @@ -136,7 +144,7 @@ dataSources: 清单中要更新的重要条目是: -- `description`:关于子图是什么的人类可读的描述。 当子图部署到托管服务时,Graph 浏览器 会显示此描述。 +- `description`: a human-readable description of what the subgraph is. This description is displayed by the Graph Explorer when the subgraph is deployed to the hosted service. - `repository`:可以找到子图清单的存储库的 URL。 这也由 Graph 浏览器显示。 @@ -146,6 +154,10 @@ dataSources: - `dataSources.source.startBlock`:数据源开始索引的区块的可选编号。 在大多数情况下,我们建议使用创建合约的区块。 +- `dataSources.source.endBlock`: The optional number of the block that the data source stops indexing at, including that block. Minimum spec version required: `0.0.9`. + +- `dataSources.context`: key-value pairs that can be used within subgraph mappings. Supports various data types like `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. Each variable needs to specify its `type` and `data`. These context variables are then accessible in the mapping files, offering more configurable options for subgraph development. + - `dataSources.mapping.entities`:数据源写入存储的实体。 每个实体的模式在 schema.graphql 文件中定义。 - `dataSources.mapping.abis`:源合约以及您在映射中与之交互的任何其他智能合约的一个或多个命名 ABI 文件。 @@ -242,6 +254,7 @@ Null value resolved for non-null field 'name' | `字符串` | `string` 值的标量。 不支持空字符,并会自动进行删除。 | | `Boolean` | `boolean` 值的标量。 | | `Int` | GraphQL 规范将 `Int` 定义为 32 字节的大小。 | +| `Int8` | An 8-byte signed integer, also known as a 64-bit signed integer, can store values in the range from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807. Prefer using this to represent `i64` from ethereum. | | `BigInt` | 大整数。 用于以太坊的 `uint32`、`int64`、`uint64`、...、`uint256` 类型。 注意:`uint32`以下的所有类型,例如`int32`、`uint24`或`int8`都表示为`i32`。 | | `BigDecimal` | `BigDecimal` 表示为有效数字和指数的高精度小数。 指数范围是 -6143 到 +6144。 四舍五入到 34 位有效数字。 | @@ -770,6 +783,8 @@ export function handleCreateGravatar(call: CreateGravatarCall): void { ### 支持的过滤器 +#### Call Filter + ```yaml filter: kind: call @@ -806,6 +821,45 @@ dataSources: kind: call ``` +#### Polling Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Polling filters are only available on dataSources of `kind: ethereum`. + +```yaml +blockHandlers: + - handler: handleBlock + filter: + kind: polling + every: 10 +``` + +The defined handler will be called once for every `n` blocks, where `n` is the value provided in the `every` field. This configuration allows the subgraph to perform specific operations at regular block intervals. + +#### Once Filter + +> **Requires `specVersion` >= 0.0.8** + +> **Note:** Once filters are only available on dataSources of `kind: ethereum`. 
+ +```yaml +blockHandlers: + - handler: handleOnce + filter: + kind: once +``` + +The defined handler with the once filter will be called only once before all other handlers run. This configuration allows the subgraph to use the handler as an initialization handler, performing specific tasks at the start of indexing. + +```ts +export function handleOnce(block: ethereum.Block): void { + let data = new InitialData(Bytes.fromUTF8('initial')) + data.data = 'Setup data here' + data.save() +} +``` + ### 映射函数 映射函数将接收 `ethereum.Block` 作为其唯一参数。 与事件的映射函数一样,此函数可以访问存储中现有的子图实体、调用智能合约、以及创建或更新实体。 @@ -934,6 +988,8 @@ _meta { ### 嫁接到现有子图 +> **Note:** it is not recommended to use grafting when initially upgrading to The Graph Network. Learn more [here](/cookbook/grafting/#important-note-on-grafting-when-upgrading-to-the-network). + 首次部署子图时,它会在相应链的启动区块(或每个数据源定义的 `startBlock` 处)开始索引事件。在某些情况下,可以使用现有子图已经索引的数据并在更晚的区块上开始索引。 这种索引模式称为*Grafting*。 例如,嫁接在开发过程中非常有用,可以快速克服映射中的简单错误,或者在现有子图失败后暂时恢复工作。 当 `subgraph.yaml` 中的子图清单在顶层包含 `graft` 区块时,子图被嫁接到基础子图: @@ -963,7 +1019,7 @@ graft: ## 文件数据源 -文件数据源是一种新的子图功能,用于在索引过程中以一种有力、可扩展的方式访问链外数据,从IPFS开始。 +File data sources are a new subgraph functionality for accessing off-chain data during indexing in a robust, extendable way. File data sources support fetching files from IPFS and from Arweave. > 这也为链外数据的确定性索引以及引入任意HTTP源数据奠定了基础。 @@ -975,7 +1031,7 @@ graft: > 这将替换现有的`ipfs.cat` API -### Upgrade guide +### 升级指南 #### 更新`graph-ts`和`graph-cli` @@ -1030,7 +1086,7 @@ type TokenMetadata @entity { > 可以使用[嵌套](https://thegraph.com/docs/en/querying/graphql-api/#example-for-nested-entity-filtering)过滤器根据这些嵌套实体过滤母实体。 -#### 添加新的模板化数据源,`类型为:file/ipfs` +#### Add a new templated data source with `kind: file/ipfs` or `kind: file/arweave` 这是在识别出感兴趣的文件时生成的数据源。 @@ -1096,9 +1152,11 @@ export function handleMetadata(content: Bytes): void { 现在,您可以在执行基于链的处理程序期间创建文件数据源: - 从自动生成的`模板`导入模板 -- 从映射中调用`TemplateName.create(cid:string)`,其中cid是有效的IPFS内容标识符 +- call `TemplateName.create(cid: string)` from within a mapping, where the cid is a valid content identifier for IPFS or Arweave + +For IPFS, Graph Node supports [v0 and v1 content identifiers](https://docs.ipfs.tech/concepts/content-addressing/), and content identifers with directories (e.g. `bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`). -> 当前Graph Node支持[v0和v1内容标识符](https://docs.ipfs.tech/concepts/content-addressing/),以及带有目录的内容标识符(例如`bafyreighykzv2we26wfrbzkcdw37sbrby4upq7ae3aqobbq7i4er3tnxci/metadata.json`) +For Arweave, as of version 0.33.0 Graph Node can fetch files stored on Arweave based on their [transaction ID](https://docs.arweave.org/developers/server/http-api#transactions) from an Arweave gateway ([example file](https://bdxujjl5ev5eerd5ouhhs6o4kjrs4g6hqstzlci5pf6vhxezkgaa.arweave.net/CO9EpX0lekJEfXUOeXncUmMuG8eEp5WJHXl9U9yZUYA)). Arweave supports transactions uploaded via Bundlr, and Graph Node can also fetch files based on [Bundlr manifests](https://docs.bundlr.network/learn/gateways#indexing). 例子: @@ -1129,7 +1187,7 @@ export function handleTransfer(event: TransferEvent): void { } ``` -这将创建一个新的文件数据源,该数据源将轮询 Graph节点配置的 IPFS 端点,如果找不到,则重试。找到文件后,将执行文件数据源处理程序。 +This will create a new file data source, which will poll Graph Node's configured IPFS or Arweave endpoint, retrying if it is not found. When the file is found, the file data source handler will be executed. 
此示例使用 CID 作为母 `Token` 实体和生成的 `TokenMetadata` 实体之间的查找。 diff --git a/website/pages/zh/developing/developer-faqs.mdx b/website/pages/zh/developing/developer-faqs.mdx index 530b60ec61fa..3e89710c94dd 100644 --- a/website/pages/zh/developing/developer-faqs.mdx +++ b/website/pages/zh/developing/developer-faqs.mdx @@ -125,18 +125,14 @@ someCollection(first: 1000, skip: ) { ... } 目前,推荐的 dapp 方法是将密钥添加到前端并将其公开给最终用户。 也就是说,您可以将该键限制为主机名,例如 _yourdapp.io_ 和子图。 网关目前由 Edge & Node 运营。 网关的部分职责是监控滥用行为,并阻止来自恶意客户端的流量。 -## 25. 在哪里可以找到托管服务上我的当前子图? +## 25. Where do I go to find my current subgraph on the hosted service? 请前往托管服务,查找您或其他人部署到托管服务的子图。 您可以在[这里](https://thegraph.com/hosted-service)找到托管服务。 -## 26. 托管服务会开始收取查询费用吗? +## 26. Will the hosted service start charging query fees? Graph 永远不会对托管服务收费。 Graph 是一个去中心化的协议,中心化服务的收费与 Graph 的价值观不一致。 托管服务始终是帮助进入去中心化网络的临时步骤。 开发人员将有足够的时间在他们适宜时迁移到去中心化网络。 -## 27. 托管服务何时关闭? - -托管服务将于2023年第一季度关闭。请[在此处](https://thegraph.com/blog/sunsetting-hosted-service)阅读公告博客文章。鼓励所有使用托管服务的dapp迁移到去中心化网络。迁移补助金可用于开发人员帮助迁移其子图。如果您的dapp正在迁移子图,您可以在[这里](https://thegraph.typeform.com/to/Zz8UAPri?typeform-source=thegraph.com)应用。 - -## 28. 如何升级主网上的子图? +## 27. How do I update a subgraph on mainnet? 如果您是子图开发人员,您可以使用 CLI 将新版本的子图升级到工作室。 届时子图将是私有的,但如果您对它感到满意,您可以发布到去中心化的 Graph浏览器。 这将创建一个新版本的子图,策展人可以开始对其发出信号。 diff --git a/website/pages/zh/developing/graph-ts/api.mdx b/website/pages/zh/developing/graph-ts/api.mdx new file mode 100644 index 000000000000..d67a78af0456 --- /dev/null +++ b/website/pages/zh/developing/graph-ts/api.mdx @@ -0,0 +1,853 @@ +--- +title: AssemblyScript API +--- + +> Note: if you created a subgraph prior to `graph-cli`/`graph-ts` version `0.22.0`, you're using an older version of AssemblyScript, we recommend taking a look at the [`Migration Guide`](/release-notes/assemblyscript-migration-guide) + +此页面记录了编写子图映射时可以使用的内置 API。有两种开箱即用的 API: + +- the [Graph TypeScript library](https://github.com/graphprotocol/graph-ts) (`graph-ts`) and +- code generated from subgraph files by `graph codegen`. + +It is also possible to add other libraries as dependencies, as long as they are compatible with [AssemblyScript](https://github.com/AssemblyScript/assemblyscript). Since this is the language mappings are written in, the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki) is a good source for language and standard library features. + +## API 参考 + +The `@graphprotocol/graph-ts` library provides the following APIs: + +- An `ethereum` API for working with Ethereum smart contracts, events, blocks, transactions, and Ethereum values. +- A `store` API to load and save entities from and to the Graph Node store. +- A `log` API to log messages to the Graph Node output and the Graph Explorer. +- An `ipfs` API to load files from IPFS. +- A `json` API to parse JSON data. +- A `crypto` API to use cryptographic functions. +- 用于在不同类型系统(例如 Ethereum、JSON、GraphQL 和 AssemblyScript)之间进行转换的低级原语。 + +### 版本 + +The `apiVersion` in the subgraph manifest specifies the mapping API version which is run by Graph Node for a given subgraph. + +| 版本 | Release 说明 | +| :-: | --- | +| 0.0.7 | Added `TransactionReceipt` and `Log` classes to the Ethereum types
    Added `receipt` field to the Ethereum Event object | +| 0.0.6 | Added `nonce` field to the Ethereum Transaction object
    Added `baseFeePerGas` to the Ethereum Block object | +| 0.0.5 | AssemblyScript upgraded to version 0.19.10 (this includes breaking changes, please see the [`Migration Guide`](/release-notes/assemblyscript-migration-guide))
    `ethereum.transaction.gasUsed` renamed to `ethereum.transaction.gasLimit` | +| 0.0.4 | Added `functionSignature` field to the Ethereum SmartContractCall object | +| 0.0.3 | Added `from` field to the Ethereum Call object
    `etherem.call.address` renamed to `ethereum.call.to` | +| 0.0.2 | Added `input` field to the Ethereum Transaction object | + +### 内置类型 + +Documentation on the base types built into AssemblyScript can be found in the [AssemblyScript wiki](https://github.com/AssemblyScript/assemblyscript/wiki/Types). + +The following additional types are provided by `@graphprotocol/graph-ts`. + +#### 字节数组 + +```typescript +从'@graphprotocol/graph-ts'导入{ ByteArray } +``` + +`ByteArray` represents an array of `u8`. + +_Construction_ + +- `fromI32(x: i32): ByteArray` - Decomposes `x` into bytes. +- `fromHexString(hex: string): ByteArray` - Input length must be even. Prefixing with `0x` is optional. + +_Type conversions_ + +- `toHexString(): string` - Converts to a hex string prefixed with `0x`. +- `toString(): string` - Interprets the bytes as a UTF-8 string. +- `toBase58(): string` - Encodes the bytes into a base58 string. +- `toU32(): u32` - Interprets the bytes as a little-endian `u32`. Throws in case of overflow. +- `toI32(): i32` - Interprets the byte array as a little-endian `i32`. Throws in case of overflow. + +_Operators_ + +- `equals(y: ByteArray): bool` – can be written as `x == y`. +- `concat(other: ByteArray) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by `other` +- `concatI32(other: i32) : ByteArray` - return a new `ByteArray` consisting of `this` directly followed by the byte representation of `other` + +#### BigDecimal + +```typescript +从“@Graphprotocol/graph-ts”导入{ BigDecimal } +``` + +`BigDecimal` is used to represent arbitrary precision decimals. + +> Note: [Internally](https://github.com/graphprotocol/graph-node/blob/master/graph/src/data/store/scalar.rs) `BigDecimal` is stored in [IEEE-754 decimal128 floating-point format](https://en.wikipedia.org/wiki/Decimal128_floating-point_format), which supports 34 decimal digits of significand. This makes `BigDecimal` unsuitable for representing fixed-point types that can span wider than 34 digits, such as a Solidity [`ufixed256x18`](https://docs.soliditylang.org/en/latest/types.html#fixed-point-numbers) or equivalent. + +_Construction_ + +- `constructor(bigInt: BigInt)` – creates a `BigDecimal` from an `BigInt`. +- `static fromString(s: string): BigDecimal` – parses from a decimal string. + +_Type conversions_ + +- `toString(): string` – prints to a decimal string. + +_Math_ + +- `plus(y: BigDecimal): BigDecimal` – can be written as `x + y`. +- `minus(y: BigDecimal): BigDecimal` – can be written as `x - y`. +- `times(y: BigDecimal): BigDecimal` – can be written as `x * y`. +- `div(y: BigDecimal): BigDecimal` – can be written as `x / y`. +- `equals(y: BigDecimal): bool` – can be written as `x == y`. +- `notEqual(y: BigDecimal): bool` – can be written as `x != y`. +- `lt(y: BigDecimal): bool` – can be written as `x < y`. +- `le(y: BigDecimal): bool` – can be written as `x <= y`. +- `gt(y: BigDecimal): bool` – can be written as `x > y`. +- `ge(y: BigDecimal): bool` – can be written as `x >= y`. +- `neg(): BigDecimal` - can be written as `-x`. + +#### BigInt + +```typescript +从'@graphprotocol/graph-ts'导入{ BigInt } +``` + +`BigInt` is used to represent big integers. This includes Ethereum values of type `uint32` to `uint256` and `int64` to `int256`. Everything below `uint32`, such as `int32`, `uint24` or `int8` is represented as `i32`. + +The `BigInt` class has the following API: + +_Construction_ + +- `BigInt.fromI32(x: i32): BigInt` – creates a `BigInt` from an `i32`. 
+ +- `BigInt.fromString(s: string): BigInt`– Parses a `BigInt` from a string. + +- `BigInt.fromUnsignedBytes(x: Bytes): BigInt` – Interprets `bytes` as an unsigned, little-endian integer. If your input is big-endian, call `.reverse()` first. + +- `BigInt.fromSignedBytes(x: Bytes): BigInt` – Interprets `bytes` as a signed, little-endian integer. If your input is big-endian, call `.reverse()` first. + + _Type conversions_ + +- `x.toHex(): string` – turns `BigInt` into a string of hexadecimal characters. + +- `x.toString(): string` – turns `BigInt` into a decimal number string. + +- `x.toI32(): i32` – returns the `BigInt` as an `i32`; fails if the value does not fit into `i32`. It's a good idea to first check `x.isI32()`. + +- `x.toBigDecimal(): BigDecimal` - converts into a decimal with no fractional part. + +_Math_ + +- `x.plus(y: BigInt): BigInt` – can be written as `x + y`. +- `x.minus(y: BigInt): BigInt` – can be written as `x - y`. +- `x.times(y: BigInt): BigInt` – can be written as `x * y`. +- `x.div(y: BigInt): BigInt` – can be written as `x / y`. +- `x.mod(y: BigInt): BigInt` – can be written as `x % y`. +- `x.equals(y: BigInt): bool` – can be written as `x == y`. +- `x.notEqual(y: BigInt): bool` – can be written as `x != y`. +- `x.lt(y: BigInt): bool` – can be written as `x < y`. +- `x.le(y: BigInt): bool` – can be written as `x <= y`. +- `x.gt(y: BigInt): bool` – can be written as `x > y`. +- `x.ge(y: BigInt): bool` – can be written as `x >= y`. +- `x.neg(): BigInt` – can be written as `-x`. +- `x.divDecimal(y: BigDecimal): BigDecimal` – divides by a decimal, giving a decimal result. +- `x.isZero(): bool` – Convenience for checking if the number is zero. +- `x.isI32(): bool` – Check if the number fits in an `i32`. +- `x.abs(): BigInt` – Absolute value. +- `x.pow(exp: u8): BigInt` – Exponentiation. +- `bitOr(x: BigInt, y: BigInt): BigInt` – can be written as `x | y`. +- `bitAnd(x: BigInt, y: BigInt): BigInt` – can be written as `x & y`. +- `leftShift(x: BigInt, bits: u8): BigInt` – can be written as `x << y`. +- `rightShift(x: BigInt, bits: u8): BigInt` – can be written as `x >> y`. + +#### 类型化映射 + +```typescript +从'@graphprotocol/graph-ts'导入{ TypedMap } +``` + +`TypedMap` can be used to store key-value pairs. See [this example](https://github.com/graphprotocol/aragon-subgraph/blob/29dd38680c5e5104d9fdc2f90e740298c67e4a31/individual-dao-subgraph/mappings/constants.ts#L51). + +The `TypedMap` class has the following API: + +- `new TypedMap()` – creates an empty map with keys of type `K` and values of type `V` +- `map.set(key: K, value: V): void` – sets the value of `key` to `value` +- `map.getEntry(key: K): TypedMapEntry | null` – returns the key-value pair for a `key` or `null` if the `key` does not exist in the map +- `map.get(key: K): V | null` – returns the value for a `key` or `null` if the `key` does not exist in the map +- `map.isSet(key: K): bool` – returns `true` if the `key` exists in the map and `false` if it does not + +#### 字节 + +```typescript +从 '@graphprotocol/graph-ts'导入{ Bytes } +``` + +`Bytes` is used to represent arbitrary-length arrays of bytes. This includes Ethereum values of type `bytes`, `bytes32`, etc. 
+ +The `Bytes` class extends AssemblyScript's [Uint8Array](https://github.com/AssemblyScript/assemblyscript/blob/3b1852bc376ae799d9ebca888e6413afac7b572f/std/assembly/typedarray.ts#L64) and this supports all the `Uint8Array` functionality, plus the following new methods: + +_Construction_ + +- `fromHexString(hex: string) : Bytes` - Convert the string `hex` which must consist of an even number of hexadecimal digits to a `ByteArray`. The string `hex` can optionally start with `0x` +- `fromI32(i: i32) : Bytes` - Convert `i` to an array of bytes + +_Type conversions_ + +- `b.toHex()` – returns a hexadecimal string representing the bytes in the array +- `b.toString()` – converts the bytes in the array to a string of unicode characters +- `b.toBase58()` – turns an Ethereum Bytes value to base58 encoding (used for IPFS hashes) + +_Operators_ + +- `b.concat(other: Bytes) : Bytes` - - return new `Bytes` consisting of `this` directly followed by `other` +- `b.concatI32(other: i32) : ByteArray` - return new `Bytes` consisting of `this` directly follow by the byte representation of `other` + +#### 地址 + +```typescript +从 '@graphprotocol/graph-ts'导入{ Address } +``` + +`Address` extends `Bytes` to represent Ethereum `address` values. + +It adds the following method on top of the `Bytes` API: + +- `Address.fromString(s: string): Address` – creates an `Address` from a hexadecimal string +- `Address.fromBytes(b: Bytes): Address` – create an `Address` from `b` which must be exactly 20 bytes long. Passing in a value with fewer or more bytes will result in an error + +### 商店API + +```typescript +从 '@graphprotocol/graph-ts'导入 { store } +``` + +The `store` API allows to load, save and remove entities from and to the Graph Node store. + +Entities written to the store map one-to-one to the `@entity` types defined in the subgraph's GraphQL schema. To make working with these entities convenient, the `graph codegen` command provided by the [Graph CLI](https://github.com/graphprotocol/graph-cli) generates entity classes, which are subclasses of the built-in `Entity` type, with property getters and setters for the fields in the schema as well as methods to load and save these entities. + +#### 创建实体 + +以下是从以太坊事件创建实体的常见模式。 + +```typescript +/ Import the Transfer event class generated from the ERC20 ABI +import { Transfer as TransferEvent } from '../generated/ERC20/ERC20' + +// Import the Transfer entity type generated from the GraphQL schema +import { Transfer } from '../generated/schema' + +// Transfer event handler +export function handleTransfer(event: TransferEvent): void { + // Create a Transfer entity, using the transaction hash as the entity ID + let id = event.transaction.hash + let transfer = new Transfer(id) + + // Set properties on the entity, using the event parameters + transfer.from = event.params.from + transfer.to = event.params.to + transfer.amount = event.params.amount + + // Save the entity to the store + transfer.save() +} +``` + +When a `Transfer` event is encountered while processing the chain, it is passed to the `handleTransfer` event handler using the generated `Transfer` type (aliased to `TransferEvent` here to avoid a naming conflict with the entity type). This type allows accessing data such as the event's parent transaction and its parameters. 
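+
+Because the handler also receives the parent transaction and the log index, a common refinement (shown here as a hedged sketch, not part of the original example) is to combine both into the entity ID so that several `Transfer` events emitted within the same transaction cannot collide:
+
+```typescript
+// Sketch only: assumes the same Transfer entity and TransferEvent import as in the example above
+export function handleTransfer(event: TransferEvent): void {
+  // Transaction hash (Bytes) concatenated with the log index (i32) gives a per-event unique ID
+  let id = event.transaction.hash.concatI32(event.logIndex.toI32())
+  let transfer = new Transfer(id)
+
+  transfer.from = event.params.from
+  transfer.to = event.params.to
+  transfer.amount = event.params.amount
+  transfer.save()
+}
+```
+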
+ +每个实体都必须有一个唯一的 ID 以避免与其他实体发生冲突。 事件参数包含可以使用的唯一标识符是相当常见的。 注意:使用交易hash作为 ID 时, 假定同一交易中没有其他事件创建以该hash作为 ID 的实体。 + +#### 从存储中加载实体 + +如果实体已经存在,则可以使用以下内容从存储中加载它: + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.load(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +As the entity may not exist in the store yet, the `load` method returns a value of type `Transfer | null`. It may thus be necessary to check for the `null` case before using the value. + +> **Note:** Loading entities is only necessary if the changes made in the mapping depend on the previous data of an entity. See the next section for the two ways of updating existing entities. + +#### 查找在区块中创建的实体 + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.30.0 and `@graphprotocol/graph-cli` v0.49.0 the `loadInBlock` method is available on all entity types. + +存储API有助于检索在当前区块中创建或更新的实体。这方面的一种典型情况是,一个处理程序从某个链上事件创建一个,之交易后的处理程序希望访问该交易(如果存在)。在交易不存在的情况下,子图必须去数据库才能发现实体不存在;如果子图作者已经知道实体必须是在同一个区块中创建的,那么使用loadInBlock可以避免这种数据库往返。对于某些子图,这些遗漏的查找可能会显著增加索引时间。 + +```typescript +let id = event.transaction.hash // or however the ID is constructed +let transfer = Transfer.loadInBlock(id) +if (transfer == null) { + transfer = new Transfer(id) +} + +// Use the Transfer entity as before +``` + +> Note: If there is no entity created in the given block, `loadInBlock` will return `null` even if there is an entity with the given ID in the store. + +#### 查找派生实体 + +As of `graph-node` v0.31.0, `@graphprotocol/graph-ts` v0.31.0 and `@graphprotocol/graph-cli` v0.51.0 the `loadRelated` method is available. + +这允许从事件处理程序中加载派生实体字段。例如,给定以下模式: + +```graphql +type Token @entity { + id: ID! + holder: Holder! + color: String +} + +type Holder @entity { + id: ID! + tokens: [Token!]! @derivedFrom(field: "holder") +} +``` + +The following code will load the `Token` entity that the `Holder` entity was derived from: + +```typescript +let holder = Holder.load('test-id') +// Load the Token entities associated with a given holder +let tokens = holder.tokens.load() +``` + +#### 更新现有实体 + +有两种方法可以更新现有实体: + +1. Load the entity with e.g. `Transfer.load(id)`, set properties on the entity, then `.save()` it back to the store. +2. Simply create the entity with e.g. `new Transfer(id)`, set properties on the entity, then `.save()` it to the store. If the entity already exists, the changes are merged into it. + +由于生成的属性设置器,在大多数情况下更改属性是直截了当的: + +```typescript +let transfer = new Transfer(id) +transfer.from = ... +transfer.to = ... +transfer.amount = ... +``` + +也可以使用以下两条指令之一取消设置属性: + +```typescript +transfer.from.unset() +transfer.from = null +``` + +This only works with optional properties, i.e. properties that are declared without a `!` in GraphQL. Two examples would be `owner: Bytes` or `amount: BigInt`. + +Updating array properties is a little more involved, as the getting an array from an entity creates a copy of that array. This means array properties have to be set again explicitly after changing the array. The following assumes `entity` has a `numbers: [BigInt!]!` field. + +```typescript +// This won't work +entity.numbers.push(BigInt.fromI32(1)) +entity.save() + +// This will work +let numbers = entity.numbers +numbers.push(BigInt.fromI32(1)) +entity.numbers = numbers +entity.save() +``` + +#### 从存储中删除实体 + +There is currently no way to remove an entity via the generated types. 
Instead, removing an entity requires passing the name of the entity type and the entity ID to `store.remove`: + +```typescript +import { store } from '@graphprotocol/graph-ts' +... +let id = event.transaction.hash +store.remove('Transfer', id) +``` + +### 以太坊 API + +以太坊 API 提供对智能合约、公共状态变量、合约函数、事件、交易、区块和编码/解码以太坊数据的访问。 + +#### 对以太坊类型的支持 + +As with entities, `graph codegen` generates classes for all smart contracts and events used in a subgraph. For this, the contract ABIs need to be part of the data source in the subgraph manifest. Typically, the ABI files are stored in an `abis/` folder. + +With the generated classes, conversions between Ethereum types and the [built-in types](#built-in-types) take place behind the scenes so that subgraph authors do not have to worry about them. + +以下示例说明了这一点。 给定一个子图模式,如 + +```graphql +type Transfer @entity { + from: Bytes! + to: Bytes! + amount: BigInt! +} +``` + +and a `Transfer(address,address,uint256)` event signature on Ethereum, the `from`, `to` and `amount` values of type `address`, `address` and `uint256` are converted to `Address` and `BigInt`, allowing them to be passed on to the `Bytes!` and `BigInt!` properties of the `Transfer` entity: + +```typescript +let id = event.transaction.hash.toHex() +let transfer = new Transfer(id) +transfer.from = event.params.from +transfer.to = event.params.to +transfer.amount = event.params.amount +transfer.save() +``` + +#### 事件和区块/交易数据 + +Ethereum events passed to event handlers, such as the `Transfer` event in the previous examples, not only provide access to the event parameters but also to their parent transaction and the block they are part of. The following data can be obtained from `event` instances (these classes are a part of the `ethereum` module in `graph-ts`): + +```typescript +class Event { + address: Address + logIndex: BigInt + transactionLogIndex: BigInt + logType: string | null + block: Block + transaction: Transaction + parameters: Array + receipt: TransactionReceipt | null +} + +class Block { + hash: Bytes + parentHash: Bytes + unclesHash: Bytes + author: Address + stateRoot: Bytes + transactionsRoot: Bytes + receiptsRoot: Bytes + number: BigInt + gasUsed: BigInt + gasLimit: BigInt + timestamp: BigInt + difficulty: BigInt + totalDifficulty: BigInt + size: BigInt | null + baseFeePerGas: BigInt | null +} + +class Transaction { + hash: Bytes + index: BigInt + from: Address + to: Address | null + value: BigInt + gasLimit: BigInt + gasPrice: BigInt + input: Bytes + nonce: BigInt +} + +class TransactionReceipt { + transactionHash: Bytes + transactionIndex: BigInt + blockHash: Bytes + blockNumber: BigInt + cumulativeGasUsed: BigInt + gasUsed: BigInt + contractAddress: Address + logs: Array + status: BigInt + root: Bytes + logsBloom: Bytes +} + +class Log { + address: Address + topics: Array + data: Bytes + blockHash: Bytes + blockNumber: Bytes + transactionHash: Bytes + transactionIndex: BigInt + logIndex: BigInt + transactionLogIndex: BigInt + logType: string + removed: bool | null +} +``` + +#### 访问智能合约状态 + +The code generated by `graph codegen` also includes classes for the smart contracts used in the subgraph. These can be used to access public state variables and call functions of the contract at the current block. 
+ +一种常见的模式是访问事件起源的合约。 这是通过以下代码实现的: + +```typescript +// Import the generated contract class and generated Transfer event class +import { ERC20Contract, Transfer as TransferEvent } from '../generated/ERC20Contract/ERC20Contract' +// Import the generated entity class +import { Transfer } from '../generated/schema' + +export function handleTransfer(event: TransferEvent) { + // Bind the contract to the address that emitted the event + let contract = ERC20Contract.bind(event.address) + + // Access state variables and functions by calling them + let erc20Symbol = contract.symbol() +} +``` + +`Transfer` is aliased to `TransferEvent` here to avoid a naming conflict with the entity type + +As long as the `ERC20Contract` on Ethereum has a public read-only function called `symbol`, it can be called with `.symbol()`. For public state variables a method with the same name is created automatically. + +作为子图一部分的任何其他合约都可以从生成的代码中导入,并且可以绑定到一个有效地址。 + +#### 处理重复调用 + +If the read-only methods of your contract may revert, then you should handle that by calling the generated contract method prefixed with `try_`. For example, the Gravity contract exposes the `gravatarToOwner` method. This code would be able to handle a revert in that method: + +```typescript +let gravity = Gravity.bind(event.address) +let callResult = gravity.try_gravatarToOwner(gravatar) +if (callResult.reverted) { + log.info('getGravatar reverted', []) +} else { + let owner = callResult.value +} +``` + +请注意,连接到 Geth 或 Infura 客户端的 Graph 节点可能无法检测到所有重复使用,如果您依赖于此,我们建议使用连接到 Parity 客户端的 Graph 节点。 + +#### 编码/解码 ABI + +Data can be encoded and decoded according to Ethereum's ABI encoding format using the `encode` and `decode` functions in the `ethereum` module. + +```typescript +import { Address, BigInt, ethereum } from '@graphprotocol/graph-ts' + +let tupleArray: Array = [ + ethereum.Value.fromAddress(Address.fromString('0x0000000000000000000000000000000000000420')), + ethereum.Value.fromUnsignedBigInt(BigInt.fromI32(62)), +] + +let tuple = tupleArray as ethereum.Tuple + +let encoded = ethereum.encode(ethereum.Value.fromTuple(tuple))! + +let decoded = ethereum.decode('(address,uint256)', encoded) +``` + +查询更多的信息: + +- [ABI Spec](https://docs.soliditylang.org/en/v0.7.4/abi-spec.html#types) +- Encoding/decoding [Rust library/CLI](https://github.com/rust-ethereum/ethabi) +- More [complex example](https://github.com/graphprotocol/graph-node/blob/6a7806cc465949ebb9e5b8269eeb763857797efc/tests/integration-tests/host-exports/src/mapping.ts#L72). + +### 日志记录 API + +```typescript +从 '@graphprotocol/graph-ts'导入 { log } +``` + +The `log` API allows subgraphs to log information to the Graph Node standard output as well as the Graph Explorer. Messages can be logged using different log levels. A basic format string syntax is provided to compose log messages from argument. + +The `log` API includes the following functions: + +- `log.debug(fmt: string, args: Array): void` - logs a debug message. +- `log.info(fmt: string, args: Array): void` - logs an informational message. +- `log.warning(fmt: string, args: Array): void` - logs a warning. +- `log.error(fmt: string, args: Array): void` - logs an error message. +- `log.critical(fmt: string, args: Array): void` – logs a critical message _and_ terminates the subgraph. + +The `log` API takes a format string and an array of string values. It then replaces placeholders with the string values from the array. 
The first `{}` placeholder gets replaced by the first value in the array, the second `{}` placeholder gets replaced by the second value and so on. + +```typescript +log.info('Message to be displayed: {}, {}, {}', [value.toString(), anotherValue.toString(), 'already a string']) +``` + +#### 记录一个或多个值 + +##### 记录单个值 + +In the example below, the string value "A" is passed into an array to become`['A']` before being logged: + +```typescript +let myValue = 'A' + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" + log.info('My value is: {}', [myValue]) +} +``` + +##### 从现有数组记录单个条目 + +在下面的示例中,尽管参数数组包含三个值,只有该数组的第一个值被记录了。 + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My value is: A" (Even though three values are passed to `log.info`) + log.info('My value is: {}', myArray) +} +``` + +#### 从现有数组记录多个条目 + +Each entry in the arguments array requires its own placeholder `{}` in the log message string. The below example contains three placeholders `{}` in the log message. Because of this, all three values in `myArray` are logged. + +```typescript +let myArray = ['A', 'B', 'C'] + +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My first value is: A, second value is: B, third value is: C" + log.info('My first value is: {}, second value is: {}, third value is: {}', myArray) +} +``` + +##### 从现有数组记录特定条目 + +要在数组中显示特定值,必须提供它的索引值。 + +```typescript +export function handleSomeEvent(event: SomeEvent): void { + // Displays : "My third value is C" + log.info('My third value is: {}', [myArray[2]]) +} +``` + +##### 记录事件信息 + +下面的例子记录了一个事件的区块号、区块hash和交易hash: + +```typescript +import { log } from '@graphprotocol/graph-ts' + +export function handleSomeEvent(event: SomeEvent): void { + log.debug('Block number: {}, block hash: {}, transaction hash: {}', [ + event.block.number.toString(), // "47596000" + event.block.hash.toHexString(), // "0x..." + event.transaction.hash.toHexString(), // "0x..." + ]) +} +``` + +### IPFS API + +```typescript +从 '@graphprotocol/graph-ts'导入{ ipfs } +``` + +Smart contracts occasionally anchor IPFS files on chain. This allows mappings to obtain the IPFS hashes from the contract and read the corresponding files from IPFS. The file data will be returned as `Bytes`, which usually requires further processing, e.g. with the `json` API documented later on this page. + +给定一个 IPFS hash或路径,从 IPFS 读取文件的过程如下: + +```typescript +// Put this inside an event handler in the mapping +let hash = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D' +let data = ipfs.cat(hash) + +// Paths like `QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile` +// that include files in directories are also supported +let path = 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D/Makefile' +let data = ipfs.cat(path) +``` + +**Note:** `ipfs.cat` is not deterministic at the moment. If the file cannot be retrieved over the IPFS network before the request times out, it will return `null`. Due to this, it's always worth checking the result for `null`. + +It is also possible to process larger files in a streaming fashion with `ipfs.map`. 
The function expects the hash or path for an IPFS file, the name of a callback, and flags to modify its behavior: + +```typescript +import { JSONValue, Value } from '@graphprotocol/graph-ts' + +export function processItem(value: JSONValue, userData: Value): void { + // See the JSONValue documentation for details on dealing + // with JSON values + let obj = value.toObject() + let id = obj.get('id') + let title = obj.get('title') + + if (!id || !title) { + return + } + + // Callbacks can also created entities + let newItem = new Item(id) + newItem.title = title.toString() + newitem.parent = userData.toString() // Set parent to "parentId" + newitem.save() +} + +// Put this inside an event handler in the mapping +ipfs.map('Qm...', 'processItem', Value.fromString('parentId'), ['json']) + +// Alternatively, use `ipfs.mapJSON` +ipfs.mapJSON('Qm...', 'processItem', Value.fromString('parentId')) +``` + +The only flag currently supported is `json`, which must be passed to `ipfs.map`. With the `json` flag, the IPFS file must consist of a series of JSON values, one value per line. The call to `ipfs.map` will read each line in the file, deserialize it into a `JSONValue` and call the callback for each of them. The callback can then use entity operations to store data from the `JSONValue`. Entity changes are stored only when the handler that called `ipfs.map` finishes successfully; in the meantime, they are kept in memory, and the size of the file that `ipfs.map` can process is therefore limited. + +On success, `ipfs.map` returns `void`. If any invocation of the callback causes an error, the handler that invoked `ipfs.map` is aborted, and the subgraph is marked as failed. + +### Crypto API + +```typescript +从'@graphprotocol/graph-ts'导入{ crypto } +``` + +The `crypto` API makes a cryptographic functions available for use in mappings. Right now, there is only one: + +- `crypto.keccak256(input: ByteArray): ByteArray` + +### JSON API + +```typescript +从'@graphprotocol/graph-ts'导入{ json, JSONValueKind } +``` + +JSON data can be parsed using the `json` API: + +- `json.fromBytes(data: Bytes): JSONValue` – parses JSON data from a `Bytes` array interpreted as a valid UTF-8 sequence +- `json.try_fromBytes(data: Bytes): Result` – safe version of `json.fromBytes`, it returns an error variant if the parsing failed +- `json.fromString(data: string): JSONValue` – parses JSON data from a valid UTF-8 `String` +- `json.try_fromString(data: string): Result` – safe version of `json.fromString`, it returns an error variant if the parsing failed + +The `JSONValue` class provides a way to pull values out of an arbitrary JSON document. Since JSON values can be booleans, numbers, arrays and more, `JSONValue` comes with a `kind` property to check the type of a value: + +```typescript +let value = json.fromBytes(...) +if (value.kind == JSONValueKind.BOOL) { + ... 
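+  // value.toBool() is now safe to call, since the kind check above guarantees a boolean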
+} +``` + +In addition, there is a method to check if the value is `null`: + +- `value.isNull(): boolean` + +When the type of a value is certain, it can be converted to a [built-in type](#built-in-types) using one of the following methods: + +- `value.toBool(): boolean` +- `value.toI64(): i64` +- `value.toF64(): f64` +- `value.toBigInt(): BigInt` +- `value.toString(): string` +- `value.toArray(): Array` - (and then convert `JSONValue` with one of the 5 methods above) + +### 类型转换参考 + +| Source(s) | Destination | Conversion function | +| -------------------- | -------------------- | ---------------------------- | +| Address | Bytes | none | +| Address | String | s.toHexString() | +| BigDecimal | String | s.toString() | +| BigInt | BigDecimal | s.toBigDecimal() | +| BigInt | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| BigInt | String (unicode) | s.toString() | +| BigInt | i32 | s.toI32() | +| Boolean | Boolean | none | +| Bytes (signed) | BigInt | BigInt.fromSignedBytes(s) | +| Bytes (unsigned) | BigInt | BigInt.fromUnsignedBytes(s) | +| Bytes | String (hexadecimal) | s.toHexString() 或 s.toHex() | +| Bytes | String (unicode) | s.toString() | +| Bytes | String (base58) | s.toBase58() | +| Bytes | i32 | s.toI32() | +| Bytes | u32 | s.toU32() | +| Bytes | JSON | json.fromBytes(s) | +| int8 | i32 | none | +| int32 | i32 | none | +| int32 | BigInt | BigInt.fromI32(s) | +| uint24 | i32 | none | +| int64 - int256 | BigInt | none | +| uint32 - uint256 | BigInt | none | +| JSON | boolean | s.toBool() | +| JSON | i64 | s.toI64() | +| JSON | u64 | s.toU64() | +| JSON | f64 | s.toF64() | +| JSON | BigInt | s.toBigInt() | +| JSON | string | s.toString() | +| JSON | Array | s.toArray() | +| JSON | Object | s.toObject() | +| String | Address | Address.fromString(s) | +| Bytes | Address | Address.fromBytes(s) | +| String | BigInt | BigInt.fromString(s) | +| String | BigDecimal | BigDecimal.fromString(s) | +| String (hexadecimal) | Bytes | ByteArray.fromHexString(s) | +| String (UTF-8) | Bytes | ByteArray.fromUTF8(s) | + +### 数据源元数据 + +You can inspect the contract address, network and context of the data source that invoked the handler through the `dataSource` namespace: + +- `dataSource.address(): Address` +- `dataSource.network(): string` +- `dataSource.context(): DataSourceContext` + +### Entity 和 DataSourceContext + +The base `Entity` class and the child `DataSourceContext` class have helpers to dynamically set and get fields: + +- `setString(key: string, value: string): void` +- `setI32(key: string, value: i32): void` +- `setBigInt(key: string, value: BigInt): void` +- `setBytes(key: string, value: Bytes): void` +- `setBoolean(key: string, value: bool): void` +- `setBigDecimal(key, value: BigDecimal): void` +- `getString(key: string): string` +- `getI32(key: string): i32` +- `getBigInt(key: string): BigInt` +- `getBytes(key: string): Bytes` +- `getBoolean(key: string): boolean` +- `getBigDecimal(key: string): BigDecimal` + +### DataSourceContext in Manifest + +The `context` section within `dataSources` allows you to define key-value pairs that are accessible within your subgraph mappings. The available types are `Bool`, `String`, `Int`, `Int8`, `BigDecimal`, `Bytes`, `List`, and `BigInt`. 
+ +Here is a YAML example illustrating the usage of various types in the `context` section: + +```yaml +dataSources: + - kind: ethereum/contract + name: ContractName + network: mainnet + context: + bool_example: + type: Bool + data: true + string_example: + type: String + data: 'hello' + int_example: + type: Int + data: 42 + int8_example: + type: Int8 + data: 127 + big_decimal_example: + type: BigDecimal + data: '10.99' + bytes_example: + type: Bytes + data: '0x68656c6c6f' + list_example: + type: List + data: + - type: Int + data: 1 + - type: Int + data: 2 + - type: Int + data: 3 + big_int_example: + type: BigInt + data: '1000000000000000000000000' +``` + +- `Bool`: Specifies a Boolean value (`true` or `false`). +- `String`: Specifies a String value. +- `Int`: Specifies a 32-bit integer. +- `Int8`: Specifies an 8-bit integer. +- `BigDecimal`: Specifies a decimal number. Must be quoted. +- `Bytes`: Specifies a hexadecimal string. +- `List`: Specifies a list of items. Each item needs to specify its type and data. +- `BigInt`: Specifies a large integer value. Must be quoted due to its large size. + +This context is then accessible in your subgraph mapping files, enabling more dynamic and configurable subgraphs. diff --git a/website/pages/zh/developing/graph-ts/common-issues.mdx b/website/pages/zh/developing/graph-ts/common-issues.mdx new file mode 100644 index 000000000000..ae7c7b6a4787 --- /dev/null +++ b/website/pages/zh/developing/graph-ts/common-issues.mdx @@ -0,0 +1,8 @@ +--- +title: AssemblyScript的常见问题 +--- + +There are certain [AssemblyScript](https://github.com/AssemblyScript/assemblyscript) issues that are common to run into during subgraph development. They range in debug difficulty, however, being aware of them may help. The following is a non-exhaustive list of these issues: + +- `Private` class variables are not enforced in [AssembyScript](https://www.assemblyscript.org/status.html#language-features). There is no way to protect class variables from being directly changed from the class object. +- Scope is not inherited into [closure functions](https://www.assemblyscript.org/status.html#on-closures), i.e. variables declared outside of closure functions cannot be used. Explanation in [Developer Highlights #3](https://www.youtube.com/watch?v=1-8AW-lVfrA&t=3243s). diff --git a/website/pages/zh/developing/substreams-powered-subgraphs-faq.mdx b/website/pages/zh/developing/substreams-powered-subgraphs-faq.mdx index 7e5b1381a6b0..22bba846642e 100644 --- a/website/pages/zh/developing/substreams-powered-subgraphs-faq.mdx +++ b/website/pages/zh/developing/substreams-powered-subgraphs-faq.mdx @@ -4,9 +4,9 @@ title: 基于Substreams的子图 ## 什么是Substreams? -Substreams是由[StreamingFast](https://www.streamingfast.io/)开发的,它是一个非常强大的处理引擎,能够处理丰富的区块链数据流。Substreams允许您对区块链数据进行精炼和塑形,以便最终用户应用程序能够快速无缝地消化这些数据。具体来说,Substreams是一个与区块链无关、并行化的、以流为先的引擎,可用作区块链数据转换层。它由[Firehose](https://firehose.streamingfast.io/)驱动,使开发人员能够编写Rust模块,构建社区模块,提供极高性能的索引,并将数据[sink](https://substreams.streamingfast.io/developers-guide/sink-targets)到任何地方。 +Developed by [StreamingFast](https://www.streamingfast.io/), Substreams is an exceptionally powerful processing engine capable of consuming rich streams of blockchain data. Substreams allow you to refine and shape blockchain data for fast and seamless digestion by end-user applications. More specifically, Substreams is a blockchain-agnostic, parallelized, and streaming-first engine, serving as a blockchain data transformation layer. 
Powered by the [Firehose](https://firehose.streamingfast.io/), it ​​enables developers to write Rust modules, build upon community modules, provide extremely high-performance indexing, and [sink](https://substreams.streamingfast.io/developers-guide/sink-targets) their data anywhere. -前往[Substreams文档](/substreams),了解更多关于Substreams的信息。 +Go to the [Substreams Documentation](/substreams) to learn more about Substreams. ## 什么是基于Substreams的子图? @@ -22,7 +22,7 @@ Substreams是由[StreamingFast](https://www.streamingfast.io/)开发的,它是 ## 使用基于Substeams的子图的优势是什么? -Substreams强化子图将Substreams的所有优势与子图的可查询性相结合。它们为The Graph带来了更强的可组合性和高性能索引功能。同时,它们也为新的数据应用场景提供了可能性;例如,一旦您构建了Substreams强化子图,您可以重复使用[Substreams模块](https://substreams.streamingfast.io/developers-guide/modules),并将其输出到不同的[sinks(数据输出目标)](https://substreams.streamingfast.io/developers-guide/sink-targets),比如PostgreSQL、MongoDB和Kafka。 +Substreams-powered subgraphs combine all the benefits of Substreams with the queryability of subgraphs. They bring greater composability and high-performance indexing to The Graph. They also enable new data use cases; for example, once you've built your Substreams-powered Subgraph, you can reuse your [Substreams modules](https://substreams.streamingfast.io/developers-guide/modules) to output to different [sinks](https://substreams.streamingfast.io/developers-guide/sink-targets) such as PostgreSQL, MongoDB, and Kafka. ## Substreams的优势是什么? @@ -62,7 +62,7 @@ Firehose是由[StreamingFast](https://www.streamingfast.io/)开发的区块链 ## 开发人员在哪里可以获得关于Substreams-powered子图和Substreams的更多信息? -[Substreams documentation](/substreams) 将教您如何构建Substreams模块。 +The [Substreams documentation](/substreams) will teach you how to build Substreams modules. [Substreams-powered subgraphs documentation](/cookbook/substreams-powered-subgraphs/) 将向您展示如何将它们打包部署在The Graph上。 @@ -70,7 +70,7 @@ Firehose是由[StreamingFast](https://www.streamingfast.io/)开发的区块链 Rust模块相当于子图中的AssemblyScript mappers。它们以类似的方式编译为WASM,但编程模型允许并行执行。它们定义了您想要对原始区块链数据应用的转换和聚合类型。 -有关详细信息,请参阅[modules documentation](https://substreams.streamingfast.io/developers-guide/modules) 。 +See [modules documentation](https://substreams.streamingfast.io/developers-guide/modules) for details. ## 什么使Substreams具有组合性? diff --git a/website/pages/zh/developing/supported-networks.json b/website/pages/zh/developing/supported-networks.json index 5e12392b8c7d..5e6294cec90e 100644 --- a/website/pages/zh/developing/supported-networks.json +++ b/website/pages/zh/developing/supported-networks.json @@ -1,9 +1,9 @@ { - "network": "Network", - "cliName": "CLI Name", - "chainId": "Chain ID", + "network": "网络", + "cliName": "CLI名字", + "chainId": "链id", "studioAndHostedService": "Studio and Hosted Service", - "decentralizedNetwork": "Decentralized Network", + "decentralizedNetwork": "去中心化网络", "supportedByUpgradeIndexer": "Supported only by upgrade Indexer", "supportsSubstreams": "Supports Substreams" } diff --git a/website/pages/zh/developing/supported-networks.mdx b/website/pages/zh/developing/supported-networks.mdx index ab0d386ff5ca..c0bced3ae466 100644 --- a/website/pages/zh/developing/supported-networks.mdx +++ b/website/pages/zh/developing/supported-networks.mdx @@ -7,11 +7,11 @@ import { SupportedNetworksTable } from '@/src/supportedNetworks' -托管服务依赖于底层技术的稳定性和可靠性,即所提供的JSON RPC端点。 +The hosted service relies on the stability and reliability of the underlying technologies, namely the provided JSON RPC endpoints. 
-Ropsten、Rinkeby和Kovan正在被弃用。阅读更多在[以太坊基金会博客](https://blog.ethereum.org/2022/06/21/testnet-deprecation)。自2023年2月25日起,托管服务不再支持Ropsten、Rinkeby和Kovan。Goerli将在合并后由客户端开发人员维护,也由托管服务支持。鼓励目前使用Ropsten,Rinkeby或Kovan作为其暂存/测试环境的开发人员迁移到Goerli。 +Ropsten, Rinkeby and Kovan are being deprecated. Read more on the [Ethereum Foundation Blog](https://blog.ethereum.org/2022/06/21/testnet-deprecation). As of Feb 25th 2023, Ropsten, Rinkeby and Kovan are no longer supported by the hosted service. Goerli will be maintained by client developers post-merge, and is also supported by the hosted service. Developers who currently use Ropsten, Rinkeby or Kovan as their staging/testing environment are encouraged to migrate to Goerli. -索引Gnosis Chain的子图现在可以使用`gnosis`网络标识符进行部署。现有托管服务子图仍然支持`xdai`。 +Subgraphs indexing Gnosis Chain can now be deployed with the `gnosis` network identifier. `xdai` is still supported for existing hosted service subgraphs. 有关去中心化网络支持哪些功能的完整列表,请参阅[本页](https://github.com/graphprotocol/indexer/blob/main/docs/feature-support-matrix.md)。 @@ -19,6 +19,6 @@ Substreams-支持的子图索引 `主网` 子图工作室以及分布式网络 ## Graph 节点 -如果您想使用的网络不受The Graph的去中心化网络支持,您可以运行自己的Graph节点来索引任何与以太坊虚拟机(EVM)兼容的网络。确保您使用的[版本](https://github.com/graphprotocol/graph-node/releases) 支持该网络,并且您设置好了所需的配置。 +If your preferred network isn't supported on The Graph's decentralized network, you can run your own [Graph Node](https://github.com/graphprotocol/graph-node) to index any EVM-compatible network. Make sure that the [version](https://github.com/graphprotocol/graph-node/releases) you are using supports the network and you have the needed configuration. Graph节点 还可以通过 Firehose 集成对其他协议进行索引。Firehose集成已创建的 NEAR,Arweave 和基于Cosmos的链。 diff --git a/website/pages/zh/firehose.mdx b/website/pages/zh/firehose.mdx index 5e2b37ee4bb6..be2ecda94c66 100644 --- a/website/pages/zh/firehose.mdx +++ b/website/pages/zh/firehose.mdx @@ -2,10 +2,21 @@ title: Firehose --- -Firehose provides a files-based and streaming-first approach to processing blockchain data. +![Firehose Logo](/img/firehose-logo.png) -Firehose integrations have been built for Ethereum (and many EVM chains), NEAR, Solana, Cosmos and Arweave, with more in the works. +Firehose is a new technology developed by StreamingFast working with The Graph Foundation. The product provides **previously unseen capabilities and speeds for indexing blockchain data** using a files-based and streaming-first approach. -Graph Node integrations have been built for multiple chains, so subgraphs can stream data from a Firehose to power performant and scaleable indexing. Firehose also powers [substreams](/substreams), a new transformation technology built by The Graph core developers. +Firehose extracts, transforms and saves blockchain data in a highly performant file-based strategy. Blockchain developers can then access data extracted by Firehose through binary data streams. Firehose is intended to stand as a replacement for The Graph’s original blockchain data extraction layer. -Visit the [firehose documentation](https://firehose.streamingfast.io/) to learn more. +## Firehose Documentation + +The Firehose documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://firehose.streamingfast.io/). + +### 开始 + +- Read this [Firehose introduction](https://firehose.streamingfast.io/introduction/firehose-overview) to get an overview of what it is and why it was built. +- Learn about the [Prerequisites](https://firehose.streamingfast.io/introduction/prerequisites) to install and deploy Firehose. 
+ +### Expand Your Knowledge + +- Learn about the different [Firehose components](https://firehose.streamingfast.io/architecture/components) available. diff --git a/website/pages/zh/glossary.mdx b/website/pages/zh/glossary.mdx index 2e3739a6f8b4..e29a9d8cd58a 100644 --- a/website/pages/zh/glossary.mdx +++ b/website/pages/zh/glossary.mdx @@ -12,7 +12,7 @@ title: 术语汇编 - **Subgraph**: 基于区块链数据构建的定制 API,可以使用 [GraphQL](https://graphql.org/) 进行查询。开发人员可以在 Graph 的去中心化网络中构建、部署和发布子图。然后,索引人可以开始索引子图,以便子图使用者可以查询它们。 -- **Hosted Service**: 一个临时的脚手架服务,用于构建和查询子图,因为 Graph 去中心化网络的服务成本、服务质量和开发人员体验正在成熟。 +- **Hosted service**: A temporary scaffold service for building and querying subgraphs as The Graph's decentralized network is maturing its cost of service, quality of service, and developer experience. - **Indexers**:网络参与者运行索引节点,从区块链索引数据并提供 GraphQL 查询。 @@ -24,6 +24,8 @@ title: 术语汇编 - **Indexer's Self Stake**: 索引人参与去中心化网络的 GRT 金额。最低为100000 GRT,并且没有上限。 +- **Upgrade Indexer**: A temporary Indexer designed to act as a fallback for subgraph queries not serviced by other Indexers on the network. It ensures a seamless transition for subgraphs upgrading from the hosted service by readily serving their queries upon being published. The upgrade Indexer is not competitive with other Indexers. It supports numerous blockchains that were previously only available on the hosted service. + - **Delegators**: 拥有 GRT 并将其 GRT 委托给索引人的网络参与者。这使得索引人可以增加它们在网络子图中的份额。作为回报,委托方将获得索引方为处理子图而获得的索引奖励的一部分。 - **Delegation Tax**: 委托人将 GRT 委托给索引人时支付的0.5% 的费用。用于支付费用的 GRT 将被消耗。 @@ -38,27 +40,21 @@ title: 术语汇编 - **Subgraph Manifest**: 描述子图的 GraphQL 模式、数据源和其他元数据的 JSON 文件。[此处](https://ipfs.io/ipfs/QmVQdzeGdPUiLiACeqXRpKAYpyj8Z1yfWLMUq7A7WundUf)示例。 -- **Rebate Pool**: 一种经济安全措施,持有子图消费者支付的查询费用,直到它们可能被索引人声称为查询费用回扣。剩余的 GRT 被消耗了。 - -- **Epoch**: 网络中的时间单位。一个时期目前为6,646个区块或大约1天。 +- **Epoch**: A unit of time within the network. Currently, one epoch is 6,646 blocks or approximately 1 day. - **Allocation**: 一个索引人可以分配他们的总 GRT 份额(包括委托人的股份) 到已经部署在Graph去中心化网络的子图。分配存在于四个阶段之一。 1. **Active**: 分配在链上创建时被认为是活动的。这称为打开一个分配,并向网络表明索引人正在为特定子图建立索引并提供查询服务。主动分配的增值索引奖励与子图上的信号以及分配的 GRT 的数量成比例。 - 2. **Closed**: 索引人可以通过提交最近的、有效的索引证明(POI) 来索取给定子图上的应计索引奖励。这称为关闭分配。分配必须至少开放一个时期才能关闭。最大分配周期为28个时期。如果一个索引人将一个分配保留到超过28个时期,那么它就被称为过期分配。当分配处于**封闭**状态时,fisherman仍然可以就索引人提供虚假数据提出争议。 - - 3. **Finalized**: 争议期已经结束,查询费用回扣可由索引人提取。 - - 4. **Claimed**: 分配的最后阶段,所有符合条件的奖励已经分配,其查询费回扣已经提取。 + 2. **Closed**: An Indexer may claim the accrued indexing rewards on a given subgraph by submitting a recent, and valid, Proof of Indexing (POI). This is known as closing an allocation. An allocation must have been open for a minimum of one epoch before it can be closed. The maximum allocation period is 28 epochs. If an indexer leaves an allocation open beyond 28 epochs, it is known as a stale allocation. When an allocation is in the **Closed** state, a Fisherman can still open a dispute to challenge an Indexer for serving false data. - **Subgraph Studio**: 用于构建、部署和发布子图的强大 dapp。 -- **Fishermen**: 网络参与者可能会对索引人的查询响应和 POI 提出争议。这就是所谓的Fisherman。Fisherman提出争议胜出的收益来自索引人的经济处罚,以及对Fisherman的奖励,从而激励索引人在网络中执行的索引和查询工作的完整性。惩罚(削减) 目前设置为2.5% 的索引人的自我份额,与50% 的削减GRT归属 Fisherman,其余50% 被消耗。 +- **Fishermen**: A role within The Graph Network held by participants who monitor the accuracy and integrity of data served by Indexers. When a Fisherman identifies a query response or a POI they believe to be incorrect, they can initiate a dispute against the Indexer. If the dispute rules in favor of the Fisherman, the Indexer is slashed. 
Specifically, the Indexer will lose 2.5% of their self-stake of GRT. Of this amount, 50% is awarded to the Fisherman as a bounty for their vigilance, and the remaining 50% is removed from circulation (burned). This mechanism is designed to encourage Fishermen to help maintain the reliability of the network by ensuring that Indexers are held accountable for the data they provide. -- **Arbitrators**: 仲裁员是通过治理设置的网络参与者。仲裁员的作用是决定索引和查询争议的结果。他们的目标是最大限度地提高Graph网络的效用和可靠性。 +- **Arbitrators**: Arbitrators are network participants appointed through a governance process. The role of the Arbitrator is to decide the outcome of indexing and query disputes. Their goal is to maximize the utility and reliability of The Graph Network. -- **Slashing**: 索引人可能因为提供了不正确的索引证明(POI) 或提供了不准确的数据而削减它们所占的 GRT。削减百分比是一个协议参数,目前设置为索引人自身权益的2.5% 。50% 的削减后的总注册税收归Fisherman,他们对不准确的数据或不正确的 POI 提出异议。剩下的50% 被消耗了。 +- **Slashing**: Indexers can have their self-staked GRT slashed for providing an incorrect POI or for serving inaccurate data. or for serving inaccurate data. The slashing percentage is a protocol parameter currently set to 2.5% of an Indexer's self stake. 50% of the slashed GRT goes to the Fisherman that disputed the inaccurate data or incorrect POI. The other 50% is burned. - **Indexing Rewards**: 索引人因为索引子图而获得的奖励。索引奖励是通过GRT 来分配的。 @@ -66,7 +62,7 @@ title: 术语汇编 - **GRT**: Graph的工作效用代币。 GRT 为网络参与者提供经济激励,鼓励他们为网络做出贡献。 -- **POI or Proof of Indexing**: 当一个索引人关闭他们的分配,并希望要求他们的累积索引人奖励在一个给定的子图,他们必须提供一个有效的和最近的索引证明(POI)。Fishermen可以对索引人提供的 POI 提出异议。Fisherman胜出的争端将导致索引人被惩罚。 +- **POI or Proof of Indexing**: When an Indexer closes their allocation and wants to claim their accrued indexing rewards on a given subgraph, they must provide a valid and recent Proof of Indexing (POI). Fishermen may dispute the POI provided by an Indexer. A dispute resolved in the Fisherman's favor will result in slashing of the Indexer. - **Graph Node**: Graph节点是索引子图的组件,并使生成的数据可通过GraphQL API进行查询。因此,它是索引人堆栈的中心,Graph节点的正确操作对于运行成功的索引人至关重要。 @@ -80,10 +76,10 @@ title: 术语汇编 - **Cooldown Period**: 直到更改其委托参数的索引人可以再次进行此操作之前的剩余时间。 -- **L2 转账工具**: 智能合约和UI,使网络参与者能够从以太坊主网转移到Arbitrum One。网络参与者可以转移委托的GRT、子图、策展股份和索引者自己的股份。 +- **L2 Transfer Tools**: Smart contracts and UI that enable network participants to transfer network related assets from Ethereum mainnet to Arbitrum One. Network participants can transfer delegated GRT, subgraphs, curation shares, and Indexer's self stake. - **_升级_ 子图到 Graph网络中**: 将子图从托管服务移动到Graph网络的过程. - **_更新_ 子图**: 发布新子图版本的过程,其中包含对子图的清单、模式或映射的更新。 -- **迁移**: 从旧版本的子图移动到新版本的子图的过程(即,当v0.0.1更新到v0.0.2时,策展共享迁移到最新版本)。 +- **Migrating**: The process of curation shares moving from an old version of a subgraph to a new version of a subgraph (e.g. when v0.0.1 is updated to v0.0.2). diff --git a/website/pages/zh/graphcast.mdx b/website/pages/zh/graphcast.mdx index c1570d1293a1..5b524f96d2ae 100644 --- a/website/pages/zh/graphcast.mdx +++ b/website/pages/zh/graphcast.mdx @@ -10,7 +10,7 @@ title: Graphcast Graphcast SDK(软件开发工具包)允许开发人员构建Radio,这是一种使用gossip协议的应用程序,索引人可以运行这些应用程序来服务于特定的目的。我们还打算为以下用例创建一些Radio(或为希望构建Radio的其他开发人员/团队提供支持): -- \-子图数据完整性的实时交叉检查([POI Radio](https://docs.graphops.xyz/graphcast/radios/poi-radio)). +- Real-time cross-checking of subgraph data integrity ([Subgraph Radio](https://docs.graphops.xyz/graphcast/radios/subgraph-radio)). 
- \-对来自其他索引人的warp同步中的子图、子流和Firehose数据进行拍卖和协调。 - \-主动查询分析的自我报告,包括子图请求量、费用量等。 - \-索引分析的自我报告,包括子图索引时间、处理程序gas成本、遇到的索引错误等。 diff --git a/website/pages/zh/index.json b/website/pages/zh/index.json index 367b3d0d2bb1..4228b57be8d5 100644 --- a/website/pages/zh/index.json +++ b/website/pages/zh/index.json @@ -23,8 +23,8 @@ "description": "在子图工作室中创建子图" }, "migrateFromHostedService": { - "title": "从托管服务中迁移", - "description": "将子图迁移到Graph网络上" + "title": "Upgrade from the hosted service", + "description": "Upgrading subgraphs to The Graph Network" } }, "networkRoles": { @@ -63,15 +63,14 @@ }, "hostedService": { "title": "托管服务", - "description": "在托管服务上创建和探索子图" + "description": "Create and explore subgraphs on the hosted service" } } }, "supportedNetworks": { "title": "支持的网络", - "description": "Graph支持在Graph网络和托管服务上的以下网络。", - "graphNetworkAndHostedService": "Graph网络和托管服务", - "hostedService": "托管服务", - "betaWarning": "网络正处于测试阶段" + "description": "The Graph supports the following networks on The Graph Network and the hosted service.", + "graphNetworkAndHostedService": "The Graph Network & hosted service", + "hostedService": "hosted service" } } diff --git a/website/pages/zh/mips-faqs.mdx b/website/pages/zh/mips-faqs.mdx index 68896a95e7ad..7c3d594dd78b 100644 --- a/website/pages/zh/mips-faqs.mdx +++ b/website/pages/zh/mips-faqs.mdx @@ -4,122 +4,124 @@ title: MIP常见问题解答 ## 介绍 -这是一个可以参与Graph生态系统,激动人心的时刻!2022年[Graph日]期间(https://thegraph.com/graph-day/2022/)Yaniv Tal宣布[即将结束托管服务](https://thegraph.com/blog/sunsetting-hosted-service/),这是Graph生态系统多年来一直致力于的一刻。 +> Note: the MIPs program is closed as of May 2023. Thank you to all the Indexers who participated! -为了支持托管服务的退出及其所有活动向去中心化网络的迁移,Graph基金会宣布了[迁移基础设施提供商(MIP)计划](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). +It's an exciting time to be participating in The Graph ecosystem! During [Graph Day 2022](https://thegraph.com/graph-day/2022/) Yaniv Tal announced the [sunsetting of the hosted service](https://thegraph.com/blog/sunsetting-hosted-service/), a moment The Graph ecosystem has been working towards for many years. -MIPs计划是索引人的激励计划,为他们提供资源来索引以太坊主网以外的链,并帮助Graph协议将去中心化网络扩展到多链基础设施层。 +To support the sunsetting of the hosted service and the migration of all of it's activity to the decentralized network, The Graph Foundation has announced the [Migration Infrastructure Providers (MIPs) program](https://thegraph.com/blog/mips-multi-chain-indexing-incentivized-program). -MIPs计划已分配了0.75%的GRT供应(7500万GRT),其中0.5%用于奖励对网络引导有贡献的索引人,0.25%用于使用多链子图的子图开发人员的迁移奖励。 +The MIPs program is an incentivization program for Indexers to support them with resources to index chains beyond Ethereum mainnet and help The Graph protocol expand the decentralized network into a multi-chain infrastructure layer. -### 有用的资源 +The MIPs program has allocated 0.75% of the GRT supply (75M GRT), with 0.5% to reward Indexers who contribute to bootstrapping the network and 0.25% allocated to Network Grants for subgraph developers using multi-chain subgraphs. -- [来自文森特·塔格利亚(Vincent(Victor)Taglia)的索引人2ools](https://indexer-2ools.vincenttaglia.com/#/) -- [如何成为Graph网络上的有效索引人](https://thegraph.com/blog/how-to-become-indexer/) -- [索引人知识库](https://thegraph.academy/indexers/) -- [分配优化器](https://github.com/graphprotocol/allocationopt.jl) -- [配置优化工具](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) +### Useful Resources -### 1. 即使子图失败,是否可以生成有效的索引证明(POI)? 
+- [Indexer 2ools from Vincent (Victor) Taglia](https://indexer-2ools.vincenttaglia.com/#/) +- [How to Become an Effective Indexer on The Graph Network](https://thegraph.com/blog/how-to-become-indexer/) +- [Indexer Knowledge Hub](https://thegraph.academy/indexers/) +- [Allocation Optimiser](https://github.com/graphprotocol/allocationopt.jl) +- [Allocation Optimization Tooling](https://github.com/anyblockanalytics/thegraph-allocation-optimization/) -是的,的确如此。 +### 1. Is it possible to generate a valid proof of indexing (POI) even if a subgraph has failed? -关于背景,仲裁章程,[在此处了解有关章程的更多信息]\(https://hackmd.io/@4Ln8SAS4RX-505bHZTeRw/BJcHzpHDu#摘要)规定了为故障子图生成POI的方法。 +Yes, it is indeed. -社区成员[SunTzu](https://github.com/suntzu93),已经创建了一个脚本,以按照仲裁章程的方法自动化此过程。查看存储库[此处](https://github.com/suntzu93/get_valid_poi_subgraph). +For context, the arbitration charter, [learn more about the charter here](https://hackmd.io/@4Ln8SAS4RX-505bIHZTeRw/BJcHzpHDu#Abstract), specifies the methodology for generating a POI for a failed subgraph. -### 2. MIP计划将首先激励哪一条链? +A community member, [SunTzu](https://github.com/suntzu93), has created a script to automate this process in compliance with the arbitration charter's methodology. Check out the repo [here](https://github.com/suntzu93/get_valid_poi_subgraph). -去中心化网络将支持的第一个链是Gnosis链!GnosisChain原名xDAI,是一个基于EVM的链。GnosisChain被选为第一个,因为它运行节点的用户友好性、索引人就绪性、与Graph的一致性以及在web3中的采用。 +### 2. Which chain will the MIPs program incentivise first? -### 3. 如何将新链添加到MIP计划中? +The first chain that will be supported on the decentralized network is Gnosis Chain! Formerly known as xDAI, Gnosis Chain is an EVM-based chain. Gnosis Chain was selected as the first given its user-friendliness of running nodes, Indexer readiness, alignment with The Graph and adoption within web3. -根据索引人的准备情况、需求和社区情绪,新链将在整个MIP计划中宣布。链将首先在测试网上得到支持,随后,将通过一个GIP来支持主网上的链。参与MIP计划的索引人将选择他们感兴趣支持的链,并将获得每个链的奖励,此外还将获得查询费和网络上服务子图的索引奖励。MIP参与者将根据他们的表现、服务网络需求的能力和社区支持进行评分。 +### 3. How will new chains be added to the MIPs program? -### 4. 我们如何知道网络何时对新链做好准备? +New chains will be announced throughout the MIPs program, based on Indexer readiness, demand, and community sentiment. Chains will firstly be supported on the testnet and, subsequently, a GIP will be passed to support that chain on mainnet. Indexers participating in the MIPs program will choose which chains they are interested in supporting and will earn rewards per chain, in addition to earning query fees and indexing rewards on the network for serving subgraphs. MIPs participants will be scored based on their performance, ability to serve network needs, and community support. -Graph基金会将监控QoS性能指标、网络性能和社区渠道,以最佳地评估准备情况。首要任务是确保网络满足多链dapp能够迁移其子图的性能需求。 +### 4. How will we know when the network is ready for a new chain? -### 5. 每个链的奖励是如何分配的? +The Graph Foundation will be monitoring QoS performance metrics, network performance and community channels to best assess readiness. The priority is ensuring the network meets performance needs for those multi-chain dapps to be able to migrate their subgraphs. -考虑到链对同步节点的要求各不相同,并且它们在查询量和采用率方面也不同,每个链的奖励将在该链的周期结束时决定,以确保捕获所有反馈和学习。然而,一旦网络支持该链,索引人也可以随时获得查询费和索引奖励。 +### 5. How are rewards divided per chain? -### 6. 我们需要对MIPs计划中的所有链进行索引吗?或者我们可以只选择一个链并对其进行索引? +Given that chains vary in their requirements for syncing nodes, and they differ in query volume and adoption, rewards per chain will be decided at the end of that chain's cycle to ensure that all feedback and learnings are captured. 
However, at all times Indexers will also be able to earn query fees and indexing rewards once the chain is supported on the network. -欢迎您索引任何您想索引的链!MIPs计划的目标是为索引人提供工具和知识,以索引他们想要的链,并支持他们感兴趣的web3生态系统。然而,对于每个链,都有从测试网到主网的阶段。确保完成正在索引的链的所有阶段。参见[MIPs notion 页面](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059)了解更多有关阶段的信息。 +### 6. Do we need to index all the chains in the MIPs program or can we pick just one chain and index that? -### 7. 奖励何时发放? +You are welcome to index whichever chain you'd like! The goal of the MIPs program is to equip Indexers with the tools & knowledge to index the chains they desire and support the web3 ecosystems they are interested in. However, for every chain, there are phases from testnet to mainnet. Make sure to complete all the phases for the chains you are indexing. See [The MIPs notion page](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) to learn more about the phases. -一旦满足性能指标并且这些索引人支持迁移的子图,MIP奖励将按链分配。在每条链的周期中,请查看每条链的总奖励信息。 +### 7. When will rewards be distributed? -### 8. 评分如何进行? +MIPs rewards will be distributed per chain once performance metrics are met and migrated subgraphs are supported by those Indexers. Look out for info about the total rewards per chain mid-way through that chain's cycle. -索引人将根据排行榜上整个项目的得分来竞争奖励。项目评分将基于: +### 8. How does scoring work? -**子图覆盖率** +Indexers will compete for rewards based on scoring throughout the program on the leaderboard. Program scoring will be based on: -- 您是否为每个链的子图提供了最大的支持? +**Subgraph Coverage** -- 在MIP期间,大型索引人预计将在其支持的每个链上持有50%以上份额的子图。 +- Are you providing maximal support for subgraphs per chain? -**服务质量** +- During MIPs, large Indexers are expected to stake 50%+ of subgraphs per chain they support. -- 索引人是否以良好的服务质量(延迟、新数据、正常运行时间等)服务于链? +**Quality Of Service** -- 索引人是否支持dapp开发人员对他们的需求做出反应? +- Is the Indexer serving the chain with good Quality of Service (latency, fresh data, uptime, etc.)? -索引人分配是否有效,有助于网络的整体健康? +- Is the Indexer supporting dapp developers being reactive to their needs? -**社区支持** +Is Indexer allocating efficiently, contributing to the overall health of the network? -- 索引人是否与其他索引人合作,帮助他们建立多链? +**Community Support** -- 索引人是否在整个项目中向核心开发人员提供反馈,或在论坛中与索引人共享信息? +- Is Indexer collaborating with fellow Indexers to help them get set up for multi-chain? -### 9. 如何分配Discord角色? +- Is Indexer providing feedback to core devs throughout the program or sharing information with Indexers in the Forum? -管理员将在接下来的几天内分配角色。 +### 9. How will the Discord role be assigned? -### 10. 可以在测试网上启动程序,然后切换到主网吗?您是否能够识别我的节点并在分配奖励时将其考虑在内? +Moderators will assign the roles in the next few days. -是的,实际上你应该这么做。有几个阶段在Görli上,一个阶段在主网上。 +### 10. Is it okay to start the program on a testnet and then switch to Mainnet? Will you be able to identify my node and take it into account while distributing rewards? -### 11. 您希望参与者在什么时候添加主网部署? +Yes, it is actually expected of you to do so. Several phases are on Görli and one is on the mainnet. -在第3阶段需要有一个主网索引人。关于这方面的更多信息将[很快在这个 notion 页面中分享](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) +### 11. At what point do you expect participants to add a mainnet deployment? -### 12. 奖励是否受制于授予? +There will be a requirement to have a mainnet indexer during phase 3. More infomation on this will be [shared in this notion page soon.](https://thegraphfoundation.notion.site/MIPs-Home-911e1187f1d94d12b247317265f81059) -计划结束时分配的百分比将根据授予情况而定。更多信息将在索引人协议中分享。 +### 12. Will rewards be subject to vesting? -### 13. 
对于拥有一名以上成员的团队,是否所有团队成员都会被授予MIPs Discord角色? +The percentage to be distributed at the end of the program will be subject to vesting. More on this will be shared in the Indexer Agreement. -是的 +### 13. For teams with more than one member, will all the team members be given a MIPs Discord role? -### 14. 是否可以使用graph策展人计划中的锁定代币来参与MIPs测试网? +Yes -是的 +### 14. Is it possible to use the locked tokens from the graph curator program to participate in the MIPs testnet? -### 15. 在MIP计划期间,是否会有一段时间对无效POI提出异议? +Yes -待定。请定期返回此页面以了解更多详细信息,或者如果您的请求很紧急,请发送电子邮件info@thegraph.foundation +### 15. During the MIPs program, will there be a period to dispute invalid POI? -### 17. 我们可以合并两份归属合约吗? +To be decided. Please return to this page periodically for more details on this or if your request is urgent, please email info@thegraph.foundation -不可以。选项是:您可以将一个委托给另一个,或者运行两个独立的索引人。 +### 17. Can we combine two vesting contracts? -### 18. KYC问题? +No. The options are: you can delegate one to the other one or run two separate indexers. -请发送电子邮件到info@thegraph.foundation +### 18. KYC Questions? -### 19. 我还没有准备好索引Gnosis链,我可以先从另一个链开始索引,在准备好后开始吗? +Please email info@thegraph.foundation -是的 +### 19. I am not ready to index Gnosis chain, can I jump in and start indexing from another chain when I am ready? -### 20. 是否有运行服务器的推荐区域? +Yes -我们不就地区提出建议。在选择地点时,您可能需要考虑加密货币的主要市场。 +### 20. Are there recommended regions to run the servers? -### 21. 什么是“处理程序gas成本”? +We do not give recommendations on regions. When picking locations you might want to think about where the major markets are for cryptocurrencies. -它是执行处理程序成本的决定性度量。与名字可能暗示的相反,它与区块链的gas成本无关。 +### 21. What is “handler gas cost”? + +It is the deterministic measure of the cost of executing a handler. Contrary to what the name might suggest, it is not related to the gas cost on blockchains. diff --git a/website/pages/zh/network/benefits.mdx b/website/pages/zh/network/benefits.mdx index 057fa566e685..1d1b32f131db 100644 --- a/website/pages/zh/network/benefits.mdx +++ b/website/pages/zh/network/benefits.mdx @@ -14,7 +14,7 @@ Graph的去中心化网络经过精心设计和完善,创造了强大的索引 - 每月成本降低60-98% - 基础设施设置成本为0美元 - 超群的正常运行时间 -- 访问438个索引人(还在增加) +- Access to hundreds of independent Indexers around the world - 全球社区24/7的技术支持 ## 好处解释 @@ -89,7 +89,7 @@ Graph的去中心化网络经过精心设计和完善,创造了强大的索引 ## 可靠性和弹性 -Graph 的去中心化网络使用户能够访问自托管`graph-node`时不存在的异地备援。由于99.9% 以上的正常运行时间,全球有168个索引人(还在增加) 保证了网络的安全,查询得到了可靠的服务。 +The Graph’s decentralized network gives users access to geographic redundancy that does not exist when self-hosting a `graph-node`. Queries are served reliably thanks to 99.9%+ uptime, achieved by hundreds of independent Indexers securing the network globally. 一句话: 与在本地运行一个`graph-node`相比,Graph网络成本更低,更容易使用,并且产生更好的结果。 diff --git a/website/pages/zh/network/explorer.mdx b/website/pages/zh/network/explorer.mdx index e891a1d48c2f..1f04ffec65fc 100644 --- a/website/pages/zh/network/explorer.mdx +++ b/website/pages/zh/network/explorer.mdx @@ -74,7 +74,7 @@ title: Graph浏览器 委托人在维护 The Graph 网络的安全性和去中心化方面发挥着关键作用。 他们通过将 GRT 代币委托给一个或多个索引人(即“质押”)来参与网络。 如果没有委托人,索引人不太可能获得可观的奖励和费用。 因此,索引人试图通过向委托人提供他们获得的一部分索引奖励和查询费用来吸引委托人。 -委托人反过来根据许多不同的变量选择索引人,例如过去的表现、索引奖励率和查询费用削减。 社区内的声誉也可以起到一定的作用! 建议连接通过[The Graph’s Discord](https://discord.gg/graphprotocol) 或者 [The Graph 论坛](https://forum.thegraph.com/)选择索引人! +Delegators, in turn, select Indexers based on a number of different variables, such as past performance, indexing reward rates, and query fee cuts. Reputation within the community can also play a factor in this! 
It’s recommended to connect with the indexers selected via [The Graph’s Discord](https://discord.gg/graphprotocol) or [The Graph Forum](https://forum.thegraph.com/)! ![资源管理器图像 7](/img/Delegation-Overview.png) diff --git a/website/pages/zh/network/indexing.mdx b/website/pages/zh/network/indexing.mdx index 011fdcfdaeba..effefb23adbe 100644 --- a/website/pages/zh/network/indexing.mdx +++ b/website/pages/zh/network/indexing.mdx @@ -2,7 +2,7 @@ title: 索引 --- -索引人是Graph 网络中的节点运营商,他们质押 Graph 通证 (GRT) 以提供索引和查询处理服务。 索引人通过他们的服务赚取查询费和索引奖励。 他们还根据 Cobbs-Douglas 回扣函数从回扣池中赚取收益,该回扣池与所有网络贡献者按他们的工作成比例共享。 +Indexers are node operators in The Graph Network that stake Graph Tokens (GRT) in order to provide indexing and query processing services. Indexers earn query fees and indexing rewards for their services. They also earn query fees that are rebated according to an exponential rebate function. 抵押在协议中的 GRT 会受到解冻期的影响,如果索引人是恶意的并向应用程序提供不正确的数据或索引不正确,则可能会被削减。 索引人也可以从委托人那里获得委托,为网络做出贡献。 @@ -26,7 +26,7 @@ title: 索引 索引奖励来自协议通胀,每年发行量设定为 3%。 它们根据每个子图上所有管理信号的比例分布在子图上,然后根据他们在该子图上分配的份额按比例分配给索引人。 **一项分配必须以符合仲裁章程规定的标准的有效索引证明(POI)来结束,才有资格获得奖励。** -社区创建了许多用于计算奖励的工具,您会在 [“社区指南”集合](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c)中找到它们。 您还可以在 [Discord 服务器](https://discord.gg/graphprotocol)上的 #delegators 和 #indexers 频道 中找到最新的工具列表。在这里,我们链接一个[推荐的分配优化器](https://github.com/graphprotocol/AllocationOpt.jl)与索引器者软件栈集成。 +Numerous tools have been created by the community for calculating rewards; you'll find a collection of them organized in the [Community Guides collection](https://www.notion.so/Community-Guides-abbb10f4dba040d5ba81648ca093e70c). You can also find an up to date list of tools in the #Delegators and #Indexers channels on the [Discord server](https://discord.gg/graphprotocol). Here we link a [recommended allocation optimiser](https://github.com/graphprotocol/AllocationOpt.jl) integrated with the indexer software stack. ### 什么是索引证明 (POI)? @@ -81,17 +81,17 @@ query indexerAllocations { ### 什么是查询费返利? 何时分配? -每当分配关闭并累积在子图的查询费用回扣池中时,网关就会收取查询费用。 回扣池旨在鼓励索引人按他们为网络赚取的查询费用的粗略比例分配股份。 池中分配给特定索引人的查询费用部分使用 Cobbs-Douglas 生产函数计算;每个索引人的分配量是他们对池的贡献和他们在子图上的份额分配的函数。 +Query fees are collected by the gateway and distributed to indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). The exponential rebate function is proposed as a way to ensure indexers achieve the best outcome by faithfully serving queries. It works by incentivizing Indexers to allocate a large amount of stake (which can be slashed for erring when serving a query) relative to the amount of query fees they may collect. -一旦分配已结束且争议期已过,索引人就可以要求回扣。 查询费用回扣根据查询费用减免和委托池比例分配给索引人及其委托人。 +Once an allocation has been closed the rebates are available to be claimed by the Indexer. Upon claiming, the query fee rebates are distributed to the Indexer and their Delegators based on the query fee cut and the exponential rebate function. ### 什么是查询费减免和索引奖励减免? `queryFeeCut` 和 `indexingRewardCut` 值是委托的参数,该索引可以设置连同 cooldownBlocks 控制 GRT 的索引和他们的委托人之间的分配。 有关设置委托参数的说明,请参阅[协议中的质押](/network/indexing#stake-in-the-protocol)的最后步骤。 -- **查询费用削减** - 在将分配给索引人的子图上累积的查询费用回扣的百分比。 如果将其设置为 95%,则在申请分配时,索引人将获得查询费用回扣池的 95%,另外 5% 将分配给委托人。 +- **queryFeeCut** - the % of query fee rebates that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the query fees earned when an allocation is closed with the other 5% going to the Delegators. 
-- **索引奖励削减** - 将分配给索引人的子图上累积的索引奖励的百分比。 如果将其设置为 95%,则当分配结束时,索引人将获得索引奖励池的 95%,而委托人将分配其他 5%。 +- **indexingRewardCut** - the % of indexing rewards that will be distributed to the Indexer. If this is set to 95%, the Indexer will receive 95% of the indexing rewards when an allocation is closed and the Delegators will split the other 5%. ### 索引人如何知道要索引哪些子图? @@ -375,7 +375,7 @@ docker-compose up #### 开始 -索引人代理和索引人服务应该与你的 Graph节点基础设施共同定位。 有很多方法可以为你的索引人组件设置虚拟执行环境,这里我们将解释如何使用 NPM 包或源码在裸机上运行它们,或者通过谷歌云 Kubernetes 引擎上的 kubernetes 和 docker 运行。 如果这些设置实例不能很好地转化为你的基础设施,很可能会有一个社区指南供参考,请到[Discord](https://discord.gg/graphprotocol)在启动您的 Indexer 组件之前,请记住[stake in the protocol](/network/indexing#stake-in-the-protocol)! +The Indexer agent and Indexer service should be co-located with your Graph Node infrastructure. There are many ways to set up virtual execution environments for your Indexer components; here we'll explain how to run them on baremetal using NPM packages or source, or via kubernetes and docker on the Google Cloud Kubernetes Engine. If these setup examples do not translate well to your infrastructure there will likely be a community guide to reference, come say hi on [Discord](https://discord.gg/graphprotocol)! Remember to [stake in the protocol](/network/indexing#stake-in-the-protocol) before starting up your Indexer components! #### 来自 NPM 包 @@ -661,21 +661,21 @@ ActionType { 数据源用法示例 ```bash -indexer indexer actions get all +graph indexer actions get all -indexer indexer actions get --status queued +graph indexer actions get --status queued -indexer indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 +graph indexer actions queue allocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 5000 -indexer indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 +graph indexer actions queue reallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae5 55000 -indexer indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae +graph indexer actions queue unallocate QmeqJ6hsdyk9dVbo1tvRgAxWrVS3rkERiEMsxzPShKLco6 0x4a58d33e27d3acbaecc92c15101fbc82f47c2ae -indexer indexer actions cancel +graph indexer actions cancel -indexer indexer actions approve 1 3 5 +graph indexer actions approve 1 3 5 -indexer indexer actions execute approve +graph indexer actions execute approve ``` 请注意,分配管理支持的操作类型有不同的输入要求: @@ -797,8 +797,4 @@ setDelegationParameters(950000, 600000, 500) - **Closed** - 索引人可以在经过1个时期后自由关闭分配 ([closeAllocation()](https://github.com/graphprotocol/contracts/blob/master/contracts/staking/Staking.sol#L873)) ,或者其索引人代理将在**maxAllocationEpochs** 之后(当前为28天)自动关闭分配。当分配以有效的索引证明(POI)结束时,其索引奖励将分配给索引人及其委托人(请参阅下面的“如何分配奖励?”了解更多信息)。 -- **Finalized** - 一旦分配结束,将有一段争议期,在此之后,分配将被视为**已完成**,其查询费回扣将可申请(claim())。索引人代理监视网络以检测**最终**分配,如果它们高于可配置(可选)阈值**—-allocation-claim-threshold**,则声明它们。 - -- **Claimed** - 分配的最终状态;它已经完成了作为活跃分配的过程,所有符合条件的奖励已经分配完毕,其查询费返利也已声明。 - 建议索引人在链上创建分配之前,利用链外同步功能将子图部署同步到链头。对于可能需要超过28个时期才能同步或有一些无法确定失败的机会的子图,此功能特别有用。 diff --git a/website/pages/zh/new-chain-integration.mdx b/website/pages/zh/new-chain-integration.mdx index c5934efa6f87..b5492d5061af 100644 --- a/website/pages/zh/new-chain-integration.mdx +++ b/website/pages/zh/new-chain-integration.mdx @@ -11,15 +11,15 @@ Graph Node can currently index data from the following chain types: If you are interested in any of those chains, integration is a matter of Graph 
Node configuration and testing. -If you are interested in a different chain type, you will need to build a new integration with Graph Node. Our recommended approach is development of a Firehose for the chain, and then integration of that Firehose with Graph Node. +If you are interested in a different chain type, a new integration with Graph Node must be built. Our recommended approach is to develop a new Firehose for the chain in question and then integrate that Firehose with Graph Node. More info below. -** 1. EVM JSON-RPC** +**1. EVM JSON-RPC** If the blockchain is EVM equivalent and the client/node exposes the standard EVM JSON-RPC API, Graph Node should be able to index the new chain. For more information, refer to [Testing an EVM JSON-RPC](new-chain-integration#testing-an-evm-json-rpc). **2. Firehose** -For non-EVM-based chains, Graph Node will need to ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. +For non-EVM-based chains, Graph Node must ingest blockchain data via gRPC and known type definitions. This can be done via [Firehose](firehose/), a new technology developed by [StreamingFast](https://www.streamingfast.io/) that provides a highly-scalable indexing blockchain solution using a files-based and streaming-first approach. Reach out to the [StreamingFast team](mailto:integrations@streamingfast.io/) if you need help with Firehose development. ## Difference between EVM JSON-RPC & Firehose @@ -52,7 +52,7 @@ For Graph Node to be able to ingest data from an EVM chain, the RPC node must ex > Do not change the env var name itself. It must remain `ethereum` even if the network name is different. 3. Run an IPFS node or use the one used by The Graph: https://api.thegraph.com/ipfs/ -\*Test the integration by locally deploying a subgraph\*\* +**Test the integration by locally deploying a subgraph** 1. Install [graph-cli](https://github.com/graphprotocol/graph-cli) 2. Create a simple example subgraph. Some options are below: diff --git a/website/pages/zh/operating-graph-node.mdx b/website/pages/zh/operating-graph-node.mdx index 2f1e0e95749b..78fa738fcf22 100644 --- a/website/pages/zh/operating-graph-node.mdx +++ b/website/pages/zh/operating-graph-node.mdx @@ -22,7 +22,7 @@ Graph节点的主存储区,这是存储子图数据、子图元数据以及子 虽然有些子图可能只需要一个完整的节点,但有些子图的索引功能可能需要额外的RPC功能。特别是,将`eth_calls`作为索引的一部分的子图需要一个支持[EIP-1898](https://eips.ethereum.org/EIPS/eip-1898)的归档节点,而带有`callHandlers`或带有`调用`筛选器的`blockHandlers`的子图则需要`trace_filter`支持[(请参阅此处的跟踪模块文档)](https://openethereum.github.io/JSONRPC-trace-module)。 -**即将推出:Network Firehose** - Firehose 是一种gRPC服务,它提供有序但可分叉的区块流,由Graph的核心开发人员开发,以更好地支持大规模的性能索引。这目前不是索引人的要求,但鼓励索引人在完全网络支持之前熟悉该技术。在[这里](https://firehose.streamingfast.io/)了解有关Firehose的更多信息。 +**Network Firehoses** - a Firehose is a gRPC service providing an ordered, yet fork-aware, stream of blocks, developed by The Graph's core developers to better support performant indexing at scale. This is not currently an Indexer requirement, but Indexers are encouraged to familiarise themselves with the technology, ahead of full network support. Learn more about the Firehose [here](https://firehose.streamingfast.io/). 
### IPFS节点 diff --git a/website/pages/zh/publishing/publishing-a-subgraph.mdx b/website/pages/zh/publishing/publishing-a-subgraph.mdx index 258ce4dfdfa8..ea343c5fbfc4 100644 --- a/website/pages/zh/publishing/publishing-a-subgraph.mdx +++ b/website/pages/zh/publishing/publishing-a-subgraph.mdx @@ -6,7 +6,7 @@ title: 向去中心化的网络发布子图 将一个子图发布到去中心化的网络上,使其可以开始让[策展人](/network/curating)进行策展,并开始让[索引人](/network/indexing)进行索引。 -关于如何将子图发布到去中心化网络的演示,请看[这个视频](https://youtu.be/HfDgC2oNnwo?t=580) + 您可以在[这里](/developing/supported-networks)找到支持的网络列表。 diff --git a/website/pages/zh/querying/querying-best-practices.mdx b/website/pages/zh/querying/querying-best-practices.mdx index 11bc3eed8529..4584b5a48fc5 100644 --- a/website/pages/zh/querying/querying-best-practices.mdx +++ b/website/pages/zh/querying/querying-best-practices.mdx @@ -67,18 +67,18 @@ query [operationName]([variableName]: [variableType]) { ### 向 GraphQL API 发送查询 -GraphQL is a language and set of conventions that transport over HTTP. +GraphQL 是一种通过 HTTP 传输的语言和一组协议。 -It means that you can query a GraphQL API using standard `fetch` (natively or via `@whatwg-node/fetch` or `isomorphic-fetch`). +这意味着您可以使用标准`fetch`(本机或通过`@whatwg-node/提取`或`isomorphic-fetch`) 查询 GraphQL API。 -However, as stated in ["Querying from an Application"](/querying/querying-from-an-application), we recommend you to use our `graph-client` that supports unique features such as: +但是,正如[“从应用程序查询”](/querying/querying-from-an-application)中所说,我们建议您使用我们的`graph-client`,该客户端支持以下独特功能: - 跨链子图处理: 在一个查询中从多个子图进行查询 - [自动区块跟踪](https://github.com/graphprotocol/graph-client/blob/main/packages/block-tracking/README.md) - [自动分页](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - 完全类型化的结果 -Here's how to query The Graph with `graph-client`: +以下是如何使用 `Graph-client` 查询Graph: ```tsx import { execute } from '../.graphclient' @@ -102,9 +102,9 @@ async function main() { main() ``` -More GraphQL client alternatives are covered in ["Querying from an Application"](/querying/querying-from-an-application). +[“从应用程序查询”](/querying/querying-from-an-application)中介绍了更多的 GraphQL 客户端替代方案。 -Now that we covered the basic rules of GraphQL queries syntax, let's now look at the best practices of GraphQL query writing. +现在我们已经介绍了 GraphQL 查询语法的基本规则,接下来让我们看看 GraphQL 查询编写的最佳实践。 --- @@ -112,7 +112,7 @@ Now that we covered the basic rules of GraphQL queries syntax, let's now look at ### 始终编写静态查询 -A common (bad) practice is to dynamically build query strings as follows: +一个常见的(不好的) 实践是动态构建查询字符串,如下所示: ```tsx const id = params.id @@ -128,14 +128,14 @@ query GetToken { // Execute query... ``` -While the above snippet produces a valid GraphQL query, **it has many drawbacks**: +虽然上面的代码生成了一个有效的 GraphQL 查询,但是**它有很多缺点**: - 它使得从整体上**理解查询变得更加困难** - 开发人员**负责安全地消毒字符串插值** - 如果不将变量的值作为请求参数的一部分发送,则可能会**阻止服务器端的缓存** - 它**阻止工具静态分析查询**(例如: Linter 或类型生成工具) -For this reason, it is recommended to always write queries as static strings: +因此,建议始终将查询写为静态字符串: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -157,18 +157,18 @@ const result = await execute(query, { }) ``` -Doing so brings **many advantages**: +这样做有**很多好处**: - **易于阅读和维护**查询 - GraphQL **服务器处理变量的净化** - 可以在服务器级别**缓存变量** - **查询可以通过工具进行静态分析**(下面几节将详细介绍) -**Note: How to include fields conditionally in static queries** +**注意: 如何在静态查询中有条件地包括字段** -We might want to include the `owner` field only on a particular condition. 
+我们可能希望仅在特定条件下包括 `owner` 字段。 -For this, we can leverage the `@include(if:...)` directive as follows: +为此,我们可以利用`@include (if:...)`,如下所示: ```tsx import { execute } from 'your-favorite-graphql-client' @@ -191,7 +191,7 @@ const result = await execute(query, { }) ``` -Note: The opposite directive is `@skip(if: ...)`. +注意:相反的指令是@skip(if:…)。 ### 性能提示 @@ -199,13 +199,13 @@ Note: The opposite directive is `@skip(if: ...)`. GraphQL became famous for its "Ask for what you want" tagline. -For this reason, there is no way, in GraphQL, to get all available fields without having to list them individually. +因此,在GraphQL中,不单独列出所有可用字段,就无法获取所有可用字段。 When querying GraphQL APIs, always think of querying only the fields that will be actually used. -A common cause of over-fetching is collections of entities. By default, queries will fetch 100 entities in a collection, which is usually much more than what will actually be used, e.g., for display to the user. Queries should therefore almost always set first explicitly, and make sure they only fetch as many entities as they actually need. This applies not just to top-level collections in a query, but even more so to nested collections of entities. +过度获取的一个常见原因是实体集合。默认情况下,查询将获取集合中的100个实体,这通常比实际使用的实体多得多,例如,用于向用户显示的实体。因此,查询几乎总是首先显式设置,并确保它们只获取实际需要的实体。这不仅适用于查询中的一层集合,更适用于实体的嵌套集合。 -For example, in the following query: +例如,在以下查询中: ```graphql query listTokens { @@ -220,13 +220,13 @@ query listTokens { } ``` -The response could contain 100 transactions for each of the 100 tokens. +该响应可以包含100个代币中的100个交易。 -If the application only needs 10 transactions, the query should explicitly set `first: 10` on the transactions field. +如果应用程序只需要10个交易,那么查询应该首先在交易字段上显式设置:`first: 10`。 -**Combining multiple queries** +**组合多个查询** -Your application might require querying multiple types of data as follows: +您的应用程序可能需要查询多种类型的数据,如下所示: ```graphql import { execute } from "your-favorite-graphql-client" @@ -256,9 +256,9 @@ const [tokens, counters] = Promise.all( ) ``` -While this implementation is totally valid, it will require two round trips with the GraphQL API. +虽然这个实现是完全有效的,但它需要使用GraphQL API进行两次交互。 -Fortunately, it is also valid to send multiple queries in the same GraphQL request as follows: +幸运的是,在同一GraphQL请求中发送多个查询也是有效的,如下所示: ```graphql import { execute } from "your-favorite-graphql-client" @@ -279,13 +279,13 @@ query GetTokensandCounters { const { result: { tokens, counters } } = execute(query) ``` -This approach will **improve the overall performance** by reducing the time spent on the network (saves you a round trip to the API) and will provide a **more concise implementation**. +这种方法将通过减少在网络上花费的时间来**提高整体性能**(为您节省API交互的时间),并提供**更简洁的实现**。 ### 利用GraphQL片段 -A helpful feature to write GraphQL queries is GraphQL Fragment. +编写GraphQL查询的一个有用功能是GraphQL片段。 -Looking at the following query, you will notice that some fields are repeated across multiple Selection-Sets (`{ ... }`): +查看以下查询,您会注意到某些字段在多个选择集(`{ ... }`) 中重复: ```graphql query { @@ -305,12 +305,12 @@ query { } ``` -Such repeated fields (`id`, `active`, `status`) bring many issues: +此类重复字段(`id`、`active`、`status`)会带来许多问题: - 当查询更广泛时将更难阅读 - 当使用基于查询生成TypeScript类型的工具时(_上一节将详细介绍_),`newDelegate`和`oldDelegate`将产生两个不同的内联接口。 -A refactored version of the query would be the following: +查询的重构版本如下: ```graphql query { @@ -336,13 +336,13 @@ fragment DelegateItem on Transcoder { Using GraphQL `fragment` will improve readability (especially at scale) but also will result in better TypeScript types generation. 
-When using the types generation tool, the above query will generate a proper `DelegateItemFragment` type (_see last "Tools" section_). +当使用类型生成工具时,上述查询将生成一个正确的`DelegateItemFragment`类型(_请参阅上一节“工具”_)。 ### GraphQL片段的注意事项 -**Fragment base must be a type** +**片段必须是一种类型** -A Fragment cannot be based on a non-applicable type, in short, **on type not having fields**: +片段不能基于不适用的类型,简而言之,**基于没有字段的类型**: ```graphql fragment MyFragment on BigInt { @@ -350,11 +350,11 @@ fragment MyFragment on BigInt { } ``` -`BigInt` is a **scalar** (native "plain" type) that cannot be used as a fragment's base. +`BigInt`是一个**标量**(原生“纯”类型),不能用作片段的基础类型。 -**How to spread a Fragment** +**如何传播片段** -Fragments are defined on specific types and should be used accordingly in queries. +片段是在特定类型上定义的,应该在查询中相应地使用。 例子: @@ -377,17 +377,17 @@ fragment VoteItem on Vote { } ``` -`newDelegate` and `oldDelegate` are of type `Transcoder`. +`newDelegate` 和 `oldDelegate` 属于`Transcoder`类型。 -It is not possible to spread a fragment of type `Vote` here. +无法在此处传播`Vote`类型的片段。 -**Define Fragment as an atomic business unit of data** +**将片段定义为数据的原子业务单元。** -GraphQL Fragment must be defined based on their usage. +GraphQL Fragment必须根据其用法进行定义。 -For most use-case, defining one fragment per type (in the case of repeated fields usage or type generation) is sufficient. +对于大多数用例,为每个类型定义一个片段(在重复使用字段或生成类型的情况下)就足够。 -Here is a rule of thumb for using Fragment: +以下是使用Fragment的经验法则: - 当相同类型的字段在查询中重复时,将它们分组为片段 - 当重复类似但不相同的字段时,创建多个片段,例如: @@ -417,31 +417,31 @@ fragment VoteWithPoll on Vote { ### GraphQL基于web的浏览器 -Iterating over queries by running them in your application can be cumbersome. For this reason, don't hesitate to use [The Graph Explorer](https://thegraph.com/explorer) to test your queries before adding them to your application. The Graph Explorer will provide you a preconfigured GraphQL playground to test your queries. +通过在应用程序中运行查询来迭代查询可能会很麻烦。因此,在将查询添加到应用程序之前,不要犹豫使用[Graph浏览器](https://thegraph.com/explorer)来测试查询。Graph浏览器将为您提供一个预配置的GraphQL控制面板来测试您的查询。 -If you are looking for a more flexible way to debug/test your queries, other similar web-based tools are available such as [Altair](https://altair.sirmuel.design/) and [GraphiQL](https://graphiql-online.com/graphiql). +如果您正在寻找一种更灵活的方式来调试/测试查询,可以使用其他类似的基于web的工具,如[Altair](https://altair.sirmuel.design/)和[GraphiQL](https://graphiql-online.com/graphiql)。 ### GraphQL Linting -In order to keep up with the mentioned above best practices and syntactic rules, it is highly recommended to use the following workflow and IDE tools. +为了跟上提到的最佳实践和语法规则,强烈建议使用以下工作流和IDE工具。 **GraphQL ESLint** -[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint) will help you stay on top of GraphQL best practices with zero effort. +[GraphQL ESLint](https://github.com/dotansimha/graphql-eslint)将帮助您轻松掌握GraphQL最佳实践。 -[Setup the "operations-recommended"](https://github.com/dotansimha/graphql-eslint#available-configs) config will enforce essential rules such as: +[设置“操作建议”](https://github.com/dotansimha/graphql-eslint#available-configs)配置将强制执行基本规则,例如: - `@graphql-eslint/fields-on-correct-type`: 字段是否用于正确的类型? - `@graphql-eslint/no-unused variables`: 给定的变量是否应该保持未使用状态? - 还有更多! -This will allow you to **catch errors without even testing queries** on the playground or running them in production! +这将允许您**捕获错误,而无需在playground上测试查询**或在生产环境中运行查询! 
### IDE插件 -**VSCode and GraphQL** +**VSCode和GraphQL** -The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql) is an excellent addition to your development workflow to get: +[GraphQL VSCode扩展](https://marketplace.visualstudio.com/items?itemName=GraphQL.vscode-graphql)是对开发工作流的一个极好补充,可以获得: - 语法高亮显示 - 自动完成建议 @@ -449,15 +449,15 @@ The [GraphQL VSCode extension](https://marketplace.visualstudio.com/items?itemNa - 片段 - 转到片段和输入类型的定义 -If you are using `graphql-eslint`, the [ESLint VSCode extension](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint) is a must-have to visualize errors and warnings inlined in your code correctly. +如果您使用的是`graphql-eslit`,[ESLintVSCode扩展](https://marketplace.visualstudio.com/items?itemName=dbaeumer.vscode-eslint)是正确可视化代码中内联的错误和警告的必备工具。 -**WebStorm/Intellij and GraphQL** +**WebStorm/Intellij和GraphQL** -The [JS GraphQL plugin](https://plugins.jetbrains.com/plugin/8097-graphql/) will significantly improve your experience while working with GraphQL by providing: +[JS GraphQL插件](https://plugins.jetbrains.com/plugin/8097-graphql/)将通过提供以下功能显著改善您在使用GraphQL时的体验: - 语法高亮显示 - 自动完成建议 - 根据模式验证 - 片段 -More information on this [WebStorm article](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/) that showcases all the plugin's main features. +有关这篇[WebStorm文章](https://blog.jetbrains.com/webstorm/2019/04/featured-plugin-js-graphql/)的更多信息,其中展示了插件的所有主要功能。 diff --git a/website/pages/zh/querying/querying-from-an-application.mdx b/website/pages/zh/querying/querying-from-an-application.mdx index 86d03ccb214f..19fa3993f822 100644 --- a/website/pages/zh/querying/querying-from-an-application.mdx +++ b/website/pages/zh/querying/querying-from-an-application.mdx @@ -33,11 +33,11 @@ Graph提供了自己的GraphQL客户端,`graph-client`支持以下独特功能 - [自动分页](https://github.com/graphprotocol/graph-client/blob/main/packages/auto-pagination/README.md) - 完全类型化的结果 -Also integrated with popular GraphQL clients such as Apollo and URQL and compatible with all environments (React, Angular, Node.js, React Native), using `graph-client` will give you the best experience for interacting with The Graph. +还与流行的GraphQL客户端(如Apollo和URQL)集成,并与所有环境(React、Angular、Node.js、React Native)兼容,使用`graph-client`将为您提供与Graph交互的最佳体验。 -Let's look at how to fetch data from a subgraph with `graphql-client`. +让我们看看如何使用 `graphql-client` 从子图获取数据。 -To get started, make sure to install The Graph Client CLI in your project: +首先,请确保在项目中安装 Graph Client CLI: ```sh yarn add -D @graphprotocol/client-cli @@ -45,7 +45,7 @@ yarn add -D @graphprotocol/client-cli npm install --save-dev @graphprotocol/client-cli ``` -Define your query in a `.graphql` file (or inlined in your `.js` or `.ts` file): +在`.graphql` 文件中定义查询(或在`.js` or `.ts`文件中内联): ```graphql query ExampleQuery { @@ -72,7 +72,7 @@ query ExampleQuery { } ``` -Then, create a configuration file (called `.graphclientrc.yml`) and point to your GraphQL endpoints provided by The Graph, for example: +然后,创建一个配置文件(名为`. 
grapclientrc.yml`)并指向Graph 提供的 GraphQL 端点,例如: ```yaml # .graphclientrc.yml @@ -96,7 +96,7 @@ Running the following The Graph Client CLI command will generate typed and ready graphclient build ``` -Finally, update your `.ts` file to use the generated typed GraphQL documents: +最后,更新`.ts` 文件以使用生成的类型化 GraphQL 文档: ```tsx import React, { useEffect } from 'react' @@ -134,17 +134,17 @@ function App() { export default App ``` -**⚠️ Important notice** +**⚠️重要告示** -`graph-client` is perfectly integrated with other GraphQL clients such as Apollo client, URQL, or React Query; you will [find examples in the official repository](https://github.com/graphprotocol/graph-client/tree/main/examples). +`graph-client` 与其他 GraphQL 客户端(如 Apollo client、 URQL 或 React Query)完美地集成; 您可以在[正式存储库中找到示例](https://github.com/graphprotocol/graph-client/tree/main/examples)。 -However, if you choose to go with another client, keep in mind that **you won't be able to get to use Cross-chain Subgraph Handling or Automatic Pagination, which are core features for querying The Graph**. +但是,如果您选择使用另一个客户端,请记住**您将无法使用跨链子图处理或自动分页,这是查询Graph 的核心功能**。 ### Apollo 客户端 -[Apollo client](https://www.apollographql.com/docs/) is the ubiquitous GraphQL client on the front-end ecosystem. +[Apollo 客户端](https://www.apollographql.com/docs/)是前端生态系统中无处不在的 GraphQL 客户端。 -Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, although the heaviest client, brings many features to build advanced UI on top of GraphQL: +Apollo Client可用于React、Angular、Vue、Ember、iOS和Android,虽然是重量级的客户端,但它在GraphQL之上提供了许多构建高级UI的功能: - 高级错误处理 - 分页 @@ -152,9 +152,9 @@ Available for React, Angular, Vue, Ember, iOS, and Android, Apollo Client, altho - 优化用户界面 - 本地状态管理 -Let's look at how to fetch data from a subgraph with Apollo client in a web project. +让我们看看如何在一个web项目中用 Apollo 客户端从子图中获取数据。 -First, install `@apollo/client` and `graphql`: +首先,安装`@apollo/client` 和`graphql`: ```sh npm install @apollo/client graphql @@ -193,7 +193,7 @@ client }) ``` -To use variables, you can pass in a `variables` argument to the query: +要使用变量,你可以在查询中传递一个`变量参数` 。 ```javascript const tokensQuery = ` @@ -226,16 +226,16 @@ client ### URQL -Another option is [URQL](https://formidable.com/open-source/urql/) which is available within Node.js, React/Preact, Vue, and Svelte environments, with more advanced features: +另一个选项是 [URQL](https://formidable.com/open-source/urql/),它可以在 Node.js、 React/Preact、 Vue 和 Svelte 环境中使用,具有更高级的特性: - 灵活的缓存系统 - 可扩展设计(使在它上面添加新功能变得容易) - 轻量级捆绑包(比 Apollo Client 小约5倍) - 支持文件上传和离线模式 -Let's look at how to fetch data from a subgraph with URQL in a web project. +让我们看看如何在一个网络项目中用 URQL 从子图中获取数据。 -First, install `urql` and `graphql`: +首先,安装`urql`和 `graphql`: ```sh npm install urql graphql diff --git a/website/pages/zh/querying/querying-the-hosted-service.mdx b/website/pages/zh/querying/querying-the-hosted-service.mdx index 275dec5dd235..b47509b28134 100644 --- a/website/pages/zh/querying/querying-the-hosted-service.mdx +++ b/website/pages/zh/querying/querying-the-hosted-service.mdx @@ -2,7 +2,7 @@ title: 查询托管服务 --- -部署子图后,请访问[托管服务](https://thegraph.com/hosted-service/) 以打开 [GraphiQL](https://github.com/graphql/graphiql) 界面,您可以在其中通过发出查询和查看数据模式来探索已经部署的子图的 GraphQL API。 +With the subgraph deployed, visit the [hosted service](https://thegraph.com/hosted-service/) to open up a [GraphiQL](https://github.com/graphql/graphiql) interface where you can explore the deployed GraphQL API for the subgraph by issuing queries and viewing the schema. 
下面提供了一个示例,但请参阅 [查询 API](/querying/graphql-api) 以获取有关如何查询子图实体的完整参考。 @@ -19,9 +19,9 @@ title: 查询托管服务 } ``` -## 使用托管服务 +## Using the hosted service -Graph浏览器及其 GraphQL playground 是探索和查询托管服务上部署的子图的有用方式。 +The Graph Explorer and its GraphQL playground is a useful way to explore and query deployed subgraphs on the hosted service. 下面详细介绍了一些主要功能: diff --git a/website/pages/zh/querying/querying-with-python.mdx b/website/pages/zh/querying/querying-with-python.mdx new file mode 100644 index 000000000000..081203777879 --- /dev/null +++ b/website/pages/zh/querying/querying-with-python.mdx @@ -0,0 +1,56 @@ +--- +title: Subgrounds +--- + +Subgrounds is an intuitive Python library for querying subgraphs, built by [Playgrounds](https://playgrounds.network/). It allows you to directly connect subgraph data to a Python data environment, letting you use libraries like [pandas](https://pandas.pydata.org/) to perform data analysis! + +Subgrounds offers a simple Pythonic API for building GraphQL queries, automates tedious workflows such as pagination, and empowers advanced users through controlled schema transformations. + +## 开始 + +Subgrounds requires Python 3.10 or higher and is available on [pypi](https://pypi.org/project/subgrounds/). + +```bash +pip install --upgrade subgrounds +# or +python -m pip install --upgrade subgrounds +``` + +Once installed, you can test out subgrounds with the following query. The following example grabs a subgraph for the Aave v2 protocol and queries the top 5 markets ordered by TVL (Total Value Locked), selects their name and their TVL (in USD) and returns the data as a pandas [DataFrame](https://pandas.pydata.org/pandas-docs/dev/reference/api/pandas.DataFrame.html#pandas.DataFrame). + +```python +from subgrounds import Subgrounds + +sg = Subgrounds() + +# Load the subgraph +aave_v2 = sg.load_subgraph( + "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum") + +# Construct the query +latest_markets = aave_v2.Query.markets( + orderBy=aave_v2.Market.totalValueLockedUSD, + orderDirection='desc', + first=5, +) +# Return query to a dataframe +sg.query_df([ + latest_markets.name, + latest_markets.totalValueLockedUSD, +]) +``` + +## Documentation + +Subgrounds is built and maintained by the [Playgrounds](https://playgrounds.network/) team and can be accessed on the [Playgrounds docs](https://docs.playgrounds.network/subgrounds). + +Since subgrounds has a large feature set to explore, here are some helpful starting places: + +- [Getting Started with Querying](https://docs.playgrounds.network/subgrounds/getting_started/basics/) + - A good first step for how to build queries with subgrounds. +- [Building Synthetic Fields](https://docs.playgrounds.network/subgrounds/getting_started/synthetic_fields/) + - A gentle introduction to defining synthetic fields that transform data defined from the schema. +- [Concurrent Queries](https://docs.playgrounds.network/subgrounds/getting_started/async/) + - Learn how to level up your queries by parallelizing them. +- [Exporting Data to CSVs](https://docs.playgrounds.network/subgrounds/faq/exporting/) + - A quick article on how to seemlessly save your data as CSVs for further analysis. 
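+As a small, unofficial sketch of that last point (it simply reuses the same Aave v2 subgraph and field selection shown in the example above; the output filename is arbitrary), the pandas DataFrame returned by `query_df` can be written straight to a CSV file:
+
+```python
+from subgrounds import Subgrounds
+
+sg = Subgrounds()
+
+# Load the same Aave v2 subgraph used in the example above
+aave_v2 = sg.load_subgraph(
+    "https://api.thegraph.com/subgraphs/name/messari/aave-v2-ethereum")
+
+# Same query as above: top 5 markets ordered by TVL
+latest_markets = aave_v2.Query.markets(
+    orderBy=aave_v2.Market.totalValueLockedUSD,
+    orderDirection='desc',
+    first=5,
+)
+
+# query_df returns a pandas DataFrame, so it can be saved directly as CSV
+df = sg.query_df([
+    latest_markets.name,
+    latest_markets.totalValueLockedUSD,
+])
+df.to_csv("aave_v2_top_markets.csv", index=False)
+```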
diff --git a/website/pages/zh/quick-start.mdx b/website/pages/zh/quick-start.mdx new file mode 100644 index 000000000000..80cf442c5207 --- /dev/null +++ b/website/pages/zh/quick-start.mdx @@ -0,0 +1,168 @@ +--- +title: 快速开始 +--- + +This guide will quickly take you through how to initialize, create, and deploy your subgraph to the Subgraph Studio or the [hosted service](#hosted-service). + +Ensure that your subgraph will be indexing data from a [supported network](/developing/supported-networks). + +本指南是在假设您具备以下条件的情况下编写的: + +- 您选择的网络上的智能合约地址 +- GRT来策划你的子图 +- 一个加密钱包 + +## 1. 在子图工作室中创建子图 + +Go to the [Subgraph Studio](https://thegraph.com/studio/) and connect your wallet. + +连接后,您可以单击“创建子图”开始。选择您选择的网络,然后单击“继续”。 + +## 2. 安装 Graph CLI + +The Graph CLI is written in JavaScript and you will need to have either `npm` or `yarn` installed to use it. + +在本地计算机上,运行以下命令之一: + +Using [npm](https://www.npmjs.com/): + +```sh +npm install -g @graphprotocol/graph-cli +``` + +Using [yarn](https://yarnpkg.com/): + +```sh +yarn global add @graphprotocol/graph-cli +``` + +## 3. 初始化子图 + +> You can find commands for your specific subgraph on the subgraph page in [Subgraph Studio](https://thegraph.com/studio/). + +初始化子图时,CLI工具会要求您提供以下信息: + +- 协议:选择子图索引数据的协议 +- 子图段塞:为您的子图创建一个名称。您的子图段塞是子图的标识符。 +- 创建子图的目录:选择您的本地目录 +- 以太坊网络(可选):您可能需要指定子图将从哪个EVM兼容网络索引数据 +- 合约地址:找到要查询数据的智能合约地址 +- ABI:如果ABI不是自动填充的,则需要将其手动输入为JSON文件 +- 起始区块:建议您在子图索引区块链数据时输入起始区块以节省时间。您可以通过查找部署合约区块来定位起始区块。 +- 合约名称:输入您的合约名称 +- 将合约事件作为实体进行索引:建议您将其设置为true,因为它将自动为每个发出的事件向子图添加映射 +- 添加其他合约(可选):您可以添加其他合约 + +通过运行以下命令从现有合约初始化子图: + +```sh +graph init --studio +``` + +请参阅下面的屏幕截图,以获取初始化子图时所需的示例: + +![Subgraph command](/img/subgraph-init-example.png) + +## 4. 编写子图 + +前面的命令创建了一个原始子图,可以将其用作构建子图的起点。当对子图进行更改时,将主要使用三个文件: + +- 清单(subgraph.yaml)--清单定义子图将索引哪些数据源。 +- 模式(schema.graphql)--GraphQL 模式定义从子图中检索到的数据。 +- AssemblyScript 映射(mapping.ts)--将数据源中的数据转换为模式中定义的实体的代码。 + +想了解更多如何编写子图的信息,请参阅[创建子图](/development/Creating-a-subgraph)。 + +## 5. 部署到子图工作室 + +一旦您的子图被编写好,请运行以下命令: + +```sh +$ graph codegen +$ graph build +``` + +- 认证并部署子图。部署密钥可以在子图工作室的子图页面上找到。 + +```sh +$ graph auth --studio +$ graph deploy --studio +``` + +You will be asked for a version label. It's strongly recommended to use [semver](https://semver.org/) for versioning like `0.0.1`. That said, you are free to choose any string as version such as:`v1`, `version1`, `asdf`. + +## 6. 测试子图 + +您可以通过在playground部分进行示例查询来测试子图。 + +日志会告诉你你的子图是否有任何错误。操作子图的日志如下所示: + +![Subgraph logs](/img/subgraph-logs-image.png) + +If your subgraph is failing, you can query the subgraph health by using the GraphiQL Playground. Note that you can leverage the query below and input your deployment ID for your subgraph. In this case, `Qm...` is the deployment ID (which can be located on the Subgraph page under **Details**). The query below will tell you when a subgraph fails, so you can debug accordingly: + +```graphql +{ + indexingStatuses(subgraphs: ["Qm..."]) { + node + synced + health + fatalError { + message + block { + number + hash + } + handler + } + nonFatalErrors { + message + block { + number + hash + } + handler + } + chains { + network + chainHeadBlock { + number + } + earliestBlock { + number + } + latestBlock { + number + } + lastHealthyBlock { + number + } + } + entityCount + } +} +``` + +## 7. 将你的子图发布到Graph的去中心化网络 + +一旦你的子图被部署到子图工作室,你已经测试了它,并准备把它投入生产,你就可以把它发布到去中心化的网络。 + +在子图工作室中,单击您的子图。在子图的页面上,您可以单击右上角的发布按钮。 + +Select the network you would like to publish your subgraph to. 
It is recommended to publish subgraphs to Arbitrum One to take advantage of the [faster transaction speeds and lower gas costs](/arbitrum/arbitrum-faq). + +在你可以查询你的子图之前,索引人需要开始在上面提供查询。为了简化这个过程,你可以使用GRT来策展你自己的子图。 + +在撰写本文时,建议您用10000 GRT策展自己的子图,以确保它被索引并可尽快查询。 + +为了节省gas成本,您可以在将子图发布到Graph的去中心化网络时选择此按钮,在发布子图的同一交易中策展子图: + +![Subgraph publish](/img/publish-and-signal-tx.png) + +## 8. 查询子图 + +现在,您可以通过将GraphQL查询发送到子图的查询URL来查询子图,您可以单击查询按钮找到该查询URL。 + +如果你没有你的API密钥,你可以通过免费的、速率有限的临时查询URL从你的去中心化应用查询,该URL可用于开发和暂存。 + +For more information about querying data from your subgraph, read more [here](../querying/querying-the-graph/). diff --git a/website/pages/zh/substreams.mdx b/website/pages/zh/substreams.mdx index daec1985df1e..5f9e111a089e 100644 --- a/website/pages/zh/substreams.mdx +++ b/website/pages/zh/substreams.mdx @@ -1,9 +1,44 @@ --- -title: Substreams +title: 子流 --- -Substreams is a new technology developed by The Graph protocol core developers, built to enable extremely fast consumption and processing of indexed blockchain data. Substreams are currently in open beta, available for testing and development across multiple blockchains. +![Substreams Logo](/img/substreams-logo.png) -访问[子流文档](https://substreams.streamingfast.io/)以了解更多信息并开始构建子流。 +Substreams is a powerful blockchain indexing technology, developed for The Graph Network. Substreams enables developers to write Rust modules, composing data streams alongside the community, and provides extremely high-performance indexing by virtue of parallelization, in a streaming-first fashion. - +With Substreams, you can extract data from different blockchains (Ethereum, BNB, Solana...) ultra-fast! Then, you can send the data to several locations (a Postgres database, a Mongo database, or a Subgraph). + +## How Substreams Works in 4 Steps + +1. **You write a Rust program, which defines the transformations that you want to apply to the blockchain data.** For example, the following Rust function extracts relevant information from an Ethereum block (number, hash, and parent hash). + +```rust +fn get_my_block(blk: Block) -> Result { + let header = blk.header.as_ref().unwrap(); + + Ok(MyBlock { + number: blk.number, + hash: Hex::encode(&blk.hash), + parent_hash: Hex::encode(&header.parent_hash), + }) +} +``` + +2. **You wrap up your Rust program into a WASM module just by running a single CLI command.** + +3. **The WASM container is sent to a Substreams endpoint for execution.** The Substreams provider feeds the WASM container with the blockchain data and the transformations are applied. + +4. **You select a [sink](https://substreams.streamingfast.io/developers-guide/sink-targets), a place where you want to send the transformed data** (a Postgres database or a Subgraph, for example). + +## Substreams Documentation + +The official Substreams documentation is currently maintained by the StreamingFast team [on the StreamingFast website](https://substreams.streamingfast.io/). + +### 开始 + +- In order to develop and deploy a Substreams, [you must install the Substreams CLI](https://substreams.streamingfast.io/getting-started/installing-the-cli). +- Then, run your first Substreams by following the [Quickstart Tutorial](https://substreams.streamingfast.io/getting-started/quickstart). + +### Expand Your Knowledge + +- Take a look at the [Ethereum Explorer Tutorial](https://substreams.streamingfast.io/tutorials/overview/) to learn about the basic transformations you can create with Substreams. 
diff --git a/website/pages/zh/sunrise.mdx b/website/pages/zh/sunrise.mdx new file mode 100644 index 000000000000..20e06d584e4b --- /dev/null +++ b/website/pages/zh/sunrise.mdx @@ -0,0 +1,113 @@ +--- +title: Sunrise of decentralized data FAQ +--- + +> Note: this document is continually updated to ensure the most accurate and helpful information is provided. New questions and answers are added on a regular basis. If you can’t find the information you’re looking for, or if you require immediate assistance [reach out on Discord](https://discord.gg/vtvv7FP). + +## What is the sunrise of decentralized data? + +The sunrise of decentralized data is an initiative spearheaded by Edge & Node, working on The Graph. The goal is to seamlessly enable subgraph developers and data consumers to upgrade to The Graph’s decentralized network. + +This plan draws on many previous developments from The Graph ecosystem, including an upgrade Indexer to serve queries on newly published subgraphs, and the ability to integrate new blockchain networks to The Graph. + +### What are the phases of the sunrise of decentralized data? + +**Sunray**: Enable support for hosted service chains, offer a seamless upgrade flow, offer a free plan on The Graph Network.\ +**Sunbeam**: The upgrade window that subgraph developers will have to upgrade their subgraphs to The Graph Network. The start and length of this will be announced soon.\ +**Sunrise**: Hosted service endpoints will expire as traffic moves to The Graph Network. + +### Do I need to run my own infrastructure? + +No, all infrastructure is operated by independent Indexers on The Graph Network, including the upgrade Indexer ([read more below](#what-is-an-upgrade-indexer)). + +You can use the [Subgraph Studio](https://thegraph.com/studio/) to create, test, and publish your subgraph. All hosted service users are encouraged to upgrade their subgraphs to The Graph Network. The upgrade Indexer ensures you can query your subgraph even without curation signal. + +Once your subgraph has reached adequate curation signal and other Indexers begin supporting it, the upgrade Indexer will gradually taper off, allowing other Indexers to collect indexing rewards and query fees. + +### Should I host my own indexing infrastructure? + +Running infrastructure for your own project is [significantly more resource intensive](/network/benefits/) when compared to using The Graph Network. + +Additionally, The Graph Network is significantly more robust, reliable, and cost-efficient than anything provided by a single organization or team. Hundreds of independent Indexers around the world power The Graph Network, ensuring safety, security and redundancy. + +That being said, if you’re still interested in running a [Graph Node](https://github.com/graphprotocol/graph-node), consider joining The Graph Network [as an Indexer](https://thegraph.com/blog/how-to-become-indexer/) to earn indexing rewards and query fees by serving data on your subgraph and others. + +### Should I use a centralized indexing provider? + +If you are building in web3, the moment you use a centralized indexing provider, you are giving them control of your dapp and data. The Graph’s decentralized network offers [superior quality of service](https://thegraph.com/blog/qos-the-graph-network/), reliability with unbeatable uptime thanks to node redundancy, as well as significantly [lower costs](/network/benefits/), and you won’t be held hostage at the data layer. 
+ +With The Graph Network, your subgraph is public and anyone can query it openly, which increases the usage and network effects of your dapp. With a centralized indexing solution, the subgraph is private to the centralized provider. + +Here's a detailed breakdown of the benefits of The Graph over centralized hosting: + +- **Resilience and Redundancy**: Decentralized systems are inherently more robust and resilient due to their distributed nature. Data isn't stored on a single server or location. Instead, it's served by hundreds of independent Indexers around the globe. This reduces the risk of data loss or service interruptions if one node fails, leading to exceptional uptime (99.99%). + +- **Quality of Service**: In addition to the impressive uptime, The Graph Network features a ~106ms median query speed (latency), and higher query success rates compared to hosted alternatives. Read more in [this blog](https://thegraph.com/blog/qos-the-graph-network/). + +- **Censorship Resistance**: Centralized systems can become targets for censorship, either through regulatory pressures or network attacks. In contrast, decentralized systems, due to their dispersed architecture, are much harder to censor, ensuring continuous data availability. + +- **Transparency and Trust**: Decentralized systems operate openly, enabling anyone to independently verify the data. This transparency builds trust among network participants, as they can verify the system's integrity without relying on a central authority. + +Just as you've chosen your blockchain network for its decentralized nature, security, and transparency, opting for The Graph Network is an extension of those same principles. By aligning your data infrastructure with these values, you ensure a cohesive, resilient, and trust-driven development environment. + +### Will my hosted service subgraph be supported by the upgrade Indexer? + +Yes, the upgrade Indexer will support all hosted service subgraphs published to The Graph Network. + +However, some subgraphs may not be eligible for indexing rewards, and as a result, may have difficulty attracting further Indexers. For example, indexing rewards may not be available for subgraphs on certain chains. Members from these blockchain communities are encouraged to integrate their chain through the [Chain Integration Process](/chain-integration-overview/). + +## What is an upgrade Indexer? + +### What does "upgrade Indexer" mean? + +It is designed to improve the experience of upgrading subgraphs from the hosted service to The Graph Network and supporting new versions of existing subgraphs that have not yet been indexed. + +The upgrade Indexer is aimed at bootstrapping chains that do not yet have indexing rewards on the network, as well as a fallback for new subgraph versions. The goal is to ensure that an Indexer is available to serve queries as quickly as possible after a subgraph is published. + +### What chains will the upgrade Indexer support? + +The upgrade Indexer will support chains that are currently only available on the hosted service. This will include many hosted service subgraphs that have already been synced. + +Find a comprehensive list of supported chains [here](/developing/supported-networks/). + +### Why is Edge & Node running the upgrade Indexer? + +Edge and Node has historically maintained the hosted service and, as a result, has already synced data for hosted service subgraphs. + +Any and all Indexers are encouraged to become upgrade Indexers as well. 
However, note that operating an upgrade Indexer is largely provided as a public service to support new subgraphs and additional chains due to the lack of indexing rewards before they are approved by The Graph Council. + +### What does this mean for existing Indexers? + +Chains that are currently exclusively supported on the hosted service will be made available to developers on The Graph without indexing rewards at first, though this does unlock query fees for any Indexer that is interested. This is expected to lead to an increase in the number of subgraphs being published on the network, providing more opportunities for Indexers to index and serve these subgraphs in return for query fees, even before indexing rewards are enabled for a chain. + +The upgrade Indexer also provides the Indexer community with information about potential demand for subgraphs and new chains on The Graph Network. + +### What does this mean for Delegators? + +The upgrade Indexer offers a powerful opportunity for Delegators. As more subgraphs are upgraded from the hosted service to The Graph Network, Delegators stand to benefit from the increased network activity. + +### Will the upgrade Indexer compete with existing Indexers for rewards? + +No, the upgrade Indexer will only allocate the minimum amount per subgraph and will not collect indexing rewards. + +It operates on an “as needed” basis, and serves as a fallback until sufficient service quality is achieved by at least 3 other Indexers in the network for respective chains and subgraphs. + +### How will this affect subgraph developers? + +Subgraph developers will be able to query their subgraphs on the network almost immediately after upgrading them from the hosted service or publishing them from the Subgraph Studio, as no lead time will be required for indexing. + +### How does this benefit data consumers? + +The upgrade Indexer enables chains on the network that are currently only supported on the hosted service. Therefore, it widens the scope and availability of data that can be queried on the network. + +### How will the upgrade Indexer price queries? + +The upgrade Indexer will price queries at the market rate so as not to influence the query fee market. + +### What are the criteria for the upgrade Indexer to stop supporting a subgraph? + +The upgrade Indexer will serve a subgraph until it is sufficiently and successfully served with consistent queries served by at least 3 other Indexers. + +Furthermore, the upgrade Indexer will stop supporting a subgraph if it has not been queried in the last 30 days. + +Other Indexers are incentivized to support subgraphs with ongoing query volume, so the query volume to the upgrade Indexer should trend towards zero because the Indexer will have a small allocation size and other Indexers will be chosen for queries ahead of the upgrade Indexer. diff --git a/website/pages/zh/tokenomics.mdx b/website/pages/zh/tokenomics.mdx index 7f42844abbb1..5c3af861018c 100644 --- a/website/pages/zh/tokenomics.mdx +++ b/website/pages/zh/tokenomics.mdx @@ -11,7 +11,7 @@ Graph是一种去中心化协议,可以轻松访问区块链数据。 它类似于B2B2C模式,只是由去中心化的参与者网络提供支持。网络参与者共同向最终用户提供数据,以换取GRT奖励。GRT是协调数据提供者和消费者的工作工具代币。GRT作为协调网络内数据提供商和消费者的工具,并激励协议参与者有效组织数据。 -通过使用Graph,用户可以轻松地从区块链访问数据,只需支付他们所需的特定信息。如今,web3生态系统中的许多[流行应用程序](https://thegraph.com/explorer)都在使用。 +By using The Graph, users can easily access data from the blockchain, paying only for the specific information they need. The Graph is used by many [popular dapps](https://thegraph.com/explorer) in the web3 ecosystem today. 
Graph索引区块链数据的方式与Google索引web的方式类似。事实上,您可能已经在使用Graph而没有意识到它。如果您查看了从子图获取数据的dapp的前端,那么您已经从子图查询数据! @@ -75,7 +75,7 @@ Graph索引区块链数据的方式与Google索引web的方式类似。事实上 索引人可以通过两种方式获得GRT奖励: -1. 查询费用:开发者或用户为子图数据查询支付的GRT。查询费用存入回扣池并分配给索引人。 +1. Query fees: GRT paid by developers or users for subgraph data queries. Query fees are directly distributed to Indexers according to the exponential rebate function (see GIP [here](https://forum.thegraph.com/t/gip-0051-exponential-query-fee-rebates-for-indexers/4162)). 2. 索引奖励:每年3%的发行量根据索引子图的数量分配给索引者。这些奖励激励索引者对子图进行索引,有时在查询费用开始之前,累积并提交索引证明(POI),以验证他们是否已准确索引数据。 diff --git a/website/route-lockfile.txt b/website/route-lockfile.txt index 4b6085ce1fe8..4cea12483d5a 100644 --- a/website/route-lockfile.txt +++ b/website/route-lockfile.txt @@ -6,6 +6,7 @@ /ar/arbitrum/l2-transfer-tools-faq/ /ar/arbitrum/l2-transfer-tools-guide/ /ar/billing/ +/ar/chain-integration-overview/ /ar/cookbook/arweave/ /ar/cookbook/base-testnet/ /ar/cookbook/cosmos/ @@ -24,6 +25,8 @@ /ar/developing/assemblyscript-api/ /ar/developing/creating-a-subgraph/ /ar/developing/developer-faqs/ +/ar/developing/graph-ts/api/ +/ar/developing/graph-ts/common-issues/ /ar/developing/substreams-powered-subgraphs-faq/ /ar/developing/supported-networks/ /ar/developing/unit-testing-framework/ @@ -51,15 +54,19 @@ /ar/querying/querying-from-an-application/ /ar/querying/querying-the-graph/ /ar/querying/querying-the-hosted-service/ +/ar/querying/querying-with-python/ +/ar/quick-start/ /ar/release-notes/assemblyscript-migration-guide/ /ar/release-notes/graphql-validations-migration-guide/ /ar/substreams/ +/ar/sunrise/ /ar/tokenomics/ /cs/about/ /cs/arbitrum/arbitrum-faq/ /cs/arbitrum/l2-transfer-tools-faq/ /cs/arbitrum/l2-transfer-tools-guide/ /cs/billing/ +/cs/chain-integration-overview/ /cs/cookbook/arweave/ /cs/cookbook/base-testnet/ /cs/cookbook/cosmos/ @@ -78,9 +85,12 @@ /cs/developing/assemblyscript-api/ /cs/developing/creating-a-subgraph/ /cs/developing/developer-faqs/ +/cs/developing/graph-ts/api/ +/cs/developing/graph-ts/common-issues/ /cs/developing/substreams-powered-subgraphs-faq/ /cs/developing/supported-networks/ /cs/developing/unit-testing-framework/ +/cs/firehose/ /cs/glossary/ /cs/graphcast/ /cs/managing/deprecating-a-subgraph/ @@ -104,14 +114,19 @@ /cs/querying/querying-from-an-application/ /cs/querying/querying-the-graph/ /cs/querying/querying-the-hosted-service/ +/cs/querying/querying-with-python/ +/cs/quick-start/ /cs/release-notes/assemblyscript-migration-guide/ /cs/release-notes/graphql-validations-migration-guide/ +/cs/substreams/ +/cs/sunrise/ /cs/tokenomics/ /de/about/ /de/arbitrum/arbitrum-faq/ /de/arbitrum/l2-transfer-tools-faq/ /de/arbitrum/l2-transfer-tools-guide/ /de/billing/ +/de/chain-integration-overview/ /de/cookbook/arweave/ /de/cookbook/base-testnet/ /de/cookbook/cosmos/ @@ -130,6 +145,8 @@ /de/developing/assemblyscript-api/ /de/developing/creating-a-subgraph/ /de/developing/developer-faqs/ +/de/developing/graph-ts/api/ +/de/developing/graph-ts/common-issues/ /de/developing/substreams-powered-subgraphs-faq/ /de/developing/supported-networks/ /de/developing/unit-testing-framework/ @@ -157,9 +174,12 @@ /de/querying/querying-from-an-application/ /de/querying/querying-the-graph/ /de/querying/querying-the-hosted-service/ +/de/querying/querying-with-python/ +/de/quick-start/ /de/release-notes/assemblyscript-migration-guide/ /de/release-notes/graphql-validations-migration-guide/ /de/substreams/ +/de/sunrise/ /de/tokenomics/ /en/ /en/404/ @@ -237,6 +257,7 @@ /es/arbitrum/l2-transfer-tools-faq/ 
/es/arbitrum/l2-transfer-tools-guide/ /es/billing/ +/es/chain-integration-overview/ /es/cookbook/arweave/ /es/cookbook/base-testnet/ /es/cookbook/cosmos/ @@ -255,6 +276,8 @@ /es/developing/assemblyscript-api/ /es/developing/creating-a-subgraph/ /es/developing/developer-faqs/ +/es/developing/graph-ts/api/ +/es/developing/graph-ts/common-issues/ /es/developing/substreams-powered-subgraphs-faq/ /es/developing/supported-networks/ /es/developing/unit-testing-framework/ @@ -282,15 +305,19 @@ /es/querying/querying-from-an-application/ /es/querying/querying-the-graph/ /es/querying/querying-the-hosted-service/ +/es/querying/querying-with-python/ +/es/quick-start/ /es/release-notes/assemblyscript-migration-guide/ /es/release-notes/graphql-validations-migration-guide/ /es/substreams/ +/es/sunrise/ /es/tokenomics/ /fr/about/ /fr/arbitrum/arbitrum-faq/ /fr/arbitrum/l2-transfer-tools-faq/ /fr/arbitrum/l2-transfer-tools-guide/ /fr/billing/ +/fr/chain-integration-overview/ /fr/cookbook/arweave/ /fr/cookbook/base-testnet/ /fr/cookbook/cosmos/ @@ -309,6 +336,8 @@ /fr/developing/assemblyscript-api/ /fr/developing/creating-a-subgraph/ /fr/developing/developer-faqs/ +/fr/developing/graph-ts/api/ +/fr/developing/graph-ts/common-issues/ /fr/developing/substreams-powered-subgraphs-faq/ /fr/developing/supported-networks/ /fr/developing/unit-testing-framework/ @@ -336,15 +365,19 @@ /fr/querying/querying-from-an-application/ /fr/querying/querying-the-graph/ /fr/querying/querying-the-hosted-service/ +/fr/querying/querying-with-python/ +/fr/quick-start/ /fr/release-notes/assemblyscript-migration-guide/ /fr/release-notes/graphql-validations-migration-guide/ /fr/substreams/ +/fr/sunrise/ /fr/tokenomics/ /ha/about/ /ha/arbitrum/arbitrum-faq/ /ha/arbitrum/l2-transfer-tools-faq/ /ha/arbitrum/l2-transfer-tools-guide/ /ha/billing/ +/ha/chain-integration-overview/ /ha/cookbook/arweave/ /ha/cookbook/base-testnet/ /ha/cookbook/cosmos/ @@ -363,9 +396,12 @@ /ha/developing/assemblyscript-api/ /ha/developing/creating-a-subgraph/ /ha/developing/developer-faqs/ +/ha/developing/graph-ts/api/ +/ha/developing/graph-ts/common-issues/ /ha/developing/substreams-powered-subgraphs-faq/ /ha/developing/supported-networks/ /ha/developing/unit-testing-framework/ +/ha/firehose/ /ha/glossary/ /ha/graphcast/ /ha/managing/deprecating-a-subgraph/ @@ -389,8 +425,12 @@ /ha/querying/querying-from-an-application/ /ha/querying/querying-the-graph/ /ha/querying/querying-the-hosted-service/ +/ha/querying/querying-with-python/ +/ha/quick-start/ /ha/release-notes/assemblyscript-migration-guide/ /ha/release-notes/graphql-validations-migration-guide/ +/ha/substreams/ +/ha/sunrise/ /ha/tokenomics/ /hi/ /hi/404/ @@ -399,6 +439,7 @@ /hi/arbitrum/l2-transfer-tools-faq/ /hi/arbitrum/l2-transfer-tools-guide/ /hi/billing/ +/hi/chain-integration-overview/ /hi/cookbook/arweave/ /hi/cookbook/base-testnet/ /hi/cookbook/cosmos/ @@ -417,6 +458,8 @@ /hi/developing/assemblyscript-api/ /hi/developing/creating-a-subgraph/ /hi/developing/developer-faqs/ +/hi/developing/graph-ts/api/ +/hi/developing/graph-ts/common-issues/ /hi/developing/substreams-powered-subgraphs-faq/ /hi/developing/supported-networks/ /hi/developing/unit-testing-framework/ @@ -444,15 +487,19 @@ /hi/querying/querying-from-an-application/ /hi/querying/querying-the-graph/ /hi/querying/querying-the-hosted-service/ +/hi/querying/querying-with-python/ +/hi/quick-start/ /hi/release-notes/assemblyscript-migration-guide/ /hi/release-notes/graphql-validations-migration-guide/ /hi/substreams/ +/hi/sunrise/ 
/hi/tokenomics/ /it/about/ /it/arbitrum/arbitrum-faq/ /it/arbitrum/l2-transfer-tools-faq/ /it/arbitrum/l2-transfer-tools-guide/ /it/billing/ +/it/chain-integration-overview/ /it/cookbook/arweave/ /it/cookbook/base-testnet/ /it/cookbook/cosmos/ @@ -471,6 +518,8 @@ /it/developing/assemblyscript-api/ /it/developing/creating-a-subgraph/ /it/developing/developer-faqs/ +/it/developing/graph-ts/api/ +/it/developing/graph-ts/common-issues/ /it/developing/substreams-powered-subgraphs-faq/ /it/developing/supported-networks/ /it/developing/unit-testing-framework/ @@ -498,9 +547,12 @@ /it/querying/querying-from-an-application/ /it/querying/querying-the-graph/ /it/querying/querying-the-hosted-service/ +/it/querying/querying-with-python/ +/it/quick-start/ /it/release-notes/assemblyscript-migration-guide/ /it/release-notes/graphql-validations-migration-guide/ /it/substreams/ +/it/sunrise/ /it/tokenomics/ /ja/ /ja/404/ @@ -509,6 +561,7 @@ /ja/arbitrum/l2-transfer-tools-faq/ /ja/arbitrum/l2-transfer-tools-guide/ /ja/billing/ +/ja/chain-integration-overview/ /ja/cookbook/arweave/ /ja/cookbook/base-testnet/ /ja/cookbook/cosmos/ @@ -527,6 +580,8 @@ /ja/developing/assemblyscript-api/ /ja/developing/creating-a-subgraph/ /ja/developing/developer-faqs/ +/ja/developing/graph-ts/api/ +/ja/developing/graph-ts/common-issues/ /ja/developing/substreams-powered-subgraphs-faq/ /ja/developing/supported-networks/ /ja/developing/unit-testing-framework/ @@ -554,15 +609,19 @@ /ja/querying/querying-from-an-application/ /ja/querying/querying-the-graph/ /ja/querying/querying-the-hosted-service/ +/ja/querying/querying-with-python/ +/ja/quick-start/ /ja/release-notes/assemblyscript-migration-guide/ /ja/release-notes/graphql-validations-migration-guide/ /ja/substreams/ +/ja/sunrise/ /ja/tokenomics/ /ko/about/ /ko/arbitrum/arbitrum-faq/ /ko/arbitrum/l2-transfer-tools-faq/ /ko/arbitrum/l2-transfer-tools-guide/ /ko/billing/ +/ko/chain-integration-overview/ /ko/cookbook/arweave/ /ko/cookbook/base-testnet/ /ko/cookbook/cosmos/ @@ -581,6 +640,8 @@ /ko/developing/assemblyscript-api/ /ko/developing/creating-a-subgraph/ /ko/developing/developer-faqs/ +/ko/developing/graph-ts/api/ +/ko/developing/graph-ts/common-issues/ /ko/developing/substreams-powered-subgraphs-faq/ /ko/developing/supported-networks/ /ko/developing/unit-testing-framework/ @@ -608,9 +669,12 @@ /ko/querying/querying-from-an-application/ /ko/querying/querying-the-graph/ /ko/querying/querying-the-hosted-service/ +/ko/querying/querying-with-python/ +/ko/quick-start/ /ko/release-notes/assemblyscript-migration-guide/ /ko/release-notes/graphql-validations-migration-guide/ /ko/substreams/ +/ko/sunrise/ /ko/tokenomics/ /mr/ /mr/404/ @@ -619,6 +683,7 @@ /mr/arbitrum/l2-transfer-tools-faq/ /mr/arbitrum/l2-transfer-tools-guide/ /mr/billing/ +/mr/chain-integration-overview/ /mr/cookbook/arweave/ /mr/cookbook/base-testnet/ /mr/cookbook/cosmos/ @@ -637,6 +702,8 @@ /mr/developing/assemblyscript-api/ /mr/developing/creating-a-subgraph/ /mr/developing/developer-faqs/ +/mr/developing/graph-ts/api/ +/mr/developing/graph-ts/common-issues/ /mr/developing/substreams-powered-subgraphs-faq/ /mr/developing/supported-networks/ /mr/developing/unit-testing-framework/ @@ -664,15 +731,19 @@ /mr/querying/querying-from-an-application/ /mr/querying/querying-the-graph/ /mr/querying/querying-the-hosted-service/ +/mr/querying/querying-with-python/ +/mr/quick-start/ /mr/release-notes/assemblyscript-migration-guide/ /mr/release-notes/graphql-validations-migration-guide/ /mr/substreams/ +/mr/sunrise/ 
/mr/tokenomics/ /nl/about/ /nl/arbitrum/arbitrum-faq/ /nl/arbitrum/l2-transfer-tools-faq/ /nl/arbitrum/l2-transfer-tools-guide/ /nl/billing/ +/nl/chain-integration-overview/ /nl/cookbook/arweave/ /nl/cookbook/base-testnet/ /nl/cookbook/cosmos/ @@ -691,6 +762,8 @@ /nl/developing/assemblyscript-api/ /nl/developing/creating-a-subgraph/ /nl/developing/developer-faqs/ +/nl/developing/graph-ts/api/ +/nl/developing/graph-ts/common-issues/ /nl/developing/substreams-powered-subgraphs-faq/ /nl/developing/supported-networks/ /nl/developing/unit-testing-framework/ @@ -718,15 +791,19 @@ /nl/querying/querying-from-an-application/ /nl/querying/querying-the-graph/ /nl/querying/querying-the-hosted-service/ +/nl/querying/querying-with-python/ +/nl/quick-start/ /nl/release-notes/assemblyscript-migration-guide/ /nl/release-notes/graphql-validations-migration-guide/ /nl/substreams/ +/nl/sunrise/ /nl/tokenomics/ /pl/about/ /pl/arbitrum/arbitrum-faq/ /pl/arbitrum/l2-transfer-tools-faq/ /pl/arbitrum/l2-transfer-tools-guide/ /pl/billing/ +/pl/chain-integration-overview/ /pl/cookbook/arweave/ /pl/cookbook/base-testnet/ /pl/cookbook/cosmos/ @@ -745,6 +822,8 @@ /pl/developing/assemblyscript-api/ /pl/developing/creating-a-subgraph/ /pl/developing/developer-faqs/ +/pl/developing/graph-ts/api/ +/pl/developing/graph-ts/common-issues/ /pl/developing/substreams-powered-subgraphs-faq/ /pl/developing/supported-networks/ /pl/developing/unit-testing-framework/ @@ -772,9 +851,12 @@ /pl/querying/querying-from-an-application/ /pl/querying/querying-the-graph/ /pl/querying/querying-the-hosted-service/ +/pl/querying/querying-with-python/ +/pl/quick-start/ /pl/release-notes/assemblyscript-migration-guide/ /pl/release-notes/graphql-validations-migration-guide/ /pl/substreams/ +/pl/sunrise/ /pl/tokenomics/ /pt/ /pt/404/ @@ -783,6 +865,7 @@ /pt/arbitrum/l2-transfer-tools-faq/ /pt/arbitrum/l2-transfer-tools-guide/ /pt/billing/ +/pt/chain-integration-overview/ /pt/cookbook/arweave/ /pt/cookbook/base-testnet/ /pt/cookbook/cosmos/ @@ -801,6 +884,8 @@ /pt/developing/assemblyscript-api/ /pt/developing/creating-a-subgraph/ /pt/developing/developer-faqs/ +/pt/developing/graph-ts/api/ +/pt/developing/graph-ts/common-issues/ /pt/developing/substreams-powered-subgraphs-faq/ /pt/developing/supported-networks/ /pt/developing/unit-testing-framework/ @@ -828,15 +913,19 @@ /pt/querying/querying-from-an-application/ /pt/querying/querying-the-graph/ /pt/querying/querying-the-hosted-service/ +/pt/querying/querying-with-python/ +/pt/quick-start/ /pt/release-notes/assemblyscript-migration-guide/ /pt/release-notes/graphql-validations-migration-guide/ /pt/substreams/ +/pt/sunrise/ /pt/tokenomics/ /ro/about/ /ro/arbitrum/arbitrum-faq/ /ro/arbitrum/l2-transfer-tools-faq/ /ro/arbitrum/l2-transfer-tools-guide/ /ro/billing/ +/ro/chain-integration-overview/ /ro/cookbook/arweave/ /ro/cookbook/base-testnet/ /ro/cookbook/cosmos/ @@ -855,9 +944,12 @@ /ro/developing/assemblyscript-api/ /ro/developing/creating-a-subgraph/ /ro/developing/developer-faqs/ +/ro/developing/graph-ts/api/ +/ro/developing/graph-ts/common-issues/ /ro/developing/substreams-powered-subgraphs-faq/ /ro/developing/supported-networks/ /ro/developing/unit-testing-framework/ +/ro/firehose/ /ro/glossary/ /ro/graphcast/ /ro/managing/deprecating-a-subgraph/ @@ -881,8 +973,12 @@ /ro/querying/querying-from-an-application/ /ro/querying/querying-the-graph/ /ro/querying/querying-the-hosted-service/ +/ro/querying/querying-with-python/ +/ro/quick-start/ /ro/release-notes/assemblyscript-migration-guide/ 
/ro/release-notes/graphql-validations-migration-guide/ +/ro/substreams/ +/ro/sunrise/ /ro/tokenomics/ /ru/ /ru/404/ @@ -891,6 +987,7 @@ /ru/arbitrum/l2-transfer-tools-faq/ /ru/arbitrum/l2-transfer-tools-guide/ /ru/billing/ +/ru/chain-integration-overview/ /ru/cookbook/arweave/ /ru/cookbook/base-testnet/ /ru/cookbook/cosmos/ @@ -909,6 +1006,8 @@ /ru/developing/assemblyscript-api/ /ru/developing/creating-a-subgraph/ /ru/developing/developer-faqs/ +/ru/developing/graph-ts/api/ +/ru/developing/graph-ts/common-issues/ /ru/developing/substreams-powered-subgraphs-faq/ /ru/developing/supported-networks/ /ru/developing/unit-testing-framework/ @@ -936,15 +1035,21 @@ /ru/querying/querying-from-an-application/ /ru/querying/querying-the-graph/ /ru/querying/querying-the-hosted-service/ +/ru/querying/querying-with-python/ +/ru/quick-start/ /ru/release-notes/assemblyscript-migration-guide/ /ru/release-notes/graphql-validations-migration-guide/ /ru/substreams/ +/ru/sunrise/ /ru/tokenomics/ +/sv/ +/sv/404/ /sv/about/ /sv/arbitrum/arbitrum-faq/ /sv/arbitrum/l2-transfer-tools-faq/ /sv/arbitrum/l2-transfer-tools-guide/ /sv/billing/ +/sv/chain-integration-overview/ /sv/cookbook/arweave/ /sv/cookbook/base-testnet/ /sv/cookbook/cosmos/ @@ -963,6 +1068,8 @@ /sv/developing/assemblyscript-api/ /sv/developing/creating-a-subgraph/ /sv/developing/developer-faqs/ +/sv/developing/graph-ts/api/ +/sv/developing/graph-ts/common-issues/ /sv/developing/substreams-powered-subgraphs-faq/ /sv/developing/supported-networks/ /sv/developing/unit-testing-framework/ @@ -990,15 +1097,21 @@ /sv/querying/querying-from-an-application/ /sv/querying/querying-the-graph/ /sv/querying/querying-the-hosted-service/ +/sv/querying/querying-with-python/ +/sv/quick-start/ /sv/release-notes/assemblyscript-migration-guide/ /sv/release-notes/graphql-validations-migration-guide/ /sv/substreams/ +/sv/sunrise/ /sv/tokenomics/ +/tr/ +/tr/404/ /tr/about/ /tr/arbitrum/arbitrum-faq/ /tr/arbitrum/l2-transfer-tools-faq/ /tr/arbitrum/l2-transfer-tools-guide/ /tr/billing/ +/tr/chain-integration-overview/ /tr/cookbook/arweave/ /tr/cookbook/base-testnet/ /tr/cookbook/cosmos/ @@ -1017,6 +1130,8 @@ /tr/developing/assemblyscript-api/ /tr/developing/creating-a-subgraph/ /tr/developing/developer-faqs/ +/tr/developing/graph-ts/api/ +/tr/developing/graph-ts/common-issues/ /tr/developing/substreams-powered-subgraphs-faq/ /tr/developing/supported-networks/ /tr/developing/unit-testing-framework/ @@ -1044,15 +1159,19 @@ /tr/querying/querying-from-an-application/ /tr/querying/querying-the-graph/ /tr/querying/querying-the-hosted-service/ +/tr/querying/querying-with-python/ +/tr/quick-start/ /tr/release-notes/assemblyscript-migration-guide/ /tr/release-notes/graphql-validations-migration-guide/ /tr/substreams/ +/tr/sunrise/ /tr/tokenomics/ /uk/about/ /uk/arbitrum/arbitrum-faq/ /uk/arbitrum/l2-transfer-tools-faq/ /uk/arbitrum/l2-transfer-tools-guide/ /uk/billing/ +/uk/chain-integration-overview/ /uk/cookbook/arweave/ /uk/cookbook/base-testnet/ /uk/cookbook/cosmos/ @@ -1071,6 +1190,8 @@ /uk/developing/assemblyscript-api/ /uk/developing/creating-a-subgraph/ /uk/developing/developer-faqs/ +/uk/developing/graph-ts/api/ +/uk/developing/graph-ts/common-issues/ /uk/developing/substreams-powered-subgraphs-faq/ /uk/developing/supported-networks/ /uk/developing/unit-testing-framework/ @@ -1098,9 +1219,12 @@ /uk/querying/querying-from-an-application/ /uk/querying/querying-the-graph/ /uk/querying/querying-the-hosted-service/ +/uk/querying/querying-with-python/ +/uk/quick-start/ 
/uk/release-notes/assemblyscript-migration-guide/ /uk/release-notes/graphql-validations-migration-guide/ /uk/substreams/ +/uk/sunrise/ /uk/tokenomics/ /ur/ /ur/404/ @@ -1109,6 +1233,7 @@ /ur/arbitrum/l2-transfer-tools-faq/ /ur/arbitrum/l2-transfer-tools-guide/ /ur/billing/ +/ur/chain-integration-overview/ /ur/cookbook/arweave/ /ur/cookbook/base-testnet/ /ur/cookbook/cosmos/ @@ -1127,6 +1252,8 @@ /ur/developing/assemblyscript-api/ /ur/developing/creating-a-subgraph/ /ur/developing/developer-faqs/ +/ur/developing/graph-ts/api/ +/ur/developing/graph-ts/common-issues/ /ur/developing/substreams-powered-subgraphs-faq/ /ur/developing/supported-networks/ /ur/developing/unit-testing-framework/ @@ -1154,15 +1281,19 @@ /ur/querying/querying-from-an-application/ /ur/querying/querying-the-graph/ /ur/querying/querying-the-hosted-service/ +/ur/querying/querying-with-python/ +/ur/quick-start/ /ur/release-notes/assemblyscript-migration-guide/ /ur/release-notes/graphql-validations-migration-guide/ /ur/substreams/ +/ur/sunrise/ /ur/tokenomics/ /vi/about/ /vi/arbitrum/arbitrum-faq/ /vi/arbitrum/l2-transfer-tools-faq/ /vi/arbitrum/l2-transfer-tools-guide/ /vi/billing/ +/vi/chain-integration-overview/ /vi/cookbook/arweave/ /vi/cookbook/base-testnet/ /vi/cookbook/cosmos/ @@ -1181,6 +1312,8 @@ /vi/developing/assemblyscript-api/ /vi/developing/creating-a-subgraph/ /vi/developing/developer-faqs/ +/vi/developing/graph-ts/api/ +/vi/developing/graph-ts/common-issues/ /vi/developing/substreams-powered-subgraphs-faq/ /vi/developing/supported-networks/ /vi/developing/unit-testing-framework/ @@ -1208,15 +1341,19 @@ /vi/querying/querying-from-an-application/ /vi/querying/querying-the-graph/ /vi/querying/querying-the-hosted-service/ +/vi/querying/querying-with-python/ +/vi/quick-start/ /vi/release-notes/assemblyscript-migration-guide/ /vi/release-notes/graphql-validations-migration-guide/ /vi/substreams/ +/vi/sunrise/ /vi/tokenomics/ /yo/about/ /yo/arbitrum/arbitrum-faq/ /yo/arbitrum/l2-transfer-tools-faq/ /yo/arbitrum/l2-transfer-tools-guide/ /yo/billing/ +/yo/chain-integration-overview/ /yo/cookbook/arweave/ /yo/cookbook/base-testnet/ /yo/cookbook/cosmos/ @@ -1235,9 +1372,12 @@ /yo/developing/assemblyscript-api/ /yo/developing/creating-a-subgraph/ /yo/developing/developer-faqs/ +/yo/developing/graph-ts/api/ +/yo/developing/graph-ts/common-issues/ /yo/developing/substreams-powered-subgraphs-faq/ /yo/developing/supported-networks/ /yo/developing/unit-testing-framework/ +/yo/firehose/ /yo/glossary/ /yo/graphcast/ /yo/managing/deprecating-a-subgraph/ @@ -1261,8 +1401,12 @@ /yo/querying/querying-from-an-application/ /yo/querying/querying-the-graph/ /yo/querying/querying-the-hosted-service/ +/yo/querying/querying-with-python/ +/yo/quick-start/ /yo/release-notes/assemblyscript-migration-guide/ /yo/release-notes/graphql-validations-migration-guide/ +/yo/substreams/ +/yo/sunrise/ /yo/tokenomics/ /zh/ /zh/404/ @@ -1271,6 +1415,7 @@ /zh/arbitrum/l2-transfer-tools-faq/ /zh/arbitrum/l2-transfer-tools-guide/ /zh/billing/ +/zh/chain-integration-overview/ /zh/cookbook/arweave/ /zh/cookbook/base-testnet/ /zh/cookbook/cosmos/ @@ -1289,6 +1434,8 @@ /zh/developing/assemblyscript-api/ /zh/developing/creating-a-subgraph/ /zh/developing/developer-faqs/ +/zh/developing/graph-ts/api/ +/zh/developing/graph-ts/common-issues/ /zh/developing/substreams-powered-subgraphs-faq/ /zh/developing/supported-networks/ /zh/developing/unit-testing-framework/ @@ -1316,7 +1463,10 @@ /zh/querying/querying-from-an-application/ /zh/querying/querying-the-graph/ 
/zh/querying/querying-the-hosted-service/ +/zh/querying/querying-with-python/ +/zh/quick-start/ /zh/release-notes/assemblyscript-migration-guide/ /zh/release-notes/graphql-validations-migration-guide/ /zh/substreams/ +/zh/sunrise/ /zh/tokenomics/
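
The updated query-fee wording in the tokenomics page diff above points to GIP-0051's exponential rebate function, under which query fees are distributed directly to Indexers instead of being deposited into a rebate pool (the phrasing dropped in the removed line). As a rough illustration of the general shape only — the exact formula and its governance parameters are defined in the GIP, and the `exponentialRebate` helper and `alpha` parameter below are assumptions for demonstration, not the protocol's implementation — an exponential-saturation rebate can be sketched in TypeScript:

```ts
// Illustrative sketch only — NOT the exact GIP-0051 formula or parameters.
// It shows the general "exponential saturation" idea: the rebate approaches
// the full query fees collected as the stake backing the allocation grows.
function exponentialRebate(queryFees: number, allocatedStake: number, alpha = 1): number {
  if (queryFees <= 0) return 0
  return queryFees * (1 - Math.exp(-(alpha * allocatedStake) / queryFees))
}

// With little stake behind the allocation, only part of the fees are rebated;
// with ample stake, nearly all of them are.
console.log(exponentialRebate(100, 10).toFixed(2))   // ≈ 9.52
console.log(exponentialRebate(100, 1000).toFixed(2)) // ≈ 100.00
```

The property worth noting is the saturation behaviour: rebates scale with the Indexer's allocated stake and converge to the full fees collected, which is what the new wording's "directly distributed to Indexers according to the exponential rebate function" refers to.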