diff --git a/test/scripts/fullNetBootstrap.ts b/test/scripts/fullNetBootstrap.ts index d1328306c..a54e863a6 100644 --- a/test/scripts/fullNetBootstrap.ts +++ b/test/scripts/fullNetBootstrap.ts @@ -1,86 +1,86 @@ -import { - BspNetTestApi, - registerToxics, - type BspNetConfig, - type EnrichedBspApi, - type ToxicInfo -} from "../util"; -import * as ShConsts from "../util/bspNet/consts"; -import { runFullNet } from "../util/fullNet/helpers"; - -let api: EnrichedBspApi | undefined; -const fullNetConfig: BspNetConfig = { - noisy: process.env.NOISY === "1", - rocksdb: process.env.ROCKSDB === "1" -}; - -const CONFIG = { - bucketName: "nothingmuch-0", - localPath: "res/whatsup.jpg", - remotePath: "cat/whatsup.jpg" -}; - -async function bootStrapNetwork() { - await runFullNet(fullNetConfig); - - if (fullNetConfig.noisy) { - // For more info on the kind of toxics you can register, - // see: https://github.com/Shopify/toxiproxy?tab=readme-ov-file#toxics - const reqToxics = [ - { - type: "latency", - name: "lag-down", - stream: "upstream", - toxicity: 0.8, - attributes: { - latency: 25, - jitter: 7 - } - }, - { - type: "bandwidth", - name: "low-band", - // Setting as upstream simulates slow user connection - stream: "upstream", - // 50% of the time, the toxic will be applied - toxicity: 0.5, - attributes: { - // 10kbps - rate: 10 - } - } - ] satisfies ToxicInfo[]; - - await registerToxics(reqToxics); - } - - api = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - await api.file.newStorageRequest( - CONFIG.localPath, - CONFIG.remotePath, - CONFIG.bucketName, - ShConsts.DUMMY_MSP_ID - ); - - await api.wait.bspVolunteer(); - await api.wait.bspStored(); - - if (fullNetConfig.noisy) { - console.log("✅ NoisyNet Bootstrap success"); - } else { - console.log("✅ BSPNet Bootstrap success"); - } -} - -bootStrapNetwork() - .catch((e) => { - console.error("Error running bootstrap script:", e); - if (fullNetConfig.noisy) { - console.log("❌ NoisyNet Bootstrap 
failure"); - } else { - console.log("❌ BSPNet Bootstrap failure"); - } - process.exitCode = 1; - }) - .finally(async () => await api?.disconnect()); +import { + BspNetTestApi, + registerToxics, + type BspNetConfig, + type EnrichedBspApi, + type ToxicInfo +} from "../util"; +import * as ShConsts from "../util/bspNet/consts"; +import { runFullNet } from "../util/fullNet/helpers"; + +let api: EnrichedBspApi | undefined; +const fullNetConfig: BspNetConfig = { + noisy: process.env.NOISY === "1", + rocksdb: process.env.ROCKSDB === "1" +}; + +const CONFIG = { + bucketName: "nothingmuch-0", + localPath: "res/whatsup.jpg", + remotePath: "cat/whatsup.jpg" +}; + +async function bootStrapNetwork() { + await runFullNet(fullNetConfig); + + if (fullNetConfig.noisy) { + // For more info on the kind of toxics you can register, + // see: https://github.com/Shopify/toxiproxy?tab=readme-ov-file#toxics + const reqToxics = [ + { + type: "latency", + name: "lag-down", + stream: "upstream", + toxicity: 0.8, + attributes: { + latency: 25, + jitter: 7 + } + }, + { + type: "bandwidth", + name: "low-band", + // Setting as upstream simulates slow user connection + stream: "upstream", + // 50% of the time, the toxic will be applied + toxicity: 0.5, + attributes: { + // 10kbps + rate: 10 + } + } + ] satisfies ToxicInfo[]; + + await registerToxics(reqToxics); + } + + api = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + await api.file.newStorageRequest( + CONFIG.localPath, + CONFIG.remotePath, + CONFIG.bucketName, + ShConsts.DUMMY_MSP_ID + ); + + await api.wait.bspVolunteer(); + await api.wait.bspStored(); + + if (fullNetConfig.noisy) { + console.log("✅ NoisyNet Bootstrap success"); + } else { + console.log("✅ BSPNet Bootstrap success"); + } +} + +bootStrapNetwork() + .catch((e) => { + console.error("Error running bootstrap script:", e); + if (fullNetConfig.noisy) { + console.log("❌ NoisyNet Bootstrap failure"); + } else { + console.log("❌ BSPNet Bootstrap 
failure"); + } + process.exitCode = 1; + }) + .finally(async () => await api?.disconnect()); diff --git a/test/suites/integration/bsp/debt-collection.test.ts b/test/suites/integration/bsp/debt-collection.test.ts index 342bb28e8..129bc8f50 100644 --- a/test/suites/integration/bsp/debt-collection.test.ts +++ b/test/suites/integration/bsp/debt-collection.test.ts @@ -1,695 +1,695 @@ -import assert, { strictEqual } from "node:assert"; -import { after } from "node:test"; -import { - describeBspNet, - fetchEventData, - ShConsts, - sleep, - type EnrichedBspApi -} from "../../../util"; - -describeBspNet( - "BSPNet: Collect users debt", - { initialised: "multi", networkConfig: "standard" }, - ({ before, it, createUserApi, createBspApi, getLaunchResponse, createApi }) => { - let userApi: EnrichedBspApi; - let bspApi: EnrichedBspApi; - let bspTwoApi: EnrichedBspApi; - let bspThreeApi: EnrichedBspApi; - let userAddress: string; - - before(async () => { - const launchResponse = await getLaunchResponse(); - assert(launchResponse, "BSPNet failed to initialise"); - userApi = await createUserApi(); - bspApi = await createBspApi(); - bspTwoApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspTwoRpcPort}`); - bspThreeApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspThreeRpcPort}`); - userAddress = ShConsts.NODE_INFOS.user.AddressId; - }); - - after(async () => { - await bspTwoApi.disconnect(); - await bspThreeApi.disconnect(); - }); - - it("BSP correctly charges payment stream", async () => { - // Make sure the payment stream between the user and the DUMMY_BSP_ID actually exists - const paymentStreamExistsResult = - await userApi.call.paymentStreamsApi.getUsersOfPaymentStreamsOfProvider( - ShConsts.DUMMY_BSP_ID - ); - // Check if the first element of the returned vector is the user - assert(paymentStreamExistsResult[0].toString() === userAddress); - assert(paymentStreamExistsResult.length === 1); - - // Seal one more block. 
- await userApi.sealBlock(); - - // Check if the user owes the provider. - let usersWithDebtResult = await bspApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( - ShConsts.DUMMY_BSP_ID, - 0 - ); - assert(usersWithDebtResult.isOk); - assert(usersWithDebtResult.asOk.length === 1); - assert(usersWithDebtResult.asOk[0].toString() === userAddress); - - // Seal one more block with the pending extrinsics. - await userApi.sealBlock(); - - // Calculate the next challenge tick for the BSPs. It should be the same for all BSPs, - // since they all have the same file they were initialised with, and responded to it at - // the same time. - // We first get the last tick for which the BSP submitted a proof. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - ShConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; - - // Calculate how many blocks to advance until next challenge tick. - let currentBlock = await userApi.rpc.chain.getBlock(); - let currentBlockNumber = currentBlock.block.header.number.toNumber(); - const blocksToAdvance = nextChallengeTick - currentBlockNumber; - - // Advance blocksToAdvance blocks. - for (let i = 0; i < blocksToAdvance; i++) { - await userApi.sealBlock(); - } - - await userApi.assert.extrinsicPresent({ - method: "submitProof", - module: "proofsDealer", - checkTxPool: true, - assertLength: 3 - }); - - // Check that no Providers have submitted a valid proof yet. 
- currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - let providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); - - // Seal one more block with the pending extrinsics. - await userApi.sealBlock(); - - // Assert for the the event of the proof successfully submitted and verified. - const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); - - // Check that the Providers were added to the list of Providers that have submitted proofs - currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert( - providersWithProofs.isSome, - "There should be Providers that have submitted a valid proof" - ); - assert( - providersWithProofs.unwrap().size === 3, - "There should be three Providers that have submitted a valid proof" - ); - - // Check that the last chargeable info of the dummy BSP has not been updated yet - let lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( - ShConsts.DUMMY_BSP_ID - ); - assert(lastChargeableInfo.priceIndex.toNumber() === 0); - - // Seal one more block to update the last chargeable info of the Provider - await userApi.sealBlock(); - - // Assert for the the event of the last chargeable info of the Providers being updated - const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( - "paymentStreams", - "LastChargeableInfoUpdated" - ); - strictEqual( - lastChargeableInfoUpdatedEvents.length, - 3, - "There should be three last chargeable info updated events" - ); - - // Check the last chargeable info of 
the dummy BSP - lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( - ShConsts.DUMMY_BSP_ID - ); - - // Check the info of the payment stream between the user and the DUMMY_BSP_ID - const paymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - - // Check that the last chargeable price index of the dummy BSP is greater than the last charged price index of the payment stream - // so that the payment stream can be charged by the BSP - assert( - paymentStreamInfo.unwrap().priceIndexWhenLastCharged.lt(lastChargeableInfo.priceIndex) - ); - - // Check that the user now owes the provider. - usersWithDebtResult = await userApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( - ShConsts.DUMMY_BSP_ID, - 1 - ); - assert(usersWithDebtResult.isOk); - assert(usersWithDebtResult.asOk.length === 1); - assert(usersWithDebtResult.asOk[0].toString() === userAddress); - - // Check that the three Providers have tried to charge the user - // since the user has a payment stream with each of them - await userApi.assert.extrinsicPresent({ - method: "chargePaymentStreams", - module: "paymentStreams", - checkTxPool: true, - assertLength: 3 - }); - - // Seal a block to allow BSPs to charge the payment stream - await userApi.sealBlock(); - - // Assert that event for the BSP charging its payment stream was emitted - await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); - }); - - it("Correctly updates payment stream on-chain to make user insolvent", async () => { - // Make sure the payment stream between the user and the DUMMY_BSP_ID actually exists - const paymentStreamExistsResult = - await userApi.call.paymentStreamsApi.getUsersOfPaymentStreamsOfProvider( - ShConsts.DUMMY_BSP_ID - ); - // Check if the first element of the returned vector is the user - assert(paymentStreamExistsResult[0].toString() === userAddress); - assert(paymentStreamExistsResult.length === 1); - - // 
Check the payment stream info between the user and the DUMMY_BSP_ID - const paymentStreamInfoBeforeDeletion = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - - // Add extra files to the user's storage with the DUMMY_BSP_ID - await userApi.file.newStorageRequest("res/cloud.jpg", "test/cloud.jpg", "bucket-1"); - await userApi.wait.bspVolunteer(); - await userApi.wait.bspStored(); - await userApi.file.newStorageRequest("res/adolphus.jpg", "test/adolphus.jpg", "bucket-3"); - await userApi.wait.bspVolunteer(); - await userApi.wait.bspStored(); - - // Check the payment stream info after adding the new files - const paymentStreamInfoAfterAddingFiles = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - - // The amount provided of the payment stream should be higher after adding the new files - assert( - paymentStreamInfoAfterAddingFiles - .unwrap() - .amountProvided.gt(paymentStreamInfoBeforeDeletion.unwrap().amountProvided) - ); - - // Seal one more block. - await userApi.sealBlock(); - - // Check if the user owes the provider. - const usersWithDebtResult = await bspApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( - ShConsts.DUMMY_BSP_ID, - 0 - ); - assert(usersWithDebtResult.isOk); - assert(usersWithDebtResult.asOk.length === 1); - assert(usersWithDebtResult.asOk[0].toString() === userAddress); - - // Seal one more block with the pending extrinsics. 
- await userApi.sealBlock(); - - // Get the current price of storage from the runtime, the new stream deposit and the ED - const currentPriceOfStorage = await userApi.query.paymentStreams.currentPricePerUnitPerTick(); - const newStreamDeposit = userApi.consts.paymentStreams.newStreamDeposit; - const existentialDeposit = userApi.consts.balances.existentialDeposit; - - // Get the current free balance of the user - const freeBalance = (await userApi.query.system.account(userAddress)).data.free; - - // To make the user insolvent, we need to update the payment stream with a very high amount - // and advance new stream deposit blocks - // To do this, the new amount provided should be equal to the free balance of the user divided by - // the current price of storage multiplied by the new stream deposit - const newAmountProvidedForInsolvency = freeBalance - .div(currentPriceOfStorage.mul(newStreamDeposit)) - .sub(existentialDeposit); - - // Make the user insolvent by updating the payment stream with a very high amount - const updateDynamicRatePaymentStreamResult = await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.paymentStreams.updateDynamicRatePaymentStream( - ShConsts.DUMMY_BSP_ID, - userAddress, - newAmountProvidedForInsolvency - ) - ) - ); - const { extSuccess } = updateDynamicRatePaymentStreamResult; - strictEqual(extSuccess, true, "Extrinsic should be successful"); - - // Assert that event dynamic-rate payment stream update was emitted - userApi.assertEvent( - "paymentStreams", - "DynamicRatePaymentStreamUpdated", - updateDynamicRatePaymentStreamResult.events - ); - // Get the on-chain payment stream information - const [userAccount, providerId, newAmountProvided] = fetchEventData( - userApi.events.paymentStreams.DynamicRatePaymentStreamUpdated, - await userApi.query.system.events() - ); - // Assert that the information on-chain is correct - strictEqual(userAccount.toString(), userAddress); - strictEqual(providerId.toString(), 
ShConsts.DUMMY_BSP_ID.toString()); - strictEqual(newAmountProvided.toNumber(), newAmountProvidedForInsolvency.toNumber()); - }); - - it("Correctly flags update payment stream as without funds after charging", async () => { - // Get the last chargeable info of the dummy BSP before proof submission - const lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( - ShConsts.DUMMY_BSP_ID - ); - // Calculate the next challenge tick for the DUMMY_BSP_ID. - // We first get the last tick for which the BSP submitted a proof. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - ShConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; - - // Calculate how many blocks to advance until next challenge tick. - let currentBlock = await userApi.rpc.chain.getBlock(); - let currentBlockNumber = currentBlock.block.header.number.toNumber(); - const blocksToAdvance = nextChallengeTick - currentBlockNumber; - - // Advance blocksToAdvance blocks. - for (let i = 0; i < blocksToAdvance; i++) { - await userApi.sealBlock(); - } - - await userApi.assert.extrinsicPresent({ - method: "submitProof", - module: "proofsDealer", - checkTxPool: true, - assertLength: 3 - }); - - // Check that no Providers have submitted a valid proof yet. 
- currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - let providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); - - // Seal one more block with the pending extrinsics. - await userApi.sealBlock(); - - // Assert for the the event of the proof successfully submitted and verified. - const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); - - // Check that the Providers were added to the list of Providers that have submitted proofs - currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert( - providersWithProofs.isSome, - "There should be Providers that have submitted a valid proof" - ); - assert( - providersWithProofs.unwrap().size === 3, - "There should be three Providers that have submitted a valid proof" - ); - - // Check that the last chargeable info of the dummy BSP has not been updated yet - const lastChargeableInfoAfterProofSubmission = - await userApi.query.paymentStreams.lastChargeableInfo(ShConsts.DUMMY_BSP_ID); - assert( - lastChargeableInfo.priceIndex.toNumber() === - lastChargeableInfoAfterProofSubmission.priceIndex.toNumber() - ); - - // Seal one more block to update the last chargeable info of the Provider - await userApi.sealBlock(); - - // Assert for the the event of the last chargeable info of the Providers being updated - const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( - "paymentStreams", - "LastChargeableInfoUpdated" - ); - strictEqual( - lastChargeableInfoUpdatedEvents.length, - 3, - "There should be 
three last chargeable info updated events" - ); - - // Get the last chargeable info of the dummy BSP after it's updated - const lastChargeableInfoAfterUpdate = await userApi.query.paymentStreams.lastChargeableInfo( - ShConsts.DUMMY_BSP_ID - ); - - // Check the info of the payment stream between the user and the DUMMY_BSP_ID - const paymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - - // Check that the last chargeable price index of the dummy BSP is greater than the last charged price index of the payment stream - // so that the payment stream can be charged by the BSP - assert( - paymentStreamInfo - .unwrap() - .priceIndexWhenLastCharged.lt(lastChargeableInfoAfterUpdate.priceIndex) - ); - - // Check that the user now owes the provider. - const usersWithDebtResult = - await userApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( - ShConsts.DUMMY_BSP_ID, - 1 - ); - assert(usersWithDebtResult.isOk); - assert(usersWithDebtResult.asOk.length === 1); - assert(usersWithDebtResult.asOk[0].toString() === userAddress); - - // Check that the three Providers have tried to charge the user - // since the user has a payment stream with each of them - await userApi.assert.extrinsicPresent({ - method: "chargePaymentStreams", - module: "paymentStreams", - checkTxPool: true, - assertLength: 3 - }); - - // Seal a block to allow BSPs to charge the payment stream - await userApi.sealBlock(); - await sleep(500); - - // Assert that event for the BSP charging its payment stream was emitted - await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); - - // Assert that the payment stream between the user and the DUMMY_BSP_ID has been flagged as without - // funds, but the other two ones haven't - const insolventPaymentStreamInfoAfterCharging = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - 
assert(insolventPaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isSome); - const solventTwoPaymentStreamInfoAfterCharging = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.BSP_TWO_ID, - userAddress - ); - assert(solventTwoPaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isNone); - const solventThreePaymentStreamInfoAfterCharging = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.BSP_THREE_ID, - userAddress - ); - assert(solventThreePaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isNone); - }); - - it("Correctly flags user as without funds after grace period, emits event and deletes payment stream", async () => { - // Get the last chargeable info of the dummy BSP before proof submission - const lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( - ShConsts.DUMMY_BSP_ID - ); - // Calculate the next challenge tick for the DUMMY_BSP_ID. - // We first get the last tick for which the BSP submitted a proof. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - ShConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; - - // Calculate how many blocks to advance until next challenge tick. 
- let currentBlock = await userApi.rpc.chain.getBlock(); - let currentBlockNumber = currentBlock.block.header.number.toNumber(); - const blocksToAdvance = nextChallengeTick - currentBlockNumber; - - // Advance blocksToAdvance blocks - for (let i = 0; i < blocksToAdvance; i++) { - await userApi.sealBlock(); - } - - await userApi.assert.extrinsicPresent({ - method: "submitProof", - module: "proofsDealer", - checkTxPool: true, - assertLength: 3 - }); - - // Check that no Providers have submitted a valid proof yet. - currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - let providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); - - // Seal one more block with the pending extrinsics. - await userApi.sealBlock(); - - // Assert for the the event of the proof successfully submitted and verified. 
- const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); - - // Check that the Providers were added to the list of Providers that have submitted proofs - currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - providersWithProofs = - await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); - assert( - providersWithProofs.isSome, - "There should be Providers that have submitted a valid proof" - ); - assert( - providersWithProofs.unwrap().size === 3, - "There should be three Providers that have submitted a valid proof" - ); - - // Check that the last chargeable info of the dummy BSP has not been updated yet - const lastChargeableInfoAfterProofSubmission = - await userApi.query.paymentStreams.lastChargeableInfo(ShConsts.DUMMY_BSP_ID); - assert( - lastChargeableInfo.priceIndex.toNumber() === - lastChargeableInfoAfterProofSubmission.priceIndex.toNumber() - ); - - // Seal one more block to update the last chargeable info of the Provider - await userApi.sealBlock(); - - // Assert for the the event of the last chargeable info of the Providers being updated - const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( - "paymentStreams", - "LastChargeableInfoUpdated" - ); - strictEqual( - lastChargeableInfoUpdatedEvents.length, - 3, - "There should be three last chargeable info updated events" - ); - - // Check that the three Providers have tried to charge the user - // since the user has a payment stream with each of them - await userApi.assert.extrinsicPresent({ - method: "chargePaymentStreams", - module: "paymentStreams", - checkTxPool: true, - assertLength: 3 - }); - - // Seal a block to allow BSPs to charge the payment stream - const blockResult = await userApi.sealBlock(); - - // Assert that event for the BSP charging its payment stream 
was emitted - await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); - - // Check if the "UserWithoutFunds" event was emitted. If it wasn't, advance until - // the next challenge period and check again - if (!blockResult.events?.find((event) => event.event.method === "UserWithoutFunds")) { - // Calculate the next challenge tick for the DUMMY_BSP_ID. - // We first get the last tick for which the BSP submitted a proof. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - ShConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; - - // Calculate how many blocks to advance until next challenge tick. - currentBlock = await userApi.rpc.chain.getBlock(); - currentBlockNumber = currentBlock.block.header.number.toNumber(); - const blocksToAdvance = nextChallengeTick - currentBlockNumber; - // Advance blocksToAdvance blocks - for (let i = 0; i < blocksToAdvance; i++) { - await userApi.sealBlock(); - } - - await userApi.assert.extrinsicPresent({ - method: "submitProof", - module: "proofsDealer", - checkTxPool: true, - assertLength: 3 - }); - - // Seal one more block with the pending extrinsics. 
- await userApi.sealBlock(); - - // Seal another block so the last chargeable info of the providers is updated - await userApi.sealBlock(); - - // Check that the three Providers have tried to charge the user - // since the user has a payment stream with each of them - await userApi.assert.extrinsicPresent({ - method: "chargePaymentStreams", - module: "paymentStreams", - checkTxPool: true, - assertLength: 3 - }); - - // Seal a block to allow BSPs to charge the payment stream - await userApi.sealBlock(); - } - - // Assert that the user without funds event was emitted - await userApi.assert.eventPresent("paymentStreams", "UserWithoutFunds"); - - // Check that the payment stream between the user and the DUMMY_BSP_ID has been deleted - const deletedPaymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - assert(deletedPaymentStreamInfo.isNone); - }); - - it("BSP correctly deletes all files from an insolvent user", async () => { - // We execute this loop three times since that's the amount of files the user has stored with the BSPs - for (let i = 0; i < 3; i++) { - // Check that the three Providers are trying to delete the files of the user - await userApi.assert.extrinsicPresent({ - method: "stopStoringForInsolventUser", - module: "fileSystem", - checkTxPool: true, - assertLength: 3 - }); - - // Seal a block to allow BSPs to delete the files of the user - await userApi.sealBlock(); - - // Assert that event for the BSP deleting the files of the user was emitted - const spStopStoringForInsolventUserEvents = await userApi.assert.eventMany( - "fileSystem", - "SpStopStoringInsolventUser" - ); - strictEqual( - spStopStoringForInsolventUserEvents.length, - 3, - "There should be three stop storing for insolvent user events" - ); - - // For each event, fetch its info and check if the BSP correctly deleted the files of the user - for (const event of spStopStoringForInsolventUserEvents) { - const 
stopStoringInsolventUserBlob = - userApi.events.fileSystem.SpStopStoringInsolventUser.is(event.event) && - event.event.data; - assert(stopStoringInsolventUserBlob, "Event doesn't match Type"); - if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.DUMMY_BSP_ID) { - assert( - ( - await bspApi.rpc.storagehubclient.isFileInForest( - null, - stopStoringInsolventUserBlob.fileKey - ) - ).isFalse - ); - } else if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.BSP_TWO_ID) { - assert( - ( - await bspTwoApi.rpc.storagehubclient.isFileInForest( - null, - stopStoringInsolventUserBlob.fileKey - ) - ).isFalse - ); - } else if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.BSP_THREE_ID) { - assert( - ( - await bspThreeApi.rpc.storagehubclient.isFileInForest( - null, - stopStoringInsolventUserBlob.fileKey - ) - ).isFalse - ); - } - } - - // Seal a block to allow BSPs to delete the files of the user - await userApi.sealBlock(); - } - - // After deleting all the files, the user should have no payment streams with any provider - const paymentStreamInfoAfterDeletion = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.DUMMY_BSP_ID, - userAddress - ); - assert(paymentStreamInfoAfterDeletion.isNone); - const paymentStreamInfoAfterDeletionTwo = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.BSP_TWO_ID, - userAddress - ); - assert(paymentStreamInfoAfterDeletionTwo.isNone); - const paymentStreamInfoAfterDeletionThree = - await userApi.query.paymentStreams.dynamicRatePaymentStreams( - ShConsts.BSP_THREE_ID, - userAddress - ); - assert(paymentStreamInfoAfterDeletionThree.isNone); - }); - } -); +import assert, { strictEqual } from "node:assert"; +import { after } from "node:test"; +import { + describeBspNet, + fetchEventData, + ShConsts, + sleep, + type EnrichedBspApi +} from "../../../util"; + +describeBspNet( + "BSPNet: Collect users debt", + { initialised: "multi", networkConfig: "standard" }, + ({ before, 
it, createUserApi, createBspApi, getLaunchResponse, createApi }) => { + let userApi: EnrichedBspApi; + let bspApi: EnrichedBspApi; + let bspTwoApi: EnrichedBspApi; + let bspThreeApi: EnrichedBspApi; + let userAddress: string; + + before(async () => { + const launchResponse = await getLaunchResponse(); + assert(launchResponse, "BSPNet failed to initialise"); + userApi = await createUserApi(); + bspApi = await createBspApi(); + bspTwoApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspTwoRpcPort}`); + bspThreeApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspThreeRpcPort}`); + userAddress = ShConsts.NODE_INFOS.user.AddressId; + }); + + after(async () => { + await bspTwoApi.disconnect(); + await bspThreeApi.disconnect(); + }); + + it("BSP correctly charges payment stream", async () => { + // Make sure the payment stream between the user and the DUMMY_BSP_ID actually exists + const paymentStreamExistsResult = + await userApi.call.paymentStreamsApi.getUsersOfPaymentStreamsOfProvider( + ShConsts.DUMMY_BSP_ID + ); + // Check if the first element of the returned vector is the user + assert(paymentStreamExistsResult[0].toString() === userAddress); + assert(paymentStreamExistsResult.length === 1); + + // Seal one more block. + await userApi.sealBlock(); + + // Check if the user owes the provider. + let usersWithDebtResult = await bspApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( + ShConsts.DUMMY_BSP_ID, + 0 + ); + assert(usersWithDebtResult.isOk); + assert(usersWithDebtResult.asOk.length === 1); + assert(usersWithDebtResult.asOk[0].toString() === userAddress); + + // Seal one more block with the pending extrinsics. + await userApi.sealBlock(); + + // Calculate the next challenge tick for the BSPs. It should be the same for all BSPs, + // since they all have the same file they were initialised with, and responded to it at + // the same time. + // We first get the last tick for which the BSP submitted a proof. 
+ const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + ShConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; + + // Calculate how many blocks to advance until next challenge tick. + let currentBlock = await userApi.rpc.chain.getBlock(); + let currentBlockNumber = currentBlock.block.header.number.toNumber(); + const blocksToAdvance = nextChallengeTick - currentBlockNumber; + + // Advance blocksToAdvance blocks. + for (let i = 0; i < blocksToAdvance; i++) { + await userApi.sealBlock(); + } + + await userApi.assert.extrinsicPresent({ + method: "submitProof", + module: "proofsDealer", + checkTxPool: true, + assertLength: 3 + }); + + // Check that no Providers have submitted a valid proof yet. + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + let providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); + + // Seal one more block with the pending extrinsics. + await userApi.sealBlock(); + + // Assert for the the event of the proof successfully submitted and verified. 
+ const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); + + // Check that the Providers were added to the list of Providers that have submitted proofs + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert( + providersWithProofs.isSome, + "There should be Providers that have submitted a valid proof" + ); + assert( + providersWithProofs.unwrap().size === 3, + "There should be three Providers that have submitted a valid proof" + ); + + // Check that the last chargeable info of the dummy BSP has not been updated yet + let lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( + ShConsts.DUMMY_BSP_ID + ); + assert(lastChargeableInfo.priceIndex.toNumber() === 0); + + // Seal one more block to update the last chargeable info of the Provider + await userApi.sealBlock(); + + // Assert for the the event of the last chargeable info of the Providers being updated + const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( + "paymentStreams", + "LastChargeableInfoUpdated" + ); + strictEqual( + lastChargeableInfoUpdatedEvents.length, + 3, + "There should be three last chargeable info updated events" + ); + + // Check the last chargeable info of the dummy BSP + lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( + ShConsts.DUMMY_BSP_ID + ); + + // Check the info of the payment stream between the user and the DUMMY_BSP_ID + const paymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + + // Check that the last chargeable price index of the dummy BSP is greater than the last charged price index of the payment stream + // so that the payment 
stream can be charged by the BSP + assert( + paymentStreamInfo.unwrap().priceIndexWhenLastCharged.lt(lastChargeableInfo.priceIndex) + ); + + // Check that the user now owes the provider. + usersWithDebtResult = await userApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( + ShConsts.DUMMY_BSP_ID, + 1 + ); + assert(usersWithDebtResult.isOk); + assert(usersWithDebtResult.asOk.length === 1); + assert(usersWithDebtResult.asOk[0].toString() === userAddress); + + // Check that the three Providers have tried to charge the user + // since the user has a payment stream with each of them + await userApi.assert.extrinsicPresent({ + method: "chargePaymentStreams", + module: "paymentStreams", + checkTxPool: true, + assertLength: 3 + }); + + // Seal a block to allow BSPs to charge the payment stream + await userApi.sealBlock(); + + // Assert that event for the BSP charging its payment stream was emitted + await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); + }); + + it("Correctly updates payment stream on-chain to make user insolvent", async () => { + // Make sure the payment stream between the user and the DUMMY_BSP_ID actually exists + const paymentStreamExistsResult = + await userApi.call.paymentStreamsApi.getUsersOfPaymentStreamsOfProvider( + ShConsts.DUMMY_BSP_ID + ); + // Check if the first element of the returned vector is the user + assert(paymentStreamExistsResult[0].toString() === userAddress); + assert(paymentStreamExistsResult.length === 1); + + // Check the payment stream info between the user and the DUMMY_BSP_ID + const paymentStreamInfoBeforeDeletion = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + + // Add extra files to the user's storage with the DUMMY_BSP_ID + await userApi.file.newStorageRequest("res/cloud.jpg", "test/cloud.jpg", "bucket-1"); + await userApi.wait.bspVolunteer(); + await userApi.wait.bspStored(); + await 
userApi.file.newStorageRequest("res/adolphus.jpg", "test/adolphus.jpg", "bucket-3"); + await userApi.wait.bspVolunteer(); + await userApi.wait.bspStored(); + + // Check the payment stream info after adding the new files + const paymentStreamInfoAfterAddingFiles = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + + // The amount provided of the payment stream should be higher after adding the new files + assert( + paymentStreamInfoAfterAddingFiles + .unwrap() + .amountProvided.gt(paymentStreamInfoBeforeDeletion.unwrap().amountProvided) + ); + + // Seal one more block. + await userApi.sealBlock(); + + // Check if the user owes the provider. + const usersWithDebtResult = await bspApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( + ShConsts.DUMMY_BSP_ID, + 0 + ); + assert(usersWithDebtResult.isOk); + assert(usersWithDebtResult.asOk.length === 1); + assert(usersWithDebtResult.asOk[0].toString() === userAddress); + + // Seal one more block with the pending extrinsics. 
+ await userApi.sealBlock(); + + // Get the current price of storage from the runtime, the new stream deposit and the ED + const currentPriceOfStorage = await userApi.query.paymentStreams.currentPricePerUnitPerTick(); + const newStreamDeposit = userApi.consts.paymentStreams.newStreamDeposit; + const existentialDeposit = userApi.consts.balances.existentialDeposit; + + // Get the current free balance of the user + const freeBalance = (await userApi.query.system.account(userAddress)).data.free; + + // To make the user insolvent, we need to update the payment stream with a very high amount + // and advance new stream deposit blocks + // To do this, the new amount provided should be equal to the free balance of the user divided by + // the current price of storage multiplied by the new stream deposit + const newAmountProvidedForInsolvency = freeBalance + .div(currentPriceOfStorage.mul(newStreamDeposit)) + .sub(existentialDeposit); + + // Make the user insolvent by updating the payment stream with a very high amount + const updateDynamicRatePaymentStreamResult = await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.paymentStreams.updateDynamicRatePaymentStream( + ShConsts.DUMMY_BSP_ID, + userAddress, + newAmountProvidedForInsolvency + ) + ) + ); + const { extSuccess } = updateDynamicRatePaymentStreamResult; + strictEqual(extSuccess, true, "Extrinsic should be successful"); + + // Assert that event dynamic-rate payment stream update was emitted + userApi.assertEvent( + "paymentStreams", + "DynamicRatePaymentStreamUpdated", + updateDynamicRatePaymentStreamResult.events + ); + // Get the on-chain payment stream information + const [userAccount, providerId, newAmountProvided] = fetchEventData( + userApi.events.paymentStreams.DynamicRatePaymentStreamUpdated, + await userApi.query.system.events() + ); + // Assert that the information on-chain is correct + strictEqual(userAccount.toString(), userAddress); + strictEqual(providerId.toString(), 
ShConsts.DUMMY_BSP_ID.toString()); + strictEqual(newAmountProvided.toNumber(), newAmountProvidedForInsolvency.toNumber()); + }); + + it("Correctly flags update payment stream as without funds after charging", async () => { + // Get the last chargeable info of the dummy BSP before proof submission + const lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( + ShConsts.DUMMY_BSP_ID + ); + // Calculate the next challenge tick for the DUMMY_BSP_ID. + // We first get the last tick for which the BSP submitted a proof. + const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + ShConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; + + // Calculate how many blocks to advance until next challenge tick. + let currentBlock = await userApi.rpc.chain.getBlock(); + let currentBlockNumber = currentBlock.block.header.number.toNumber(); + const blocksToAdvance = nextChallengeTick - currentBlockNumber; + + // Advance blocksToAdvance blocks. + for (let i = 0; i < blocksToAdvance; i++) { + await userApi.sealBlock(); + } + + await userApi.assert.extrinsicPresent({ + method: "submitProof", + module: "proofsDealer", + checkTxPool: true, + assertLength: 3 + }); + + // Check that no Providers have submitted a valid proof yet. 
+ currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + let providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); + + // Seal one more block with the pending extrinsics. + await userApi.sealBlock(); + + // Assert for the the event of the proof successfully submitted and verified. + const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); + + // Check that the Providers were added to the list of Providers that have submitted proofs + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert( + providersWithProofs.isSome, + "There should be Providers that have submitted a valid proof" + ); + assert( + providersWithProofs.unwrap().size === 3, + "There should be three Providers that have submitted a valid proof" + ); + + // Check that the last chargeable info of the dummy BSP has not been updated yet + const lastChargeableInfoAfterProofSubmission = + await userApi.query.paymentStreams.lastChargeableInfo(ShConsts.DUMMY_BSP_ID); + assert( + lastChargeableInfo.priceIndex.toNumber() === + lastChargeableInfoAfterProofSubmission.priceIndex.toNumber() + ); + + // Seal one more block to update the last chargeable info of the Provider + await userApi.sealBlock(); + + // Assert for the the event of the last chargeable info of the Providers being updated + const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( + "paymentStreams", + "LastChargeableInfoUpdated" + ); + strictEqual( + lastChargeableInfoUpdatedEvents.length, + 3, + "There should be 
three last chargeable info updated events" + ); + + // Get the last chargeable info of the dummy BSP after it's updated + const lastChargeableInfoAfterUpdate = await userApi.query.paymentStreams.lastChargeableInfo( + ShConsts.DUMMY_BSP_ID + ); + + // Check the info of the payment stream between the user and the DUMMY_BSP_ID + const paymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + + // Check that the last chargeable price index of the dummy BSP is greater than the last charged price index of the payment stream + // so that the payment stream can be charged by the BSP + assert( + paymentStreamInfo + .unwrap() + .priceIndexWhenLastCharged.lt(lastChargeableInfoAfterUpdate.priceIndex) + ); + + // Check that the user now owes the provider. + const usersWithDebtResult = + await userApi.call.paymentStreamsApi.getUsersWithDebtOverThreshold( + ShConsts.DUMMY_BSP_ID, + 1 + ); + assert(usersWithDebtResult.isOk); + assert(usersWithDebtResult.asOk.length === 1); + assert(usersWithDebtResult.asOk[0].toString() === userAddress); + + // Check that the three Providers have tried to charge the user + // since the user has a payment stream with each of them + await userApi.assert.extrinsicPresent({ + method: "chargePaymentStreams", + module: "paymentStreams", + checkTxPool: true, + assertLength: 3 + }); + + // Seal a block to allow BSPs to charge the payment stream + await userApi.sealBlock(); + await sleep(500); + + // Assert that event for the BSP charging its payment stream was emitted + await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); + + // Assert that the payment stream between the user and the DUMMY_BSP_ID has been flagged as without + // funds, but the other two ones haven't + const insolventPaymentStreamInfoAfterCharging = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + 
assert(insolventPaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isSome); + const solventTwoPaymentStreamInfoAfterCharging = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.BSP_TWO_ID, + userAddress + ); + assert(solventTwoPaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isNone); + const solventThreePaymentStreamInfoAfterCharging = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.BSP_THREE_ID, + userAddress + ); + assert(solventThreePaymentStreamInfoAfterCharging.unwrap().outOfFundsTick.isNone); + }); + + it("Correctly flags user as without funds after grace period, emits event and deletes payment stream", async () => { + // Get the last chargeable info of the dummy BSP before proof submission + const lastChargeableInfo = await userApi.query.paymentStreams.lastChargeableInfo( + ShConsts.DUMMY_BSP_ID + ); + // Calculate the next challenge tick for the DUMMY_BSP_ID. + // We first get the last tick for which the BSP submitted a proof. + const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + ShConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; + + // Calculate how many blocks to advance until next challenge tick. 
+ let currentBlock = await userApi.rpc.chain.getBlock(); + let currentBlockNumber = currentBlock.block.header.number.toNumber(); + const blocksToAdvance = nextChallengeTick - currentBlockNumber; + + // Advance blocksToAdvance blocks + for (let i = 0; i < blocksToAdvance; i++) { + await userApi.sealBlock(); + } + + await userApi.assert.extrinsicPresent({ + method: "submitProof", + module: "proofsDealer", + checkTxPool: true, + assertLength: 3 + }); + + // Check that no Providers have submitted a valid proof yet. + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + let providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert(providersWithProofs.isEmpty, "No Providers should have submitted a valid proof yet"); + + // Seal one more block with the pending extrinsics. + await userApi.sealBlock(); + + // Assert for the the event of the proof successfully submitted and verified. 
+ const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); + + // Check that the Providers were added to the list of Providers that have submitted proofs + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + providersWithProofs = + await userApi.query.proofsDealer.validProofSubmittersLastTicks(currentBlockNumber); + assert( + providersWithProofs.isSome, + "There should be Providers that have submitted a valid proof" + ); + assert( + providersWithProofs.unwrap().size === 3, + "There should be three Providers that have submitted a valid proof" + ); + + // Check that the last chargeable info of the dummy BSP has not been updated yet + const lastChargeableInfoAfterProofSubmission = + await userApi.query.paymentStreams.lastChargeableInfo(ShConsts.DUMMY_BSP_ID); + assert( + lastChargeableInfo.priceIndex.toNumber() === + lastChargeableInfoAfterProofSubmission.priceIndex.toNumber() + ); + + // Seal one more block to update the last chargeable info of the Provider + await userApi.sealBlock(); + + // Assert for the the event of the last chargeable info of the Providers being updated + const lastChargeableInfoUpdatedEvents = await userApi.assert.eventMany( + "paymentStreams", + "LastChargeableInfoUpdated" + ); + strictEqual( + lastChargeableInfoUpdatedEvents.length, + 3, + "There should be three last chargeable info updated events" + ); + + // Check that the three Providers have tried to charge the user + // since the user has a payment stream with each of them + await userApi.assert.extrinsicPresent({ + method: "chargePaymentStreams", + module: "paymentStreams", + checkTxPool: true, + assertLength: 3 + }); + + // Seal a block to allow BSPs to charge the payment stream + const blockResult = await userApi.sealBlock(); + + // Assert that event for the BSP charging its payment stream 
was emitted + await userApi.assert.eventPresent("paymentStreams", "PaymentStreamCharged"); + + // Check if the "UserWithoutFunds" event was emitted. If it wasn't, advance until + // the next challenge period and check again + if (!blockResult.events?.find((event) => event.event.method === "UserWithoutFunds")) { + // Calculate the next challenge tick for the DUMMY_BSP_ID. + // We first get the last tick for which the BSP submitted a proof. + const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + ShConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; + + // Calculate how many blocks to advance until next challenge tick. + currentBlock = await userApi.rpc.chain.getBlock(); + currentBlockNumber = currentBlock.block.header.number.toNumber(); + const blocksToAdvance = nextChallengeTick - currentBlockNumber; + // Advance blocksToAdvance blocks + for (let i = 0; i < blocksToAdvance; i++) { + await userApi.sealBlock(); + } + + await userApi.assert.extrinsicPresent({ + method: "submitProof", + module: "proofsDealer", + checkTxPool: true, + assertLength: 3 + }); + + // Seal one more block with the pending extrinsics. 
+ await userApi.sealBlock(); + + // Seal another block so the last chargeable info of the providers is updated + await userApi.sealBlock(); + + // Check that the three Providers have tried to charge the user + // since the user has a payment stream with each of them + await userApi.assert.extrinsicPresent({ + method: "chargePaymentStreams", + module: "paymentStreams", + checkTxPool: true, + assertLength: 3 + }); + + // Seal a block to allow BSPs to charge the payment stream + await userApi.sealBlock(); + } + + // Assert that the user without funds event was emitted + await userApi.assert.eventPresent("paymentStreams", "UserWithoutFunds"); + + // Check that the payment stream between the user and the DUMMY_BSP_ID has been deleted + const deletedPaymentStreamInfo = await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + assert(deletedPaymentStreamInfo.isNone); + }); + + it("BSP correctly deletes all files from an insolvent user", async () => { + // We execute this loop three times since that's the amount of files the user has stored with the BSPs + for (let i = 0; i < 3; i++) { + // Check that the three Providers are trying to delete the files of the user + await userApi.assert.extrinsicPresent({ + method: "stopStoringForInsolventUser", + module: "fileSystem", + checkTxPool: true, + assertLength: 3 + }); + + // Seal a block to allow BSPs to delete the files of the user + await userApi.sealBlock(); + + // Assert that event for the BSP deleting the files of the user was emitted + const spStopStoringForInsolventUserEvents = await userApi.assert.eventMany( + "fileSystem", + "SpStopStoringInsolventUser" + ); + strictEqual( + spStopStoringForInsolventUserEvents.length, + 3, + "There should be three stop storing for insolvent user events" + ); + + // For each event, fetch its info and check if the BSP correctly deleted the files of the user + for (const event of spStopStoringForInsolventUserEvents) { + const 
stopStoringInsolventUserBlob = + userApi.events.fileSystem.SpStopStoringInsolventUser.is(event.event) && + event.event.data; + assert(stopStoringInsolventUserBlob, "Event doesn't match Type"); + if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.DUMMY_BSP_ID) { + assert( + ( + await bspApi.rpc.storagehubclient.isFileInForest( + null, + stopStoringInsolventUserBlob.fileKey + ) + ).isFalse + ); + } else if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.BSP_TWO_ID) { + assert( + ( + await bspTwoApi.rpc.storagehubclient.isFileInForest( + null, + stopStoringInsolventUserBlob.fileKey + ) + ).isFalse + ); + } else if (stopStoringInsolventUserBlob.spId.toString() === ShConsts.BSP_THREE_ID) { + assert( + ( + await bspThreeApi.rpc.storagehubclient.isFileInForest( + null, + stopStoringInsolventUserBlob.fileKey + ) + ).isFalse + ); + } + } + + // Seal a block to allow BSPs to delete the files of the user + await userApi.sealBlock(); + } + + // After deleting all the files, the user should have no payment streams with any provider + const paymentStreamInfoAfterDeletion = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.DUMMY_BSP_ID, + userAddress + ); + assert(paymentStreamInfoAfterDeletion.isNone); + const paymentStreamInfoAfterDeletionTwo = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.BSP_TWO_ID, + userAddress + ); + assert(paymentStreamInfoAfterDeletionTwo.isNone); + const paymentStreamInfoAfterDeletionThree = + await userApi.query.paymentStreams.dynamicRatePaymentStreams( + ShConsts.BSP_THREE_ID, + userAddress + ); + assert(paymentStreamInfoAfterDeletionThree.isNone); + }); + } +); diff --git a/test/suites/integration/bsp/onboard.test.ts b/test/suites/integration/bsp/onboard.test.ts index 22f7334dc..ffdbc23a0 100644 --- a/test/suites/integration/bsp/onboard.test.ts +++ b/test/suites/integration/bsp/onboard.test.ts @@ -1,171 +1,171 @@ -import Docker from "dockerode"; -import assert, { strictEqual 
} from "node:assert"; -import { - addBspContainer, - describeBspNet, - DOCKER_IMAGE, - sleep, - type EnrichedBspApi -} from "../../../util"; -import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; - -describeBspNet("BSPNet: Adding new BSPs", ({ before, createBspApi, createApi, it }) => { - let api: EnrichedBspApi; - - before(async () => { - api = await createBspApi(); - }); - - it("New BSP can be created", async () => { - const { containerName, rpcPort, p2pPort, peerId } = await addBspContainer({ - name: "nueva", - additionalArgs: [ - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - - await it("is in a running container", async () => { - const docker = new Docker(); - const { - State: { Status } - } = await docker.getContainer(containerName).inspect(); - strictEqual(Status, "running"); - }); - - await it("can open new API connection with", async () => { - console.log(`connecting to rpcPort ${rpcPort}`); - await using newApi = await createApi(`ws://127.0.0.1:${rpcPort}`); - - await it("has correct reported peerId", async () => { - const localPeerId = await newApi.rpc.system.localPeerId(); - strictEqual(localPeerId.toString(), peerId); - }); - - await it("is synced with current block", async () => { - // Give some time to the BSP to catch up - await sleep(500); - - const syncHeight = (await newApi.rpc.chain.getHeader()).number.toNumber(); - const currentHeight = (await api.rpc.chain.getHeader()).number.toNumber(); - strictEqual(syncHeight, currentHeight); - }); - - await it("is listening on the correct P2P port", async () => { - const listenAddresses = (await newApi.rpc.system.localListenAddresses()).map((address) => - address.toString() - ); - const matchingAddress = listenAddresses.filter((address) => - address.includes(`/tcp/${p2pPort}/p2p/`) - ); - strictEqual(matchingAddress.length > 1, true); - }); - }); - - await it("is peer of other nodes", async () => { - const peers = (await 
api.rpc.system.peers()).map(({ peerId }) => peerId.toString()); - strictEqual(peers.includes(peerId), true, `PeerId ${peerId} not found in ${peers}`); - }); - }); - - it("Lots of BSPs can be created", async () => { - await addBspContainer({ - name: "timbo1", - additionalArgs: [ - "--database=rocksdb", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - await addBspContainer({ - name: "timbo2", - additionalArgs: [ - "--database=paritydb", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - await addBspContainer({ - name: "timbo3", - additionalArgs: [ - "--database=auto", - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - - const docker = new Docker(); - const sh_nodes = ( - await docker.listContainers({ - filters: { ancestor: [DOCKER_IMAGE] } - }) - ).flatMap(({ Names }) => Names); - - strictEqual(sh_nodes.length > 3, true); - }); - - it("Inserts new blockchain service keys (BCSV)", async () => { - const keystorePath = "/tmp/test/insert/keystore"; - const { rpcPort } = await addBspContainer({ - name: "insert-keys-container", - additionalArgs: [ - `--keystore-path=${keystorePath}`, - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - await using insertKeysApi = await createApi(`ws://127.0.0.1:${rpcPort}`); - - const alicePubKey = "0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"; - const bobPubKey = "0x8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"; - const bcsvKeyType = "bcsv"; - const bobSeed = "//Bob"; - - const hasAliceKey = await insertKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); - strictEqual(hasAliceKey.isTrue, true); - - let hasBobKey = await insertKeysApi.rpc.author.hasKey(bobPubKey, bcsvKeyType); - strictEqual(hasBobKey.isTrue, false); - - // Rotate keys and check that Bob's pub key is now in Keystore. 
- await insertKeysApi.rpc.storagehubclient.insertBcsvKeys(bobSeed); - hasBobKey = await insertKeysApi.rpc.author.hasKey(bobPubKey, bcsvKeyType); - strictEqual(hasBobKey.isTrue, true); - }); - - it("Removes BCSV keys from keystore", async () => { - const keystore_path = "/tmp/test/remove/keystore"; - const { rpcPort } = await addBspContainer({ - name: "remove-keys-container", - additionalArgs: [ - `--keystore-path=${keystore_path}`, - `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, - `--jump-capacity=${CAPACITY[1024]}` - ] - }); - await using removeKeysApi = await createApi(`ws://127.0.0.1:${rpcPort}`); - const alicePubKey = "0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"; - const davePubKey = "0x306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"; - const bcsvKeyType = "bcsv"; - const daveSeed = "//Dave"; - - let hasAliceKey = await removeKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); - strictEqual(hasAliceKey.isTrue, true); - - let hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); - strictEqual(hasDaveKey.isTrue, false); - - // Rotate keys and check that Dave's pub key is now in Keystore. - await removeKeysApi.rpc.storagehubclient.insertBcsvKeys(daveSeed); - hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); - strictEqual(hasDaveKey.isTrue, true); - - await removeKeysApi.rpc.storagehubclient.removeBcsvKeys(keystore_path); - - // We still have Alice's key in `--dev` mode because it's inserted into the in-memory Keystore. 
- hasAliceKey = await removeKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); - strictEqual(hasAliceKey.isTrue, true); - hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); - assert(hasDaveKey.isFalse); - }); -}); +import Docker from "dockerode"; +import assert, { strictEqual } from "node:assert"; +import { + addBspContainer, + describeBspNet, + DOCKER_IMAGE, + sleep, + type EnrichedBspApi +} from "../../../util"; +import { CAPACITY, MAX_STORAGE_CAPACITY } from "../../../util/bspNet/consts.ts"; + +describeBspNet("BSPNet: Adding new BSPs", ({ before, createBspApi, createApi, it }) => { + let api: EnrichedBspApi; + + before(async () => { + api = await createBspApi(); + }); + + it("New BSP can be created", async () => { + const { containerName, rpcPort, p2pPort, peerId } = await addBspContainer({ + name: "nueva", + additionalArgs: [ + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + + await it("is in a running container", async () => { + const docker = new Docker(); + const { + State: { Status } + } = await docker.getContainer(containerName).inspect(); + strictEqual(Status, "running"); + }); + + await it("can open new API connection with", async () => { + console.log(`connecting to rpcPort ${rpcPort}`); + await using newApi = await createApi(`ws://127.0.0.1:${rpcPort}`); + + await it("has correct reported peerId", async () => { + const localPeerId = await newApi.rpc.system.localPeerId(); + strictEqual(localPeerId.toString(), peerId); + }); + + await it("is synced with current block", async () => { + // Give some time to the BSP to catch up + await sleep(500); + + const syncHeight = (await newApi.rpc.chain.getHeader()).number.toNumber(); + const currentHeight = (await api.rpc.chain.getHeader()).number.toNumber(); + strictEqual(syncHeight, currentHeight); + }); + + await it("is listening on the correct P2P port", async () => { + const listenAddresses = (await 
newApi.rpc.system.localListenAddresses()).map((address) => + address.toString() + ); + const matchingAddress = listenAddresses.filter((address) => + address.includes(`/tcp/${p2pPort}/p2p/`) + ); + strictEqual(matchingAddress.length > 1, true); + }); + }); + + await it("is peer of other nodes", async () => { + const peers = (await api.rpc.system.peers()).map(({ peerId }) => peerId.toString()); + strictEqual(peers.includes(peerId), true, `PeerId ${peerId} not found in ${peers}`); + }); + }); + + it("Lots of BSPs can be created", async () => { + await addBspContainer({ + name: "timbo1", + additionalArgs: [ + "--database=rocksdb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + await addBspContainer({ + name: "timbo2", + additionalArgs: [ + "--database=paritydb", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + await addBspContainer({ + name: "timbo3", + additionalArgs: [ + "--database=auto", + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + + const docker = new Docker(); + const sh_nodes = ( + await docker.listContainers({ + filters: { ancestor: [DOCKER_IMAGE] } + }) + ).flatMap(({ Names }) => Names); + + strictEqual(sh_nodes.length > 3, true); + }); + + it("Inserts new blockchain service keys (BCSV)", async () => { + const keystorePath = "/tmp/test/insert/keystore"; + const { rpcPort } = await addBspContainer({ + name: "insert-keys-container", + additionalArgs: [ + `--keystore-path=${keystorePath}`, + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + await using insertKeysApi = await createApi(`ws://127.0.0.1:${rpcPort}`); + + const alicePubKey = "0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"; + const bobPubKey = "0x8eaf04151687736326c9fea17e25fc5287613693c912909cb226aa4794f26a48"; + const bcsvKeyType = "bcsv"; + const bobSeed = "//Bob"; + 
+ const hasAliceKey = await insertKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); + strictEqual(hasAliceKey.isTrue, true); + + let hasBobKey = await insertKeysApi.rpc.author.hasKey(bobPubKey, bcsvKeyType); + strictEqual(hasBobKey.isTrue, false); + + // Rotate keys and check that Bob's pub key is now in Keystore. + await insertKeysApi.rpc.storagehubclient.insertBcsvKeys(bobSeed); + hasBobKey = await insertKeysApi.rpc.author.hasKey(bobPubKey, bcsvKeyType); + strictEqual(hasBobKey.isTrue, true); + }); + + it("Removes BCSV keys from keystore", async () => { + const keystore_path = "/tmp/test/remove/keystore"; + const { rpcPort } = await addBspContainer({ + name: "remove-keys-container", + additionalArgs: [ + `--keystore-path=${keystore_path}`, + `--max-storage-capacity=${MAX_STORAGE_CAPACITY}`, + `--jump-capacity=${CAPACITY[1024]}` + ] + }); + await using removeKeysApi = await createApi(`ws://127.0.0.1:${rpcPort}`); + const alicePubKey = "0xd43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"; + const davePubKey = "0x306721211d5404bd9da88e0204360a1a9ab8b87c66c1bc2fcdd37f3c2222cc20"; + const bcsvKeyType = "bcsv"; + const daveSeed = "//Dave"; + + let hasAliceKey = await removeKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); + strictEqual(hasAliceKey.isTrue, true); + + let hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); + strictEqual(hasDaveKey.isTrue, false); + + // Rotate keys and check that Dave's pub key is now in Keystore. + await removeKeysApi.rpc.storagehubclient.insertBcsvKeys(daveSeed); + hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); + strictEqual(hasDaveKey.isTrue, true); + + await removeKeysApi.rpc.storagehubclient.removeBcsvKeys(keystore_path); + + // We still have Alice's key in `--dev` mode because it's inserted into the in-memory Keystore. 
+ hasAliceKey = await removeKeysApi.rpc.author.hasKey(alicePubKey, bcsvKeyType); + strictEqual(hasAliceKey.isTrue, true); + hasDaveKey = await removeKeysApi.rpc.author.hasKey(davePubKey, bcsvKeyType); + assert(hasDaveKey.isFalse); + }); +}); diff --git a/test/suites/integration/bsp/submit-proofs.test.ts b/test/suites/integration/bsp/submit-proofs.test.ts index 35bd4142a..f49dd7320 100644 --- a/test/suites/integration/bsp/submit-proofs.test.ts +++ b/test/suites/integration/bsp/submit-proofs.test.ts @@ -1,678 +1,678 @@ -import assert, { strictEqual } from "node:assert"; -import { - describeBspNet, - shUser, - sleep, - type EnrichedBspApi, - type FileMetadata, - ShConsts -} from "../../../util"; -import { BSP_THREE_ID, BSP_TWO_ID, DUMMY_BSP_ID } from "../../../util/bspNet/consts"; - -describeBspNet( - "BSP: Many BSPs Submit Proofs", - { initialised: "multi", networkConfig: "standard" }, - ({ before, createUserApi, after, it, createApi, createBspApi, getLaunchResponse }) => { - let userApi: EnrichedBspApi; - let bspApi: EnrichedBspApi; - let bspTwoApi: EnrichedBspApi; - let bspThreeApi: EnrichedBspApi; - let fileData: FileMetadata; - let oneBspFileData: FileMetadata; - let rootBeforeDeletion: string; - - before(async () => { - const launchResponse = await getLaunchResponse(); - assert(launchResponse, "BSPNet failed to initialise"); - fileData = launchResponse.fileData; - userApi = await createUserApi(); - bspApi = await createBspApi(); - bspTwoApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspTwoRpcPort}`); - bspThreeApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspThreeRpcPort}`); - }); - - after(async () => { - await bspTwoApi.disconnect(); - await bspThreeApi.disconnect(); - }); - - it("Network launches and can be queried", async () => { - const userNodePeerId = await userApi.rpc.system.localPeerId(); - strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId); - const bspNodePeerId = await 
bspApi.rpc.system.localPeerId(); - strictEqual(bspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.bsp.expectedPeerId); - }); - - it("Many BSPs are challenged and correctly submit proofs", async () => { - // Calculate the next challenge tick for the BSPs. It should be the same for all BSPs, - // since they all have the same file they were initialised with, and responded to it at - // the same time. - // We first get the last tick for which the BSP submitted a proof. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - userApi.shConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - userApi.shConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; - // Finally, advance to the next challenge tick. - await userApi.advanceToBlock(nextChallengeTick); - - await userApi.assert.extrinsicPresent({ - module: "proofsDealer", - method: "submitProof", - checkTxPool: true, - assertLength: 3, - timeout: 10000 - }); - - // Seal one more block with the pending extrinsics. - await userApi.sealBlock(); - - // Assert for the the event of the proof successfully submitted and verified. - const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); - - // Get the new last tick for which the BSP submitted a proof. - // It should be the previous last tick plus one BSP period. 
- const lastTickResultAfterProof = - await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - userApi.shConsts.DUMMY_BSP_ID - ); - assert(lastTickResultAfterProof.isOk); - const lastTickBspSubmittedProofAfterProof = lastTickResultAfterProof.asOk.toNumber(); - strictEqual( - lastTickBspSubmittedProofAfterProof, - lastTickBspSubmittedProof + challengePeriod, - "The last tick for which the BSP submitted a proof should be the previous last tick plus one BSP period" - ); - - // Get the new deadline for the BSP. - // It should be the current last tick, plus one BSP period, plus the challenges tick tolerance. - const challengesTickTolerance = Number(userApi.consts.proofsDealer.challengeTicksTolerance); - const newDeadline = - lastTickBspSubmittedProofAfterProof + challengePeriod + challengesTickTolerance; - const newDeadlineResult = await userApi.call.proofsDealerApi.getNextDeadlineTick( - userApi.shConsts.DUMMY_BSP_ID - ); - assert(newDeadlineResult.isOk); - const newDeadlineOnChain = newDeadlineResult.asOk.toNumber(); - strictEqual( - newDeadline, - newDeadlineOnChain, - "The deadline should be the same as the one we just calculated" - ); - }); - - it("BSP fails to submit proof and is marked as slashable", async () => { - // Get BSP-Down's deadline. - const bspDownDeadlineResult = await userApi.call.proofsDealerApi.getNextDeadlineTick( - userApi.shConsts.BSP_DOWN_ID - ); - assert(bspDownDeadlineResult.isOk); - const bspDownDeadline = bspDownDeadlineResult.asOk.toNumber(); - - // Get the last tick for which the BSP-Down submitted a proof before advancing to the deadline. - const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - userApi.shConsts.BSP_DOWN_ID - ); - assert(lastTickResult.isOk); - const lastTickBspDownSubmittedProof = lastTickResult.asOk.toNumber(); - // Finally, advance to the next challenge tick. 
- await userApi.advanceToBlock(bspDownDeadline); - - // Expect to see a `SlashableProvider` event in the last block. - const slashableProviderEvent = await userApi.assert.eventPresent( - "proofsDealer", - "SlashableProvider" - ); - const slashableProviderEventDataBlob = - userApi.events.proofsDealer.SlashableProvider.is(slashableProviderEvent.event) && - slashableProviderEvent.event.data; - assert(slashableProviderEventDataBlob, "Event doesn't match Type"); - strictEqual( - slashableProviderEventDataBlob.provider.toString(), - userApi.shConsts.BSP_DOWN_ID, - "The BSP-Down should be slashable" - ); - - // Get the last tick for which the BSP-Down submitted a proof after advancing to the deadline. - const lastTickResultAfterSlashable = - await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - userApi.shConsts.BSP_DOWN_ID - ); - assert(lastTickResultAfterSlashable.isOk); - const lastTickBspDownSubmittedProofAfterSlashable = - lastTickResultAfterSlashable.asOk.toNumber(); - - // The new last tick should be equal to the last tick before BSP-Down was marked as slashable plus one challenge period. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - userApi.shConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - strictEqual( - lastTickBspDownSubmittedProofAfterSlashable, - lastTickBspDownSubmittedProof + challengePeriod, - "The last tick for which the BSP-Down submitted a proof should be the last tick before BSP-Down was marked as slashable plus one challenge period" - ); - }); - - it( - "BSP stops storing last file", - { skip: "Not implemented yet. Needs RPC method to build proofs." }, - async () => { - // TODO: Build inclusion forest proof for file. - // TODO: BSP-Three sends transaction to stop storing the only file it has. - console.log(fileData); - // // Build transaction for BSP-Three to stop storing the only file it has. 
- // const call = bspThreeApi.sealBlock( - // bspThreeApi.tx.fileSystem.bspStopStoring( - // fileData.fileKey, - // fileData.bucketId, - // fileData.location, - // fileData.owner, - // fileData.fingerprint, - // fileData.fileSize, - // false - // ), - // bspThreeKey - // ); - } - ); - - it( - "BSP can correctly delete a file from its forest and runtime correctly updates its root", - { skip: "Not implemented yet. Needs RPC method to build proofs." }, - async () => { - // TODO: Setup a BSP that has two files which lie under the same NibbledBranch in the forest. - // TODO: Generate the proof to delete one of the files. - /* let inclusionForestProof = bspThreeApi.rpc.storagehubclient.buildForestRoot(fileData.fileKey); */ - // TODO: Request the deletion of the file: - /* const fileDeletionRequestResult = bspThreeApi.sealBlock(bspThreeApi.tx.fileSystem.bspRequestStopStoring( - fileData.fileKey, - fileData.bucketId, - fileData.location, - fileData.owner, - fileData.fingerprint, - fileData.fileSize, - false, - inclusion_forest_proof: ForestProof, - ); */ - // Wait enough blocks for the deletion to be allowed. - /* const currentBlock = await bspThreeApi.rpc.chain.getBlock(); - const currentBlockNumber = currentBlock.block.header.number.toNumber(); - const cooldown = currentBlockNumber + bspThreeApi.consts.fileSystem.minWaitForStopStoring.toNumber(); - await bspThreeApi.advanceToBlock(cooldown); */ - // TODO: Confirm the request of deletion. Make sure the extrinsic doesn't fail and the root is updated correctly. - /* const fileDeletionConfirmResult = bspThreeApi.sealBlock(bspThreeApi.tx.fileSystem.bspConfirmStopStoring( - fileData.fileKey, - inclusionForestProof, - )); - // Check for the confirm stopped storing event. - let confirmStopStoringEvent = bspThreeApi.assert.eventPresent( - "fileSystem", - "BspConfirmStoppedStoring", - fileDeletionConfirmResult.events - ); - // Make sure the new root was updated correctly. 
- bspThreeApi.rpc.storagehubclient.deleteFile(fileData.fileKey); // Not sure if this is the correct way to do it. - const newRoot = bspThreeApi.rpc.storagehubclient.getForestRoot(); - const newRootInRuntime = confirmStopStoringEvent.event.data.newRoot; - assert(newRoot === newRootInRuntime, "The new root should be updated correctly"); - */ - } - ); - - it("BSP is not challenged any more", { skip: "Not implemented yet." }, async () => { - // TODO: Check that BSP-Three no longer has a challenge deadline. - }); - - it( - "BSP submits proof, transaction gets dropped, BSP-resubmits and succeeds", - { skip: "Dropping transactions is not implemented as testing utility yet." }, - async () => {} - ); - - it("New storage request sent by user, to only one BSP", async () => { - // Pause BSP-Two and BSP-Three. - await userApi.docker.pauseBspContainer("sh-bsp-two"); - await userApi.docker.pauseBspContainer("sh-bsp-three"); - - // Send transaction to create new storage request. - const source = "res/adolphus.jpg"; - const location = "test/adolphus.jpg"; - const bucketName = "nothingmuch-2"; - const fileData = await userApi.file.newStorageRequest(source, location, bucketName); - oneBspFileData = fileData; - }); - - it("Only one BSP confirms it", async () => { - await userApi.wait.bspVolunteer(1); - await userApi.wait.bspStored(1); - }); - - it("BSP correctly responds to challenge with new forest root", async () => { - // Advance to two challenge periods ahead for first BSP. - // This is because in the odd case that we're exactly on the next challenge tick right now, - // there is a race condition chance where the BSP will send the submit proof extrinsic in the - // next block, since the Forest write lock is released as a consequence of the confirm storing - // extrinsic. So we advance two challenge periods ahead to be sure. - - // First we get the last tick for which the BSP submitted a proof. 
- const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( - ShConsts.DUMMY_BSP_ID - ); - assert(lastTickResult.isOk); - const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate two challenge ticks ahead. - const nextChallengeTick = lastTickBspSubmittedProof + 2 * challengePeriod; - // Finally, advance two challenge ticks ahead. - await userApi.advanceToBlock(nextChallengeTick); - - // Wait for BSP to submit proof. - await sleep(1000); - - // There should be at least one pending submit proof transaction. - const submitProofsPending = await userApi.assert.extrinsicPresent({ - module: "proofsDealer", - method: "submitProof", - checkTxPool: true - }); - assert(submitProofsPending.length > 0); - - // Seal block and check that the transaction was successful. - await userApi.sealBlock(); - - // Assert for the event of the proof successfully submitted and verified. - const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual( - proofAcceptedEvents.length, - submitProofsPending.length, - "All pending submit proof transactions should have been successful" - ); - }); - - it("Resume BSPs, and they shouldn't volunteer for the expired storage request", async () => { - // Advance a number of blocks up to when the storage request times out for sure. 
- const storageRequestTtl = Number(userApi.consts.fileSystem.storageRequestTtl); - const currentBlock = await userApi.rpc.chain.getBlock(); - const currentBlockNumber = currentBlock.block.header.number.toNumber(); - await userApi.advanceToBlock(currentBlockNumber + storageRequestTtl, { - waitForBspProofs: [ShConsts.DUMMY_BSP_ID] - }); - - // Resume BSP-Two and BSP-Three. - await userApi.docker.resumeBspContainer({ containerName: "sh-bsp-two" }); - await userApi.docker.resumeBspContainer({ containerName: "sh-bsp-three" }); - - // Wait for BSPs to resync. - await sleep(1000); - - // There shouldn't be any pending volunteer transactions. - await assert.rejects( - async () => { - await userApi.assert.extrinsicPresent({ - module: "fileSystem", - method: "bspVolunteer", - checkTxPool: true - }); - }, - /No matching extrinsic found for fileSystem\.bspVolunteer/, - "There should be no pending volunteer transactions" - ); - }); - - it("BSP-Two still correctly responds to challenges with same forest root", async () => { - // Advance some blocks to allow the BSP to process the challenges and submit proofs. - for (let i = 0; i < 20; i++) { - await userApi.sealBlock(); - await sleep(500); - } - - // Advance to next challenge tick for BSP-Two. - // First we get the last tick for which the BSP submitted a proof. - const lastTickResult = - await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(BSP_TWO_ID); - assert(lastTickResult.isOk); - const lastTickBspTwoSubmittedProof = lastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const challengePeriodResult = - await userApi.call.proofsDealerApi.getChallengePeriod(BSP_TWO_ID); - assert(challengePeriodResult.isOk); - const challengePeriod = challengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - const nextChallengeTick = lastTickBspTwoSubmittedProof + challengePeriod; - // Finally, advance to the next challenge tick. 
- await userApi.advanceToBlock(nextChallengeTick); - - // Wait for tasks to execute and for the BSPs to submit proofs. - await sleep(500); - - // There should be at least one pending submit proof transaction. - const submitProofsPending = await userApi.assert.extrinsicPresent({ - module: "proofsDealer", - method: "submitProof", - checkTxPool: true - }); - assert(submitProofsPending.length > 0); - - // Seal block and check that the transaction was successful. - await userApi.sealBlock(); - - // Assert for the event of the proof successfully submitted and verified. - const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); - strictEqual( - proofAcceptedEvents.length, - submitProofsPending.length, - "All pending submit proof transactions should have been successful" - ); - }); - - it( - "Custom challenge is added", - { skip: "Not implemented yet. All BSPs have the same files." }, - async () => { - await it("Custom challenge is included in checkpoint challenge round", async () => { - // TODO: Send transaction for custom challenge with new file key. - // TODO: Advance until next checkpoint challenge block. - // TODO: Check that custom challenge was included in checkpoint challenge round. - }); - - await it("BSP that has it responds to custom challenge with proof of inclusion", async () => { - // TODO: Advance until next challenge for BSP. - // TODO: Build block with proof submission. - // TODO: Check that proof submission was successful, including the custom challenge. - }); - - await it("BSPs who don't have it respond non-inclusion proof", async () => { - // TODO: Advance until next challenge for BSP-Two and BSP-Three. - // TODO: Build block with proof submission. - // TODO: Check that proof submission was successful, with proof of non-inclusion. - }); - } - ); - - it("File is deleted by user", async () => { - // Get the root of the BSP that has the file before deletion. 
- const bspMetadata = await userApi.query.providers.backupStorageProviders( - ShConsts.DUMMY_BSP_ID - ); - assert(bspMetadata, "BSP metadata should exist"); - assert(bspMetadata.isSome, "BSP metadata should be Some"); - const bspMetadataBlob = bspMetadata.unwrap(); - rootBeforeDeletion = bspMetadataBlob.root.toHex(); - // Make sure it matches the one of the actual merkle forest. - const actualRoot = await bspApi.rpc.storagehubclient.getForestRoot(null); - strictEqual( - rootBeforeDeletion, - actualRoot.toHex(), - "The root of the BSP should match the actual merkle forest root." - ); - - // User sends file deletion request. - await userApi.sealBlock( - userApi.tx.fileSystem.deleteFile( - oneBspFileData.bucketId, - oneBspFileData.fileKey, - oneBspFileData.location, - oneBspFileData.fileSize, - oneBspFileData.fingerprint, - null - ), - shUser - ); - - // Check for a file deletion request event. - await userApi.assert.eventPresent("fileSystem", "FileDeletionRequest"); - - // Advance until the deletion request expires so that it can be processed. - const deletionRequestTtl = Number(userApi.consts.fileSystem.pendingFileDeletionRequestTtl); - const currentBlock = await userApi.rpc.chain.getBlock(); - const currentBlockNumber = currentBlock.block.header.number.toNumber(); - await userApi.advanceToBlock(currentBlockNumber + deletionRequestTtl, { - waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] - }); - - // Check for a file deletion request event. - await userApi.assert.eventPresent("fileSystem", "PriorityChallengeForFileDeletionQueued"); - }); - - it("Priority challenge is included in checkpoint challenge round", async () => { - // Advance to next checkpoint challenge block. 
- const checkpointChallengePeriod = Number( - userApi.consts.proofsDealer.checkpointChallengePeriod - ); - const lastCheckpointChallengeTick = Number( - await userApi.call.proofsDealerApi.getLastCheckpointChallengeTick() - ); - const nextCheckpointChallengeBlock = lastCheckpointChallengeTick + checkpointChallengePeriod; - await userApi.advanceToBlock(nextCheckpointChallengeBlock, { - waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] - }); - - // Check that the event for the priority challenge is emitted. - const newCheckpointChallengesEvent = await userApi.assert.eventPresent( - "proofsDealer", - "NewCheckpointChallenge" - ); - - // Check that the file key is in the included checkpoint challenges. - const newCheckpointChallengesEventDataBlob = - userApi.events.proofsDealer.NewCheckpointChallenge.is(newCheckpointChallengesEvent.event) && - newCheckpointChallengesEvent.event.data; - assert(newCheckpointChallengesEventDataBlob, "Event doesn't match Type"); - let containsFileKey = false; - for (const checkpointChallenge of newCheckpointChallengesEventDataBlob.challenges) { - if (checkpointChallenge[0].toHuman() === oneBspFileData.fileKey) { - containsFileKey = true; - break; - } - } - assert(containsFileKey, "The file key should be included in the checkpoint challenge."); - }); - - it("BSP that has the file responds with correct proof including the file key, and BSP that doesn't have the file responds with correct proof non-including the file key", async () => { - // Check who has a challenge tick coming up first: the BSP that has the file or BSP-Two who doesn't have it. - // Whoever has the challenge tick first, we check that they submitted a proof successfully first. - const currentTick = (await userApi.call.proofsDealerApi.getCurrentTick()).toNumber(); - - // Calculate next challenge tick for the BSP that has the file. - // We first get the last tick for which the BSP submitted a proof. 
- const dummyBspLastTickResult = - await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(ShConsts.DUMMY_BSP_ID); - assert(dummyBspLastTickResult.isOk); - const lastTickBspSubmittedProof = dummyBspLastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const dummyBspChallengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.DUMMY_BSP_ID - ); - assert(dummyBspChallengePeriodResult.isOk); - const dummyBspChallengePeriod = dummyBspChallengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - let dummyBspNextChallengeTick = lastTickBspSubmittedProof + dummyBspChallengePeriod; - // If it is exactly equal to the current tick, we take the next challenge tick. - if (dummyBspNextChallengeTick === currentTick) { - dummyBspNextChallengeTick += dummyBspChallengePeriod; - } - - // Calculate next challenge tick for BSP-Two. - // We first get the last tick for which the BSP submitted a proof. - const bspTwoLastTickResult = - await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(ShConsts.BSP_TWO_ID); - assert(bspTwoLastTickResult.isOk); - const bspTwoLastTickBspTwoSubmittedProof = bspTwoLastTickResult.asOk.toNumber(); - // Then we get the challenge period for the BSP. - const bspTwoChallengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( - ShConsts.BSP_TWO_ID - ); - assert(bspTwoChallengePeriodResult.isOk); - const bspTwoChallengePeriod = bspTwoChallengePeriodResult.asOk.toNumber(); - // Then we calculate the next challenge tick. - let bspTwoNextChallengeTick = bspTwoLastTickBspTwoSubmittedProof + bspTwoChallengePeriod; - // If it is exactly equal to the current tick, we take the next challenge tick. - if (bspTwoNextChallengeTick === currentTick) { - bspTwoNextChallengeTick += bspTwoChallengePeriod; - } - - const firstBspToRespond = - dummyBspNextChallengeTick < bspTwoNextChallengeTick - ? 
ShConsts.DUMMY_BSP_ID - : ShConsts.BSP_TWO_ID; - const secondBspToRespond = - dummyBspNextChallengeTick < bspTwoNextChallengeTick - ? ShConsts.BSP_TWO_ID - : ShConsts.DUMMY_BSP_ID; - const firstBlockToAdvance = - dummyBspNextChallengeTick < bspTwoNextChallengeTick - ? dummyBspNextChallengeTick - : bspTwoNextChallengeTick; - const secondBlockToAdvance = - dummyBspNextChallengeTick < bspTwoNextChallengeTick - ? bspTwoNextChallengeTick - : dummyBspNextChallengeTick; - - // Advance to first next challenge block. - await userApi.advanceToBlock(firstBlockToAdvance, { - waitForBspProofs: [DUMMY_BSP_ID, BSP_TWO_ID, BSP_THREE_ID] - }); - - // Wait for BSP to generate the proof and advance one more block. - await sleep(500); - await userApi.sealBlock(); - - // Check for a ProofAccepted event. - const firstChallengeBlockEvents = await userApi.assert.eventMany( - "proofsDealer", - "ProofAccepted" - ); - - // Check that at least one of the `ProofAccepted` events belongs to `firstBspToRespond`. - const atLeastOneEventBelongsToFirstBsp = firstChallengeBlockEvents.some((eventRecord) => { - const firstChallengeBlockEventDataBlob = - userApi.events.proofsDealer.ProofAccepted.is(eventRecord.event) && eventRecord.event.data; - assert(firstChallengeBlockEventDataBlob, "Event doesn't match Type"); - - return firstChallengeBlockEventDataBlob.provider.toString() === firstBspToRespond; - }); - assert(atLeastOneEventBelongsToFirstBsp, "No ProofAccepted event belongs to the first BSP"); - - // If the first BSP is the one removing the file, assert for the event of the mutations successfully applied in the runtime. - if (firstBspToRespond === ShConsts.DUMMY_BSP_ID) { - const mutationsAppliedEvents = await userApi.assert.eventMany( - "proofsDealer", - "MutationsApplied" - ); - strictEqual( - mutationsAppliedEvents.length, - 1, - "There should be one mutations applied event" - ); - - // Check that the mutations applied event belongs to the dummy BSP. 
- const mutationsAppliedEventDataBlob = - userApi.events.proofsDealer.MutationsApplied.is(mutationsAppliedEvents[0].event) && - mutationsAppliedEvents[0].event.data; - assert(mutationsAppliedEventDataBlob, "Event doesn't match Type"); - strictEqual( - mutationsAppliedEventDataBlob.provider.toString(), - ShConsts.DUMMY_BSP_ID, - "The mutations applied event should belong to the dummy BSP" - ); - } - - // Advance to second next challenge block. - await userApi.advanceToBlock(secondBlockToAdvance, { - waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] - }); - - // Wait for BSP to generate the proof and advance one more block. - await sleep(500); - const secondChallengeBlockResult = await userApi.sealBlock(); - - // Check for a ProofAccepted event. - const secondChallengeBlockEvents = await userApi.assert.eventMany( - "proofsDealer", - "ProofAccepted", - secondChallengeBlockResult.events - ); - - // Check that at least one of the `ProofAccepted` events belongs to `secondBspToRespond`. - const atLeastOneEventBelongsToSecondBsp = secondChallengeBlockEvents.some((eventRecord) => { - const secondChallengeBlockEventDataBlob = - userApi.events.proofsDealer.ProofAccepted.is(eventRecord.event) && eventRecord.event.data; - assert(secondChallengeBlockEventDataBlob, "Event doesn't match Type"); - - return secondChallengeBlockEventDataBlob.provider.toString() === secondBspToRespond; - }); - assert(atLeastOneEventBelongsToSecondBsp, "No ProofAccepted event belongs to the second BSP"); - - // If the second BSP is the one removing the file, assert for the event of the mutations successfully applied in the runtime. - if (secondBspToRespond === ShConsts.DUMMY_BSP_ID) { - const mutationsAppliedEvents = await userApi.assert.eventMany( - "proofsDealer", - "MutationsApplied" - ); - strictEqual( - mutationsAppliedEvents.length, - 1, - "There should be one mutations applied event" - ); - - // Check that the mutations applied event belongs to the dummy BSP. 
- const mutationsAppliedEventDataBlob = - userApi.events.proofsDealer.MutationsApplied.is(mutationsAppliedEvents[0].event) && - mutationsAppliedEvents[0].event.data; - assert(mutationsAppliedEventDataBlob, "Event doesn't match Type"); - strictEqual( - mutationsAppliedEventDataBlob.provider.toString(), - ShConsts.DUMMY_BSP_ID, - "The mutations applied event should belong to the dummy BSP" - ); - } - }); - - it("File is removed from Forest by BSP", async () => { - // Make sure the root was updated in the runtime - const bspMetadataAfterDeletion = await userApi.query.providers.backupStorageProviders( - ShConsts.DUMMY_BSP_ID - ); - assert(bspMetadataAfterDeletion, "BSP metadata should exist"); - assert(bspMetadataAfterDeletion.isSome, "BSP metadata should be Some"); - const bspMetadataAfterDeletionBlob = bspMetadataAfterDeletion.unwrap(); - assert( - bspMetadataAfterDeletionBlob.root.toHex() !== rootBeforeDeletion, - "The root should have been updated on chain" - ); - - // Check that the runtime root matches the forest root of the BSP. - const forestRoot = await bspApi.rpc.storagehubclient.getForestRoot(null); - strictEqual( - bspMetadataAfterDeletionBlob.root.toString(), - forestRoot.toString(), - "The runtime root should match the forest root of the BSP" - ); - }); - - it( - "File mutation is finalised and BSP removes it from File Storage", - { skip: "Not implemented yet." }, - async () => { - // TODO: Finalise block with mutations. - // TODO: Check that file is removed from File Storage. Need a RPC method for this. 
- } - ); - } -); +import assert, { strictEqual } from "node:assert"; +import { + describeBspNet, + shUser, + sleep, + type EnrichedBspApi, + type FileMetadata, + ShConsts +} from "../../../util"; +import { BSP_THREE_ID, BSP_TWO_ID, DUMMY_BSP_ID } from "../../../util/bspNet/consts"; + +describeBspNet( + "BSP: Many BSPs Submit Proofs", + { initialised: "multi", networkConfig: "standard" }, + ({ before, createUserApi, after, it, createApi, createBspApi, getLaunchResponse }) => { + let userApi: EnrichedBspApi; + let bspApi: EnrichedBspApi; + let bspTwoApi: EnrichedBspApi; + let bspThreeApi: EnrichedBspApi; + let fileData: FileMetadata; + let oneBspFileData: FileMetadata; + let rootBeforeDeletion: string; + + before(async () => { + const launchResponse = await getLaunchResponse(); + assert(launchResponse, "BSPNet failed to initialise"); + fileData = launchResponse.fileData; + userApi = await createUserApi(); + bspApi = await createBspApi(); + bspTwoApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspTwoRpcPort}`); + bspThreeApi = await createApi(`ws://127.0.0.1:${launchResponse?.bspThreeRpcPort}`); + }); + + after(async () => { + await bspTwoApi.disconnect(); + await bspThreeApi.disconnect(); + }); + + it("Network launches and can be queried", async () => { + const userNodePeerId = await userApi.rpc.system.localPeerId(); + strictEqual(userNodePeerId.toString(), userApi.shConsts.NODE_INFOS.user.expectedPeerId); + const bspNodePeerId = await bspApi.rpc.system.localPeerId(); + strictEqual(bspNodePeerId.toString(), userApi.shConsts.NODE_INFOS.bsp.expectedPeerId); + }); + + it("Many BSPs are challenged and correctly submit proofs", async () => { + // Calculate the next challenge tick for the BSPs. It should be the same for all BSPs, + // since they all have the same file they were initialised with, and responded to it at + // the same time. + // We first get the last tick for which the BSP submitted a proof. 
+ const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + userApi.shConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + userApi.shConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspSubmittedProof + challengePeriod; + // Finally, advance to the next challenge tick. + await userApi.advanceToBlock(nextChallengeTick); + + await userApi.assert.extrinsicPresent({ + module: "proofsDealer", + method: "submitProof", + checkTxPool: true, + assertLength: 3, + timeout: 10000 + }); + + // Seal one more block with the pending extrinsics. + await userApi.sealBlock(); + + // Assert for the the event of the proof successfully submitted and verified. + const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual(proofAcceptedEvents.length, 3, "There should be three proofs accepted events"); + + // Get the new last tick for which the BSP submitted a proof. + // It should be the previous last tick plus one BSP period. + const lastTickResultAfterProof = + await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + userApi.shConsts.DUMMY_BSP_ID + ); + assert(lastTickResultAfterProof.isOk); + const lastTickBspSubmittedProofAfterProof = lastTickResultAfterProof.asOk.toNumber(); + strictEqual( + lastTickBspSubmittedProofAfterProof, + lastTickBspSubmittedProof + challengePeriod, + "The last tick for which the BSP submitted a proof should be the previous last tick plus one BSP period" + ); + + // Get the new deadline for the BSP. + // It should be the current last tick, plus one BSP period, plus the challenges tick tolerance. 
+ const challengesTickTolerance = Number(userApi.consts.proofsDealer.challengeTicksTolerance); + const newDeadline = + lastTickBspSubmittedProofAfterProof + challengePeriod + challengesTickTolerance; + const newDeadlineResult = await userApi.call.proofsDealerApi.getNextDeadlineTick( + userApi.shConsts.DUMMY_BSP_ID + ); + assert(newDeadlineResult.isOk); + const newDeadlineOnChain = newDeadlineResult.asOk.toNumber(); + strictEqual( + newDeadline, + newDeadlineOnChain, + "The deadline should be the same as the one we just calculated" + ); + }); + + it("BSP fails to submit proof and is marked as slashable", async () => { + // Get BSP-Down's deadline. + const bspDownDeadlineResult = await userApi.call.proofsDealerApi.getNextDeadlineTick( + userApi.shConsts.BSP_DOWN_ID + ); + assert(bspDownDeadlineResult.isOk); + const bspDownDeadline = bspDownDeadlineResult.asOk.toNumber(); + + // Get the last tick for which the BSP-Down submitted a proof before advancing to the deadline. + const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + userApi.shConsts.BSP_DOWN_ID + ); + assert(lastTickResult.isOk); + const lastTickBspDownSubmittedProof = lastTickResult.asOk.toNumber(); + // Finally, advance to the next challenge tick. + await userApi.advanceToBlock(bspDownDeadline); + + // Expect to see a `SlashableProvider` event in the last block. + const slashableProviderEvent = await userApi.assert.eventPresent( + "proofsDealer", + "SlashableProvider" + ); + const slashableProviderEventDataBlob = + userApi.events.proofsDealer.SlashableProvider.is(slashableProviderEvent.event) && + slashableProviderEvent.event.data; + assert(slashableProviderEventDataBlob, "Event doesn't match Type"); + strictEqual( + slashableProviderEventDataBlob.provider.toString(), + userApi.shConsts.BSP_DOWN_ID, + "The BSP-Down should be slashable" + ); + + // Get the last tick for which the BSP-Down submitted a proof after advancing to the deadline. 
+ const lastTickResultAfterSlashable = + await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + userApi.shConsts.BSP_DOWN_ID + ); + assert(lastTickResultAfterSlashable.isOk); + const lastTickBspDownSubmittedProofAfterSlashable = + lastTickResultAfterSlashable.asOk.toNumber(); + + // The new last tick should be equal to the last tick before BSP-Down was marked as slashable plus one challenge period. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + userApi.shConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + strictEqual( + lastTickBspDownSubmittedProofAfterSlashable, + lastTickBspDownSubmittedProof + challengePeriod, + "The last tick for which the BSP-Down submitted a proof should be the last tick before BSP-Down was marked as slashable plus one challenge period" + ); + }); + + it( + "BSP stops storing last file", + { skip: "Not implemented yet. Needs RPC method to build proofs." }, + async () => { + // TODO: Build inclusion forest proof for file. + // TODO: BSP-Three sends transaction to stop storing the only file it has. + console.log(fileData); + // // Build transaction for BSP-Three to stop storing the only file it has. + // const call = bspThreeApi.sealBlock( + // bspThreeApi.tx.fileSystem.bspStopStoring( + // fileData.fileKey, + // fileData.bucketId, + // fileData.location, + // fileData.owner, + // fileData.fingerprint, + // fileData.fileSize, + // false + // ), + // bspThreeKey + // ); + } + ); + + it( + "BSP can correctly delete a file from its forest and runtime correctly updates its root", + { skip: "Not implemented yet. Needs RPC method to build proofs." }, + async () => { + // TODO: Setup a BSP that has two files which lie under the same NibbledBranch in the forest. + // TODO: Generate the proof to delete one of the files. 
+ /* let inclusionForestProof = bspThreeApi.rpc.storagehubclient.buildForestRoot(fileData.fileKey); */ + // TODO: Request the deletion of the file: + /* const fileDeletionRequestResult = bspThreeApi.sealBlock(bspThreeApi.tx.fileSystem.bspRequestStopStoring( + fileData.fileKey, + fileData.bucketId, + fileData.location, + fileData.owner, + fileData.fingerprint, + fileData.fileSize, + false, + inclusion_forest_proof: ForestProof, + ); */ + // Wait enough blocks for the deletion to be allowed. + /* const currentBlock = await bspThreeApi.rpc.chain.getBlock(); + const currentBlockNumber = currentBlock.block.header.number.toNumber(); + const cooldown = currentBlockNumber + bspThreeApi.consts.fileSystem.minWaitForStopStoring.toNumber(); + await bspThreeApi.advanceToBlock(cooldown); */ + // TODO: Confirm the request of deletion. Make sure the extrinsic doesn't fail and the root is updated correctly. + /* const fileDeletionConfirmResult = bspThreeApi.sealBlock(bspThreeApi.tx.fileSystem.bspConfirmStopStoring( + fileData.fileKey, + inclusionForestProof, + )); + // Check for the confirm stopped storing event. + let confirmStopStoringEvent = bspThreeApi.assert.eventPresent( + "fileSystem", + "BspConfirmStoppedStoring", + fileDeletionConfirmResult.events + ); + // Make sure the new root was updated correctly. + bspThreeApi.rpc.storagehubclient.deleteFile(fileData.fileKey); // Not sure if this is the correct way to do it. + const newRoot = bspThreeApi.rpc.storagehubclient.getForestRoot(); + const newRootInRuntime = confirmStopStoringEvent.event.data.newRoot; + assert(newRoot === newRootInRuntime, "The new root should be updated correctly"); + */ + } + ); + + it("BSP is not challenged any more", { skip: "Not implemented yet." }, async () => { + // TODO: Check that BSP-Three no longer has a challenge deadline. + }); + + it( + "BSP submits proof, transaction gets dropped, BSP-resubmits and succeeds", + { skip: "Dropping transactions is not implemented as testing utility yet." 
}, + async () => {} + ); + + it("New storage request sent by user, to only one BSP", async () => { + // Pause BSP-Two and BSP-Three. + await userApi.docker.pauseBspContainer("sh-bsp-two"); + await userApi.docker.pauseBspContainer("sh-bsp-three"); + + // Send transaction to create new storage request. + const source = "res/adolphus.jpg"; + const location = "test/adolphus.jpg"; + const bucketName = "nothingmuch-2"; + const fileData = await userApi.file.newStorageRequest(source, location, bucketName); + oneBspFileData = fileData; + }); + + it("Only one BSP confirms it", async () => { + await userApi.wait.bspVolunteer(1); + await userApi.wait.bspStored(1); + }); + + it("BSP correctly responds to challenge with new forest root", async () => { + // Advance to two challenge periods ahead for first BSP. + // This is because in the odd case that we're exactly on the next challenge tick right now, + // there is a race condition chance where the BSP will send the submit proof extrinsic in the + // next block, since the Forest write lock is released as a consequence of the confirm storing + // extrinsic. So we advance two challenge periods ahead to be sure. + + // First we get the last tick for which the BSP submitted a proof. + const lastTickResult = await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof( + ShConsts.DUMMY_BSP_ID + ); + assert(lastTickResult.isOk); + const lastTickBspSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate two challenge ticks ahead. + const nextChallengeTick = lastTickBspSubmittedProof + 2 * challengePeriod; + // Finally, advance two challenge ticks ahead. + await userApi.advanceToBlock(nextChallengeTick); + + // Wait for BSP to submit proof. 
+ await sleep(1000); + + // There should be at least one pending submit proof transaction. + const submitProofsPending = await userApi.assert.extrinsicPresent({ + module: "proofsDealer", + method: "submitProof", + checkTxPool: true + }); + assert(submitProofsPending.length > 0); + + // Seal block and check that the transaction was successful. + await userApi.sealBlock(); + + // Assert for the event of the proof successfully submitted and verified. + const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual( + proofAcceptedEvents.length, + submitProofsPending.length, + "All pending submit proof transactions should have been successful" + ); + }); + + it("Resume BSPs, and they shouldn't volunteer for the expired storage request", async () => { + // Advance a number of blocks up to when the storage request times out for sure. + const storageRequestTtl = Number(userApi.consts.fileSystem.storageRequestTtl); + const currentBlock = await userApi.rpc.chain.getBlock(); + const currentBlockNumber = currentBlock.block.header.number.toNumber(); + await userApi.advanceToBlock(currentBlockNumber + storageRequestTtl, { + waitForBspProofs: [ShConsts.DUMMY_BSP_ID] + }); + + // Resume BSP-Two and BSP-Three. + await userApi.docker.resumeBspContainer({ containerName: "sh-bsp-two" }); + await userApi.docker.resumeBspContainer({ containerName: "sh-bsp-three" }); + + // Wait for BSPs to resync. + await sleep(1000); + + // There shouldn't be any pending volunteer transactions. + await assert.rejects( + async () => { + await userApi.assert.extrinsicPresent({ + module: "fileSystem", + method: "bspVolunteer", + checkTxPool: true + }); + }, + /No matching extrinsic found for fileSystem\.bspVolunteer/, + "There should be no pending volunteer transactions" + ); + }); + + it("BSP-Two still correctly responds to challenges with same forest root", async () => { + // Advance some blocks to allow the BSP to process the challenges and submit proofs. 
+ for (let i = 0; i < 20; i++) { + await userApi.sealBlock(); + await sleep(500); + } + + // Advance to next challenge tick for BSP-Two. + // First we get the last tick for which the BSP submitted a proof. + const lastTickResult = + await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(BSP_TWO_ID); + assert(lastTickResult.isOk); + const lastTickBspTwoSubmittedProof = lastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const challengePeriodResult = + await userApi.call.proofsDealerApi.getChallengePeriod(BSP_TWO_ID); + assert(challengePeriodResult.isOk); + const challengePeriod = challengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + const nextChallengeTick = lastTickBspTwoSubmittedProof + challengePeriod; + // Finally, advance to the next challenge tick. + await userApi.advanceToBlock(nextChallengeTick); + + // Wait for tasks to execute and for the BSPs to submit proofs. + await sleep(500); + + // There should be at least one pending submit proof transaction. + const submitProofsPending = await userApi.assert.extrinsicPresent({ + module: "proofsDealer", + method: "submitProof", + checkTxPool: true + }); + assert(submitProofsPending.length > 0); + + // Seal block and check that the transaction was successful. + await userApi.sealBlock(); + + // Assert for the event of the proof successfully submitted and verified. + const proofAcceptedEvents = await userApi.assert.eventMany("proofsDealer", "ProofAccepted"); + strictEqual( + proofAcceptedEvents.length, + submitProofsPending.length, + "All pending submit proof transactions should have been successful" + ); + }); + + it( + "Custom challenge is added", + { skip: "Not implemented yet. All BSPs have the same files." }, + async () => { + await it("Custom challenge is included in checkpoint challenge round", async () => { + // TODO: Send transaction for custom challenge with new file key. 
+ // TODO: Advance until next checkpoint challenge block. + // TODO: Check that custom challenge was included in checkpoint challenge round. + }); + + await it("BSP that has it responds to custom challenge with proof of inclusion", async () => { + // TODO: Advance until next challenge for BSP. + // TODO: Build block with proof submission. + // TODO: Check that proof submission was successful, including the custom challenge. + }); + + await it("BSPs who don't have it respond non-inclusion proof", async () => { + // TODO: Advance until next challenge for BSP-Two and BSP-Three. + // TODO: Build block with proof submission. + // TODO: Check that proof submission was successful, with proof of non-inclusion. + }); + } + ); + + it("File is deleted by user", async () => { + // Get the root of the BSP that has the file before deletion. + const bspMetadata = await userApi.query.providers.backupStorageProviders( + ShConsts.DUMMY_BSP_ID + ); + assert(bspMetadata, "BSP metadata should exist"); + assert(bspMetadata.isSome, "BSP metadata should be Some"); + const bspMetadataBlob = bspMetadata.unwrap(); + rootBeforeDeletion = bspMetadataBlob.root.toHex(); + // Make sure it matches the one of the actual merkle forest. + const actualRoot = await bspApi.rpc.storagehubclient.getForestRoot(null); + strictEqual( + rootBeforeDeletion, + actualRoot.toHex(), + "The root of the BSP should match the actual merkle forest root." + ); + + // User sends file deletion request. + await userApi.sealBlock( + userApi.tx.fileSystem.deleteFile( + oneBspFileData.bucketId, + oneBspFileData.fileKey, + oneBspFileData.location, + oneBspFileData.fileSize, + oneBspFileData.fingerprint, + null + ), + shUser + ); + + // Check for a file deletion request event. + await userApi.assert.eventPresent("fileSystem", "FileDeletionRequest"); + + // Advance until the deletion request expires so that it can be processed. 
+ const deletionRequestTtl = Number(userApi.consts.fileSystem.pendingFileDeletionRequestTtl); + const currentBlock = await userApi.rpc.chain.getBlock(); + const currentBlockNumber = currentBlock.block.header.number.toNumber(); + await userApi.advanceToBlock(currentBlockNumber + deletionRequestTtl, { + waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] + }); + + // Check for a file deletion request event. + await userApi.assert.eventPresent("fileSystem", "PriorityChallengeForFileDeletionQueued"); + }); + + it("Priority challenge is included in checkpoint challenge round", async () => { + // Advance to next checkpoint challenge block. + const checkpointChallengePeriod = Number( + userApi.consts.proofsDealer.checkpointChallengePeriod + ); + const lastCheckpointChallengeTick = Number( + await userApi.call.proofsDealerApi.getLastCheckpointChallengeTick() + ); + const nextCheckpointChallengeBlock = lastCheckpointChallengeTick + checkpointChallengePeriod; + await userApi.advanceToBlock(nextCheckpointChallengeBlock, { + waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] + }); + + // Check that the event for the priority challenge is emitted. + const newCheckpointChallengesEvent = await userApi.assert.eventPresent( + "proofsDealer", + "NewCheckpointChallenge" + ); + + // Check that the file key is in the included checkpoint challenges. 
+ const newCheckpointChallengesEventDataBlob = + userApi.events.proofsDealer.NewCheckpointChallenge.is(newCheckpointChallengesEvent.event) && + newCheckpointChallengesEvent.event.data; + assert(newCheckpointChallengesEventDataBlob, "Event doesn't match Type"); + let containsFileKey = false; + for (const checkpointChallenge of newCheckpointChallengesEventDataBlob.challenges) { + if (checkpointChallenge[0].toHuman() === oneBspFileData.fileKey) { + containsFileKey = true; + break; + } + } + assert(containsFileKey, "The file key should be included in the checkpoint challenge."); + }); + + it("BSP that has the file responds with correct proof including the file key, and BSP that doesn't have the file responds with correct proof non-including the file key", async () => { + // Check who has a challenge tick coming up first: the BSP that has the file or BSP-Two who doesn't have it. + // Whoever has the challenge tick first, we check that they submitted a proof successfully first. + const currentTick = (await userApi.call.proofsDealerApi.getCurrentTick()).toNumber(); + + // Calculate next challenge tick for the BSP that has the file. + // We first get the last tick for which the BSP submitted a proof. + const dummyBspLastTickResult = + await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(ShConsts.DUMMY_BSP_ID); + assert(dummyBspLastTickResult.isOk); + const lastTickBspSubmittedProof = dummyBspLastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const dummyBspChallengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.DUMMY_BSP_ID + ); + assert(dummyBspChallengePeriodResult.isOk); + const dummyBspChallengePeriod = dummyBspChallengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + let dummyBspNextChallengeTick = lastTickBspSubmittedProof + dummyBspChallengePeriod; + // If it is exactly equal to the current tick, we take the next challenge tick. 
+ if (dummyBspNextChallengeTick === currentTick) { + dummyBspNextChallengeTick += dummyBspChallengePeriod; + } + + // Calculate next challenge tick for BSP-Two. + // We first get the last tick for which the BSP submitted a proof. + const bspTwoLastTickResult = + await userApi.call.proofsDealerApi.getLastTickProviderSubmittedProof(ShConsts.BSP_TWO_ID); + assert(bspTwoLastTickResult.isOk); + const bspTwoLastTickBspTwoSubmittedProof = bspTwoLastTickResult.asOk.toNumber(); + // Then we get the challenge period for the BSP. + const bspTwoChallengePeriodResult = await userApi.call.proofsDealerApi.getChallengePeriod( + ShConsts.BSP_TWO_ID + ); + assert(bspTwoChallengePeriodResult.isOk); + const bspTwoChallengePeriod = bspTwoChallengePeriodResult.asOk.toNumber(); + // Then we calculate the next challenge tick. + let bspTwoNextChallengeTick = bspTwoLastTickBspTwoSubmittedProof + bspTwoChallengePeriod; + // If it is exactly equal to the current tick, we take the next challenge tick. + if (bspTwoNextChallengeTick === currentTick) { + bspTwoNextChallengeTick += bspTwoChallengePeriod; + } + + const firstBspToRespond = + dummyBspNextChallengeTick < bspTwoNextChallengeTick + ? ShConsts.DUMMY_BSP_ID + : ShConsts.BSP_TWO_ID; + const secondBspToRespond = + dummyBspNextChallengeTick < bspTwoNextChallengeTick + ? ShConsts.BSP_TWO_ID + : ShConsts.DUMMY_BSP_ID; + const firstBlockToAdvance = + dummyBspNextChallengeTick < bspTwoNextChallengeTick + ? dummyBspNextChallengeTick + : bspTwoNextChallengeTick; + const secondBlockToAdvance = + dummyBspNextChallengeTick < bspTwoNextChallengeTick + ? bspTwoNextChallengeTick + : dummyBspNextChallengeTick; + + // Advance to first next challenge block. + await userApi.advanceToBlock(firstBlockToAdvance, { + waitForBspProofs: [DUMMY_BSP_ID, BSP_TWO_ID, BSP_THREE_ID] + }); + + // Wait for BSP to generate the proof and advance one more block. + await sleep(500); + await userApi.sealBlock(); + + // Check for a ProofAccepted event. 
+ const firstChallengeBlockEvents = await userApi.assert.eventMany( + "proofsDealer", + "ProofAccepted" + ); + + // Check that at least one of the `ProofAccepted` events belongs to `firstBspToRespond`. + const atLeastOneEventBelongsToFirstBsp = firstChallengeBlockEvents.some((eventRecord) => { + const firstChallengeBlockEventDataBlob = + userApi.events.proofsDealer.ProofAccepted.is(eventRecord.event) && eventRecord.event.data; + assert(firstChallengeBlockEventDataBlob, "Event doesn't match Type"); + + return firstChallengeBlockEventDataBlob.provider.toString() === firstBspToRespond; + }); + assert(atLeastOneEventBelongsToFirstBsp, "No ProofAccepted event belongs to the first BSP"); + + // If the first BSP is the one removing the file, assert for the event of the mutations successfully applied in the runtime. + if (firstBspToRespond === ShConsts.DUMMY_BSP_ID) { + const mutationsAppliedEvents = await userApi.assert.eventMany( + "proofsDealer", + "MutationsApplied" + ); + strictEqual( + mutationsAppliedEvents.length, + 1, + "There should be one mutations applied event" + ); + + // Check that the mutations applied event belongs to the dummy BSP. + const mutationsAppliedEventDataBlob = + userApi.events.proofsDealer.MutationsApplied.is(mutationsAppliedEvents[0].event) && + mutationsAppliedEvents[0].event.data; + assert(mutationsAppliedEventDataBlob, "Event doesn't match Type"); + strictEqual( + mutationsAppliedEventDataBlob.provider.toString(), + ShConsts.DUMMY_BSP_ID, + "The mutations applied event should belong to the dummy BSP" + ); + } + + // Advance to second next challenge block. + await userApi.advanceToBlock(secondBlockToAdvance, { + waitForBspProofs: [ShConsts.DUMMY_BSP_ID, ShConsts.BSP_TWO_ID, ShConsts.BSP_THREE_ID] + }); + + // Wait for BSP to generate the proof and advance one more block. + await sleep(500); + const secondChallengeBlockResult = await userApi.sealBlock(); + + // Check for a ProofAccepted event. 
+ const secondChallengeBlockEvents = await userApi.assert.eventMany( + "proofsDealer", + "ProofAccepted", + secondChallengeBlockResult.events + ); + + // Check that at least one of the `ProofAccepted` events belongs to `secondBspToRespond`. + const atLeastOneEventBelongsToSecondBsp = secondChallengeBlockEvents.some((eventRecord) => { + const secondChallengeBlockEventDataBlob = + userApi.events.proofsDealer.ProofAccepted.is(eventRecord.event) && eventRecord.event.data; + assert(secondChallengeBlockEventDataBlob, "Event doesn't match Type"); + + return secondChallengeBlockEventDataBlob.provider.toString() === secondBspToRespond; + }); + assert(atLeastOneEventBelongsToSecondBsp, "No ProofAccepted event belongs to the second BSP"); + + // If the second BSP is the one removing the file, assert for the event of the mutations successfully applied in the runtime. + if (secondBspToRespond === ShConsts.DUMMY_BSP_ID) { + const mutationsAppliedEvents = await userApi.assert.eventMany( + "proofsDealer", + "MutationsApplied" + ); + strictEqual( + mutationsAppliedEvents.length, + 1, + "There should be one mutations applied event" + ); + + // Check that the mutations applied event belongs to the dummy BSP. 
+ const mutationsAppliedEventDataBlob = + userApi.events.proofsDealer.MutationsApplied.is(mutationsAppliedEvents[0].event) && + mutationsAppliedEvents[0].event.data; + assert(mutationsAppliedEventDataBlob, "Event doesn't match Type"); + strictEqual( + mutationsAppliedEventDataBlob.provider.toString(), + ShConsts.DUMMY_BSP_ID, + "The mutations applied event should belong to the dummy BSP" + ); + } + }); + + it("File is removed from Forest by BSP", async () => { + // Make sure the root was updated in the runtime + const bspMetadataAfterDeletion = await userApi.query.providers.backupStorageProviders( + ShConsts.DUMMY_BSP_ID + ); + assert(bspMetadataAfterDeletion, "BSP metadata should exist"); + assert(bspMetadataAfterDeletion.isSome, "BSP metadata should be Some"); + const bspMetadataAfterDeletionBlob = bspMetadataAfterDeletion.unwrap(); + assert( + bspMetadataAfterDeletionBlob.root.toHex() !== rootBeforeDeletion, + "The root should have been updated on chain" + ); + + // Check that the runtime root matches the forest root of the BSP. + const forestRoot = await bspApi.rpc.storagehubclient.getForestRoot(null); + strictEqual( + bspMetadataAfterDeletionBlob.root.toString(), + forestRoot.toString(), + "The runtime root should match the forest root of the BSP" + ); + }); + + it( + "File mutation is finalised and BSP removes it from File Storage", + { skip: "Not implemented yet." }, + async () => { + // TODO: Finalise block with mutations. + // TODO: Check that file is removed from File Storage. Need a RPC method for this. 
+ } + ); + } +); diff --git a/test/util/bspNet/consts.ts b/test/util/bspNet/consts.ts index b424fb528..1a5494633 100644 --- a/test/util/bspNet/consts.ts +++ b/test/util/bspNet/consts.ts @@ -1,100 +1,100 @@ -export const NODE_INFOS = { - user: { - containerName: "docker-sh-user-1", - port: 9888, - p2pPort: 30444, - AddressId: "5CombC1j5ZmdNMEpWYpeEWcKPPYcKsC1WgMPgzGLU72SLa4o", - expectedPeerId: "12D3KooWMvbhtYjbhgjoDzbnf71SFznJAKBBkSGYEUtnpES1y9tM" - }, - bsp: { - containerName: "docker-sh-bsp-1", - port: 9666, - p2pPort: 30350, - AddressId: "5FHSHEFWHVGDnyiw66DoRUpLyh5RouWkXo9GT1Sjk8qw7MAg", - expectedPeerId: "12D3KooWNEZ8PGNydcdXTYy1SPHvkP9mbxdtTqGGFVrhorDzeTfU" - }, - msp: { - containerName: "docker-sh-msp-1", - port: 9777, - p2pPort: 30555, - AddressId: "5E1rPv1M2mheg6pM57QqU7TZ6eCwbVpiYfyYkrugpBdEzDiU", - nodeKey: "0x12b3b1c917dda506f152816aad4685eefa54fe57792165b31141ac893610b314", - expectedPeerId: "12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV" - }, - collator: { - containerName: "docker-sh-collator-1", - port: 9955, - p2pPort: 30333, - AddressId: "5C8NC6YuAivp3knYC58Taycx2scQoDcDd3MCEEgyw36Gh1R4" - }, - toxiproxy: { - containerName: "toxiproxy", - port: 8474 - } -} as const; - -export const TEST_ARTEFACTS = { - "res/adolphus.jpg": { - size: 416400n, - checksum: "739fb97f7c2b8e7f192b608722a60dc67ee0797c85ff1ea849c41333a40194f2", - fingerprint: "0x34eb5f637e05fc18f857ccb013250076534192189894d174ee3aa6d3525f6970" - }, - "res/smile.jpg": { - size: 633160n, - checksum: "12094d47c2fdf1a984c0b950c2c0ede733722bea3bee22fef312e017383b410c", - fingerprint: "0x535dd863026735ffe0919cc0fc3d8e5da45b9203f01fbf014dbe98005bd8d2fe" - }, - "res/whatsup.jpg": { - size: 216211n, - checksum: "585ed00a96349499cbc8a3882b0bd6f6aec5ce3b7dbee2d8b3d33f3c09a38ec6", - fingerprint: "0x2b83b972e63f52abc0d4146c4aee1f1ec8aa8e274d2ad1b626529446da93736c" - }, - "res/cloud.jpg": { - size: 346248n, - checksum: "8e06811883fc3e5e6a0331825b365e4bd7b83ba7683fa9da17e4daea25d7a9f5", - 
fingerprint: "0x5559299bc73782b5ad7e9dd57ba01bb06b8c44f5cab8d7afab5e1db2ea93da4c" - }, - "res/empty-file": { - size: 0n, - checksum: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - fingerprint: "0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" - }, - "res/half-chunk-file": { - size: 512n, - checksum: "c7b3b7dd37d7e0947b04550613692950c72b0551e038a01ab8679a3ea5631104", - fingerprint: "0xade3ca4ff2151a2533e816eb9402ae17e21160c6c52b1855ecff29faea8880b5" - }, - "res/one-chunk-file": { - size: 1024n, - checksum: "1f006b6a97eeb0dfd8cbc91ed815e6a429dcfdc2f3f32f2ac3e7977e70df4988", - fingerprint: "0x0904317e4977ad6f872cd9672d2733da9a628fda86ee9add68623a66918cbd8c" - } -} as const; - -export const DUMMY_MSP_ID = "0x0000000000000000000000000000000000000000000000000000000000000300"; -export const VALUE_PROP = "0x0000000000000000000000000000000000000000000000000000000000000770"; - -export const DUMMY_BSP_ID = TEST_ARTEFACTS["res/whatsup.jpg"].fingerprint; -export const BSP_TWO_ID = "0x0000000000000000000000000000000000000000000000000000000000000002"; -export const BSP_THREE_ID = "0x0000000000000000000000000000000000000000000000000000000000000003"; -export const BSP_DOWN_ID = "0xf000000000000000000000000000000000000000000000000000000000000000"; - -export const CAPACITY_5 = 1024n * 1024n * 5n; // 5 MB -export const CAPACITY_256 = 1024n * 1024n * 256n; // 256 MB -export const CAPACITY_512 = 1024n * 1024n * 512n; // 512 MB -export const CAPACITY_1024 = 1024n * 1024n * 1024n; // 1024 MB - -export const CAPACITY = { - 5: CAPACITY_5, - 256: CAPACITY_256, - 512: CAPACITY_512, - 1024: CAPACITY_1024 -} as const; - -export const U32_MAX = (BigInt(1) << BigInt(32)) - BigInt(1); -export const MAX_STORAGE_CAPACITY = CAPACITY[1024] * 4n - 1n; - -export const REMARK_WEIGHT_REF_TIME = 127_121_340; -export const REMARK_WEIGHT_PROOF_SIZE = 142; -export const TRANSFER_WEIGHT_REF_TIME = 297_297_000; -export const TRANSFER_WEIGHT_PROOF_SIZE = 308; +export 
const NODE_INFOS = { + user: { + containerName: "docker-sh-user-1", + port: 9888, + p2pPort: 30444, + AddressId: "5CombC1j5ZmdNMEpWYpeEWcKPPYcKsC1WgMPgzGLU72SLa4o", + expectedPeerId: "12D3KooWMvbhtYjbhgjoDzbnf71SFznJAKBBkSGYEUtnpES1y9tM" + }, + bsp: { + containerName: "docker-sh-bsp-1", + port: 9666, + p2pPort: 30350, + AddressId: "5FHSHEFWHVGDnyiw66DoRUpLyh5RouWkXo9GT1Sjk8qw7MAg", + expectedPeerId: "12D3KooWNEZ8PGNydcdXTYy1SPHvkP9mbxdtTqGGFVrhorDzeTfU" + }, + msp: { + containerName: "docker-sh-msp-1", + port: 9777, + p2pPort: 30555, + AddressId: "5E1rPv1M2mheg6pM57QqU7TZ6eCwbVpiYfyYkrugpBdEzDiU", + nodeKey: "0x12b3b1c917dda506f152816aad4685eefa54fe57792165b31141ac893610b314", + expectedPeerId: "12D3KooWSUvz8QM5X4tfAaSLErAZjR2puojo16pULBHyqTMGKtNV" + }, + collator: { + containerName: "docker-sh-collator-1", + port: 9955, + p2pPort: 30333, + AddressId: "5C8NC6YuAivp3knYC58Taycx2scQoDcDd3MCEEgyw36Gh1R4" + }, + toxiproxy: { + containerName: "toxiproxy", + port: 8474 + } +} as const; + +export const TEST_ARTEFACTS = { + "res/adolphus.jpg": { + size: 416400n, + checksum: "739fb97f7c2b8e7f192b608722a60dc67ee0797c85ff1ea849c41333a40194f2", + fingerprint: "0x34eb5f637e05fc18f857ccb013250076534192189894d174ee3aa6d3525f6970" + }, + "res/smile.jpg": { + size: 633160n, + checksum: "12094d47c2fdf1a984c0b950c2c0ede733722bea3bee22fef312e017383b410c", + fingerprint: "0x535dd863026735ffe0919cc0fc3d8e5da45b9203f01fbf014dbe98005bd8d2fe" + }, + "res/whatsup.jpg": { + size: 216211n, + checksum: "585ed00a96349499cbc8a3882b0bd6f6aec5ce3b7dbee2d8b3d33f3c09a38ec6", + fingerprint: "0x2b83b972e63f52abc0d4146c4aee1f1ec8aa8e274d2ad1b626529446da93736c" + }, + "res/cloud.jpg": { + size: 346248n, + checksum: "8e06811883fc3e5e6a0331825b365e4bd7b83ba7683fa9da17e4daea25d7a9f5", + fingerprint: "0x5559299bc73782b5ad7e9dd57ba01bb06b8c44f5cab8d7afab5e1db2ea93da4c" + }, + "res/empty-file": { + size: 0n, + checksum: "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + fingerprint: 
"0x03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + }, + "res/half-chunk-file": { + size: 512n, + checksum: "c7b3b7dd37d7e0947b04550613692950c72b0551e038a01ab8679a3ea5631104", + fingerprint: "0xade3ca4ff2151a2533e816eb9402ae17e21160c6c52b1855ecff29faea8880b5" + }, + "res/one-chunk-file": { + size: 1024n, + checksum: "1f006b6a97eeb0dfd8cbc91ed815e6a429dcfdc2f3f32f2ac3e7977e70df4988", + fingerprint: "0x0904317e4977ad6f872cd9672d2733da9a628fda86ee9add68623a66918cbd8c" + } +} as const; + +export const DUMMY_MSP_ID = "0x0000000000000000000000000000000000000000000000000000000000000300"; +export const VALUE_PROP = "0x0000000000000000000000000000000000000000000000000000000000000770"; + +export const DUMMY_BSP_ID = TEST_ARTEFACTS["res/whatsup.jpg"].fingerprint; +export const BSP_TWO_ID = "0x0000000000000000000000000000000000000000000000000000000000000002"; +export const BSP_THREE_ID = "0x0000000000000000000000000000000000000000000000000000000000000003"; +export const BSP_DOWN_ID = "0xf000000000000000000000000000000000000000000000000000000000000000"; + +export const CAPACITY_5 = 1024n * 1024n * 5n; // 5 MB +export const CAPACITY_256 = 1024n * 1024n * 256n; // 256 MB +export const CAPACITY_512 = 1024n * 1024n * 512n; // 512 MB +export const CAPACITY_1024 = 1024n * 1024n * 1024n; // 1024 MB + +export const CAPACITY = { + 5: CAPACITY_5, + 256: CAPACITY_256, + 512: CAPACITY_512, + 1024: CAPACITY_1024 +} as const; + +export const U32_MAX = (BigInt(1) << BigInt(32)) - BigInt(1); +export const MAX_STORAGE_CAPACITY = CAPACITY[1024] * 4n - 1n; + +export const REMARK_WEIGHT_REF_TIME = 127_121_340; +export const REMARK_WEIGHT_PROOF_SIZE = 142; +export const TRANSFER_WEIGHT_REF_TIME = 297_297_000; +export const TRANSFER_WEIGHT_PROOF_SIZE = 308; diff --git a/test/util/bspNet/docker.ts b/test/util/bspNet/docker.ts index 226021a37..1dfd99db6 100644 --- a/test/util/bspNet/docker.ts +++ b/test/util/bspNet/docker.ts @@ -1,285 +1,285 @@ -import Docker from "dockerode"; 
-import { execSync } from "node:child_process"; -import path from "node:path"; -import { DOCKER_IMAGE } from "../constants"; -import { sendCustomRpc } from "../rpc"; -import * as NodeBspNet from "./node"; -import { BspNetTestApi } from "./test-api"; -import invariant from "tiny-invariant"; -import { PassThrough, type Readable } from "node:stream"; - -export const checkBspForFile = async (filePath: string) => { - const containerId = "docker-sh-bsp-1"; - const loc = path.join("/storage", filePath); - - for (let i = 0; i < 10; i++) { - try { - // TODO: Replace with dockerode - execSync(`docker exec ${containerId} ls ${loc}`, { stdio: "ignore" }); - return; - } catch { - await new Promise((resolve) => setTimeout(resolve, 1000)); - } - } - throw `File not found: ${loc} in ${containerId}`; -}; - -export const checkFileChecksum = async (filePath: string) => { - const containerId = "docker-sh-bsp-1"; - const loc = path.join("/storage", filePath); - const output = execSync(`docker exec ${containerId} sha256sum ${loc}`); - return output.toString().split(" ")[0]; -}; - -export const showContainers = () => { - try { - // TODO: Replace with dockerode - execSync("docker ps -a", { stdio: "inherit" }); - } catch (e) { - console.log(e); - console.log("Error displaying docker containers"); - } -}; - -export const addBspContainer = async (options?: { - name?: string; - connectToPeer?: boolean; // unused - additionalArgs?: string[]; -}) => { - const docker = new Docker(); - const existingBsps = ( - await docker.listContainers({ - filters: { ancestor: [DOCKER_IMAGE] } - }) - ) - .flatMap(({ Command }) => Command) - .filter((cmd) => cmd.includes("--provider-type=bsp")); - - const bspNum = existingBsps.length; - - invariant(bspNum > 0, "No existing BSP containers"); - - const p2pPort = 30350 + bspNum; - const rpcPort = 9888 + bspNum * 7; - const containerName = options?.name || `docker-sh-bsp-${bspNum + 1}`; - // get bootnode from docker args - - const { Args } = await 
docker.getContainer("docker-sh-user-1").inspect(); - - const bootNodeArg = Args.find((arg) => arg.includes("--bootnodes=")); - - invariant(bootNodeArg, "No bootnode found in docker args"); - - let keystorePath: string; - const keystoreArg = Args.find((arg) => arg.includes("--keystore-path=")); - if (keystoreArg) { - keystorePath = keystoreArg.split("=")[1]; - } else { - keystorePath = "/keystore"; - } - - const container = await docker.createContainer({ - Image: DOCKER_IMAGE, - name: containerName, - platform: "linux/amd64", - NetworkingConfig: { - EndpointsConfig: { - docker_default: {} - } - }, - HostConfig: { - PortBindings: { - "9944/tcp": [{ HostPort: rpcPort.toString() }], - [`${p2pPort}/tcp`]: [{ HostPort: p2pPort.toString() }] - }, - Binds: [`${process.cwd()}/../docker/dev-keystores:${keystorePath}:rw`] - }, - Cmd: [ - "--dev", - "--sealing=manual", - "--provider", - "--provider-type=bsp", - `--name=${containerName}`, - "--no-hardware-benchmarks", - "--unsafe-rpc-external", - "--rpc-methods=unsafe", - "--rpc-cors=all", - `--port=${p2pPort}`, - "--base-path=/data", - bootNodeArg, - ...(options?.additionalArgs || []) - ] - }); - await container.start(); - - let peerId: string | undefined; - for (let i = 0; i < 20; i++) { - try { - peerId = await sendCustomRpc(`http://127.0.0.1:${rpcPort}`, "system_localPeerId"); - break; - } catch { - await new Promise((resolve) => setTimeout(resolve, 500)); - } - } - - invariant(peerId, "Failed to connect after 10s. 
Exiting..."); - - const api = await BspNetTestApi.create(`ws://127.0.0.1:${rpcPort}`); - - const chainName = api.consts.system.version.specName.toString(); - - invariant( - chainName === "storage-hub-runtime", - `Error connecting to BSP via api ${containerName}` - ); - - await api.disconnect(); - - console.log( - `▶️ BSP container started with name: ${containerName}, rpc port: ${rpcPort}, p2p port: ${p2pPort}, peerId: ${peerId}` - ); - - return { containerName, rpcPort, p2pPort, peerId }; -}; - -// Make this a rusty style OO function with api contexts -export const pauseBspContainer = async (containerName: string) => { - const docker = new Docker(); - const container = docker.getContainer(containerName); - await container.pause(); -}; - -export const stopBspContainer = async (containerName: string) => { - const docker = new Docker(); - const containersToStop = await docker.listContainers({ - filters: { name: [containerName] } - }); - - await docker.getContainer(containersToStop[0].Id).stop(); - await docker.getContainer(containersToStop[0].Id).remove({ force: true }); -}; - -export const startBspContainer = async (options: { - containerName: string; -}) => { - const docker = new Docker(); - const container = docker.getContainer(options.containerName); - await container.start(); -}; - -export const restartBspContainer = async (options: { - containerName: string; -}) => { - const docker = new Docker(); - const container = docker.getContainer(options.containerName); - await container.restart(); -}; - -export const resumeBspContainer = async (options: { - containerName: string; -}) => { - const docker = new Docker(); - const container = docker.getContainer(options.containerName); - await container.unpause(); -}; - -export const dropAllTransactionsGlobally = async () => { - const docker = new Docker(); - - const containersToStop = await docker.listContainers({ - filters: { ancestor: ["storage-hub:local"] } - }); - - for (const container of containersToStop) { - const 
publicPort = container.Ports.filter( - ({ IP, PrivatePort }) => IP === "0.0.0.0" && PrivatePort === 9944 - )[0].PublicPort; - const endpoint: `ws://${string}` = `ws://127.0.0.1:${publicPort}`; - await using api = await BspNetTestApi.connect(endpoint); - try { - await NodeBspNet.dropTransaction(api); - } catch { - console.log(`Error dropping txn from ${container.Id}, continuing...`); - } - } -}; - -export const dropTransactionGlobally = async (options: { module: string; method: string }) => { - const docker = new Docker(); - - const containersToStop = await docker.listContainers({ - filters: { ancestor: ["storage-hub:local"] } - }); - - for (const container of containersToStop) { - const publicPort = container.Ports.filter( - ({ IP, PrivatePort }) => IP === "0.0.0.0" && PrivatePort === 9944 - )[0].PublicPort; - const endpoint: `ws://${string}` = `ws://127.0.0.1:${publicPort}`; - await using api = await BspNetTestApi.connect(endpoint); - await NodeBspNet.dropTransaction(api, { module: options.module, method: options.method }); - } -}; - -export const waitForLog = async (options: { - searchString: string; - containerName: string; - timeout?: number; -}): Promise => { - return new Promise((resolve, reject) => { - const docker = new Docker(); - const container = docker.getContainer(options.containerName); - - container.logs( - { follow: true, stdout: true, stderr: true, tail: 0, timestamps: false }, - (err, stream) => { - if (err) { - return reject(err); - } - - if (stream === undefined) { - return reject(new Error("No stream returned.")); - } - - const stdout = new PassThrough(); - const stderr = new PassThrough(); - - docker.modem.demuxStream(stream, stdout, stderr); - - let timeoutHandle: NodeJS.Timeout | undefined; - - const cleanup = () => { - (stream as Readable).destroy(); - stdout.destroy(); - stderr.destroy(); - if (timeoutHandle) { - clearTimeout(timeoutHandle); - } - }; - - const onData = (chunk: Buffer) => { - const log = chunk.toString("utf8"); - if 
(log.includes(options.searchString)) { - cleanup(); - resolve(log); - } - }; - - stdout.on("data", onData); - stderr.on("data", onData); - - stream.on("error", (err) => { - cleanup(); - reject(err); - }); - - if (options.timeout) { - timeoutHandle = setTimeout(() => { - cleanup(); - reject(new Error(`Timeout of ${options.timeout}ms exceeded while waiting for log.`)); - }, options.timeout); - } - } - ); - }); -}; +import Docker from "dockerode"; +import { execSync } from "node:child_process"; +import path from "node:path"; +import { DOCKER_IMAGE } from "../constants"; +import { sendCustomRpc } from "../rpc"; +import * as NodeBspNet from "./node"; +import { BspNetTestApi } from "./test-api"; +import invariant from "tiny-invariant"; +import { PassThrough, type Readable } from "node:stream"; + +export const checkBspForFile = async (filePath: string) => { + const containerId = "docker-sh-bsp-1"; + const loc = path.join("/storage", filePath); + + for (let i = 0; i < 10; i++) { + try { + // TODO: Replace with dockerode + execSync(`docker exec ${containerId} ls ${loc}`, { stdio: "ignore" }); + return; + } catch { + await new Promise((resolve) => setTimeout(resolve, 1000)); + } + } + throw `File not found: ${loc} in ${containerId}`; +}; + +export const checkFileChecksum = async (filePath: string) => { + const containerId = "docker-sh-bsp-1"; + const loc = path.join("/storage", filePath); + const output = execSync(`docker exec ${containerId} sha256sum ${loc}`); + return output.toString().split(" ")[0]; +}; + +export const showContainers = () => { + try { + // TODO: Replace with dockerode + execSync("docker ps -a", { stdio: "inherit" }); + } catch (e) { + console.log(e); + console.log("Error displaying docker containers"); + } +}; + +export const addBspContainer = async (options?: { + name?: string; + connectToPeer?: boolean; // unused + additionalArgs?: string[]; +}) => { + const docker = new Docker(); + const existingBsps = ( + await docker.listContainers({ + filters: { 
ancestor: [DOCKER_IMAGE] } + }) + ) + .flatMap(({ Command }) => Command) + .filter((cmd) => cmd.includes("--provider-type=bsp")); + + const bspNum = existingBsps.length; + + invariant(bspNum > 0, "No existing BSP containers"); + + const p2pPort = 30350 + bspNum; + const rpcPort = 9888 + bspNum * 7; + const containerName = options?.name || `docker-sh-bsp-${bspNum + 1}`; + // get bootnode from docker args + + const { Args } = await docker.getContainer("docker-sh-user-1").inspect(); + + const bootNodeArg = Args.find((arg) => arg.includes("--bootnodes=")); + + invariant(bootNodeArg, "No bootnode found in docker args"); + + let keystorePath: string; + const keystoreArg = Args.find((arg) => arg.includes("--keystore-path=")); + if (keystoreArg) { + keystorePath = keystoreArg.split("=")[1]; + } else { + keystorePath = "/keystore"; + } + + const container = await docker.createContainer({ + Image: DOCKER_IMAGE, + name: containerName, + platform: "linux/amd64", + NetworkingConfig: { + EndpointsConfig: { + docker_default: {} + } + }, + HostConfig: { + PortBindings: { + "9944/tcp": [{ HostPort: rpcPort.toString() }], + [`${p2pPort}/tcp`]: [{ HostPort: p2pPort.toString() }] + }, + Binds: [`${process.cwd()}/../docker/dev-keystores:${keystorePath}:rw`] + }, + Cmd: [ + "--dev", + "--sealing=manual", + "--provider", + "--provider-type=bsp", + `--name=${containerName}`, + "--no-hardware-benchmarks", + "--unsafe-rpc-external", + "--rpc-methods=unsafe", + "--rpc-cors=all", + `--port=${p2pPort}`, + "--base-path=/data", + bootNodeArg, + ...(options?.additionalArgs || []) + ] + }); + await container.start(); + + let peerId: string | undefined; + for (let i = 0; i < 20; i++) { + try { + peerId = await sendCustomRpc(`http://127.0.0.1:${rpcPort}`, "system_localPeerId"); + break; + } catch { + await new Promise((resolve) => setTimeout(resolve, 500)); + } + } + + invariant(peerId, "Failed to connect after 10s. 
Exiting..."); + + const api = await BspNetTestApi.create(`ws://127.0.0.1:${rpcPort}`); + + const chainName = api.consts.system.version.specName.toString(); + + invariant( + chainName === "storage-hub-runtime", + `Error connecting to BSP via api ${containerName}` + ); + + await api.disconnect(); + + console.log( + `▶️ BSP container started with name: ${containerName}, rpc port: ${rpcPort}, p2p port: ${p2pPort}, peerId: ${peerId}` + ); + + return { containerName, rpcPort, p2pPort, peerId }; +}; + +// Make this a rusty style OO function with api contexts +export const pauseBspContainer = async (containerName: string) => { + const docker = new Docker(); + const container = docker.getContainer(containerName); + await container.pause(); +}; + +export const stopBspContainer = async (containerName: string) => { + const docker = new Docker(); + const containersToStop = await docker.listContainers({ + filters: { name: [containerName] } + }); + + await docker.getContainer(containersToStop[0].Id).stop(); + await docker.getContainer(containersToStop[0].Id).remove({ force: true }); +}; + +export const startBspContainer = async (options: { + containerName: string; +}) => { + const docker = new Docker(); + const container = docker.getContainer(options.containerName); + await container.start(); +}; + +export const restartBspContainer = async (options: { + containerName: string; +}) => { + const docker = new Docker(); + const container = docker.getContainer(options.containerName); + await container.restart(); +}; + +export const resumeBspContainer = async (options: { + containerName: string; +}) => { + const docker = new Docker(); + const container = docker.getContainer(options.containerName); + await container.unpause(); +}; + +export const dropAllTransactionsGlobally = async () => { + const docker = new Docker(); + + const containersToStop = await docker.listContainers({ + filters: { ancestor: ["storage-hub:local"] } + }); + + for (const container of containersToStop) { + const 
publicPort = container.Ports.filter( + ({ IP, PrivatePort }) => IP === "0.0.0.0" && PrivatePort === 9944 + )[0].PublicPort; + const endpoint: `ws://${string}` = `ws://127.0.0.1:${publicPort}`; + await using api = await BspNetTestApi.connect(endpoint); + try { + await NodeBspNet.dropTransaction(api); + } catch { + console.log(`Error dropping txn from ${container.Id}, continuing...`); + } + } +}; + +export const dropTransactionGlobally = async (options: { module: string; method: string }) => { + const docker = new Docker(); + + const containersToStop = await docker.listContainers({ + filters: { ancestor: ["storage-hub:local"] } + }); + + for (const container of containersToStop) { + const publicPort = container.Ports.filter( + ({ IP, PrivatePort }) => IP === "0.0.0.0" && PrivatePort === 9944 + )[0].PublicPort; + const endpoint: `ws://${string}` = `ws://127.0.0.1:${publicPort}`; + await using api = await BspNetTestApi.connect(endpoint); + await NodeBspNet.dropTransaction(api, { module: options.module, method: options.method }); + } +}; + +export const waitForLog = async (options: { + searchString: string; + containerName: string; + timeout?: number; +}): Promise => { + return new Promise((resolve, reject) => { + const docker = new Docker(); + const container = docker.getContainer(options.containerName); + + container.logs( + { follow: true, stdout: true, stderr: true, tail: 0, timestamps: false }, + (err, stream) => { + if (err) { + return reject(err); + } + + if (stream === undefined) { + return reject(new Error("No stream returned.")); + } + + const stdout = new PassThrough(); + const stderr = new PassThrough(); + + docker.modem.demuxStream(stream, stdout, stderr); + + let timeoutHandle: NodeJS.Timeout | undefined; + + const cleanup = () => { + (stream as Readable).destroy(); + stdout.destroy(); + stderr.destroy(); + if (timeoutHandle) { + clearTimeout(timeoutHandle); + } + }; + + const onData = (chunk: Buffer) => { + const log = chunk.toString("utf8"); + if 
(log.includes(options.searchString)) { + cleanup(); + resolve(log); + } + }; + + stdout.on("data", onData); + stderr.on("data", onData); + + stream.on("error", (err) => { + cleanup(); + reject(err); + }); + + if (options.timeout) { + timeoutHandle = setTimeout(() => { + cleanup(); + reject(new Error(`Timeout of ${options.timeout}ms exceeded while waiting for log.`)); + }, options.timeout); + } + } + ); + }); +}; diff --git a/test/util/bspNet/helpers.ts b/test/util/bspNet/helpers.ts index 8109a8bb0..cd3410abd 100644 --- a/test/util/bspNet/helpers.ts +++ b/test/util/bspNet/helpers.ts @@ -1,527 +1,527 @@ -import type { KeyringPair } from "@polkadot/keyring/types"; -import "@storagehub/api-augment"; -import { v2 as compose } from "docker-compose"; -import Docker from "dockerode"; -import * as child_process from "node:child_process"; -import { execSync } from "node:child_process"; -import crypto from "node:crypto"; -import path from "node:path"; -import * as util from "node:util"; -import { DOCKER_IMAGE, MILLIUNIT, UNIT } from "../constants.ts"; -import { - alice, - bspDownKey, - bspDownSeed, - bspKey, - bspThreeKey, - bspThreeSeed, - bspTwoKey, - bspTwoSeed, - shUser -} from "../pjsKeyring"; -import { addBspContainer, showContainers } from "./docker"; -import type { BspNetConfig, InitialisedMultiBspNetwork } from "./types"; -import { CAPACITY, MAX_STORAGE_CAPACITY } from "./consts"; -import * as ShConsts from "./consts.ts"; -import { BspNetTestApi, type EnrichedBspApi } from "./test-api.ts"; -import invariant from "tiny-invariant"; -import * as fs from "node:fs"; -import { parse, stringify } from "yaml"; -import { sealBlock } from "./block.ts"; -import type { ApiPromise } from "@polkadot/api"; - -const exec = util.promisify(child_process.exec); - -export const getContainerIp = async (containerName: string, verbose = false): Promise => { - const maxRetries = 60; - const sleepTime = 500; - - for (let i = 0; i < maxRetries; i++) { - verbose && console.log(`Waiting for 
${containerName} to launch...`); - - // TODO: Replace with dockerode command - try { - const { stdout } = await exec( - `docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${containerName}` - ); - return stdout.trim(); - } catch { - await new Promise((resolve) => setTimeout(resolve, sleepTime)); - } - } - // TODO: Replace with dockerode - execSync("docker ps -a", { stdio: "inherit" }); - try { - execSync("docker logs docker-sh-bsp-1", { stdio: "inherit" }); - execSync("docker logs docker-sh-user-1", { stdio: "inherit" }); - } catch (e) { - console.log(e); - } - console.log( - `Error fetching container IP for ${containerName} after ${ - (maxRetries * sleepTime) / 1000 - } seconds` - ); - showContainers(); - throw "Error fetching container IP"; -}; - -export const checkNodeAlive = async (url: string, verbose = false) => getContainerIp(url, verbose); -export const getContainerPeerId = async (url: string, verbose = false) => { - const maxRetries = 60; - const sleepTime = 500; - - const payload = { - id: "1", - jsonrpc: "2.0", - method: "system_localPeerId", - params: [] - }; - - for (let i = 0; i < maxRetries; i++) { - verbose && console.log(`Waiting for node at ${url} to launch...`); - - try { - const response = await fetch(url, { - method: "POST", - headers: { - "Content-Type": "application/json" - }, - body: JSON.stringify(payload) - }); - - invariant(response.ok, `HTTP error! 
status: ${response.status}`); - - const resp = (await response.json()) as any; - return resp.result as string; - } catch { - await new Promise((resolve) => setTimeout(resolve, sleepTime)); - } - } - - console.log(`Error fetching peerId from ${url} after ${(maxRetries * sleepTime) / 1000} seconds`); - showContainers(); - throw `Error fetching peerId from ${url}`; -}; - -export const runSimpleBspNet = async (bspNetConfig: BspNetConfig, verbose = false) => { - let userApi: EnrichedBspApi | undefined; - try { - console.log(`SH user id: ${shUser.address}`); - console.log(`SH BSP id: ${bspKey.address}`); - let file = "local-dev-bsp-compose.yml"; - if (bspNetConfig.rocksdb) { - file = "local-dev-bsp-rocksdb-compose.yml"; - } - if (bspNetConfig.noisy) { - file = "noisy-bsp-compose.yml"; - } - const composeFilePath = path.resolve(process.cwd(), "..", "docker", file); - const cwd = path.resolve(process.cwd(), "..", "docker"); - const composeFile = fs.readFileSync(composeFilePath, "utf8"); - const composeYaml = parse(composeFile); - if (bspNetConfig.extrinsicRetryTimeout) { - composeYaml.services["sh-bsp"].command.push( - `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` - ); - composeYaml.services["sh-user"].command.push( - `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` - ); - } - - const updatedCompose = stringify(composeYaml); - - if (bspNetConfig.noisy) { - await compose.upOne("toxiproxy", { cwd: cwd, configAsString: updatedCompose, log: true }); - } - - await compose.upOne("sh-bsp", { cwd: cwd, configAsString: updatedCompose, log: true }); - - const bspIp = await getContainerIp( - bspNetConfig.noisy ? 
"toxiproxy" : ShConsts.NODE_INFOS.bsp.containerName - ); - - if (bspNetConfig.noisy) { - verbose && console.log(`toxiproxy IP: ${bspIp}`); - } else { - verbose && console.log(`sh-bsp IP: ${bspIp}`); - } - - const bspPeerId = await getContainerPeerId(`http://127.0.0.1:${ShConsts.NODE_INFOS.bsp.port}`); - - process.env.BSP_IP = bspIp; - process.env.BSP_PEER_ID = bspPeerId; - - await compose.upOne("sh-user", { - cwd: cwd, - configAsString: updatedCompose, - log: true, - env: { - ...process.env, - BSP_IP: bspIp, - BSP_PEER_ID: bspPeerId - } - }); - - const peerIDUser = await getContainerPeerId( - `http://127.0.0.1:${ShConsts.NODE_INFOS.user.port}` - ); - verbose && console.log(`sh-user Peer ID: ${peerIDUser}`); - - const multiAddressBsp = `/ip4/${bspIp}/tcp/30350/p2p/${bspPeerId}`; - - // Create Connection API Object to User Node - userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - // Give Balances - const amount = 10000n * 10n ** 12n; - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(bspKey.address, amount)) - ); - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(shUser.address, amount)) - ); - - // Setting: - // replication_target = 1 -> One BSP is enough to fulfil a storage request. - // block_range_to_maximum_threshold = 1 -> The threshold goes from the minimum to the maximum in 1 tick. - await userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(1, 1))); - - // Adjusting runtime parameters... 
- // The `set_parameter` extrinsic receives an object like this: - // { - // RuntimeConfig: Enum { - // SlashAmountPerMaxFileSize: [null, {VALUE_YOU_WANT}], - // StakeToChallengePeriod: [null, {VALUE_YOU_WANT}], - // CheckpointChallengePeriod: [null, {VALUE_YOU_WANT}], - // MinChallengePeriod: [null, {VALUE_YOU_WANT}], - // } - // } - const slashAmountPerMaxFileSizeRuntimeParameter = { - RuntimeConfig: { - SlashAmountPerMaxFileSize: [null, 20n * MILLIUNIT] - } - }; - await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.parameters.setParameter(slashAmountPerMaxFileSizeRuntimeParameter) - ) - ); - const stakeToChallengePeriodRuntimeParameter = { - RuntimeConfig: { - StakeToChallengePeriod: [null, 1000n * UNIT] - } - }; - await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.parameters.setParameter(stakeToChallengePeriodRuntimeParameter) - ) - ); - const checkpointChallengePeriodRuntimeParameter = { - RuntimeConfig: { - CheckpointChallengePeriod: [null, 10] - } - }; - await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.parameters.setParameter(checkpointChallengePeriodRuntimeParameter) - ) - ); - const minChallengePeriodRuntimeParameter = { - RuntimeConfig: { - MinChallengePeriod: [null, 5] - } - }; - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.parameters.setParameter(minChallengePeriodRuntimeParameter)) - ); - - // Make BSP - await forceSignupBsp({ - api: userApi, - who: bspKey.address, - multiaddress: multiAddressBsp, - bspId: ShConsts.DUMMY_BSP_ID, - capacity: bspNetConfig.capacity || ShConsts.CAPACITY_512, - weight: bspNetConfig.bspStartingWeight - }); - - // Make MSP - await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.providers.forceMspSignUp( - alice.address, - ShConsts.DUMMY_MSP_ID, - bspNetConfig.capacity || ShConsts.CAPACITY_512, - [multiAddressBsp], - { - identifier: ShConsts.VALUE_PROP, - dataLimit: 500, - protocols: ["https", "ssh", "telnet"] - }, - alice.address - ) - ) - ); - } catch (e) { - 
console.error("Error ", e); - } finally { - userApi?.disconnect(); - } -}; - -export const forceSignupBsp = async (options: { - api: EnrichedBspApi; - multiaddress: string; - who: string | Uint8Array; - bspId?: string; - capacity?: bigint; - payeeAddress?: string; - weight?: bigint; -}) => { - const bspId = options.bspId || `0x${crypto.randomBytes(32).toString("hex")}`; - const blockResults = await options.api.sealBlock( - options.api.tx.sudo.sudo( - options.api.tx.providers.forceBspSignUp( - options.who, - bspId, - options.capacity || ShConsts.CAPACITY_512, - [options.multiaddress], - options.payeeAddress || options.who, - options.weight ?? null - ) - ) - ); - return Object.assign(bspId, blockResults); -}; -export const closeSimpleBspNet = async () => { - const docker = new Docker(); - - const allContainers = await docker.listContainers({ all: true }); - - const existingNodes = allContainers.filter((container) => container.Image === DOCKER_IMAGE); - - const toxiproxyContainer = allContainers.find((container) => - container.Names.some((name) => name.includes("toxiproxy")) - ); - - const promises = existingNodes.map(async (node) => docker.getContainer(node.Id).stop()); - - if (toxiproxyContainer && toxiproxyContainer.State === "running") { - console.log("Stopping toxiproxy container"); - promises.push(docker.getContainer(toxiproxyContainer.Id).stop()); - } else { - console.log("No running toxiproxy container found, skipping"); - } - - await Promise.allSettled(promises); - - await docker.pruneContainers(); - await docker.pruneVolumes(); -}; - -export const runInitialisedBspsNet = async (bspNetConfig: BspNetConfig) => { - await runSimpleBspNet(bspNetConfig); - - let userApi: EnrichedBspApi | undefined; - try { - userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - /**** CREATE BUCKET AND ISSUE STORAGE REQUEST ****/ - const source = "res/whatsup.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "nothingmuch-1"; - 
- const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - invariant(newBucketEventDataBlob, "Event doesn't match Type"); - - const { fingerprint, file_size, location } = - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - ShConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.sealBlock( - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - location, - fingerprint, - file_size, - ShConsts.DUMMY_MSP_ID, - [ShConsts.NODE_INFOS.user.expectedPeerId] - ), - shUser - ); - - await userApi.wait.bspVolunteer(); - await userApi.wait.bspStored(); - } catch (e) { - console.error("Error ", e); - } finally { - userApi?.disconnect(); - } -}; - -export const runMultipleInitialisedBspsNet = async ( - bspNetConfig: BspNetConfig -): Promise => { - await runSimpleBspNet(bspNetConfig); - - let userApi: EnrichedBspApi | undefined; - try { - userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - await userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(5, 1))); - - // Add more BSPs to the network. - // One BSP will be down, two more will be up. 
- const { containerName: bspDownContainerName } = await addBsp(userApi, bspDownKey, { - name: "sh-bsp-down", - rocksdb: bspNetConfig.rocksdb, - bspKeySeed: bspDownSeed, - bspId: ShConsts.BSP_DOWN_ID, - bspStartingWeight: bspNetConfig.capacity, - additionalArgs: ["--keystore-path=/keystore/bsp-down"] - }); - const { rpcPort: bspTwoRpcPort } = await addBsp(userApi, bspTwoKey, { - name: "sh-bsp-two", - rocksdb: bspNetConfig.rocksdb, - bspKeySeed: bspTwoSeed, - bspId: ShConsts.BSP_TWO_ID, - bspStartingWeight: bspNetConfig.capacity, - additionalArgs: ["--keystore-path=/keystore/bsp-two"] - }); - const { rpcPort: bspThreeRpcPort } = await addBsp(userApi, bspThreeKey, { - name: "sh-bsp-three", - rocksdb: bspNetConfig.rocksdb, - bspKeySeed: bspThreeSeed, - bspId: ShConsts.BSP_THREE_ID, - bspStartingWeight: bspNetConfig.capacity, - additionalArgs: ["--keystore-path=/keystore/bsp-three"] - }); - - // Everything executed below is tested in `volunteer.test.ts` and `onboard.test.ts` files. - // For the context of this test, this is a preamble, so that a BSP has a challenge cycle initiated. - - /**** CREATE BUCKET AND ISSUE STORAGE REQUEST ****/ - const source = "res/whatsup.jpg"; - const location = "test/smile.jpg"; - const bucketName = "nothingmuch-1"; - - const fileMetadata = await userApi.file.newStorageRequest(source, location, bucketName); - - await userApi.wait.bspVolunteer(); - await userApi.wait.bspStored(); - - // Stopping BSP that is supposed to be down. 
- await userApi.docker.stopBspContainer(bspDownContainerName); - - return { - bspTwoRpcPort, - bspThreeRpcPort, - fileData: { - fileKey: fileMetadata.fileKey, - bucketId: fileMetadata.bucketId, - location: location, - owner: fileMetadata.owner, - fingerprint: fileMetadata.fingerprint, - fileSize: fileMetadata.fileSize - } - }; - } catch (e) { - console.error("Error ", e); - } finally { - userApi?.disconnect(); - } -}; - -export const cleardownTest = async (cleardownOptions: { - api: EnrichedBspApi | EnrichedBspApi[]; - keepNetworkAlive?: boolean; -}) => { - try { - if (Array.isArray(cleardownOptions.api)) { - for (const api of cleardownOptions.api) { - await api.disconnect(); - } - } else { - await cleardownOptions.api.disconnect(); - } - } catch (e) { - console.error(e); - console.log("cleardown failed, but we will continue."); - } - cleardownOptions.keepNetworkAlive === true ? null : await closeSimpleBspNet(); -}; - -export const createCheckBucket = async (api: EnrichedBspApi, bucketName: string) => { - const newBucketEventEvent = await api.createBucket(bucketName); - const newBucketEventDataBlob = - api.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - invariant(newBucketEventDataBlob, "Event doesn't match Type"); - - return newBucketEventDataBlob; -}; - -export const addBsp = async ( - api: ApiPromise, - bspKey: KeyringPair, - options?: { - name?: string; - rocksdb?: boolean; - bspKeySeed?: string; - bspId?: string; - bspStartingWeight?: bigint; - maxStorageCapacity?: number; - extrinsicRetryTimeout?: number; - additionalArgs?: string[]; - } -) => { - // Launch a BSP node. - const additionalArgs = options?.additionalArgs ?? 
[]; - if (options?.extrinsicRetryTimeout) { - additionalArgs.push(`--extrinsic-retry-timeout=${options.extrinsicRetryTimeout}`); - } - if (options?.rocksdb) { - additionalArgs.push("--storage-layer=rocks-db"); - } - additionalArgs.push(`--storage-path=/tmp/bsp/${bspKey.address}`); - additionalArgs.push( - `--max-storage-capacity=${options?.maxStorageCapacity ?? MAX_STORAGE_CAPACITY}` - ); - additionalArgs.push(`--jump-capacity=${options?.maxStorageCapacity ?? CAPACITY[1024]}`); - const { containerName, rpcPort, p2pPort, peerId } = await addBspContainer({ - ...options, - additionalArgs - }); - - //Give it some balance. - const amount = 10000n * 10n ** 12n; - await sealBlock(api, api.tx.sudo.sudo(api.tx.balances.forceSetBalance(bspKey.address, amount))); - - const bspIp = await getContainerIp(containerName); - const multiAddressBsp = `/ip4/${bspIp}/tcp/${p2pPort}/p2p/${peerId}`; - - // Make BSP - await sealBlock( - api, - api.tx.sudo.sudo( - api.tx.providers.forceBspSignUp( - bspKey.address, - options?.bspId ?? bspKey.publicKey, - ShConsts.CAPACITY_512, - [multiAddressBsp], - bspKey.address, - options?.bspStartingWeight ?? 
null - ) - ) - ); - - return { containerName, rpcPort, p2pPort, peerId }; -}; +import type { KeyringPair } from "@polkadot/keyring/types"; +import "@storagehub/api-augment"; +import { v2 as compose } from "docker-compose"; +import Docker from "dockerode"; +import * as child_process from "node:child_process"; +import { execSync } from "node:child_process"; +import crypto from "node:crypto"; +import path from "node:path"; +import * as util from "node:util"; +import { DOCKER_IMAGE, MILLIUNIT, UNIT } from "../constants.ts"; +import { + alice, + bspDownKey, + bspDownSeed, + bspKey, + bspThreeKey, + bspThreeSeed, + bspTwoKey, + bspTwoSeed, + shUser +} from "../pjsKeyring"; +import { addBspContainer, showContainers } from "./docker"; +import type { BspNetConfig, InitialisedMultiBspNetwork } from "./types"; +import { CAPACITY, MAX_STORAGE_CAPACITY } from "./consts"; +import * as ShConsts from "./consts.ts"; +import { BspNetTestApi, type EnrichedBspApi } from "./test-api.ts"; +import invariant from "tiny-invariant"; +import * as fs from "node:fs"; +import { parse, stringify } from "yaml"; +import { sealBlock } from "./block.ts"; +import type { ApiPromise } from "@polkadot/api"; + +const exec = util.promisify(child_process.exec); + +export const getContainerIp = async (containerName: string, verbose = false): Promise => { + const maxRetries = 60; + const sleepTime = 500; + + for (let i = 0; i < maxRetries; i++) { + verbose && console.log(`Waiting for ${containerName} to launch...`); + + // TODO: Replace with dockerode command + try { + const { stdout } = await exec( + `docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${containerName}` + ); + return stdout.trim(); + } catch { + await new Promise((resolve) => setTimeout(resolve, sleepTime)); + } + } + // TODO: Replace with dockerode + execSync("docker ps -a", { stdio: "inherit" }); + try { + execSync("docker logs docker-sh-bsp-1", { stdio: "inherit" }); + execSync("docker logs docker-sh-user-1", { 
stdio: "inherit" }); + } catch (e) { + console.log(e); + } + console.log( + `Error fetching container IP for ${containerName} after ${ + (maxRetries * sleepTime) / 1000 + } seconds` + ); + showContainers(); + throw "Error fetching container IP"; +}; + +export const checkNodeAlive = async (url: string, verbose = false) => getContainerIp(url, verbose); +export const getContainerPeerId = async (url: string, verbose = false) => { + const maxRetries = 60; + const sleepTime = 500; + + const payload = { + id: "1", + jsonrpc: "2.0", + method: "system_localPeerId", + params: [] + }; + + for (let i = 0; i < maxRetries; i++) { + verbose && console.log(`Waiting for node at ${url} to launch...`); + + try { + const response = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(payload) + }); + + invariant(response.ok, `HTTP error! status: ${response.status}`); + + const resp = (await response.json()) as any; + return resp.result as string; + } catch { + await new Promise((resolve) => setTimeout(resolve, sleepTime)); + } + } + + console.log(`Error fetching peerId from ${url} after ${(maxRetries * sleepTime) / 1000} seconds`); + showContainers(); + throw `Error fetching peerId from ${url}`; +}; + +export const runSimpleBspNet = async (bspNetConfig: BspNetConfig, verbose = false) => { + let userApi: EnrichedBspApi | undefined; + try { + console.log(`SH user id: ${shUser.address}`); + console.log(`SH BSP id: ${bspKey.address}`); + let file = "local-dev-bsp-compose.yml"; + if (bspNetConfig.rocksdb) { + file = "local-dev-bsp-rocksdb-compose.yml"; + } + if (bspNetConfig.noisy) { + file = "noisy-bsp-compose.yml"; + } + const composeFilePath = path.resolve(process.cwd(), "..", "docker", file); + const cwd = path.resolve(process.cwd(), "..", "docker"); + const composeFile = fs.readFileSync(composeFilePath, "utf8"); + const composeYaml = parse(composeFile); + if (bspNetConfig.extrinsicRetryTimeout) { + 
composeYaml.services["sh-bsp"].command.push( + `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` + ); + composeYaml.services["sh-user"].command.push( + `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` + ); + } + + const updatedCompose = stringify(composeYaml); + + if (bspNetConfig.noisy) { + await compose.upOne("toxiproxy", { cwd: cwd, configAsString: updatedCompose, log: true }); + } + + await compose.upOne("sh-bsp", { cwd: cwd, configAsString: updatedCompose, log: true }); + + const bspIp = await getContainerIp( + bspNetConfig.noisy ? "toxiproxy" : ShConsts.NODE_INFOS.bsp.containerName + ); + + if (bspNetConfig.noisy) { + verbose && console.log(`toxiproxy IP: ${bspIp}`); + } else { + verbose && console.log(`sh-bsp IP: ${bspIp}`); + } + + const bspPeerId = await getContainerPeerId(`http://127.0.0.1:${ShConsts.NODE_INFOS.bsp.port}`); + + process.env.BSP_IP = bspIp; + process.env.BSP_PEER_ID = bspPeerId; + + await compose.upOne("sh-user", { + cwd: cwd, + configAsString: updatedCompose, + log: true, + env: { + ...process.env, + BSP_IP: bspIp, + BSP_PEER_ID: bspPeerId + } + }); + + const peerIDUser = await getContainerPeerId( + `http://127.0.0.1:${ShConsts.NODE_INFOS.user.port}` + ); + verbose && console.log(`sh-user Peer ID: ${peerIDUser}`); + + const multiAddressBsp = `/ip4/${bspIp}/tcp/30350/p2p/${bspPeerId}`; + + // Create Connection API Object to User Node + userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + // Give Balances + const amount = 10000n * 10n ** 12n; + await userApi.sealBlock( + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(bspKey.address, amount)) + ); + await userApi.sealBlock( + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(shUser.address, amount)) + ); + + // Setting: + // replication_target = 1 -> One BSP is enough to fulfil a storage request. + // block_range_to_maximum_threshold = 1 -> The threshold goes from the minimum to the maximum in 1 tick. 
+ await userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(1, 1))); + + // Adjusting runtime parameters... + // The `set_parameter` extrinsic receives an object like this: + // { + // RuntimeConfig: Enum { + // SlashAmountPerMaxFileSize: [null, {VALUE_YOU_WANT}], + // StakeToChallengePeriod: [null, {VALUE_YOU_WANT}], + // CheckpointChallengePeriod: [null, {VALUE_YOU_WANT}], + // MinChallengePeriod: [null, {VALUE_YOU_WANT}], + // } + // } + const slashAmountPerMaxFileSizeRuntimeParameter = { + RuntimeConfig: { + SlashAmountPerMaxFileSize: [null, 20n * MILLIUNIT] + } + }; + await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.parameters.setParameter(slashAmountPerMaxFileSizeRuntimeParameter) + ) + ); + const stakeToChallengePeriodRuntimeParameter = { + RuntimeConfig: { + StakeToChallengePeriod: [null, 1000n * UNIT] + } + }; + await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.parameters.setParameter(stakeToChallengePeriodRuntimeParameter) + ) + ); + const checkpointChallengePeriodRuntimeParameter = { + RuntimeConfig: { + CheckpointChallengePeriod: [null, 10] + } + }; + await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.parameters.setParameter(checkpointChallengePeriodRuntimeParameter) + ) + ); + const minChallengePeriodRuntimeParameter = { + RuntimeConfig: { + MinChallengePeriod: [null, 5] + } + }; + await userApi.sealBlock( + userApi.tx.sudo.sudo(userApi.tx.parameters.setParameter(minChallengePeriodRuntimeParameter)) + ); + + // Make BSP + await forceSignupBsp({ + api: userApi, + who: bspKey.address, + multiaddress: multiAddressBsp, + bspId: ShConsts.DUMMY_BSP_ID, + capacity: bspNetConfig.capacity || ShConsts.CAPACITY_512, + weight: bspNetConfig.bspStartingWeight + }); + + // Make MSP + await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + alice.address, + ShConsts.DUMMY_MSP_ID, + bspNetConfig.capacity || ShConsts.CAPACITY_512, + [multiAddressBsp], + { + identifier: 
ShConsts.VALUE_PROP, + dataLimit: 500, + protocols: ["https", "ssh", "telnet"] + }, + alice.address + ) + ) + ); + } catch (e) { + console.error("Error ", e); + } finally { + userApi?.disconnect(); + } +}; + +export const forceSignupBsp = async (options: { + api: EnrichedBspApi; + multiaddress: string; + who: string | Uint8Array; + bspId?: string; + capacity?: bigint; + payeeAddress?: string; + weight?: bigint; +}) => { + const bspId = options.bspId || `0x${crypto.randomBytes(32).toString("hex")}`; + const blockResults = await options.api.sealBlock( + options.api.tx.sudo.sudo( + options.api.tx.providers.forceBspSignUp( + options.who, + bspId, + options.capacity || ShConsts.CAPACITY_512, + [options.multiaddress], + options.payeeAddress || options.who, + options.weight ?? null + ) + ) + ); + return Object.assign(bspId, blockResults); +}; +export const closeSimpleBspNet = async () => { + const docker = new Docker(); + + const allContainers = await docker.listContainers({ all: true }); + + const existingNodes = allContainers.filter((container) => container.Image === DOCKER_IMAGE); + + const toxiproxyContainer = allContainers.find((container) => + container.Names.some((name) => name.includes("toxiproxy")) + ); + + const promises = existingNodes.map(async (node) => docker.getContainer(node.Id).stop()); + + if (toxiproxyContainer && toxiproxyContainer.State === "running") { + console.log("Stopping toxiproxy container"); + promises.push(docker.getContainer(toxiproxyContainer.Id).stop()); + } else { + console.log("No running toxiproxy container found, skipping"); + } + + await Promise.allSettled(promises); + + await docker.pruneContainers(); + await docker.pruneVolumes(); +}; + +export const runInitialisedBspsNet = async (bspNetConfig: BspNetConfig) => { + await runSimpleBspNet(bspNetConfig); + + let userApi: EnrichedBspApi | undefined; + try { + userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + /**** CREATE BUCKET AND ISSUE 
STORAGE REQUEST ****/ + const source = "res/whatsup.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "nothingmuch-1"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + invariant(newBucketEventDataBlob, "Event doesn't match Type"); + + const { fingerprint, file_size, location } = + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + ShConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.sealBlock( + userApi.tx.fileSystem.issueStorageRequest( + newBucketEventDataBlob.bucketId, + location, + fingerprint, + file_size, + ShConsts.DUMMY_MSP_ID, + [ShConsts.NODE_INFOS.user.expectedPeerId] + ), + shUser + ); + + await userApi.wait.bspVolunteer(); + await userApi.wait.bspStored(); + } catch (e) { + console.error("Error ", e); + } finally { + userApi?.disconnect(); + } +}; + +export const runMultipleInitialisedBspsNet = async ( + bspNetConfig: BspNetConfig +): Promise => { + await runSimpleBspNet(bspNetConfig); + + let userApi: EnrichedBspApi | undefined; + try { + userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + await userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(5, 1))); + + // Add more BSPs to the network. + // One BSP will be down, two more will be up. 
+ const { containerName: bspDownContainerName } = await addBsp(userApi, bspDownKey, { + name: "sh-bsp-down", + rocksdb: bspNetConfig.rocksdb, + bspKeySeed: bspDownSeed, + bspId: ShConsts.BSP_DOWN_ID, + bspStartingWeight: bspNetConfig.capacity, + additionalArgs: ["--keystore-path=/keystore/bsp-down"] + }); + const { rpcPort: bspTwoRpcPort } = await addBsp(userApi, bspTwoKey, { + name: "sh-bsp-two", + rocksdb: bspNetConfig.rocksdb, + bspKeySeed: bspTwoSeed, + bspId: ShConsts.BSP_TWO_ID, + bspStartingWeight: bspNetConfig.capacity, + additionalArgs: ["--keystore-path=/keystore/bsp-two"] + }); + const { rpcPort: bspThreeRpcPort } = await addBsp(userApi, bspThreeKey, { + name: "sh-bsp-three", + rocksdb: bspNetConfig.rocksdb, + bspKeySeed: bspThreeSeed, + bspId: ShConsts.BSP_THREE_ID, + bspStartingWeight: bspNetConfig.capacity, + additionalArgs: ["--keystore-path=/keystore/bsp-three"] + }); + + // Everything executed below is tested in `volunteer.test.ts` and `onboard.test.ts` files. + // For the context of this test, this is a preamble, so that a BSP has a challenge cycle initiated. + + /**** CREATE BUCKET AND ISSUE STORAGE REQUEST ****/ + const source = "res/whatsup.jpg"; + const location = "test/smile.jpg"; + const bucketName = "nothingmuch-1"; + + const fileMetadata = await userApi.file.newStorageRequest(source, location, bucketName); + + await userApi.wait.bspVolunteer(); + await userApi.wait.bspStored(); + + // Stopping BSP that is supposed to be down. 
+ await userApi.docker.stopBspContainer(bspDownContainerName); + + return { + bspTwoRpcPort, + bspThreeRpcPort, + fileData: { + fileKey: fileMetadata.fileKey, + bucketId: fileMetadata.bucketId, + location: location, + owner: fileMetadata.owner, + fingerprint: fileMetadata.fingerprint, + fileSize: fileMetadata.fileSize + } + }; + } catch (e) { + console.error("Error ", e); + } finally { + userApi?.disconnect(); + } +}; + +export const cleardownTest = async (cleardownOptions: { + api: EnrichedBspApi | EnrichedBspApi[]; + keepNetworkAlive?: boolean; +}) => { + try { + if (Array.isArray(cleardownOptions.api)) { + for (const api of cleardownOptions.api) { + await api.disconnect(); + } + } else { + await cleardownOptions.api.disconnect(); + } + } catch (e) { + console.error(e); + console.log("cleardown failed, but we will continue."); + } + cleardownOptions.keepNetworkAlive === true ? null : await closeSimpleBspNet(); +}; + +export const createCheckBucket = async (api: EnrichedBspApi, bucketName: string) => { + const newBucketEventEvent = await api.createBucket(bucketName); + const newBucketEventDataBlob = + api.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + invariant(newBucketEventDataBlob, "Event doesn't match Type"); + + return newBucketEventDataBlob; +}; + +export const addBsp = async ( + api: ApiPromise, + bspKey: KeyringPair, + options?: { + name?: string; + rocksdb?: boolean; + bspKeySeed?: string; + bspId?: string; + bspStartingWeight?: bigint; + maxStorageCapacity?: number; + extrinsicRetryTimeout?: number; + additionalArgs?: string[]; + } +) => { + // Launch a BSP node. + const additionalArgs = options?.additionalArgs ?? 
[]; + if (options?.extrinsicRetryTimeout) { + additionalArgs.push(`--extrinsic-retry-timeout=${options.extrinsicRetryTimeout}`); + } + if (options?.rocksdb) { + additionalArgs.push("--storage-layer=rocks-db"); + } + additionalArgs.push(`--storage-path=/tmp/bsp/${bspKey.address}`); + additionalArgs.push( + `--max-storage-capacity=${options?.maxStorageCapacity ?? MAX_STORAGE_CAPACITY}` + ); + additionalArgs.push(`--jump-capacity=${options?.maxStorageCapacity ?? CAPACITY[1024]}`); + const { containerName, rpcPort, p2pPort, peerId } = await addBspContainer({ + ...options, + additionalArgs + }); + + //Give it some balance. + const amount = 10000n * 10n ** 12n; + await sealBlock(api, api.tx.sudo.sudo(api.tx.balances.forceSetBalance(bspKey.address, amount))); + + const bspIp = await getContainerIp(containerName); + const multiAddressBsp = `/ip4/${bspIp}/tcp/${p2pPort}/p2p/${peerId}`; + + // Make BSP + await sealBlock( + api, + api.tx.sudo.sudo( + api.tx.providers.forceBspSignUp( + bspKey.address, + options?.bspId ?? bspKey.publicKey, + ShConsts.CAPACITY_512, + [multiAddressBsp], + bspKey.address, + options?.bspStartingWeight ?? 
null + ) + ) + ); + + return { containerName, rpcPort, p2pPort, peerId }; +}; diff --git a/test/util/bspNet/test-api.ts b/test/util/bspNet/test-api.ts index e79da9663..9f949ff40 100644 --- a/test/util/bspNet/test-api.ts +++ b/test/util/bspNet/test-api.ts @@ -1,458 +1,458 @@ -import "@storagehub/api-augment"; -import { ApiPromise, WsProvider } from "@polkadot/api"; -import type { SubmittableExtrinsic } from "@polkadot/api/types"; -import type { KeyringPair } from "@polkadot/keyring/types"; -import type { EventRecord } from "@polkadot/types/interfaces"; -import type { ISubmittableResult } from "@polkadot/types/types"; -import type { HexString } from "@polkadot/util/types"; -import { types as BundledTypes } from "@storagehub/types-bundle"; -import type { AssertExtrinsicOptions } from "../asserts"; -import * as Assertions from "../asserts"; -import * as BspNetBlock from "./block"; -import { sealBlock } from "./block"; -import * as ShConsts from "./consts"; -import * as DockerBspNet from "./docker"; -import * as Files from "./fileHelpers"; -import * as NodeBspNet from "./node"; -import type { BspNetApi, SealBlockOptions } from "./types"; -import * as Waits from "./waits"; -import { addBsp } from "./helpers"; - -/** - * Represents an enhanced API for interacting with StorageHub BSPNet. - */ -export class BspNetTestApi implements AsyncDisposable { - private _api: ApiPromise; - private _endpoint: `ws://${string}` | `wss://${string}`; - - private constructor(api: ApiPromise, endpoint: `ws://${string}` | `wss://${string}`) { - this._api = api; - this._endpoint = endpoint; - } - - /** - * Creates a new instance of BspNetTestApi. - * - * @param endpoint - The WebSocket endpoint to connect to. - * @returns A promise that resolves to an enriched BspNetApi. 
- */ - public static async create(endpoint: `ws://${string}` | `wss://${string}`) { - const api = await BspNetTestApi.connect(endpoint); - await api.isReady; - - const ctx = new BspNetTestApi(api, endpoint); - - return ctx.enrichApi(); - } - - public async reconnect(): Promise { - if (!this._api.isConnected) { - await this._api.disconnect(); - const newApi = await ApiPromise.create({ - provider: new WsProvider(this._endpoint), - noInitWarn: true, - throwOnConnect: false, - throwOnUnknown: false, - typesBundle: BundledTypes - }); - await newApi.isReady; - this._api = newApi; - this.enrichApi(); - } - } - - /** - * Establishes a connection to the specified endpoint. - * Note: This method shouldn't be called directly in tests. Use `create` instead. - * - * @param endpoint - The WebSocket endpoint to connect to. - * @returns A promise that resolves to an ApiPromise with async disposal. - */ - public static async connect(endpoint: `ws://${string}` | `wss://${string}`) { - const api = await ApiPromise.create({ - provider: new WsProvider(endpoint), - noInitWarn: true, - throwOnConnect: false, - throwOnUnknown: false, - typesBundle: BundledTypes - }); - return Object.assign(api, { - [Symbol.asyncDispose]: async () => { - await api.disconnect(); - } - }); - } - - private async disconnect() { - await this._api.disconnect(); - } - - /** - * Seals a block with optional extrinsics and finalizes it. - * - * @param calls - Optional extrinsic(s) to include in the block. - * @param signer - Optional signer for the extrinsics. - * @param finaliseBlock - Whether to finalize the block. Defaults to true. - * @returns A Promise resolving to a SealedBlock object. 
- */ - private async sealBlock( - calls?: - | SubmittableExtrinsic<"promise", ISubmittableResult> - | SubmittableExtrinsic<"promise", ISubmittableResult>[], - signer?: KeyringPair, - finaliseBlock = true - ) { - return sealBlock(this._api, calls, signer, finaliseBlock); - } - - private async sendNewStorageRequest(source: string, location: string, bucketName: string) { - return Files.sendNewStorageRequest(this._api, source, location, bucketName); - } - - private async createBucket(bucketName: string) { - return Files.createBucket(this._api, bucketName); - } - - private assertEvent(module: string, method: string, events?: EventRecord[]) { - return Assertions.assertEventPresent(this._api, module, method, events); - } - - /** - * Advances the blockchain to a specified block number. - * - * This function seals blocks until the specified block number is reached. It can optionally - * wait between blocks and watch for BSP proofs. - * - * @param api - The ApiPromise instance to interact with the blockchain. - * @param blockNumber - The target block number to advance to. - * @param waitBetweenBlocks - Optional. If specified: - * - If a number, waits for that many milliseconds between blocks. - * - If true, waits for 500ms between blocks. - * - If false or undefined, doesn't wait between blocks. - * @param watchForBspProofs - Optional. An array of BSP IDs to watch for proofs. - * If specified, the function will wait for BSP proofs at appropriate intervals. - * - * @returns A Promise that resolves to a SealedBlock object representing the last sealed block. - * - * @throws Will throw an error if the target block number is lower than the current block number. 
- * - * @example - * // Advance to block 100 with no waiting - * const result = await advanceToBlock(api, 100); - * - * @example - * // Advance to block 200, waiting 1000ms between blocks - * const result = await advanceToBlock(api, 200, 1000); - * - * @example - * // Advance to block 300, watching for proofs from two BSPs - * const result = await advanceToBlock(api, 300, true, ['bsp1', 'bsp2']); - */ - private advanceToBlock( - blockNumber: number, - options?: { - waitBetweenBlocks?: number | boolean; - waitForBspProofs?: string[]; - } - ) { - return BspNetBlock.advanceToBlock( - this._api, - blockNumber, - options?.waitBetweenBlocks, - options?.waitForBspProofs - ); - } - - private enrichApi() { - const remappedAssertNs = { - fetchEventData: Assertions.fetchEventData, - - /** - * Asserts that a specific event is present in the given events or the latest block. - * @param module - The module name of the event. - * @param method - The method name of the event. - * @param events - Optional. The events to search through. If not provided, it will fetch the latest block's events. - * @returns The matching event and its data. - */ - eventPresent: async (module: string, method: string, events?: EventRecord[]) => { - const evts = events ?? ((await this._api.query.system.events()) as EventRecord[]); - return Assertions.assertEventPresent(this._api, module, method, evts); - }, - /** - * Asserts that multiple instances of a specific event are present. - * @param module - The module name of the event. - * @param method - The method name of the event. - * @param events - Optional. The events to search through. If not provided, it will fetch the latest block's events. - * @returns An array of matching events and their data. - */ - eventMany: async (module: string, method: string, events?: EventRecord[]) => { - const evts = events ?? 
((await this._api.query.system.events()) as EventRecord[]); - return Assertions.assertEventMany(this._api, module, method, evts); - }, - /** - * Asserts that a specific extrinsic is present in the transaction pool or recent blocks. - * @param options - Options specifying the extrinsic to search for. - * @returns An array of matching extrinsics. - */ - extrinsicPresent: (options: AssertExtrinsicOptions) => - Assertions.assertExtrinsicPresent(this._api, options), - /** - * Asserts that a specific provider has been slashed. - * @param providerId - The ID of the provider to check. - * @returns A boolean indicating whether the provider was slashed. - */ - providerSlashed: (providerId: string) => - Assertions.checkProviderWasSlashed(this._api, providerId), - - /** - * Asserts that a specific log message appears in a Docker container's output. - * @param options - The options for the log assertion. - * @param options.searchString - The string to search for in the container's logs. - * @param options.containerName - The name of the Docker container to search logs in. - * @param options.timeout - Optional. The maximum time (in milliseconds) to wait for the log message to appear. Default 10s. - * @returns A promise that resolves to the matching log message if found, or rejects if the timeout is reached. - */ - log: async (options: { searchString: string; containerName: string; timeout?: number }) => { - return Assertions.assertDockerLog( - options.containerName, - options.searchString, - options.timeout - ); - } - }; - - /** - * Waits namespace - * Contains methods for waiting on specific events or conditions in the BSP network. - */ - const remappedWaitsNs = { - /** - * Waits for a BSP to volunteer for a storage request. - * @param expectedExts - Optional param to specify the number of expected extrinsics. - * @returns A promise that resolves when a BSP has volunteered. 
- */ - bspVolunteer: (expectedExts?: number) => Waits.waitForBspVolunteer(this._api, expectedExts), - - /** - * Waits for a BSP to confirm storing a file. - * @param expectedExts - Optional param to specify the number of expected extrinsics. - * @returns A promise that resolves when a BSP has confirmed storing a file. - */ - bspStored: (expectedExts?: number) => Waits.waitForBspStored(this._api, expectedExts) - }; - - /** - * File operations namespace - * Contains methods for interacting with StorageHub file system. - */ - const remappedFileNs = { - /** - * Creates a new bucket. - * - * @param bucketName - The name of the bucket to be created. - * @param mspId - Optional MSP ID to use for the new storage request. Defaults to DUMMY_MSP_ID. - * @param owner - Optional signer with which to issue the newStorageRequest Defaults to SH_USER. - * @returns A promise that resolves to a new bucket event. - */ - newBucket: (bucketName: string, owner?: KeyringPair) => - Files.createBucket(this._api, bucketName, undefined, owner), - - /** - * Creates a new bucket and submits a new storage request. - * - * @param source - The local path to the file to be uploaded. - * @param location - The StorageHub "location" field of the file to be uploaded. - * @param bucketName - The name of the bucket to be created. - * @param mspId - Optional MSP ID to use for the new storage request. Defaults to DUMMY_MSP_ID. - * @param owner - Optional signer with which to issue the newStorageRequest Defaults to SH_USER. - * @returns A promise that resolves to file metadata. - */ - newStorageRequest: ( - source: string, - location: string, - bucketName: string, - msp_id?: HexString, - owner?: KeyringPair - ) => Files.sendNewStorageRequest(this._api, source, location, bucketName, msp_id, owner) - }; - - /** - * Block operations namespace - * Contains methods for manipulating and interacting with blocks in the BSP network. - */ - const remappedBlockNs = { - /** - * Seals a block with optional extrinsics. 
- * @param options - Options for sealing the block, including calls, signer, and whether to finalize. - * @returns A promise that resolves to a SealedBlock object. - */ - seal: (options?: SealBlockOptions) => - BspNetBlock.sealBlock(this._api, options?.calls, options?.signer, options?.finaliseBlock), - /** - * Seal blocks until the next challenge period block. - * It will verify that the SlashableProvider event is emitted and check if the provider is slashable with an additional failed challenge deadline. - * @param nextChallengeTick - The block number of the next challenge. - * @param provider - The provider to check for slashing. - * @returns A promise that resolves when the challenge period block is reached. - */ - skipToChallengePeriod: (nextChallengeTick: number, provider: string) => - BspNetBlock.runToNextChallengePeriodBlock(this._api, nextChallengeTick, provider), - /** - * Skips a specified number of blocks. - * Note: This skips too quickly for nodes to BSPs to react. Use skipTo where reaction extrinsics are required. - * @param blocksToAdvance - The number of blocks to skip. - * @returns A promise that resolves when the specified number of blocks have been skipped. - */ - skip: (blocksToAdvance: number) => BspNetBlock.skipBlocks(this._api, blocksToAdvance), - /** - * Advances the chain to a specific block number. - * @param blockNumber - The target block number to advance to. - * @param options - Optional parameters for waiting between blocks and watching for BSP proofs. - * @returns A promise that resolves when the specified block number is reached. - */ - skipTo: ( - blockNumber: number, - options?: { - waitBetweenBlocks?: number | boolean; - waitForBspProofs?: string[]; - spam?: boolean; - verbose?: boolean; - } - ) => - BspNetBlock.advanceToBlock( - this._api, - blockNumber, - options?.waitBetweenBlocks, - options?.waitForBspProofs, - options?.spam, - options?.verbose - ), - /** - * Skips blocks until the minimum time for capacity changes is reached. 
- * @returns A promise that resolves when the minimum change time is reached. - */ - skipToMinChangeTime: () => BspNetBlock.skipBlocksToMinChangeTime(this._api), - /** - * Causes a chain re-org by creating a finalized block on top of the parent block. - * Note: This requires the head block to be unfinalized, otherwise it will throw! - * @returns A promise that resolves when the chain re-org is complete. - */ - reOrg: () => BspNetBlock.reOrgBlocks(this._api) - }; - - const remappedNodeNs = { - /** - * Drops transaction(s) from the node's transaction pool. - * - * @param extrinsic - Optional. Specifies which transaction(s) to drop: - * - If omitted, all transactions in the pool will be cleared. - * - If an object with module and method, it will drop matching transactions. - * - If a hex string, it will drop the transaction with the matching hash. - * @param sealAfter - Whether to seal a block after dropping the transaction(s). Defaults to false. - */ - dropTxn: (extrinsic?: { module: string; method: string } | HexString, sealAfter = false) => - NodeBspNet.dropTransaction(this._api, extrinsic, sealAfter) - }; - - const remappedDockerNs = { - ...DockerBspNet, - onboardBsp: (options: { - bspSigner: KeyringPair; - name?: string; - rocksdb?: boolean; - bspKeySeed?: string; - bspId?: string; - bspStartingWeight?: bigint; - maxStorageCapacity?: number; - additionalArgs?: string[]; - }) => addBsp(this._api, options.bspSigner, options) - }; - - return Object.assign(this._api, { - /** - * Soon Deprecated. Use api.block.seal() instead. - * @see {@link sealBlock} - */ - sealBlock: this.sealBlock.bind(this), - /** - * Soon Deprecated. Use api.file.newStorageRequest() instead. - * @see {@link sendNewStorageRequest} - */ - sendNewStorageRequest: this.sendNewStorageRequest.bind(this), - /** - * Soon Deprecated. Use api.file.newBucket() instead. - * @see {@link createBucket} - */ - createBucket: this.createBucket.bind(this), - /** - * Soon Deprecated. 
Use api.assert.eventPresent() instead. - * @see {@link assertEvent} - */ - assertEvent: this.assertEvent.bind(this), - /** - * Soon Deprecated. Use api.assert.eventPresent() instead. - * @see {@link advanceToBlock} - */ - advanceToBlock: this.advanceToBlock.bind(this), - /** - * Assertions namespace - * Provides methods for asserting various conditions in the BSP network tests. - */ - assert: remappedAssertNs, - /** - * Waits namespace - * Contains methods for waiting on specific events or conditions in the BSP network. - */ - wait: remappedWaitsNs, - /** - * File operations namespace - * Offers methods for file-related operations in the BSP network, such as creating buckets and storage requests. - */ - file: remappedFileNs, - /** - * Node operations namespace - * Provides methods for interacting with and manipulating nodes in the BSP network. - */ - node: remappedNodeNs, - /** - * Block operations namespace - * Contains methods for manipulating and interacting with blocks in the BSP network. - */ - block: remappedBlockNs, - /** - * StorageHub Constants namespace - * Contains static data useful for testing the BSP network. - */ - shConsts: ShConsts, - /** - * Docker operations namespace - * Offers methods for interacting with Docker containers in the BSP network test environment. - */ - docker: remappedDockerNs, - [Symbol.asyncDispose]: this.disconnect.bind(this) - }) satisfies BspNetApi; - } - - async [Symbol.asyncDispose]() { - await this._api.disconnect(); - } -} - -/** - * Represents an enhanced API for interacting with StorageHub BSPNet. - * This type extends the standard Polkadot API with additional methods and namespaces - * specifically designed for testing and interacting with a StorageHub BSP network. 
- *
- * It includes:
- * - Extended assertion capabilities (@see {@link Assertions})
- * - Waiting utilities for BSP-specific events (@see {@link Waits})
- * - File and bucket operations (@see {@link Files})
- * - Block manipulation and advancement utilities (@see {@link BspNetBlock})
- * - Node interaction methods (@see {@link NodeBspNet})
- * - Docker container management for BSP testing (@see {@link DockerBspNet})
- * - StorageHub constants (@see {@link ShConsts})
- *
- * This API is created using the BspNetTestApi.create() static method and provides
- * a comprehensive toolkit for testing and developing BSP network functionality.
- */
-export type EnrichedBspApi = Awaited<ReturnType<typeof BspNetTestApi.create>>;
+import "@storagehub/api-augment";
+import { ApiPromise, WsProvider } from "@polkadot/api";
+import type { SubmittableExtrinsic } from "@polkadot/api/types";
+import type { KeyringPair } from "@polkadot/keyring/types";
+import type { EventRecord } from "@polkadot/types/interfaces";
+import type { ISubmittableResult } from "@polkadot/types/types";
+import type { HexString } from "@polkadot/util/types";
+import { types as BundledTypes } from "@storagehub/types-bundle";
+import type { AssertExtrinsicOptions } from "../asserts";
+import * as Assertions from "../asserts";
+import * as BspNetBlock from "./block";
+import { sealBlock } from "./block";
+import * as ShConsts from "./consts";
+import * as DockerBspNet from "./docker";
+import * as Files from "./fileHelpers";
+import * as NodeBspNet from "./node";
+import type { BspNetApi, SealBlockOptions } from "./types";
+import * as Waits from "./waits";
+import { addBsp } from "./helpers";
+
+/**
+ * Represents an enhanced API for interacting with StorageHub BSPNet.
+ */
+export class BspNetTestApi implements AsyncDisposable {
+  private _api: ApiPromise;
+  private _endpoint: `ws://${string}` | `wss://${string}`;
+
+  private constructor(api: ApiPromise, endpoint: `ws://${string}` | `wss://${string}`) {
+    this._api = api;
+    this._endpoint = endpoint;
+  }
+
+  /**
+   * Creates a new instance of BspNetTestApi.
+   *
+   * @param endpoint - The WebSocket endpoint to connect to.
+   * @returns A promise that resolves to an enriched BspNetApi.
+   */
+  public static async create(endpoint: `ws://${string}` | `wss://${string}`) {
+    const api = await BspNetTestApi.connect(endpoint);
+    await api.isReady;
+
+    const ctx = new BspNetTestApi(api, endpoint);
+
+    return ctx.enrichApi();
+  }
+
+  public async reconnect(): Promise<void> {
+    if (!this._api.isConnected) {
+      await this._api.disconnect();
+      const newApi = await ApiPromise.create({
+        provider: new WsProvider(this._endpoint),
+        noInitWarn: true,
+        throwOnConnect: false,
+        throwOnUnknown: false,
+        typesBundle: BundledTypes
+      });
+      await newApi.isReady;
+      this._api = newApi;
+      this.enrichApi();
+    }
+  }
+
+  /**
+   * Establishes a connection to the specified endpoint.
+   * Note: This method shouldn't be called directly in tests. Use `create` instead.
+   *
+   * @param endpoint - The WebSocket endpoint to connect to.
+   * @returns A promise that resolves to an ApiPromise with async disposal.
+   */
+  public static async connect(endpoint: `ws://${string}` | `wss://${string}`) {
+    const api = await ApiPromise.create({
+      provider: new WsProvider(endpoint),
+      noInitWarn: true,
+      throwOnConnect: false,
+      throwOnUnknown: false,
+      typesBundle: BundledTypes
+    });
+    return Object.assign(api, {
+      [Symbol.asyncDispose]: async () => {
+        await api.disconnect();
+      }
+    });
+  }
+
+  private async disconnect() {
+    await this._api.disconnect();
+  }
+
+  /**
+   * Seals a block with optional extrinsics and finalizes it.
+   *
+   * @param calls - Optional extrinsic(s) to include in the block.
+ * @param signer - Optional signer for the extrinsics. + * @param finaliseBlock - Whether to finalize the block. Defaults to true. + * @returns A Promise resolving to a SealedBlock object. + */ + private async sealBlock( + calls?: + | SubmittableExtrinsic<"promise", ISubmittableResult> + | SubmittableExtrinsic<"promise", ISubmittableResult>[], + signer?: KeyringPair, + finaliseBlock = true + ) { + return sealBlock(this._api, calls, signer, finaliseBlock); + } + + private async sendNewStorageRequest(source: string, location: string, bucketName: string) { + return Files.sendNewStorageRequest(this._api, source, location, bucketName); + } + + private async createBucket(bucketName: string) { + return Files.createBucket(this._api, bucketName); + } + + private assertEvent(module: string, method: string, events?: EventRecord[]) { + return Assertions.assertEventPresent(this._api, module, method, events); + } + + /** + * Advances the blockchain to a specified block number. + * + * This function seals blocks until the specified block number is reached. It can optionally + * wait between blocks and watch for BSP proofs. + * + * @param api - The ApiPromise instance to interact with the blockchain. + * @param blockNumber - The target block number to advance to. + * @param waitBetweenBlocks - Optional. If specified: + * - If a number, waits for that many milliseconds between blocks. + * - If true, waits for 500ms between blocks. + * - If false or undefined, doesn't wait between blocks. + * @param watchForBspProofs - Optional. An array of BSP IDs to watch for proofs. + * If specified, the function will wait for BSP proofs at appropriate intervals. + * + * @returns A Promise that resolves to a SealedBlock object representing the last sealed block. + * + * @throws Will throw an error if the target block number is lower than the current block number. 
+ * + * @example + * // Advance to block 100 with no waiting + * const result = await advanceToBlock(api, 100); + * + * @example + * // Advance to block 200, waiting 1000ms between blocks + * const result = await advanceToBlock(api, 200, 1000); + * + * @example + * // Advance to block 300, watching for proofs from two BSPs + * const result = await advanceToBlock(api, 300, true, ['bsp1', 'bsp2']); + */ + private advanceToBlock( + blockNumber: number, + options?: { + waitBetweenBlocks?: number | boolean; + waitForBspProofs?: string[]; + } + ) { + return BspNetBlock.advanceToBlock( + this._api, + blockNumber, + options?.waitBetweenBlocks, + options?.waitForBspProofs + ); + } + + private enrichApi() { + const remappedAssertNs = { + fetchEventData: Assertions.fetchEventData, + + /** + * Asserts that a specific event is present in the given events or the latest block. + * @param module - The module name of the event. + * @param method - The method name of the event. + * @param events - Optional. The events to search through. If not provided, it will fetch the latest block's events. + * @returns The matching event and its data. + */ + eventPresent: async (module: string, method: string, events?: EventRecord[]) => { + const evts = events ?? ((await this._api.query.system.events()) as EventRecord[]); + return Assertions.assertEventPresent(this._api, module, method, evts); + }, + /** + * Asserts that multiple instances of a specific event are present. + * @param module - The module name of the event. + * @param method - The method name of the event. + * @param events - Optional. The events to search through. If not provided, it will fetch the latest block's events. + * @returns An array of matching events and their data. + */ + eventMany: async (module: string, method: string, events?: EventRecord[]) => { + const evts = events ?? 
((await this._api.query.system.events()) as EventRecord[]); + return Assertions.assertEventMany(this._api, module, method, evts); + }, + /** + * Asserts that a specific extrinsic is present in the transaction pool or recent blocks. + * @param options - Options specifying the extrinsic to search for. + * @returns An array of matching extrinsics. + */ + extrinsicPresent: (options: AssertExtrinsicOptions) => + Assertions.assertExtrinsicPresent(this._api, options), + /** + * Asserts that a specific provider has been slashed. + * @param providerId - The ID of the provider to check. + * @returns A boolean indicating whether the provider was slashed. + */ + providerSlashed: (providerId: string) => + Assertions.checkProviderWasSlashed(this._api, providerId), + + /** + * Asserts that a specific log message appears in a Docker container's output. + * @param options - The options for the log assertion. + * @param options.searchString - The string to search for in the container's logs. + * @param options.containerName - The name of the Docker container to search logs in. + * @param options.timeout - Optional. The maximum time (in milliseconds) to wait for the log message to appear. Default 10s. + * @returns A promise that resolves to the matching log message if found, or rejects if the timeout is reached. + */ + log: async (options: { searchString: string; containerName: string; timeout?: number }) => { + return Assertions.assertDockerLog( + options.containerName, + options.searchString, + options.timeout + ); + } + }; + + /** + * Waits namespace + * Contains methods for waiting on specific events or conditions in the BSP network. + */ + const remappedWaitsNs = { + /** + * Waits for a BSP to volunteer for a storage request. + * @param expectedExts - Optional param to specify the number of expected extrinsics. + * @returns A promise that resolves when a BSP has volunteered. 
+ */ + bspVolunteer: (expectedExts?: number) => Waits.waitForBspVolunteer(this._api, expectedExts), + + /** + * Waits for a BSP to confirm storing a file. + * @param expectedExts - Optional param to specify the number of expected extrinsics. + * @returns A promise that resolves when a BSP has confirmed storing a file. + */ + bspStored: (expectedExts?: number) => Waits.waitForBspStored(this._api, expectedExts) + }; + + /** + * File operations namespace + * Contains methods for interacting with StorageHub file system. + */ + const remappedFileNs = { + /** + * Creates a new bucket. + * + * @param bucketName - The name of the bucket to be created. + * @param mspId - Optional MSP ID to use for the new storage request. Defaults to DUMMY_MSP_ID. + * @param owner - Optional signer with which to issue the newStorageRequest Defaults to SH_USER. + * @returns A promise that resolves to a new bucket event. + */ + newBucket: (bucketName: string, owner?: KeyringPair) => + Files.createBucket(this._api, bucketName, undefined, owner), + + /** + * Creates a new bucket and submits a new storage request. + * + * @param source - The local path to the file to be uploaded. + * @param location - The StorageHub "location" field of the file to be uploaded. + * @param bucketName - The name of the bucket to be created. + * @param mspId - Optional MSP ID to use for the new storage request. Defaults to DUMMY_MSP_ID. + * @param owner - Optional signer with which to issue the newStorageRequest Defaults to SH_USER. + * @returns A promise that resolves to file metadata. + */ + newStorageRequest: ( + source: string, + location: string, + bucketName: string, + msp_id?: HexString, + owner?: KeyringPair + ) => Files.sendNewStorageRequest(this._api, source, location, bucketName, msp_id, owner) + }; + + /** + * Block operations namespace + * Contains methods for manipulating and interacting with blocks in the BSP network. + */ + const remappedBlockNs = { + /** + * Seals a block with optional extrinsics. 
+ * @param options - Options for sealing the block, including calls, signer, and whether to finalize. + * @returns A promise that resolves to a SealedBlock object. + */ + seal: (options?: SealBlockOptions) => + BspNetBlock.sealBlock(this._api, options?.calls, options?.signer, options?.finaliseBlock), + /** + * Seal blocks until the next challenge period block. + * It will verify that the SlashableProvider event is emitted and check if the provider is slashable with an additional failed challenge deadline. + * @param nextChallengeTick - The block number of the next challenge. + * @param provider - The provider to check for slashing. + * @returns A promise that resolves when the challenge period block is reached. + */ + skipToChallengePeriod: (nextChallengeTick: number, provider: string) => + BspNetBlock.runToNextChallengePeriodBlock(this._api, nextChallengeTick, provider), + /** + * Skips a specified number of blocks. + * Note: This skips too quickly for nodes to BSPs to react. Use skipTo where reaction extrinsics are required. + * @param blocksToAdvance - The number of blocks to skip. + * @returns A promise that resolves when the specified number of blocks have been skipped. + */ + skip: (blocksToAdvance: number) => BspNetBlock.skipBlocks(this._api, blocksToAdvance), + /** + * Advances the chain to a specific block number. + * @param blockNumber - The target block number to advance to. + * @param options - Optional parameters for waiting between blocks and watching for BSP proofs. + * @returns A promise that resolves when the specified block number is reached. + */ + skipTo: ( + blockNumber: number, + options?: { + waitBetweenBlocks?: number | boolean; + waitForBspProofs?: string[]; + spam?: boolean; + verbose?: boolean; + } + ) => + BspNetBlock.advanceToBlock( + this._api, + blockNumber, + options?.waitBetweenBlocks, + options?.waitForBspProofs, + options?.spam, + options?.verbose + ), + /** + * Skips blocks until the minimum time for capacity changes is reached. 
+ * @returns A promise that resolves when the minimum change time is reached. + */ + skipToMinChangeTime: () => BspNetBlock.skipBlocksToMinChangeTime(this._api), + /** + * Causes a chain re-org by creating a finalized block on top of the parent block. + * Note: This requires the head block to be unfinalized, otherwise it will throw! + * @returns A promise that resolves when the chain re-org is complete. + */ + reOrg: () => BspNetBlock.reOrgBlocks(this._api) + }; + + const remappedNodeNs = { + /** + * Drops transaction(s) from the node's transaction pool. + * + * @param extrinsic - Optional. Specifies which transaction(s) to drop: + * - If omitted, all transactions in the pool will be cleared. + * - If an object with module and method, it will drop matching transactions. + * - If a hex string, it will drop the transaction with the matching hash. + * @param sealAfter - Whether to seal a block after dropping the transaction(s). Defaults to false. + */ + dropTxn: (extrinsic?: { module: string; method: string } | HexString, sealAfter = false) => + NodeBspNet.dropTransaction(this._api, extrinsic, sealAfter) + }; + + const remappedDockerNs = { + ...DockerBspNet, + onboardBsp: (options: { + bspSigner: KeyringPair; + name?: string; + rocksdb?: boolean; + bspKeySeed?: string; + bspId?: string; + bspStartingWeight?: bigint; + maxStorageCapacity?: number; + additionalArgs?: string[]; + }) => addBsp(this._api, options.bspSigner, options) + }; + + return Object.assign(this._api, { + /** + * Soon Deprecated. Use api.block.seal() instead. + * @see {@link sealBlock} + */ + sealBlock: this.sealBlock.bind(this), + /** + * Soon Deprecated. Use api.file.newStorageRequest() instead. + * @see {@link sendNewStorageRequest} + */ + sendNewStorageRequest: this.sendNewStorageRequest.bind(this), + /** + * Soon Deprecated. Use api.file.newBucket() instead. + * @see {@link createBucket} + */ + createBucket: this.createBucket.bind(this), + /** + * Soon Deprecated. 
Use api.assert.eventPresent() instead. + * @see {@link assertEvent} + */ + assertEvent: this.assertEvent.bind(this), + /** + * Soon Deprecated. Use api.assert.eventPresent() instead. + * @see {@link advanceToBlock} + */ + advanceToBlock: this.advanceToBlock.bind(this), + /** + * Assertions namespace + * Provides methods for asserting various conditions in the BSP network tests. + */ + assert: remappedAssertNs, + /** + * Waits namespace + * Contains methods for waiting on specific events or conditions in the BSP network. + */ + wait: remappedWaitsNs, + /** + * File operations namespace + * Offers methods for file-related operations in the BSP network, such as creating buckets and storage requests. + */ + file: remappedFileNs, + /** + * Node operations namespace + * Provides methods for interacting with and manipulating nodes in the BSP network. + */ + node: remappedNodeNs, + /** + * Block operations namespace + * Contains methods for manipulating and interacting with blocks in the BSP network. + */ + block: remappedBlockNs, + /** + * StorageHub Constants namespace + * Contains static data useful for testing the BSP network. + */ + shConsts: ShConsts, + /** + * Docker operations namespace + * Offers methods for interacting with Docker containers in the BSP network test environment. + */ + docker: remappedDockerNs, + [Symbol.asyncDispose]: this.disconnect.bind(this) + }) satisfies BspNetApi; + } + + async [Symbol.asyncDispose]() { + await this._api.disconnect(); + } +} + +/** + * Represents an enhanced API for interacting with StorageHub BSPNet. + * This type extends the standard Polkadot API with additional methods and namespaces + * specifically designed for testing and interacting with a StorageHub BSP network. 
+ *
+ * It includes:
+ * - Extended assertion capabilities (@see {@link Assertions})
+ * - Waiting utilities for BSP-specific events (@see {@link Waits})
+ * - File and bucket operations (@see {@link Files})
+ * - Block manipulation and advancement utilities (@see {@link BspNetBlock})
+ * - Node interaction methods (@see {@link NodeBspNet})
+ * - Docker container management for BSP testing (@see {@link DockerBspNet})
+ * - StorageHub constants (@see {@link ShConsts})
+ *
+ * This API is created using the BspNetTestApi.create() static method and provides
+ * a comprehensive toolkit for testing and developing BSP network functionality.
+ */
+export type EnrichedBspApi = Awaited<ReturnType<typeof BspNetTestApi.create>>;
diff --git a/test/util/constants.ts b/test/util/constants.ts
index 418665d52..ab359b43e 100644
--- a/test/util/constants.ts
+++ b/test/util/constants.ts
@@ -1,5 +1,5 @@
-export const UNIT = 1_000_000_000_000n;
-export const MILLIUNIT = 1_000_000_000n;
-export const MICROUNIT = 1_000_000n;
-export const ROUGH_TRANSFER_FEE = 1_482_409_632n; // This is just naive copypaste from explorer
-export const DOCKER_IMAGE = "storage-hub:local";
+export const UNIT = 1_000_000_000_000n;
+export const MILLIUNIT = 1_000_000_000n;
+export const MICROUNIT = 1_000_000n;
+export const ROUGH_TRANSFER_FEE = 1_482_409_632n; // This is just naive copypaste from explorer
+export const DOCKER_IMAGE = "storage-hub:local";
diff --git a/test/util/fullNet/helpers.ts b/test/util/fullNet/helpers.ts
index 8a78181b4..dc4f25f5d 100644
--- a/test/util/fullNet/helpers.ts
+++ b/test/util/fullNet/helpers.ts
@@ -1,279 +1,279 @@
-import "@storagehub/api-augment";
-import { v2 as compose } from "docker-compose";
-import * as child_process from "node:child_process";
-import { execSync } from "node:child_process";
-import path from "node:path";
-import * as util from "node:util";
-import { bspKey, mspKey, shUser } from "../pjsKeyring";
-import { showContainers } from "../bspNet/docker";
-import type { BspNetConfig } from
"../bspNet/types"; -import * as ShConsts from "../bspNet/consts.ts"; -import { BspNetTestApi, type EnrichedBspApi } from "../bspNet/test-api.ts"; -import invariant from "tiny-invariant"; -import * as fs from "node:fs"; -import { parse, stringify } from "yaml"; -import { forceSignupBsp } from "../bspNet/helpers.ts"; - -const exec = util.promisify(child_process.exec); - -export const getContainerIp = async (containerName: string, verbose = false): Promise => { - const maxRetries = 60; - const sleepTime = 500; - - for (let i = 0; i < maxRetries; i++) { - verbose && console.log(`Waiting for ${containerName} to launch...`); - - // TODO: Replace with dockerode command - try { - const { stdout } = await exec( - `docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${containerName}` - ); - return stdout.trim(); - } catch { - await new Promise((resolve) => setTimeout(resolve, sleepTime)); - } - } - // TODO: Replace with dockerode - execSync("docker ps -a", { stdio: "inherit" }); - try { - execSync("docker logs docker-sh-bsp-1", { stdio: "inherit" }); - execSync("docker logs docker-sh-user-1", { stdio: "inherit" }); - } catch (e) { - console.log(e); - } - console.log( - `Error fetching container IP for ${containerName} after ${ - (maxRetries * sleepTime) / 1000 - } seconds` - ); - showContainers(); - throw "Error fetching container IP"; -}; - -export const checkNodeAlive = async (url: string, verbose = false) => getContainerIp(url, verbose); -export const getContainerPeerId = async (url: string, verbose = false) => { - const maxRetries = 60; - const sleepTime = 500; - - const payload = { - id: "1", - jsonrpc: "2.0", - method: "system_localPeerId", - params: [] - }; - - for (let i = 0; i < maxRetries; i++) { - verbose && console.log(`Waiting for node at ${url} to launch...`); - - try { - const response = await fetch(url, { - method: "POST", - headers: { - "Content-Type": "application/json" - }, - body: JSON.stringify(payload) - }); - - 
invariant(response.ok, `HTTP error! status: ${response.status}`); - - const resp = (await response.json()) as any; - return resp.result as string; - } catch { - await new Promise((resolve) => setTimeout(resolve, sleepTime)); - } - } - - console.log(`Error fetching peerId from ${url} after ${(maxRetries * sleepTime) / 1000} seconds`); - showContainers(); - throw `Error fetching peerId from ${url}`; -}; - -export const runFullNet = async (bspNetConfig: BspNetConfig) => { - let userApi: EnrichedBspApi | undefined; - try { - console.log(`SH user id: ${shUser.address}`); - console.log(`SH BSP id: ${bspKey.address}`); - let file = "local-dev-full-compose.yml"; - if (bspNetConfig.rocksdb) { - file = "local-dev-full-rocksdb-compose.yml"; - } - - const composeFilePath = path.resolve(process.cwd(), "..", "docker", file); - const cwd = path.resolve(process.cwd(), "..", "docker"); - const composeFile = fs.readFileSync(composeFilePath, "utf8"); - const composeYaml = parse(composeFile); - if (bspNetConfig.extrinsicRetryTimeout) { - composeYaml.services["sh-bsp"].command.push( - `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` - ); - composeYaml.services["sh-msp"].command.push( - `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` - ); - composeYaml.services["sh-user"].command.push( - `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` - ); - } - - const updatedCompose = stringify(composeYaml); - - if (bspNetConfig.noisy) { - await compose.upOne("toxiproxy", { cwd: cwd, configAsString: updatedCompose, log: true }); - } - - await compose.upOne("sh-bsp", { cwd: cwd, configAsString: updatedCompose, log: true }); - - const bspIp = await getContainerIp( - bspNetConfig.noisy ? 
"toxiproxy" : ShConsts.NODE_INFOS.bsp.containerName - ); - - if (bspNetConfig.noisy) { - console.log(`toxiproxy IP: ${bspIp}`); - } else { - console.log(`sh-bsp IP: ${bspIp}`); - } - - const bspPeerId = await getContainerPeerId( - `http://127.0.0.1:${ShConsts.NODE_INFOS.bsp.port}`, - true - ); - console.log(`sh-bsp Peer ID: ${bspPeerId}`); - - process.env.BSP_IP = bspIp; - process.env.BSP_PEER_ID = bspPeerId; - - await compose.upOne("sh-msp", { - cwd: cwd, - configAsString: updatedCompose, - log: true, - env: { - ...process.env, - NODE_KEY: ShConsts.NODE_INFOS.msp.nodeKey, - BSP_IP: bspIp, - BSP_PEER_ID: bspPeerId - } - }); - - const mspId = await getContainerIp( - bspNetConfig.noisy ? "toxiproxy" : ShConsts.NODE_INFOS.msp.containerName - ); - - const mspPeerId = await getContainerPeerId(`http://127.0.0.1:${ShConsts.NODE_INFOS.msp.port}`); - console.log(`sh-msp Peer ID: ${mspPeerId}`); - - const multiAddressMsp = `/ip4/${mspId}/tcp/30350/p2p/${mspPeerId}`; - - await compose.upOne("sh-user", { - cwd: cwd, - configAsString: updatedCompose, - log: true, - env: { - ...process.env, - BSP_IP: bspIp, - BSP_PEER_ID: bspPeerId - } - }); - - const peerIDUser = await getContainerPeerId( - `http://127.0.0.1:${ShConsts.NODE_INFOS.user.port}` - ); - console.log(`sh-user Peer ID: ${peerIDUser}`); - - const multiAddressBsp = `/ip4/${bspIp}/tcp/30350/p2p/${bspPeerId}`; - - // Create Connection API Object to User Node - userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - // Give Balances - const amount = 10000n * 10n ** 12n; - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(bspKey.address, amount)) - ); - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspKey.address, amount)) - ); - await userApi.sealBlock( - userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(shUser.address, amount)) - ); - - await 
userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(1, 1))); - - // Make BSP - await forceSignupBsp({ - api: userApi, - who: bspKey.address, - multiaddress: multiAddressBsp, - bspId: ShConsts.DUMMY_BSP_ID, - capacity: bspNetConfig.capacity || ShConsts.CAPACITY_512, - weight: bspNetConfig.bspStartingWeight - }); - - // Sign up MSP - await userApi.sealBlock( - userApi.tx.sudo.sudo( - userApi.tx.providers.forceMspSignUp( - mspKey.address, - ShConsts.DUMMY_MSP_ID, - bspNetConfig.capacity || ShConsts.CAPACITY_512, - [multiAddressMsp], - { - identifier: ShConsts.VALUE_PROP, - dataLimit: 500, - protocols: ["https", "ssh", "telnet"] - }, - mspKey.address - ) - ) - ); - } catch (e) { - console.error("Error ", e); - } finally { - userApi?.disconnect(); - } -}; - -export const runInitialisedFullNet = async (bspNetConfig: BspNetConfig) => { - await runFullNet(bspNetConfig); - - let userApi: EnrichedBspApi | undefined; - try { - userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); - - /**** CREATE BUCKET AND ISSUE STORAGE REQUEST ****/ - const source = "res/whatsup.jpg"; - const destination = "test/smile.jpg"; - const bucketName = "nothingmuch-1"; - - const newBucketEventEvent = await userApi.createBucket(bucketName); - const newBucketEventDataBlob = - userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; - - invariant(newBucketEventDataBlob, "Event doesn't match Type"); - - const { fingerprint, file_size, location } = - await userApi.rpc.storagehubclient.loadFileInStorage( - source, - destination, - ShConsts.NODE_INFOS.user.AddressId, - newBucketEventDataBlob.bucketId - ); - - await userApi.sealBlock( - userApi.tx.fileSystem.issueStorageRequest( - newBucketEventDataBlob.bucketId, - location, - fingerprint, - file_size, - ShConsts.DUMMY_MSP_ID, - [ShConsts.NODE_INFOS.user.expectedPeerId] - ), - shUser - ); - - await userApi.wait.bspVolunteer(); - await userApi.wait.bspStored(); - 
} catch (e) {
-    console.error("Error ", e);
-  } finally {
-    userApi?.disconnect();
-  }
-};
+import "@storagehub/api-augment";
+import { v2 as compose } from "docker-compose";
+import * as child_process from "node:child_process";
+import { execSync } from "node:child_process";
+import path from "node:path";
+import * as util from "node:util";
+import { bspKey, mspKey, shUser } from "../pjsKeyring";
+import { showContainers } from "../bspNet/docker";
+import type { BspNetConfig } from "../bspNet/types";
+import * as ShConsts from "../bspNet/consts.ts";
+import { BspNetTestApi, type EnrichedBspApi } from "../bspNet/test-api.ts";
+import invariant from "tiny-invariant";
+import * as fs from "node:fs";
+import { parse, stringify } from "yaml";
+import { forceSignupBsp } from "../bspNet/helpers.ts";
+
+const exec = util.promisify(child_process.exec);
+
+export const getContainerIp = async (containerName: string, verbose = false): Promise<string> => {
+  const maxRetries = 60;
+  const sleepTime = 500;
+
+  for (let i = 0; i < maxRetries; i++) {
+    verbose && console.log(`Waiting for ${containerName} to launch...`);
+
+    // TODO: Replace with dockerode command
+    try {
+      const { stdout } = await exec(
+        `docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${containerName}`
+      );
+      return stdout.trim();
+    } catch {
+      await new Promise((resolve) => setTimeout(resolve, sleepTime));
+    }
+  }
+  // TODO: Replace with dockerode
+  execSync("docker ps -a", { stdio: "inherit" });
+  try {
+    execSync("docker logs docker-sh-bsp-1", { stdio: "inherit" });
+    execSync("docker logs docker-sh-user-1", { stdio: "inherit" });
+  } catch (e) {
+    console.log(e);
+  }
+  console.log(
+    `Error fetching container IP for ${containerName} after ${
+      (maxRetries * sleepTime) / 1000
+    } seconds`
+  );
+  showContainers();
+  throw "Error fetching container IP";
+};
+
+export const checkNodeAlive = async (url: string, verbose = false) => getContainerIp(url, verbose);
+export const 
getContainerPeerId = async (url: string, verbose = false) => { + const maxRetries = 60; + const sleepTime = 500; + + const payload = { + id: "1", + jsonrpc: "2.0", + method: "system_localPeerId", + params: [] + }; + + for (let i = 0; i < maxRetries; i++) { + verbose && console.log(`Waiting for node at ${url} to launch...`); + + try { + const response = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json" + }, + body: JSON.stringify(payload) + }); + + invariant(response.ok, `HTTP error! status: ${response.status}`); + + const resp = (await response.json()) as any; + return resp.result as string; + } catch { + await new Promise((resolve) => setTimeout(resolve, sleepTime)); + } + } + + console.log(`Error fetching peerId from ${url} after ${(maxRetries * sleepTime) / 1000} seconds`); + showContainers(); + throw `Error fetching peerId from ${url}`; +}; + +export const runFullNet = async (bspNetConfig: BspNetConfig) => { + let userApi: EnrichedBspApi | undefined; + try { + console.log(`SH user id: ${shUser.address}`); + console.log(`SH BSP id: ${bspKey.address}`); + let file = "local-dev-full-compose.yml"; + if (bspNetConfig.rocksdb) { + file = "local-dev-full-rocksdb-compose.yml"; + } + + const composeFilePath = path.resolve(process.cwd(), "..", "docker", file); + const cwd = path.resolve(process.cwd(), "..", "docker"); + const composeFile = fs.readFileSync(composeFilePath, "utf8"); + const composeYaml = parse(composeFile); + if (bspNetConfig.extrinsicRetryTimeout) { + composeYaml.services["sh-bsp"].command.push( + `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` + ); + composeYaml.services["sh-msp"].command.push( + `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` + ); + composeYaml.services["sh-user"].command.push( + `--extrinsic-retry-timeout=${bspNetConfig.extrinsicRetryTimeout}` + ); + } + + const updatedCompose = stringify(composeYaml); + + if (bspNetConfig.noisy) { + await 
compose.upOne("toxiproxy", { cwd: cwd, configAsString: updatedCompose, log: true }); + } + + await compose.upOne("sh-bsp", { cwd: cwd, configAsString: updatedCompose, log: true }); + + const bspIp = await getContainerIp( + bspNetConfig.noisy ? "toxiproxy" : ShConsts.NODE_INFOS.bsp.containerName + ); + + if (bspNetConfig.noisy) { + console.log(`toxiproxy IP: ${bspIp}`); + } else { + console.log(`sh-bsp IP: ${bspIp}`); + } + + const bspPeerId = await getContainerPeerId( + `http://127.0.0.1:${ShConsts.NODE_INFOS.bsp.port}`, + true + ); + console.log(`sh-bsp Peer ID: ${bspPeerId}`); + + process.env.BSP_IP = bspIp; + process.env.BSP_PEER_ID = bspPeerId; + + await compose.upOne("sh-msp", { + cwd: cwd, + configAsString: updatedCompose, + log: true, + env: { + ...process.env, + NODE_KEY: ShConsts.NODE_INFOS.msp.nodeKey, + BSP_IP: bspIp, + BSP_PEER_ID: bspPeerId + } + }); + + const mspId = await getContainerIp( + bspNetConfig.noisy ? "toxiproxy" : ShConsts.NODE_INFOS.msp.containerName + ); + + const mspPeerId = await getContainerPeerId(`http://127.0.0.1:${ShConsts.NODE_INFOS.msp.port}`); + console.log(`sh-msp Peer ID: ${mspPeerId}`); + + const multiAddressMsp = `/ip4/${mspId}/tcp/30350/p2p/${mspPeerId}`; + + await compose.upOne("sh-user", { + cwd: cwd, + configAsString: updatedCompose, + log: true, + env: { + ...process.env, + BSP_IP: bspIp, + BSP_PEER_ID: bspPeerId + } + }); + + const peerIDUser = await getContainerPeerId( + `http://127.0.0.1:${ShConsts.NODE_INFOS.user.port}` + ); + console.log(`sh-user Peer ID: ${peerIDUser}`); + + const multiAddressBsp = `/ip4/${bspIp}/tcp/30350/p2p/${bspPeerId}`; + + // Create Connection API Object to User Node + userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + // Give Balances + const amount = 10000n * 10n ** 12n; + await userApi.sealBlock( + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(bspKey.address, amount)) + ); + await userApi.sealBlock( + 
userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(mspKey.address, amount)) + ); + await userApi.sealBlock( + userApi.tx.sudo.sudo(userApi.tx.balances.forceSetBalance(shUser.address, amount)) + ); + + await userApi.sealBlock(userApi.tx.sudo.sudo(userApi.tx.fileSystem.setGlobalParameters(1, 1))); + + // Make BSP + await forceSignupBsp({ + api: userApi, + who: bspKey.address, + multiaddress: multiAddressBsp, + bspId: ShConsts.DUMMY_BSP_ID, + capacity: bspNetConfig.capacity || ShConsts.CAPACITY_512, + weight: bspNetConfig.bspStartingWeight + }); + + // Sign up MSP + await userApi.sealBlock( + userApi.tx.sudo.sudo( + userApi.tx.providers.forceMspSignUp( + mspKey.address, + ShConsts.DUMMY_MSP_ID, + bspNetConfig.capacity || ShConsts.CAPACITY_512, + [multiAddressMsp], + { + identifier: ShConsts.VALUE_PROP, + dataLimit: 500, + protocols: ["https", "ssh", "telnet"] + }, + mspKey.address + ) + ) + ); + } catch (e) { + console.error("Error ", e); + } finally { + userApi?.disconnect(); + } +}; + +export const runInitialisedFullNet = async (bspNetConfig: BspNetConfig) => { + await runFullNet(bspNetConfig); + + let userApi: EnrichedBspApi | undefined; + try { + userApi = await BspNetTestApi.create(`ws://127.0.0.1:${ShConsts.NODE_INFOS.user.port}`); + + /**** CREATE BUCKET AND ISSUE STORAGE REQUEST ****/ + const source = "res/whatsup.jpg"; + const destination = "test/smile.jpg"; + const bucketName = "nothingmuch-1"; + + const newBucketEventEvent = await userApi.createBucket(bucketName); + const newBucketEventDataBlob = + userApi.events.fileSystem.NewBucket.is(newBucketEventEvent) && newBucketEventEvent.data; + + invariant(newBucketEventDataBlob, "Event doesn't match Type"); + + const { fingerprint, file_size, location } = + await userApi.rpc.storagehubclient.loadFileInStorage( + source, + destination, + ShConsts.NODE_INFOS.user.AddressId, + newBucketEventDataBlob.bucketId + ); + + await userApi.sealBlock( + userApi.tx.fileSystem.issueStorageRequest( + 
newBucketEventDataBlob.bucketId, + location, + fingerprint, + file_size, + ShConsts.DUMMY_MSP_ID, + [ShConsts.NODE_INFOS.user.expectedPeerId] + ), + shUser + ); + + await userApi.wait.bspVolunteer(); + await userApi.wait.bspStored(); + } catch (e) { + console.error("Error ", e); + } finally { + userApi?.disconnect(); + } +}; diff --git a/test/util/pjsKeyring.ts b/test/util/pjsKeyring.ts index af79626de..b1b19d630 100644 --- a/test/util/pjsKeyring.ts +++ b/test/util/pjsKeyring.ts @@ -1,56 +1,56 @@ -import { Keyring } from "@polkadot/api"; -import { randomBytes } from "node:crypto"; -import { cryptoWaitReady } from "@polkadot/util-crypto"; - -export const keyring = new Keyring({ type: "sr25519" }); -await cryptoWaitReady(); - -export const alice = keyring.addFromUri("//Alice", { name: "Alice default" }); - -export const bob = keyring.addFromUri("//Bob", { name: "Bob default" }); - -export const charlie = keyring.addFromUri("//Charlie", { - name: "Charlie default" -}); - -export const dave = keyring.addFromUri("//Dave", { name: "Dave default" }); - -export const eve = keyring.addFromUri("//Eve", { name: "Eve default" }); - -export const ferdie = keyring.addFromUri("//Ferdie", { - name: "Ferdie default" -}); - -export const bspSeed = "//Sh-BSP"; -export const bspKey = keyring.addFromUri(bspSeed, { name: "Sh-BSP" }); -export const bspDownSeed = "//Sh-BSP-Down"; -export const bspDownKey = keyring.addFromUri(bspDownSeed, { name: "Sh-BSP-Down" }); -export const bspTwoSeed = "//Sh-BSP-Two"; -export const bspTwoKey = keyring.addFromUri(bspTwoSeed, { name: "Sh-BSP-Two" }); -export const bspThreeSeed = "//Sh-BSP-Three"; -export const bspThreeKey = keyring.addFromUri(bspThreeSeed, { name: "Sh-BSP-Three" }); - -export const mspSeed = "//Sh-MSP"; -export const mspKey = keyring.addFromUri(mspSeed, { name: "Sh-MSP" }); -export const mspDownSeed = "//Sh-MSP-Down"; -export const mspDownKey = keyring.addFromUri(mspDownSeed, { name: "Sh-MSP-Down" }); -export const mspTwoSeed = 
"//Sh-MSP-Two"; -export const mspTwoKey = keyring.addFromUri(mspTwoSeed, { name: "Sh-MSP-Two" }); - -export const collator = keyring.addFromUri("//Sh-collator", { - name: "Sh-collator" -}); - -export const shUser = keyring.addFromUri("//Sh-User", { - name: "Sh-User" -}); - -export const sudo = alice; - -export const createSr25519Account = async (privateKey?: string) => { - const rand = `0x${randomBytes(32).toString("hex")}`; - console.log("random", rand); - const keyring = new Keyring({ type: "sr25519" }); - const account = keyring.addFromUri(privateKey || rand); - return account; -}; +import { Keyring } from "@polkadot/api"; +import { randomBytes } from "node:crypto"; +import { cryptoWaitReady } from "@polkadot/util-crypto"; + +export const keyring = new Keyring({ type: "sr25519" }); +await cryptoWaitReady(); + +export const alice = keyring.addFromUri("//Alice", { name: "Alice default" }); + +export const bob = keyring.addFromUri("//Bob", { name: "Bob default" }); + +export const charlie = keyring.addFromUri("//Charlie", { + name: "Charlie default" +}); + +export const dave = keyring.addFromUri("//Dave", { name: "Dave default" }); + +export const eve = keyring.addFromUri("//Eve", { name: "Eve default" }); + +export const ferdie = keyring.addFromUri("//Ferdie", { + name: "Ferdie default" +}); + +export const bspSeed = "//Sh-BSP"; +export const bspKey = keyring.addFromUri(bspSeed, { name: "Sh-BSP" }); +export const bspDownSeed = "//Sh-BSP-Down"; +export const bspDownKey = keyring.addFromUri(bspDownSeed, { name: "Sh-BSP-Down" }); +export const bspTwoSeed = "//Sh-BSP-Two"; +export const bspTwoKey = keyring.addFromUri(bspTwoSeed, { name: "Sh-BSP-Two" }); +export const bspThreeSeed = "//Sh-BSP-Three"; +export const bspThreeKey = keyring.addFromUri(bspThreeSeed, { name: "Sh-BSP-Three" }); + +export const mspSeed = "//Sh-MSP"; +export const mspKey = keyring.addFromUri(mspSeed, { name: "Sh-MSP" }); +export const mspDownSeed = "//Sh-MSP-Down"; +export const mspDownKey = 
keyring.addFromUri(mspDownSeed, { name: "Sh-MSP-Down" }); +export const mspTwoSeed = "//Sh-MSP-Two"; +export const mspTwoKey = keyring.addFromUri(mspTwoSeed, { name: "Sh-MSP-Two" }); + +export const collator = keyring.addFromUri("//Sh-collator", { + name: "Sh-collator" +}); + +export const shUser = keyring.addFromUri("//Sh-User", { + name: "Sh-User" +}); + +export const sudo = alice; + +export const createSr25519Account = async (privateKey?: string) => { + const rand = `0x${randomBytes(32).toString("hex")}`; + console.log("random", rand); + const keyring = new Keyring({ type: "sr25519" }); + const account = keyring.addFromUri(privateKey || rand); + return account; +};