diff --git a/src/assets/assets.cpp b/src/assets/assets.cpp
index a241c57312..03cb5249f0 100644
--- a/src/assets/assets.cpp
+++ b/src/assets/assets.cpp
@@ -674,13 +674,23 @@ bool TransferAssetFromScript(const CScript& scriptPubKey, CAssetTransfer& assetT
     strAddress = EncodeDestination(destination);

     std::vector<unsigned char> vchTransferAsset;
-    vchTransferAsset.insert(vchTransferAsset.end(), scriptPubKey.begin() + 31, scriptPubKey.end());
+
+    if (AreTransferScriptsSizeDeployed()) {
+        // Before kawpow activation we used the hardcoded 31 to find the data.
+        // This created a bug where large transfer scripts would fail to serialize.
+        // This fixes that issue (https://github.com/RavenProject/Ravencoin/issues/752)
+        // TODO: after the kawpow fork goes active, we should be able to remove this if/else statement and just use this line.
+        vchTransferAsset.insert(vchTransferAsset.end(), scriptPubKey.begin() + nStartingIndex, scriptPubKey.end());
+    } else {
+        vchTransferAsset.insert(vchTransferAsset.end(), scriptPubKey.begin() + 31, scriptPubKey.end());
+    }
+
     CDataStream ssAsset(vchTransferAsset, SER_NETWORK, PROTOCOL_VERSION);

     try {
         ssAsset >> assetTransfer;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the transfer asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the transfer asset from the stream: %s", e.what());
         return false;
     }

@@ -705,7 +715,7 @@ bool AssetFromScript(const CScript& scriptPubKey, CNewAsset& assetNew, std::stri
     try {
         ssAsset >> assetNew;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the asset from the stream: %s", e.what());
         return false;
     }

@@ -730,7 +740,7 @@ bool MsgChannelAssetFromScript(const CScript& scriptPubKey, CNewAsset& assetNew,
     try {
         ssAsset >> assetNew;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the msg channel asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the msg channel asset from the stream: %s", e.what());
         return false;
     }

@@ -755,7 +765,7 @@ bool QualifierAssetFromScript(const CScript& scriptPubKey, CNewAsset& assetNew,
     try {
         ssAsset >> assetNew;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the qualifier asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the qualifier asset from the stream: %s", e.what());
         return false;
     }

@@ -780,7 +790,7 @@ bool RestrictedAssetFromScript(const CScript& scriptPubKey, CNewAsset& assetNew,
     try {
         ssAsset >> assetNew;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the restricted asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the restricted asset from the stream: %s", e.what());
         return false;
     }

@@ -805,7 +815,7 @@ bool OwnerAssetFromScript(const CScript& scriptPubKey, std::string& assetName, s
     try {
         ssOwner >> assetName;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the owner asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the owner asset from the stream: %s", e.what());
         return false;
     }

@@ -830,7 +840,7 @@ bool ReissueAssetFromScript(const CScript& scriptPubKey, CReissueAsset& reissue,
     try {
         ssReissue >> reissue;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the reissue asset from the stream: " << e.what() << std::endl;
+        error("Failed to get the reissue asset from the stream: %s", e.what());
         return false;
     }

@@ -855,7 +865,7 @@ bool AssetNullDataFromScript(const CScript& scriptPubKey, CNullAssetTxData& asse
     try {
         ssData >> assetData;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the asset tx data from the stream: " << e.what() << std::endl;
+        error("Failed to get the null asset tx data from the stream: %s", e.what());
         return false;
     }

@@ -875,7 +885,7 @@ bool GlobalAssetNullDataFromScript(const CScript& scriptPubKey, CNullAssetTxData
     try {
         ssData >> assetData;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the global restriction asset tx data from the stream: " << e.what() << std::endl;
+        error("Failed to get the global restriction asset tx data from the stream: %s", e.what());
         return false;
     }

@@ -895,7 +905,7 @@ bool AssetNullVerifierDataFromScript(const CScript& scriptPubKey, CNullAssetTxVe
     try {
         ssData >> verifierData;
     } catch(std::exception& e) {
-        std::cout << "Failed to get the verifier string from the stream: " << e.what() << std::endl;
+        error("Failed to get the verifier string from the stream: %s", e.what());
         return false;
     }
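
Background, not part of the patch: the serialized CAssetTransfer sits inside a data push that follows the asset marker at the end of a standard P2PKH script, and the old hardcoded 31 bakes in a one-byte push length. A payload large enough to need OP_PUSHDATA1 (or larger) gains extra length bytes, so the data no longer starts at byte 31 — the deserialization failure tracked in issue #752. A minimal sketch of the offset arithmetic, assuming the payload sits in a single data push and using the standard script push encodings; the helper name is illustrative, and nStartingIndex in the patch is computed elsewhere in assets.cpp:

#include <cstddef>
#include <vector>

// Hypothetical helper: given the position of the push opcode that carries the
// asset payload, return the position of the payload itself.
static size_t PayloadStart(const std::vector<unsigned char>& script, size_t pushPos)
{
    const unsigned char op = script[pushPos];
    if (op <= 0x4b) return pushPos + 1;  // direct push: opcode byte is the length (<= 75 bytes)
    if (op == 0x4c) return pushPos + 2;  // OP_PUSHDATA1: one extra length byte
    if (op == 0x4d) return pushPos + 3;  // OP_PUSHDATA2: two extra length bytes
    return pushPos + 5;                  // OP_PUSHDATA4: four extra length bytes
}
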
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index c46c19b77a..079b2beb4a 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -146,6 +146,11 @@ class CMainParams : public CChainParams {
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nTimeout = 1610542800; // UTC: Wed Jan 13 2021 13:00:00
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideRuleChangeActivationThreshold = 1714; // Approx 85% of 2016
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideMinerConfirmationWindow = 2016;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].bit = 8;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nStartTime = 1588788000; // UTC: Wed May 06 2020 18:00:00
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nTimeout = 1620324000; // UTC: Thu May 06 2021 18:00:00
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideRuleChangeActivationThreshold = 1714; // Approx 85% of 2016
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideMinerConfirmationWindow = 2016;

         // The best chain should have at least this much work
         consensus.nMinimumChainWork = uint256S("0x00000000000000000000000000000000000000000000001445cb2bc4398ebded"); // Block 1040000
@@ -248,7 +253,7 @@ class CMainParams : public CChainParams {
         nMessagingActivationBlock = 0; // Messaging activated block height // TODO after messaging goes active on mainnet
         nRestrictedActivationBlock = 0; // Restricted activated block height // TODO after restricted goes active on mainnet

-        nKAAAWWWPOWActivationTime = 4294967295;
+        nKAAAWWWPOWActivationTime = 1588788000; // UTC: Wed May 06 2020 18:00:00
         nKAWPOWActivationTime = nKAAAWWWPOWActivationTime;
         /** RVN End **/
     }
@@ -291,6 +296,11 @@ class CTestNetParams : public CChainParams {
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nTimeout = 1577257200; // UTC: Wed Dec 25 2019 07:00:00
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideRuleChangeActivationThreshold = 1310;
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideMinerConfirmationWindow = 2016;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].bit = 8;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nStartTime = 1586973600; // UTC: Wed Apr 15 2020 18:00:00
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nTimeout = 1618509600; // UTC: Thu Apr 15 2021 18:00:00
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideRuleChangeActivationThreshold = 1310;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideMinerConfirmationWindow = 2016;

         // The best chain should have at least this much work.
         consensus.nMinimumChainWork = uint256S("0x00");
@@ -495,6 +505,11 @@ class CRegTestParams : public CChainParams {
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nTimeout = 999999999999ULL; // UTC: Wed Dec 25 2019 07:00:00
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideRuleChangeActivationThreshold = 108;
         consensus.vDeployments[Consensus::DEPLOYMENT_MSG_REST_ASSETS].nOverrideMinerConfirmationWindow = 144;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].bit = 8;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nStartTime = 0;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nTimeout = 999999999999ULL;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideRuleChangeActivationThreshold = 208;
+        consensus.vDeployments[Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE].nOverrideMinerConfirmationWindow = 288;

         // The best chain should have at least this much work.
         consensus.nMinimumChainWork = uint256S("0x00");
@@ -640,7 +655,10 @@ class CRegTestParams : public CChainParams {
         nMessagingActivationBlock = 0; // Messaging activated block height
         nRestrictedActivationBlock = 0; // Restricted activated block height

-        nKAAAWWWPOWActivationTime = 1582830167;
+        // TODO: we need to figure out what to do with this for regtest; it affects the unit tests.
+        // For now we can use a timestamp very far away.
+        // If you are looking to test the kawpow hashing function in regtest, you will need to change this number.
+        nKAAAWWWPOWActivationTime = 3582830167;
         nKAWPOWActivationTime = nKAAAWWWPOWActivationTime;
         /** RVN End **/
     }
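
Note, not part of the patch: the new deployment follows the same BIP9-style version-bits pattern as the existing "assets" and "messaging_restricted" forks. Bit 8 is the block-version bit miners signal with, and the override threshold of 1714 out of a 2016-block window is roughly 85%. A small illustration of what those numbers mean, assuming the standard BIP9 top-bits signalling prefix (0x20000000):

#include <cstdint>
#include <cstdio>

int main()
{
    const int bit = 8;                                 // DEPLOYMENT_TRANSFER_SCRIPT_SIZE bit above
    const uint32_t VERSIONBITS_TOP_BITS = 0x20000000;  // BIP9 signalling prefix (assumed)
    const uint32_t mask = 1u << bit;                   // 0x00000100

    const int threshold = 1714, window = 2016;         // mainnet override values above
    std::printf("signalling block version: 0x%08x\n", VERSIONBITS_TOP_BITS | mask);
    std::printf("activation threshold: %d/%d blocks (%.1f%%)\n",
                threshold, window, 100.0 * threshold / window);
}
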
diff --git a/src/consensus/consensus.h b/src/consensus/consensus.h
index 55764cd29a..6c746cc0ef 100644
--- a/src/consensus/consensus.h
+++ b/src/consensus/consensus.h
@@ -36,6 +36,7 @@ static const size_t MIN_SERIALIZABLE_TRANSACTION_WEIGHT = WITNESS_SCALE_FACTOR *
 //! it causes unused variable warnings when compiling. This UNUSED_VAR removes the unused warnings
 UNUSED_VAR static bool fAssetsIsActive = false;
 UNUSED_VAR static bool fRip5IsActive = false;
+UNUSED_VAR static bool fTransferScriptIsActive = false;

 unsigned int GetMaxBlockWeight();
 unsigned int GetMaxBlockSerializedSize();
diff --git a/src/consensus/params.h b/src/consensus/params.h
index c7fc4e35b4..8b255ac652 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -18,6 +18,7 @@ enum DeploymentPos
     DEPLOYMENT_TESTDUMMY,
     DEPLOYMENT_ASSETS, // Deployment of RIP2
     DEPLOYMENT_MSG_REST_ASSETS, // Delpoyment of RIP5 and Restricted assets
+    DEPLOYMENT_TRANSFER_SCRIPT_SIZE,
     // DEPLOYMENT_CSV, // Deployment of BIP68, BIP112, and BIP113.
     // DEPLOYMENT_SEGWIT, // Deployment of BIP141, BIP143, and BIP147.
     // NOTE: Also add new deployments to VersionBitsDeploymentInfo in versionbits.cpp
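
Note, not part of the patch: the NOTE comment above is why versionbits.cpp is touched later in this diff — its name table is indexed by DeploymentPos. For orientation, the per-window states the new deployment moves through mirror the ThresholdState enum in versionbits.h (reproduced here only as a reference sketch):

enum ThresholdState {
    THRESHOLD_DEFINED,    // before nStartTime
    THRESHOLD_STARTED,    // signalling window open, bit-8 blocks are counted
    THRESHOLD_LOCKED_IN,  // threshold reached within a 2016-block window
    THRESHOLD_ACTIVE,     // new rules enforced from the following window onward
    THRESHOLD_FAILED,     // nTimeout passed without locking in
};
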
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 296355ea5a..59bada524f 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -1642,6 +1642,14 @@ bool static ProcessMessage(CNode* pfrom, const std::string& strCommand, CDataStr
             return false;
         }

+        if (AreTransferScriptsSizeDeployed() && nVersion < KAWPOW_VERSION) {
+            LogPrintf("peer=%d using obsolete version %i; disconnecting because peer isn't signalling protocol version for kawpow support\n", pfrom->GetId(), nVersion);
+            connman->PushMessage(pfrom, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::REJECT, strCommand, REJECT_OBSOLETE,
+                               strprintf("Version must be %d or greater", KAWPOW_VERSION)));
+            pfrom->fDisconnect = true;
+            return false;
+        }
+
         if (nVersion == 10300)
             nVersion = 300;
         if (!vRecv.empty())
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index f6a4e14170..3edb09a0d8 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1432,6 +1432,7 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
     //BIP9SoftForkDescPushBack(bip9_softforks, "segwit", consensusParams, Consensus::DEPLOYMENT_SEGWIT);
     BIP9SoftForkDescPushBack(bip9_softforks, "assets", consensusParams, Consensus::DEPLOYMENT_ASSETS);
     BIP9SoftForkDescPushBack(bip9_softforks, "messaging_restricted", consensusParams, Consensus::DEPLOYMENT_MSG_REST_ASSETS);
+    BIP9SoftForkDescPushBack(bip9_softforks, "transfer_script", consensusParams, Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE);

     obj.push_back(Pair("softforks", softforks));
     obj.push_back(Pair("bip9_softforks", bip9_softforks));
diff --git a/src/validation.cpp b/src/validation.cpp
index eac7bc1bcd..cf4b0da065 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -3489,8 +3489,9 @@ static bool ActivateBestChainStep(CValidationState& state, const CChainParams& c
         if (!ConnectTip(state, chainparams, pindexConnect, pindexConnect == pindexMostWork ? pblock : std::shared_ptr<const CBlock>(), connectTrace, disconnectpool)) {
             if (state.IsInvalid()) {
                 // The block violates a consensus rule.
-                if (!state.CorruptionPossible())
+                if (!state.CorruptionPossible()) {
                     InvalidChainFound(vpindexToConnect.back());
+                }
                 state = CValidationState();
                 fInvalidFound = true;
                 fContinue = false;
@@ -4308,7 +4309,7 @@ bool ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, CValidatio
 }

 /** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
-static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock)
+static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const CDiskBlockPos* dbp, bool* fNewBlock, bool fFromLoad = false)
 {
     const CBlock& block = *pblock;
@@ -4358,11 +4359,17 @@ static bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, CValidation
     // Dont force the CheckBlock asset duplciates when checking from this state
     if (!CheckBlock(block, state, chainparams.GetConsensus(), true, true) ||
         !ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindex->pprev, currentActiveAssetCache)) {
-        if (state.IsInvalid() && !state.CorruptionPossible()) {
-            pindex->nStatus |= BLOCK_FAILED_VALID;
-            setDirtyBlockIndex.insert(pindex);
+        if (fFromLoad && state.GetRejectReason() == "bad-txns-transfer-asset-bad-deserialize") {
+            // keep going, we are only loading blocks from the database
+            CValidationState new_state;
+            state = new_state;
+        } else {
+            if (state.IsInvalid() && !state.CorruptionPossible()) {
+                pindex->nStatus |= BLOCK_FAILED_VALID;
+                setDirtyBlockIndex.insert(pindex);
+            }
+            return error("%s: %s", __func__, FormatStateMessage(state));
         }
-        return error("%s: %s", __func__, FormatStateMessage(state));
     }

     // Header is valid/has work, merkle tree and segwit merkle tree are good...RELAY NOW
@@ -5276,11 +5283,12 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
                 if (mapBlockIndex.count(hash) == 0 || (mapBlockIndex[hash]->nStatus & BLOCK_HAVE_DATA) == 0) {
                     LOCK(cs_main);
                     CValidationState state;
-                    if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) {
+                    if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr, true)) {
                         nLoaded++;
                     }
-                    if (state.IsError())
+                    if (state.IsError()) {
                         break;
+                    }
                 } else if (hash != chainparams.GetConsensus().hashGenesisBlock && mapBlockIndex[hash]->nHeight % 1000 == 0) {
                     LogPrint(BCLog::REINDEX, "Block Import: already had block %s at height %d\n", hash.ToString(), mapBlockIndex[hash]->nHeight);
                 }
@@ -5311,7 +5319,7 @@ bool LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, CDiskB
                             head.ToString());
                     LOCK(cs_main);
                     CValidationState dummy;
-                    if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr))
+                    if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr, true))
                     {
                         nLoaded++;
                         queue.push_back(pblockrecursive->GetHash());
@@ -5724,6 +5732,18 @@ bool AreMessagesDeployed() {
     return IsRip5Active();
 }

+bool AreTransferScriptsSizeDeployed() {
+
+    if (fTransferScriptIsActive)
+        return true;
+
+    const ThresholdState thresholdState = VersionBitsTipState(GetParams().GetConsensus(), Consensus::DEPLOYMENT_TRANSFER_SCRIPT_SIZE);
+    if (thresholdState == THRESHOLD_ACTIVE)
+        fTransferScriptIsActive = true;
+
+    return fTransferScriptIsActive;
+}
+
 bool AreRestrictedAssetsDeployed() {

     return IsRip5Active();
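
Note, not part of the patch: AreTransferScriptsSizeDeployed() above follows the same latch-once pattern as the existing fAssetsIsActive/fRip5IsActive flags in consensus.h — once the deployment is seen ACTIVE at the tip, the flag is cached and later calls skip the version-bits walk. Generalized as a sketch (the helper below is hypothetical; only the calls it makes appear in the patch):

// Cache-once check for a BIP9 deployment; fCachedActive would be one of the
// UNUSED_VAR flags declared in consensus/consensus.h.
static bool IsDeploymentActiveCached(Consensus::DeploymentPos pos, bool& fCachedActive)
{
    if (fCachedActive)
        return true;
    if (VersionBitsTipState(GetParams().GetConsensus(), pos) == THRESHOLD_ACTIVE)
        fCachedActive = true;
    return fCachedActive;
}

As with the existing flags, the latch is not reset on a reorg until restart; that trade-off is inherited from the pattern rather than introduced here.
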
diff --git a/src/validation.h b/src/validation.h
index 32f9225d02..7ac6f28421 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -597,6 +597,9 @@ bool AreRestrictedAssetsDeployed();

 bool IsRip5Active();

+
+bool AreTransferScriptsSizeDeployed();
+
 bool IsDGWActive(unsigned int nBlockNumber);
 bool IsMessagingActive(unsigned int nBlockNumber);
 bool IsRestrictedActive(unsigned int nBlockNumber);
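
Note, not part of the patch: the declaration above is what lets net_processing.cpp (earlier in this diff) gate peer connections on the fork. The rule it enforces can be read as a single predicate; KAWPOW_VERSION is the protocol version defined in version.h just below, and the function name here is illustrative:

static bool ShouldDisconnectForObsoleteVersion(int nPeerVersion)
{
    // Once the transfer-script deployment is active, only peers that signal
    // kawpow support (protocol version >= KAWPOW_VERSION) are kept.
    return AreTransferScriptsSizeDeployed() && nPeerVersion < KAWPOW_VERSION;
}
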
diff --git a/src/version.h b/src/version.h
index f41939dc08..13771d0002 100644
--- a/src/version.h
+++ b/src/version.h
@@ -32,6 +32,9 @@ static const int ASSETDATA_VERSION = 70017;
 //! getassetdata reutrn asstnotfound, and assetdata doesn't have blockhash in the data
 static const int X16RV2_VERSION = 70025;

+//! In this version, kawpow support was added; peers must signal at least this protocol version once the transfer script deployment is active
+static const int KAWPOW_VERSION = 70027;
+
 //! disconnect from peers older than this proto version
 //!!! Anytime this value is changed please also update the "MY_VERSION" value to match in the
 //!!! ./test/functional/test_framework/mininode.py file. Not doing so will cause verack to fail!
@@ -65,4 +68,5 @@ static const int ASSETDATA_VERSION_UPDATED = 70020;

 //! In this version, 'rip5 (messaging and restricted assets)' was introduced
 static const int MESSAGING_RESTRICTED_ASSETS_VERSION = 70026;
+
 #endif // RAVEN_VERSION_H
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index 39050d3f4b..cad9b3ad90 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -22,6 +22,10 @@ const struct VBDeploymentInfo VersionBitsDeploymentInfo[Consensus::MAX_VERSION_B
     {
         /*.name =*/ "messaging_restricted",
         /*.gbt_force =*/ true,
+    },
+    {
+        /*.name =*/ "transfer_script",
+        /*.gbt_force =*/ true,
     }
 };
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 15da44edd9..edf48ad703 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -272,7 +272,7 @@ bool ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue,
             // If a client has a wallet.dat that contains asset transactions, but we are syncing the chain.
             // we want to make sure that we don't fail to load this wallet transaction just because it is an asset transaction
             // before asset are active
-            if (state.GetRejectReason() != "bad-txns-is-asset-and-asset-not-active") {
+            if (state.GetRejectReason() != "bad-txns-is-asset-and-asset-not-active" && state.GetRejectReason() != "bad-txns-transfer-asset-bad-deserialize") {
                 strErr = state.GetRejectReason();
                 return false;
             }
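
Note, not part of the patch: the walletdb.cpp change simply adds a second tolerated reject reason to an inline string comparison. If more reasons ever need to be tolerated while loading wallet transactions, a small whitelist would keep ReadKeyValue() readable; the sketch below is illustrative only and uses the two strings from the patch:

#include <set>
#include <string>

static const std::set<std::string> g_walletLoadToleratedRejects = {
    "bad-txns-is-asset-and-asset-not-active",
    "bad-txns-transfer-asset-bad-deserialize",
};

static bool IsToleratedWalletLoadReject(const std::string& reason)
{
    return g_walletLoadToleratedRejects.count(reason) > 0;
}

Usage at the call site would then reduce to: if (!IsToleratedWalletLoadReject(state.GetRejectReason())) { strErr = state.GetRejectReason(); return false; }
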