diff --git a/include/pando-lib-galois/containers/host_cached_array.hpp b/include/pando-lib-galois/containers/host_cached_array.hpp
index 894cf66a..078d9e48 100644
--- a/include/pando-lib-galois/containers/host_cached_array.hpp
+++ b/include/pando-lib-galois/containers/host_cached_array.hpp
@@ -55,7 +55,7 @@ class HostCachedArray {
     assert(range.size() == m_data.size());
     size_ = 0;
     PANDO_CHECK_RETURN(m_data.initialize());
-    PANDO_CHECK_RETURN(galois::doAll(
+    PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
         range, m_data,
         +[](Range range, pando::GlobalRef>> data) {
           PANDO_CHECK(lift(data, initialize));
@@ -64,7 +64,7 @@ class HostCachedArray {
               ref, initialize,
               *(range.begin() + static_cast(pando::getCurrentPlace().node.id))));
         }));
-    PANDO_CHECK_RETURN(galois::doAll(
+    PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
         m_data, m_data,
         +[](decltype(m_data) complete, galois::HostIndexedMap> data) {
           for (std::uint64_t i = 0; i < data.size(); i++) {
@@ -79,7 +79,7 @@ class HostCachedArray {
   }
 
   void deinitialize() {
-    PANDO_CHECK(galois::doAll(
+    PANDO_CHECK(galois::doAllExplicitPolicy(
         m_data, +[](galois::HostIndexedMap> data) {
           const std::uint64_t i = static_cast(pando::getCurrentPlace().node.id);
           auto ref = data[i];
diff --git a/include/pando-lib-galois/containers/host_local_storage.hpp b/include/pando-lib-galois/containers/host_local_storage.hpp
index 035c5999..12dcae66 100644
--- a/include/pando-lib-galois/containers/host_local_storage.hpp
+++ b/include/pando-lib-galois/containers/host_local_storage.hpp
@@ -287,7 +287,7 @@ template
 [[nodiscard]] pando::Expected> copyToAllHosts(T&& cont) {
   galois::HostLocalStorage ret{};
   PANDO_CHECK_RETURN(ret.initialize());
-  PANDO_CHECK_RETURN(galois::doAll(
+  PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
       cont, ret, +[](T cont, pando::GlobalRef refcopy) {
         T copy;
         if (galois::localityOf(cont).node.id != pando::getCurrentPlace().node.id) {
diff --git a/include/pando-lib-galois/containers/pod_local_storage.hpp b/include/pando-lib-galois/containers/pod_local_storage.hpp
index 424c3626..c58a50fb 100644
--- a/include/pando-lib-galois/containers/pod_local_storage.hpp
+++ b/include/pando-lib-galois/containers/pod_local_storage.hpp
@@ -297,7 +297,7 @@ template
 [[nodiscard]] pando::Expected> copyToAllPods(T& cont) {
   galois::PodLocalStorage ret{};
   PANDO_CHECK_RETURN(ret.initialize());
-  PANDO_CHECK_RETURN(galois::doAll(
+  PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
       cont, ret, +[](T cont, pando::GlobalRef refcopy) {
         T copy;
         const std::uint64_t size = cont.size();
diff --git a/include/pando-lib-galois/containers/thread_local_storage.hpp b/include/pando-lib-galois/containers/thread_local_storage.hpp
index d59d9c3c..3aaafb3b 100644
--- a/include/pando-lib-galois/containers/thread_local_storage.hpp
+++ b/include/pando-lib-galois/containers/thread_local_storage.hpp
@@ -44,7 +44,7 @@ class ThreadLocalStorage {
   [[nodiscard]] pando::Status initialize() {
     PANDO_CHECK_RETURN(m_items.initialize());
-    PANDO_CHECK_RETURN(galois::doAll(
+    PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
         m_items, +[](pando::GlobalRef> ref) {
           const auto placeDims = pando::getPlaceDims();
           const auto threadDims = pando::getThreadDims();
@@ -60,7 +60,7 @@ class ThreadLocalStorage {
   }
 
   void deinitialize() {
-    PANDO_CHECK(galois::doAll(
+    PANDO_CHECK(galois::doAllExplicitPolicy(
         m_items, +[](pando::GlobalRef> ptr) {
           const auto placeDims = pando::getPlaceDims();
           const auto threadDims = pando::getThreadDims();
@@ -275,7 +275,7 @@ template
 [[nodiscard]] pando::Expected> copyToAllThreads(T& cont) {
   galois::ThreadLocalStorage ret{};
   PANDO_CHECK_RETURN(ret.initialize());
-  PANDO_CHECK_RETURN(galois::doAll(
+  PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
       cont, ret, +[](T cont, pando::GlobalRef refcopy) {
         T copy;
         const std::uint64_t size = cont.size();
diff --git a/include/pando-lib-galois/graphs/dist_local_csr.hpp b/include/pando-lib-galois/graphs/dist_local_csr.hpp
index 7b4c1d17..82500f14 100644
--- a/include/pando-lib-galois/graphs/dist_local_csr.hpp
+++ b/include/pando-lib-galois/graphs/dist_local_csr.hpp
@@ -854,7 +854,7 @@ class DistLocalCSR {
     galois::HostLocalStorage> pHV{};
     PANDO_CHECK_RETURN(pHV.initialize());
-    PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
+    PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
         partEdges, pHV,
         +[](HostLocalStorage>> partEdges,
             pando::GlobalRef> pHV) {
@@ -1206,7 +1206,7 @@ class DistLocalCSR {
    * @brief create CSR Caches
    */
   pando::Status generateCache() {
-    return galois::doAll(
+    return galois::doAllExplicitPolicy(
         arrayOfCSRs, arrayOfCSRs,
         +[](decltype(arrayOfCSRs) arrayOfCSRs, HostIndexedMap localCSRs) {
           for (std::uint64_t i = 0; i < localCSRs.size(); i++) {
diff --git a/include/pando-lib-galois/graphs/mirror_dist_local_csr.hpp b/include/pando-lib-galois/graphs/mirror_dist_local_csr.hpp
index 404c0f2d..83b4fdc8 100644
--- a/include/pando-lib-galois/graphs/mirror_dist_local_csr.hpp
+++ b/include/pando-lib-galois/graphs/mirror_dist_local_csr.hpp
@@ -386,7 +386,7 @@ class MirrorDistLocalCSR {
    * @brief reset the master bit sets of all hosts
    */
   void resetMasterBitSets() {
-    galois::doAll(
+    galois::doAllExplicitPolicy(
         masterBitSets, +[](pando::GlobalRef> masterBitSet) {
           fmapVoid(masterBitSet, fill, false);
         });
@@ -395,7 +395,7 @@ class MirrorDistLocalCSR {
    * @brief reset the mirror bit sets of all hosts
    */
   void resetMirrorBitSets() {
-    galois::doAll(
+    galois::doAllExplicitPolicy(
         mirrorBitSets, +[](pando::GlobalRef> mirrorBitSet) {
           fmapVoid(mirrorBitSet, fill, false);
         });
@@ -576,7 +576,7 @@ class MirrorDistLocalCSR {
     auto thisMDLCSR = *this;
     auto state = galois::make_tpl(thisMDLCSR, func, wgh);
-    PANDO_CHECK(galois::doAll(
+    PANDO_CHECK(galois::doAllExplicitPolicy(
         wgh, state, localMirrorToRemoteMasterOrderedTable,
         +[](decltype(state) state,
             pando::Array localMirrorToRemoteMasterOrderedMap) {
@@ -636,7 +636,7 @@ class MirrorDistLocalCSR {
     auto thisMDLCSR = *this;
     auto state = galois::make_tpl(thisMDLCSR, wgh);
-    PANDO_CHECK(galois::doAll(
+    PANDO_CHECK(galois::doAllExplicitPolicy(
         wgh, state, localMasterToRemoteMirrorTable,
         +[](decltype(state) state,
             pando::Vector> localMasterToRemoteMirrorMap) {
@@ -795,7 +795,7 @@ class MirrorDistLocalCSR {
     PANDO_CHECK_RETURN(mirrorBitSets.initialize());
     PANDO_CHECK_RETURN(masterBitSets.initialize());
     auto state = galois::make_tpl(masterRange, mirrorRange, mirrorBitSets);
-    PANDO_CHECK(galois::doAll(
+    PANDO_CHECK(galois::doAllExplicitPolicy(
         wgh, state, masterBitSets,
         +[](decltype(state) state, pando::GlobalRef> globalMasterBitSet) {
           auto [masterRange, mirrorRange, mirrorBitSets] = state;
@@ -839,7 +839,7 @@ class MirrorDistLocalCSR {
     // each host traverses its own localMirrorToRemoteMasterOrderedTable and send out the mapping to
     // the corresponding remote host append to the vector of vector where each vector is the mapping
     // from a specific host
-    galois::doAll(
+    galois::doAllExplicitPolicy(
         state, localMirrorToRemoteMasterOrderedTable,
         +[](decltype(state) state,
             pando::Array localMirrorToRemoteMasterOrderedMap) {
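Note on reading these hunks: the template argument lists have been stripped in this rendering of the patch, so calls appear as galois::doAllExplicitPolicy( with no <...>, and the concrete SchedulerPolicy chosen at each call site is not visible. The apparent intent of the doAll to doAllExplicitPolicy migration is that each loop names its scheduling policy at the call site instead of inheriting the library-wide default. Below is a rough standalone sketch of that pattern, assuming the policy is a compile-time template parameter; the names SchedulerPolicy, doAllExplicitPolicy, and the sequential loop body are illustrative stand-ins, not the real pando-lib-galois signatures.

    // Standalone sketch (not the pando-lib-galois API): a parallel-for style
    // wrapper that takes its scheduling policy as a compile-time parameter,
    // mirroring the doAll -> doAllExplicitPolicy migration in this patch.
    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class SchedulerPolicy { InferRandomCore, NodeOnly, NodeRandom };

    template <SchedulerPolicy Policy, typename Container, typename F>
    void doAllExplicitPolicy(Container& items, F func) {
      for (auto& item : items) {
        if constexpr (Policy == SchedulerPolicy::NodeRandom) {
          // A real scheduler would pick a random destination node per item here.
        }
        func(item);  // Sequential stand-in for spawning the work item.
      }
    }

    int main() {
      std::vector<std::uint64_t> work = {1, 2, 3};
      // The policy is fixed at compile time at each call site.
      doAllExplicitPolicy<SchedulerPolicy::NodeRandom>(work, [](std::uint64_t x) {
        std::cout << x << '\n';
      });
    }
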
diff --git a/include/pando-lib-galois/import/ingest_rmat_el.hpp b/include/pando-lib-galois/import/ingest_rmat_el.hpp
index adf7c5de..a9a8fe20 100644
--- a/include/pando-lib-galois/import/ingest_rmat_el.hpp
+++ b/include/pando-lib-galois/import/ingest_rmat_el.hpp
@@ -92,7 +92,7 @@ ReturnType initializeELDACSR(pando::Array filename, std::uint64_t numVerti
   galois::WaitGroup freeWaiter;
   PANDO_CHECK(freeWaiter.initialize(0));
   auto freeWGH = freeWaiter.getHandle();
-  galois::doAllExplicitPolicy(
+  galois::doAllExplicitPolicy(
       freeWGH, perThreadRename, +[](galois::HashTable hash) {
         hash.deinitialize();
       });
@@ -158,7 +158,7 @@ ReturnType initializeELDLCSR(pando::Array filename, std::uint64_t numVerti
   galois::WaitGroup freeWaiter;
   PANDO_CHECK(freeWaiter.initialize(0));
   auto freeWGH = freeWaiter.getHandle();
-  galois::doAllExplicitPolicy(
+  galois::doAllExplicitPolicy(
       freeWGH, perThreadRename, +[](galois::HashTable hash) {
         hash.deinitialize();
       });
@@ -204,16 +204,16 @@ ReturnType initializeELDLCSR(pando::Array filename, std::uint64_t numVerti
     }
   };
-  PANDO_CHECK(galois::doAllExplicitPolicy(generateVerticesState, pHV,
-                                          generateVerticesPerHost));
+  PANDO_CHECK(galois::doAllExplicitPolicy(
+      generateVerticesState, pHV, generateVerticesPerHost));
 
   auto [partEdges, renamePerHost] =
       internal::partitionEdgesParallely(pHV, std::move(localReadEdges), hostLocalV2PM);
 
-  galois::doAllExplicitPolicy(
+  galois::doAllExplicitPolicy(
       partEdges, +[](pando::GlobalRef>> edge_vectors) {
         pando::Vector> evs_tmp = edge_vectors;
-        galois::doAllExplicitPolicy(
+        galois::doAllExplicitPolicy(
             evs_tmp, +[](pando::GlobalRef> src_ev) {
               pando::Vector tmp = src_ev;
               std::sort(tmp.begin(), tmp.end());
diff --git a/include/pando-lib-galois/import/ingest_wmd_csv.hpp b/include/pando-lib-galois/import/ingest_wmd_csv.hpp
index 04f1b630..41465302 100644
--- a/include/pando-lib-galois/import/ingest_wmd_csv.hpp
+++ b/include/pando-lib-galois/import/ingest_wmd_csv.hpp
@@ -104,7 +104,7 @@ galois::DistLocalCSR initializeWMDDLCSR(pando::Array
   galois::WaitGroup freeWaiter;
   PANDO_CHECK(freeWaiter.initialize(0));
   auto freeWGH = freeWaiter.getHandle();
-  galois::doAllExplicitPolicy(
+  galois::doAllExplicitPolicy(
       freeWGH, perThreadRename, +[](galois::HashTable hash) {
         hash.deinitialize();
       });
diff --git a/include/pando-lib-galois/import/wmd_graph_importer.hpp b/include/pando-lib-galois/import/wmd_graph_importer.hpp
index bc275ef1..3c58657e 100644
--- a/include/pando-lib-galois/import/wmd_graph_importer.hpp
+++ b/include/pando-lib-galois/import/wmd_graph_importer.hpp
@@ -70,7 +70,7 @@ buildEdgeCountToSend(std::uint64_t numVirtualHosts,
     sumArray[i] = p;
   }
 
-  PANDO_CHECK_RETURN(galois::doAll(
+  PANDO_CHECK_RETURN(galois::doAllExplicitPolicy(
       sumArray, localEdges,
       +[](pando::Array> counts,
          pando::Vector> localEdges) {
@@ -111,10 +111,10 @@ buildEdgeCountToSend(std::uint64_t numVirtualHosts,
   const uint64_t pairOffset = offsetof(UPair, first);
 
   auto state = galois::make_tpl(wgh, sumArray);
-  PANDO_CHECK(galois::doAll(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
      wgh, state, localEdges, +[](decltype(state) state, pando::Vector localEdges) {
        auto [wgh, sumArray] = state;
-        PANDO_CHECK(galois::doAll(
+        PANDO_CHECK(galois::doAllExplicitPolicy(
            wgh, sumArray, localEdges, +[](decltype(sumArray) counts, EdgeType localEdge) {
              pando::GlobalPtr toAdd = static_cast>(
                  static_cast>(&counts[localEdge.src % counts.size()]));
@@ -179,7 +179,7 @@ partitionEdgesParallely(HostLocalStorage> partitionedV
 
   // Insert into hashmap
   // ToDo: Parallelize this (Divija)
-  PANDO_CHECK(galois::doAll(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       partitionedVertices, renamePerHost,
       +[](HostLocalStorage> partitionedVertices,
           pando::GlobalRef> hashRef) {
@@ -386,7 +386,7 @@ template
   auto tpl = galois::make_tpl(numVerticesPerHostPerThread, prefixArrPerHostPerThread,
                               perThreadVerticesPartition);
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       tpl, perThreadVerticesPartition,
       +[](decltype(tpl) tpl,
           pando::GlobalRef>> perThreadVerticesPartition) {
@@ -432,7 +432,7 @@ template
   using SRC_Val = uint64_t;
   using DST_Val = uint64_t;
 
-  PANDO_CHECK(galois::doAll(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       numVerticesPerHostPerThread, prefixArrPerHostPerThread,
       +[](HostLocalStorage> numVerticesPerHostPerThread,
          Array prefixArr) {
@@ -452,7 +452,7 @@ template
   WaitGroup freeWaiter;
   PANDO_CHECK(freeWaiter.initialize());
   auto freeWGH = freeWaiter.getHandle();
-  PANDO_CHECK(doAll(
+  PANDO_CHECK(doAllExplicitPolicy(
       freeWGH, numVerticesPerHostPerThread, +[](galois::Array arr) {
         arr.deinitialize();
       }));
@@ -461,7 +461,7 @@ template
   galois::HostLocalStorage> pHV{};
   PANDO_CHECK(pHV.initialize());
-  PANDO_CHECK(doAll(
+  PANDO_CHECK(doAllExplicitPolicy(
       pHV, prefixArrPerHostPerThread, +[](decltype(pHV) pHV, galois::Array prefixArr) {
         const std::uint64_t nodeIdx = pando::getCurrentPlace().node.id;
         const std::uint64_t numThreads = galois::getNumThreads();
@@ -494,7 +494,7 @@ template
 #if FREE
   auto tpl = galois::make_tpl(prefixArrPerHostPerThread, perThreadVerticesPartition);
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       freeWGH, tpl, perThreadVerticesPartition,
       +[](decltype(tpl) tpl,
           pando::GlobalRef>> perThreadVerticesPartition) {
diff --git a/include/pando-lib-galois/loops/do_all.hpp b/include/pando-lib-galois/loops/do_all.hpp
index c0e30557..60304c05 100644
--- a/include/pando-lib-galois/loops/do_all.hpp
+++ b/include/pando-lib-galois/loops/do_all.hpp
@@ -22,6 +22,7 @@ constexpr bool DOALL_TIMER_ENABLE = false;
 
 extern counter::Record perCoreRNG;
 extern counter::Record> perCoreDist;
+extern counter::Record> nodeSizeDist;
 
 extern counter::Record schedulerCount;
 extern counter::Record doAllCount;
@@ -49,15 +50,16 @@ static inline uint64_t getTotalThreads() {
 }
 
 enum SchedulerPolicy {
-  RANDOM,
+  INFER_RANDOM_CORE,
   UNSAFE_STRIPE,
   CORE_STRIPE,
   NODE_ONLY,
+  NODE_RANDOM,
   NAIVE,
 };
 
 #ifndef PANDO_SCHED
-constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = SchedulerPolicy::RANDOM;
+constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = SchedulerPolicy::INFER_RANDOM_CORE;
 #else
 constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = PANDO_SCHED;
 #endif
@@ -82,7 +84,7 @@ pando::Place schedulerImpl(pando::Place preferredLocality,
                            [[maybe_unused]] LoopLocalSchedulerStruct& loopLocal) noexcept {
   counter::HighResolutionCount schedulerTimer;
   schedulerTimer.start();
-  if constexpr (Policy == RANDOM) {
+  if constexpr (Policy == INFER_RANDOM_CORE) {
     auto coreIdx = perCoreDist.getLocal()(perCoreRNG.getLocal());
     assert(coreIdx < pando::getCoreDims().x);
     preferredLocality =
@@ -103,6 +105,10 @@ pando::Place schedulerImpl(pando::Place preferredLocality,
     assert(coreIdx < pando::getCoreDims().x);
     preferredLocality =
         pando::Place(pando::getCurrentNode(), pando::anyPod, pando::CoreIndex(coreIdx, 0));
+  } else if constexpr (Policy == NODE_RANDOM) {
+    auto nodeIdx = nodeSizeDist.getLocal()(perCoreRNG.getLocal());
+    assert(nodeIdx < pando::getNodeDims().id);
+    preferredLocality = pando::Place(pando::NodeIndex(nodeIdx), pando::anyPod, pando::anyCore);
   } else {
     PANDO_ABORT("SCHEDULER POLICY NOT IMPLEMENTED");
   }
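The NODE_RANDOM branch added above draws a node index from nodeSizeDist with the per-core engine perCoreRNG and targets pando::Place(NodeIndex(nodeIdx), anyPod, anyCore). Below is a minimal standalone sketch of that selection step using only <random>; the core and node counts are assumed constants standing in for pando::getCoreDims().x and pando::getNodeDims().id, and the distribution's value type is a guess since the template arguments are stripped in this rendering.

    // Standalone sketch of NODE_RANDOM selection: each core owns a seeded
    // std::minstd_rand plus a uniform distribution over [0, nodeCount - 1],
    // and a work item is sent to whichever node index that distribution draws.
    #include <cstdint>
    #include <iostream>
    #include <random>
    #include <vector>

    int main() {
      const int coreCount = 4;           // stand-in for pando::getCoreDims().x
      const std::int64_t nodeCount = 8;  // stand-in for pando::getNodeDims().id

      // Per-core state, mirroring perCoreRNG / nodeSizeDist in init.cpp.
      std::vector<std::minstd_rand> perCoreRNG;
      std::vector<std::uniform_int_distribution<std::int64_t>> nodeSizeDist;
      for (int i = 0; i < coreCount; i++) {
        perCoreRNG.emplace_back(i);  // seeded with the core index, as in init.cpp
        nodeSizeDist.emplace_back(0, nodeCount - 1);
      }

      // NODE_RANDOM: the scheduling core draws a destination node for one work item.
      const int thisCore = 2;
      auto nodeIdx = nodeSizeDist[thisCore](perCoreRNG[thisCore]);
      std::cout << "send work item to node " << nodeIdx << '\n';
    }
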
diff --git a/microbench/bfs/include/pando-bfs-galois/sssp.hpp b/microbench/bfs/include/pando-bfs-galois/sssp.hpp
index aa0c9474..697794f8 100644
--- a/microbench/bfs/include/pando-bfs-galois/sssp.hpp
+++ b/microbench/bfs/include/pando-bfs-galois/sssp.hpp
@@ -134,7 +134,7 @@ pando::Status SSSP_DLCSR(
   state.dist = 0;
 
 #ifdef PANDO_STAT_TRACE_ENABLE
-  PANDO_CHECK(galois::doAllExplicitPolicy(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       wgh, phbfs, +[](pando::Vector) {
         PANDO_MEM_STAT_NEW_KERNEL("BFS Start");
       }));
@@ -168,7 +168,7 @@ pando::Status SSSP_DLCSR(
   }
 
 #ifdef PANDO_STAT_TRACE_ENABLE
-  PANDO_CHECK(galois::doAllExplicitPolicy(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       wgh, phbfs, +[](pando::Vector) {
         PANDO_MEM_STAT_NEW_KERNEL("BFS END");
       }));
@@ -311,7 +311,7 @@ pando::Status SSSPMDLCSR(G& graph, std::uint64_t src, HostLocalStorage(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       wgh, toRead, +[](MDWorkList) {
         PANDO_MEM_STAT_NEW_KERNEL("BFS Start");
       }));
@@ -328,7 +328,7 @@ pando::Status SSSPMDLCSR(G& graph, std::uint64_t src, HostLocalStorage(
       wgh, state, toRead, +[](decltype(state) state, MDWorkList toRead) {
         auto [graph, toWrite] = state;
         MDLCSRLocal(graph, toRead, toWrite.getLocalRef());
@@ -360,7 +360,7 @@ pando::Status SSSPMDLCSR(G& graph, std::uint64_t src, HostLocalStorage(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       wgh, toRead, +[](MDWorkList) {
         PANDO_MEM_STAT_NEW_KERNEL("BFS END");
       }));
diff --git a/pando-rt/src/init.cpp b/pando-rt/src/init.cpp
index f151fe28..c255e5ac 100644
--- a/pando-rt/src/init.cpp
+++ b/pando-rt/src/init.cpp
@@ -34,6 +34,7 @@
 
 counter::Record perCoreRNG;
 counter::Record> perCoreDist;
+counter::Record> nodeSizeDist;
 
 counter::Record schedulerCount = counter::Record();
 counter::Record doAllCount = counter::Record();
@@ -50,13 +51,16 @@ void initialize() {
     // initialization
     // this is called from the CP so no yield is necessary
+    auto nodeNum = pando::getNodeDims().id;
     auto coreDims = pando::getCoreDims();
     for(std::int8_t i = 0; i < coreDims.x; i++) {
       perCoreRNG.get(false, i, coreDims.x) = std::minstd_rand(i);
       perCoreDist.get(false, i, coreDims.x) = std::uniform_int_distribution (0, coreDims.x - 1);
+      nodeSizeDist.get(false, i, coreDims.x) = std::uniform_int_distribution (0, nodeNum - 1);
     }
     perCoreRNG.get(true, 0, coreDims.x) = std::minstd_rand(-1);
     perCoreDist.get(true, 0, coreDims.x) = std::uniform_int_distribution(0, coreDims.x - 1);
+    nodeSizeDist.get(true, 0, coreDims.x) = std::uniform_int_distribution (0, nodeNum - 1);
 
     Nodes::barrier();
   }
@@ -67,13 +71,17 @@ void initialize() {
     if (auto status = CommandProcessor::initialize(); status != Status::Success) {
       PANDO_ABORT("CP was not initialized");
     }
+
+    auto nodeNum = pando::getNodeDims().id;
     auto coreDims = pando::getCoreDims();
     for(std::int8_t i = 0; i < coreDims.x; i++) {
       perCoreRNG.get(false, i, coreDims.x) = std::minstd_rand(i);
       perCoreDist.get(false, i, coreDims.x) = std::uniform_int_distribution (0, coreDims.x - 1);
+      nodeSizeDist.get(false, i, coreDims.x) = std::uniform_int_distribution(0, nodeNum - 1);
     }
     perCoreRNG.get(true, 0, coreDims.x) = std::minstd_rand(-1);
     perCoreDist.get(true, 0, coreDims.x) = std::uniform_int_distribution(0, coreDims.x - 1);
+    nodeSizeDist.get(true, 0, coreDims.x) = std::uniform_int_distribution(0, nodeNum - 1);
   } else {
     Cores::initializeQueues();
   }
diff --git a/src/ingest_rmat_el.cpp b/src/ingest_rmat_el.cpp
index 096e3949..33015e03 100644
--- a/src/ingest_rmat_el.cpp
+++ b/src/ingest_rmat_el.cpp
@@ -92,7 +92,7 @@ pando::Vector> galois::reduceLocalEdges(
     }
   }
 
-  galois::doAllExplicitPolicy(
+  galois::doAllExplicitPolicy(
       reducedEL,
       +[](pando::Vector src_ev) { std::sort(src_ev.begin(), src_ev.end()); });
diff --git a/test/containers/test_per_thread.cpp b/test/containers/test_per_thread.cpp
index 072a2e50..a04053eb 100644
--- a/test/containers/test_per_thread.cpp
+++ b/test/containers/test_per_thread.cpp
@@ -88,7 +88,7 @@ TEST(PerThreadVector, Parallel) {
   static const uint64_t workItems = 1000;
   pando::Vector work;
   EXPECT_EQ(work.initialize(workItems), pando::Status::Success);
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       perThreadVec, work, +[](galois::PerThreadVector& perThreadVec, uint64_t x) {
         uint64_t originalID = pando::getCurrentThread().id;
         EXPECT_GE(originalID, 0);
@@ -209,9 +209,9 @@ TEST(PerThreadVector, HostLocalStorageVector) {
 
   galois::HostLocalStorage phu{};
 
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       ptv, phu, +[](galois::PerThreadVector ptv, std::uint64_t) {
-        galois::doAll(
+        galois::doAllExplicitPolicy(
             ptv, galois::IotaRange(0, size), +[](galois::PerThreadVector ptv, std::uint64_t i) {
               pando::Status err;
@@ -249,9 +249,9 @@ TEST(PerThreadVector, HostLocalStorageVectorAppend) {
 
   galois::HostLocalStorage phu{};
 
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       ptv, phu, +[](galois::PerThreadVector ptv, std::uint64_t) {
-        galois::doAll(
+        galois::doAllExplicitPolicy(
             ptv, galois::IotaRange(0, size), +[](galois::PerThreadVector ptv, std::uint64_t i) {
               pando::Status err;
diff --git a/test/containers/test_thread_local_storage.cpp b/test/containers/test_thread_local_storage.cpp
index f4656854..4a86224a 100644
--- a/test/containers/test_thread_local_storage.cpp
+++ b/test/containers/test_thread_local_storage.cpp
@@ -83,7 +83,7 @@ TEST(ThreadLocalStorage, DoAll) {
     val = threadIdx;
   };
 
-  err = galois::doAll(tls, g);
+  err = galois::doAllExplicitPolicy(tls, g);
 
   EXPECT_EQ(err, pando::Status::Success);
diff --git a/test/containers/test_thread_local_vector.cpp b/test/containers/test_thread_local_vector.cpp
index cef9ece5..6278036a 100644
--- a/test/containers/test_thread_local_vector.cpp
+++ b/test/containers/test_thread_local_vector.cpp
@@ -197,9 +197,9 @@ TEST(ThreadLocalVector, HostLocalStorageVector) {
 
   galois::HostLocalStorage phu{};
 
-  galois::doAll(
+  galois::doAllExplicitPolicy(
       ptv, phu, +[](galois::ThreadLocalVector ptv, std::uint64_t) {
-        galois::doAll(
+        galois::doAllExplicitPolicy(
             ptv, galois::IotaRange(0, size), +[](galois::ThreadLocalVector ptv, std::uint64_t i) {
               pando::Status err;
diff --git a/test/import/test_wmd_importer.cpp b/test/import/test_wmd_importer.cpp
index 014872e3..cfdb0927 100644
--- a/test/import/test_wmd_importer.cpp
+++ b/test/import/test_wmd_importer.cpp
@@ -209,7 +209,7 @@ TEST_P(DLCSRInitEdgeList, initializeEL) {
 
   // Iterate over vertices
   std::uint64_t vid = 0;
-  PANDO_CHECK(galois::doAll(
+  PANDO_CHECK(galois::doAllExplicitPolicy(
       graph.vertices(), +[](typename Graph::VertexTopologyID vert) {
         EXPECT_EQ(static_cast(localityOf(vert).node.id), pando::getCurrentPlace().node.id);
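Aside from the new policy, the default-policy plumbing in do_all.hpp keeps its shape: PANDO_SCHED still overrides CURRENT_SCHEDULER_POLICY at build time, and only the default enumerator changes from RANDOM to INFER_RANDOM_CORE. Below is a standalone sketch of that compile-time override pattern, mirroring the enum and #ifndef block from the patch; the main() and the -DPANDO_SCHED=NODE_RANDOM example are illustrative assumptions, not part of the patch.

    // Standalone sketch of the default/override selection used for
    // CURRENT_SCHEDULER_POLICY: a macro overrides the default enumerator,
    // otherwise the new INFER_RANDOM_CORE default applies.
    #include <iostream>

    enum SchedulerPolicy { INFER_RANDOM_CORE, UNSAFE_STRIPE, CORE_STRIPE, NODE_ONLY, NODE_RANDOM, NAIVE };

    #ifndef PANDO_SCHED
    constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = SchedulerPolicy::INFER_RANDOM_CORE;
    #else
    constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = PANDO_SCHED;
    #endif

    int main() {
      // Build with e.g. -DPANDO_SCHED=NODE_RANDOM to swap the policy globally.
      std::cout << "selected policy enumerator: " << static_cast<int>(CURRENT_SCHEDULER_POLICY) << '\n';
    }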