Skip to content

Commit

Permalink
Add schedule timing counters
Browse files Browse the repository at this point in the history
  • Loading branch information
AdityaAtulTewari committed May 25, 2024
1 parent 678ecdf commit 5d966e9
Show file tree
Hide file tree
Showing 2 changed files with 23 additions and 6 deletions.
24 changes: 18 additions & 6 deletions include/pando-lib-galois/loops/do_all.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,11 @@
#include <pando-rt/memory/global_ptr.hpp>
#include <pando-rt/pando-rt.hpp>

constexpr bool SCHEDULER_TIMER_ENABLE = false;

extern counter::Record<std::minstd_rand> perCoreRNG;
extern counter::Record<std::uniform_int_distribution<std::int8_t>> perCoreDist;
extern counter::Record<std::int64_t> schedulerCount;

namespace galois {

Expand Down Expand Up @@ -47,8 +50,12 @@ enum SchedulerPolicy {
RANDOM,
UNSAFE_STRIPE,
CORE_STRIPE,
NODE_ONLY,
};

// Policy used by the generic scheduler() entry point for ordinary doAll loops.
constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = SchedulerPolicy::RANDOM;
// Policy used by doAllEvenlyPartition (see LoopLocalSchedulerStruct usage there).
// NOTE(review): the identifier spells "PARITION" (sic, missing 'T'); renaming
// would touch every user, so the typo is documented rather than fixed here.
constexpr SchedulerPolicy EVENLY_PARITION_SCHEDULER_POLICY = SchedulerPolicy::CORE_STRIPE;

// Per-loop scheduler scratch state, specialized for each policy that needs it
// (e.g. the striping policies keep a last-index cursor). The primary template
// is empty: policies such as RANDOM draw from the global per-core RNG instead.
template <enum SchedulerPolicy>
struct LoopLocalSchedulerStruct {};

Expand All @@ -65,25 +72,31 @@ struct LoopLocalSchedulerStruct<SchedulerPolicy::CORE_STRIPE> {
// Chooses the Place a work item should run at, according to the compile-time
// scheduling Policy, and (when SCHEDULER_TIMER_ENABLE is set) records how long
// the scheduling decision itself took into the schedulerCount record.
//
// @tparam Policy             compile-time scheduling policy to apply.
// @param  preferredLocality  caller's preferred placement; only its node is
//                            kept — pod/core are chosen by the policy.
// @param  loopLocal          per-loop scheduler state (used by the striping
//                            policies; unused otherwise).
// @return the Place to schedule the work item on.
template <SchedulerPolicy Policy>
pando::Place schedulerImpl(pando::Place preferredLocality,
                           [[maybe_unused]] LoopLocalSchedulerStruct<Policy>& loopLocal) noexcept {
  // Timer compiles to a no-op when SCHEDULER_TIMER_ENABLE is false.
  counter::HighResolutionCount<SCHEDULER_TIMER_ENABLE> schedulerTimer;
  schedulerTimer.start();
  if constexpr (Policy == RANDOM) {
    // Uniformly random core on the caller's node, from per-core RNG state.
    auto coreIdx = perCoreDist.getLocal()(perCoreRNG.getLocal());
    assert(coreIdx < pando::getCoreDims().x);
    preferredLocality =
        pando::Place(preferredLocality.node, pando::anyPod, pando::CoreIndex(coreIdx, 0));
  } else if constexpr (Policy == UNSAFE_STRIPE) {
    // Round-robin across all threads; presumably "unsafe" because the cursor
    // increment is unsynchronized — confirm against LoopLocalSchedulerStruct.
    auto threadIdx = ++loopLocal.lastThreadIdx;
    threadIdx %= getNumThreads();
    preferredLocality = std::get<0>(getPlaceFromThreadIdx(threadIdx));
  } else if constexpr (Policy == CORE_STRIPE) {
    // Round-robin across the cores of the caller's node.
    auto coreIdx = ++loopLocal.lastCoreIdx;
    coreIdx %= pando::getCoreDims().x;
    preferredLocality =
        pando::Place(preferredLocality.node, pando::anyPod, pando::CoreIndex(coreIdx, 0));
  } else if constexpr (Policy == NODE_ONLY) {
    // Pin only the node; let the runtime pick any pod and core.
    preferredLocality = pando::Place(preferredLocality.node, pando::anyPod, pando::anyCore);
  } else {
    PANDO_ABORT("SCHEDULER POLICY NOT IMPLEMENTED");
  }
  // Every branch must fall through to here (no early returns) so the timing
  // event is recorded for all policies before the chosen place is returned.
  counter::recordHighResolutionEvent(schedulerCount, schedulerTimer);
  return preferredLocality;
}

constexpr SchedulerPolicy CURRENT_SCHEDULER_POLICY = SchedulerPolicy::RANDOM;

inline pando::Place scheduler(pando::Place preferredLocality,
LoopLocalSchedulerStruct<CURRENT_SCHEDULER_POLICY>& loopLocal) {
return schedulerImpl<CURRENT_SCHEDULER_POLICY>(preferredLocality, loopLocal);
Expand Down Expand Up @@ -357,7 +370,6 @@ class DoAll {
template <typename State, typename F>
static pando::Status doAllEvenlyPartition(WaitGroup::HandleType wgh, State s, uint64_t workItems,
const F& func) {
constexpr SchedulerPolicy EVENLY_PARITION_SCHEDULER_POLICY = SchedulerPolicy::CORE_STRIPE;
LoopLocalSchedulerStruct<EVENLY_PARITION_SCHEDULER_POLICY> loopLocal;
pando::Status err = pando::Status::Success;
if (workItems == 0) {
Expand Down
5 changes: 5 additions & 0 deletions pando-rt/src/init.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@

// Global per-core records backing the RANDOM scheduling policy and the
// scheduler timing counters (declared extern in do_all.hpp).
counter::Record<std::minstd_rand> perCoreRNG;
counter::Record<std::uniform_int_distribution<std::int8_t>> perCoreDist;
// Elapsed scheduler time per core. Plain definition for consistency with the
// siblings above: the redundant `= counter::Record<std::int64_t>()` initializer
// is dropped — namespace-scope objects are zero-initialized before dynamic
// initialization, so behavior is unchanged.
counter::Record<std::int64_t> schedulerCount;

namespace pando {

Expand Down Expand Up @@ -211,6 +212,10 @@ int main(int argc, char* argv[]) {
thisPlace.node.id,
std::int8_t((i == std::uint64_t(dims.core.x + 1)) ? -1 : i),
pointerCount.get(i));
SPDLOG_WARN("Scheduler time on node: {}, core: {} was {}",
thisPlace.node.id,
std::int8_t((i == std::uint64_t(dims.core.x + 1)) ? -1 : i),
schedulerCount.get(i));
}


Expand Down

0 comments on commit 5d966e9

Please sign in to comment.