From 38d44a3765afc5a67aedf0839ea7b7368cd3a818 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 20 Nov 2023 14:20:26 +0000 Subject: [PATCH 01/20] Port Schelling to pyflamegpu Added RTC init time to outputs. Requires a means to disable RTC cache, JitifyCache is not currently exposed via SWIG. --- pyflamegpu/LICENSE.MD | 9 + pyflamegpu/src/schelling.py | 346 ++++++++++++++++++++++++++++++++++++ 2 files changed, 355 insertions(+) create mode 100644 pyflamegpu/LICENSE.MD create mode 100644 pyflamegpu/src/schelling.py diff --git a/pyflamegpu/LICENSE.MD b/pyflamegpu/LICENSE.MD new file mode 100644 index 00000000..662b75ef --- /dev/null +++ b/pyflamegpu/LICENSE.MD @@ -0,0 +1,9 @@ +MIT License for FLAME GPU 2 + +Copyright (c) 2019 University of Sheffield + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/pyflamegpu/src/schelling.py b/pyflamegpu/src/schelling.py new file mode 100644 index 00000000..0983e1f1 --- /dev/null +++ b/pyflamegpu/src/schelling.py @@ -0,0 +1,346 @@ +import pyflamegpu +import typing +import sys, random, time + +# Configurable properties (note these are not dynamically updated in current agent functions) +GRID_WIDTH: typing.Final = 500 +POPULATED_COUNT: typing.Final = 200000 + +THRESHOLD: typing.Final = 0.375 # 0.375 == 3/8s to match integer models + +A: typing.Final = 0 +B: typing.Final = 1 +UNOCCUPIED: typing.Final = 2 + +# Agents output their type +output_type = """ +FLAMEGPU_AGENT_FUNCTION(output_type, flamegpu::MessageNone, flamegpu::MessageArray2D) { + FLAMEGPU->message_out.setVariable("type", FLAMEGPU->getVariable("type")); + FLAMEGPU->message_out.setIndex(FLAMEGPU->getVariable("pos", 0), FLAMEGPU->getVariable("pos", 1)); + return flamegpu::ALIVE; +} +""" + +# Agents decide whether they are happy or not and whether or not their space is available +determine_status = """ +#define THRESHOLD 0.375 +#define UNOCCUPIED 2 +FLAMEGPU_AGENT_FUNCTION(determine_status, flamegpu::MessageArray2D, flamegpu::MessageNone) { + const unsigned int my_x = FLAMEGPU->getVariable("pos", 0); + const unsigned int my_y = FLAMEGPU->getVariable("pos", 1); + + unsigned int same_type_neighbours = 0; + unsigned int diff_type_neighbours = 0; + + // Iterate 3x3 Moore neighbourhood (this does not include the central cell) + unsigned int my_type = FLAMEGPU->getVariable("type"); + for (auto message : FLAMEGPU->message_in.wrap(my_x, my_y)) { + int message_type = message.getVariable("type"); + same_type_neighbours += my_type == message_type; + diff_type_neighbours += (my_type != message_type) && (message_type != UNOCCUPIED); + } + + int isHappy = (static_cast(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD; + FLAMEGPU->setVariable("happy", isHappy); + unsigned int my_next_type = ((my_type != UNOCCUPIED) && isHappy) ? 
my_type : UNOCCUPIED; + FLAMEGPU->setVariable("next_type", my_next_type); + FLAMEGPU->setVariable("movement_resolved", (my_type == UNOCCUPIED) || isHappy); + unsigned int my_availability = (my_type == UNOCCUPIED) || (isHappy == 0); + FLAMEGPU->setVariable("available", my_availability); + + return flamegpu::ALIVE; +} +""" +is_available = """ +FLAMEGPU_AGENT_FUNCTION_CONDITION(is_available) { + return FLAMEGPU->getVariable("available"); +} +""" +output_available_locations = """ +FLAMEGPU_AGENT_FUNCTION(output_available_locations, flamegpu::MessageNone, flamegpu::MessageArray) { + FLAMEGPU->message_out.setIndex(FLAMEGPU->getIndex()); + FLAMEGPU->message_out.setVariable("id", FLAMEGPU->getID()); + return flamegpu::ALIVE; +} +""" + +class count_available_spaces(pyflamegpu.HostFunction): + def run(self,FLAMEGPU): + FLAMEGPU.environment.setPropertyUInt("spaces_available", FLAMEGPU.agent("agent").countUInt("available", 1)) + +is_moving = """ +FLAMEGPU_AGENT_FUNCTION_CONDITION(is_moving) { + bool movementResolved = FLAMEGPU->getVariable("movement_resolved"); + return !movementResolved; +} +""" +bid_for_location = """ +FLAMEGPU_AGENT_FUNCTION(bid_for_location, flamegpu::MessageArray, flamegpu::MessageBucket) { + // Select a location + unsigned int selected_index = FLAMEGPU->random.uniform(0, FLAMEGPU->environment.getProperty("spaces_available") - 1); + + // Get the location at that index + const auto message = FLAMEGPU->message_in.at(selected_index); + const flamegpu::id_t selected_location = message.getVariable("id"); + + // Bid for that location + FLAMEGPU->message_out.setKey(selected_location - 1); + FLAMEGPU->message_out.setVariable("id", FLAMEGPU->getID()); + FLAMEGPU->message_out.setVariable("type", FLAMEGPU->getVariable("type")); + return flamegpu::ALIVE; +} +""" +# @todo - device exception triggered when running +select_winners = """ +FLAMEGPU_AGENT_FUNCTION(select_winners, flamegpu::MessageBucket, flamegpu::MessageArray) { + // First agent in the bucket wins + for (const auto message : FLAMEGPU->message_in(FLAMEGPU->getID() - 1)) { + flamegpu::id_t winning_id = message.getVariable("id"); + FLAMEGPU->setVariable("next_type", message.getVariable("type")); + FLAMEGPU->setVariable("available", 0); + FLAMEGPU->message_out.setIndex(winning_id - 1); + FLAMEGPU->message_out.setVariable("won", 1); + break; + } + return flamegpu::ALIVE; +} +""" + +has_moved = """ +FLAMEGPU_AGENT_FUNCTION(has_moved, flamegpu::MessageArray, flamegpu::MessageNone) { + const auto message = FLAMEGPU->message_in.at(FLAMEGPU->getID() - 1); + if (message.getVariable("won")) { + FLAMEGPU->setVariable("movement_resolved", 1); + } + return flamegpu::ALIVE; +} +""" + +class movement_resolved(pyflamegpu.HostCondition): + def run(self, FLAMEGPU): + return pyflamegpu.EXIT if (FLAMEGPU.agent("agent").countUInt("movement_resolved", 0) == 0) else pyflamegpu.CONTINUE + +update_locations = """ +FLAMEGPU_AGENT_FUNCTION(update_locations, flamegpu::MessageNone, flamegpu::MessageNone) { + FLAMEGPU->setVariable("type", FLAMEGPU->getVariable("next_type")); + return flamegpu::ALIVE; +} +""" + + +# flamegpu::util::nvtx::Range("main"); +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() + +# Define the model +model = pyflamegpu.ModelDescription("Schelling_segregation") + +# Define the message list(s) +message = model.newMessageArray2D("type_message") +message.newVariableUInt("type") +message.setDimensions(GRID_WIDTH, GRID_WIDTH) + +# Define the agent types +# Agents representing the cells. 
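+# Before the cell agents are defined below, the happiness test used by determine_status
+# above can be summarised as plain Python (an illustrative helper added for readability
+# only; it is not used by the model, and unlike the GPU code it guards the
+# empty-neighbourhood case):
+def reference_is_happy(my_type, neighbour_types):
+    same = sum(1 for t in neighbour_types if t == my_type)
+    diff = sum(1 for t in neighbour_types if t != my_type and t != UNOCCUPIED)
+    # THRESHOLD == 0.375 == 3/8; unoccupied neighbours do not count against happiness
+    return (same / (same + diff)) > THRESHOLD if (same + diff) > 0 else False
+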
+agent = model.newAgent("agent") +agent.newVariableArrayUInt("pos", 2) +agent.newVariableUInt("type") +agent.newVariableUInt("next_type") +agent.newVariableUInt("happy") +agent.newVariableUInt("available") +agent.newVariableUInt("movement_resolved") +if pyflamegpu.VISUALISATION: + # Redundant separate floating point position vars for vis + agent.newVariableFloat("x") + agent.newVariableFloat("y") + +# Functions +outputTypeFunction = agent.newRTCFunction("output_type", output_type) +outputTypeFunction.setMessageOutput("type_message") +determineStatusFunction = agent.newRTCFunction("determine_status", determine_status) +determineStatusFunction.setMessageInput("type_message") +updateLocationsFunction = agent.newRTCFunction("update_locations", update_locations) + + +# Define a submodel for conflict resolution for agent movement +# This is necessary for parallel random movement of agents, to resolve conflict between agents moveing to the same location +submodel = pyflamegpu.ModelDescription("plan_movement") +# Submodels require an exit condition function, so they do not run forever +submodel.addExitCondition(movement_resolved()) + +# Define the submodel environment +s_env = submodel.Environment() +s_env.newPropertyUInt("spaces_available", 0) + +# Define message lists used within the submodel +s_message = submodel.newMessageArray("available_location_message") +s_message.newVariableID("id") +s_message.setLength(GRID_WIDTH*GRID_WIDTH) + +s_message = submodel.newMessageBucket("intent_to_move_message") +s_message.newVariableID("id") +s_message.newVariableUInt("type") +s_message.setBounds(0, GRID_WIDTH * GRID_WIDTH) + +s_message = submodel.newMessageArray("movement_won_message") +s_message.newVariableUInt("won"); +s_message.setLength(GRID_WIDTH*GRID_WIDTH); + +# Define agents within the submodel +s_agent = submodel.newAgent("agent") +s_agent.newVariableArrayUInt("pos", 2) +s_agent.newVariableUInt("type") +s_agent.newVariableUInt("next_type") +s_agent.newVariableUInt("happy") +s_agent.newVariableUInt("available") +s_agent.newVariableUInt("movement_resolved") + +# Functions +outputLocationsFunction = s_agent.newRTCFunction("output_available_locations", output_available_locations) +outputLocationsFunction.setMessageOutput("available_location_message") +outputLocationsFunction.setRTCFunctionCondition(is_available) + +bidFunction = s_agent.newRTCFunction("bid_for_location", bid_for_location) +bidFunction.setRTCFunctionCondition(is_moving) +bidFunction.setMessageInput("available_location_message") +bidFunction.setMessageOutput("intent_to_move_message") + +selectWinnersFunction = s_agent.newRTCFunction("select_winners", select_winners) +selectWinnersFunction.setMessageInput("intent_to_move_message") +selectWinnersFunction.setMessageOutput("movement_won_message") +selectWinnersFunction.setMessageOutputOptional(True) + +hasMovedFunction = s_agent.newRTCFunction("has_moved", has_moved) +hasMovedFunction.setMessageInput("movement_won_message") + +# Specify control flow for the submodel (@todo - dependencies) +# Available agents output their location (indexed by thread ID) +s_layer1 = submodel.newLayer() +s_layer1.addAgentFunction(outputLocationsFunction) + +# Count the number of available spaces +s_layer2 = submodel.newLayer() +s_layer2.addHostFunction(count_available_spaces()) + +# Unhappy agents bid for a new location +s_layer3 = submodel.newLayer() +s_layer3.addAgentFunction(bidFunction) + +# Available locations check if anyone wants to move to them. 
If so, approve one and mark as unavailable +# Update next type to the type of the mover +# Output a message to inform the mover that they have been successful +s_layer4 = submodel.newLayer() +s_layer4.addAgentFunction(selectWinnersFunction); + +# Movers mark themselves as resolved +s_layer5 = submodel.newLayer() +s_layer5.addAgentFunction(hasMovedFunction); + +# Attach the submodel to the model, +plan_movement = model.newSubModel("plan_movement", submodel) +# Bind the agents within the submodel to the same agents outside of the submodel +plan_movement.bindAgent("agent", "agent", True, True) + +# Define the control flow of the outer/parent model (@todo - use dependencies) +layer1 = model.newLayer() +layer1.addAgentFunction(outputTypeFunction) + +layer2 = model.newLayer() +layer2.addAgentFunction(determineStatusFunction) + +layer3 = model.newLayer() +layer3.addSubModel(plan_movement) + +layer4 = model.newLayer() +layer4.addAgentFunction(updateLocationsFunction) + +# Trying calling this again to fix vis +if pyflamegpu.VISUALISATION: + layer5 = model.newLayer(); + layer5.addAgentFunction(determineStatusFunction) + + +# Create the simulator for the model +cudaSimulation = pyflamegpu.CUDASimulation(model) + +""" + * Create visualisation + * @note FLAMEGPU2 doesn't currently have proper support for discrete/2d visualisations +""" +if pyflamegpu.VISUALISATION: + visualisation = cudaSimulation.getVisualisation() + visualisation.setSimulationSpeed(10) # slow down the simulation for visualisation purposes + visualisation.setInitialCameraLocation(GRID_WIDTH / 2.0, GRID_WIDTH / 2.0, 225.0) + visualisation.setInitialCameraTarget(GRID_WIDTH / 2.0, GRID_WIDTH /2.0, 0.0) + visualisation.setCameraSpeed(0.001 * GRID_WIDTH) + visualisation.setViewClips(0.1, 5000) + agt = visualisation.addAgent("agent") + # Position vars are named x, y, z; so they are used by default + agt.setModel(pyflamegpu.CUBE) # 5 unwanted faces! + agt.setModelScale(1.0) + + cell_colors = pyflamegpu.uDiscreteColor("type", pyflamegpu.Color("#666")) + cell_colors[A] = pyflamegpu.RED + cell_colors[B] = pyflamegpu.BLUE + agt.setColor(cell_colors) + visualisation.activate() + + +# Initialise the simulation +cudaSimulation.initialise(sys.argv) +prePopulationTimer_stop = time.monotonic() +print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + +populationGenerationTimer_start = time.monotonic() +# Generate a population if not provided from disk +if not cudaSimulation.SimulationConfig().input_file: + # Use a seeded generator + rng = random.Random(cudaSimulation.SimulationConfig().random_seed) + # Calculate the total number of cells + CELL_COUNT = GRID_WIDTH * GRID_WIDTH + # Generate a population of agents, which are just default initialised for now + population = pyflamegpu.AgentVector(model.Agent("agent"), CELL_COUNT) + + # Select POPULATED_COUNT unique random integers in the range [0, CELL_COUNT), by shuffling the iota. + shuffledIota = [i for i in range(CELL_COUNT)] + rng.shuffle(shuffledIota) + + # Then iterate the shuffled iota, the first POPULATED_COUNT agents will randomly select a type/group. The remaining agents will not. + # The agent index provides the X and Y coordinate for the position. + for elementIdx in range(len(shuffledIota)): + idx = shuffledIota[elementIdx] + instance = population[idx] + # the position can be computed from the index, given the width. 
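+        # e.g. with GRID_WIDTH == 500, idx == 1203 maps to (x, y) == (203, 2)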
+ x = int(idx % GRID_WIDTH) + y = int(idx / GRID_WIDTH) + instance.setVariableArrayUInt("pos", [ x, y ]) + + # If the elementIDX is below the populated count, generated a populated type, otherwise it is unoccupied + if elementIdx < POPULATED_COUNT: + type_ = A if rng.random() < 0.5 else B + instance.setVariableUInt("type", type_) + else: + instance.setVariableUInt("type", UNOCCUPIED) + instance.setVariableUInt("happy", 0) + if pyflamegpu.VISUALISATION: + # Redundant separate floating point position vars for vis + instance.setVariableFloat("x", x) + instance.setVariableFloat("y", y) + + cudaSimulation.setPopulationData(population) + +populationGenerationTimer_stop = time.monotonic() +print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + +# Run the simulation +cudaSimulation.simulate() + +# Print the execution time to stdout +print("simulate (s): %.6f"%cudaSimulation.getElapsedTimeSimulation()) +print("rtc (s): %.6f"%cudaSimulation.getElapsedTimeRTCInitialisation()) + +if pyflamegpu.VISUALISATION: + visualisation.join() + +mainTimer_stop = time.monotonic() +print("main (s): %.6f"%(mainTimer_stop - mainTimer_start)) From f526dce26abea298faf85855e2f2fef5866d3b1a Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 20 Nov 2023 14:55:57 +0000 Subject: [PATCH 02/20] Port of Boids2D model. --- pyflamegpu/src/boids2D.py | 414 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 414 insertions(+) create mode 100644 pyflamegpu/src/boids2D.py diff --git a/pyflamegpu/src/boids2D.py b/pyflamegpu/src/boids2D.py new file mode 100644 index 00000000..a9a9d5f4 --- /dev/null +++ b/pyflamegpu/src/boids2D.py @@ -0,0 +1,414 @@ +import pyflamegpu +import typing +import sys, random, time +import numpy as np + +""" + * pyFLAME GPU 2 implementation of the Boids flocking model in 2D, using spatial2D messaging. + * This is based on the FLAME GPU 1 implementation, but with dynamic generation of agents. + * The environment is wrapped, effectively the surface of a torus. 
+""" + +# inputdata agent function for Boid agents, which reads data from neighbouring Boid agents, to perform the boid +inputdata = """ +/** + * Get the length of a vector + * @param x x component of the vector + * @param y y component of the vector + * @return the length of the vector + */ +FLAMEGPU_HOST_DEVICE_FUNCTION float vec3Length(const float x, const float y) { + return sqrtf(x * x + y * y); +} + +/** + * Add a scalar to a vector in-place + * @param x x component of the vector + * @param y y component of the vector + * @param value scalar value to add + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Add(float &x, float &y, const float value) { + x += value; + y += value; +} + +/** + * Subtract a scalar from a vector in-place + * @param x x component of the vector + * @param y y component of the vector + * @param value scalar value to subtract + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Sub(float &x, float &y, const float value) { + x -= value; + y -= value; +} + +/** + * Multiply a vector by a scalar value in-place + * @param x x component of the vector + * @param y y component of the vector + * @param multiplier scalar value to multiply by + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Mult(float &x, float &y, const float multiplier) { + x *= multiplier; + y *= multiplier; +} + +/** + * Divide a vector by a scalar value in-place + * @param x x component of the vector + * @param y y component of the vector + * @param divisor scalar value to divide by + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Div(float &x, float &y, const float divisor) { + x /= divisor; + y /= divisor; +} + +/** + * Normalize a 3 component vector in-place + * @param x x component of the vector + * @param y y component of the vector + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Normalize(float &x, float &y) { + // Get the length + float length = vec3Length(x, y); + vec3Div(x, y, length); +} + +/** + * Ensure that the x and y position are withini the defined boundary area, wrapping to the far side if out of bounds. + * Performs the operation in place + * @param x x component of the vector + * @param y y component of the vector + * @param MIN_POSITION the minimum value for each component + * @param MAX_POSITION the maximum value for each component + */ +FLAMEGPU_HOST_DEVICE_FUNCTION void wrapPosition(float &x, float &y, const float MIN_POSITION, const float MAX_POSITION) { + const float WIDTH = MAX_POSITION - MIN_POSITION; + if (x < MIN_POSITION) { + x += WIDTH; + } + if (y < MIN_POSITION) { + y += WIDTH; + } + + if (x > MAX_POSITION) { + x -= WIDTH; + } + if (y > MAX_POSITION) { + y -= WIDTH; + } +} + +/** + * inputdata agent function for Boid agents, which reads data from neighbouring Boid agents, to perform the boid flocking model. 
+ */ +FLAMEGPU_AGENT_FUNCTION(inputdata, flamegpu::MessageSpatial2D, flamegpu::MessageNone) { + // Agent properties in local register + const flamegpu::id_t id = FLAMEGPU->getID(); + // Agent position + float agent_x = FLAMEGPU->getVariable("x"); + float agent_y = FLAMEGPU->getVariable("y"); + // Agent velocity + float agent_fx = FLAMEGPU->getVariable("fx"); + float agent_fy = FLAMEGPU->getVariable("fy"); + + // Boids percieved center + float perceived_centre_x = 0.0f; + float perceived_centre_y = 0.0f; + int perceived_count = 0; + + // Boids global velocity matching + float global_velocity_x = 0.0f; + float global_velocity_y = 0.0f; + + // Total change in velocity + float velocity_change_x = 0.f; + float velocity_change_y = 0.f; + + const float INTERACTION_RADIUS = FLAMEGPU->environment.getProperty("INTERACTION_RADIUS"); + const float SEPARATION_RADIUS = FLAMEGPU->environment.getProperty("SEPARATION_RADIUS"); + // Iterate location messages, accumulating relevant data and counts. + for (const auto message : FLAMEGPU->message_in.wrap(agent_x, agent_y)) { + // Ignore self messages. + if (message.getVariable("id") != id) { + // Get the message location and velocity. + const float message_x = message.getVirtualX(); + const float message_y = message.getVirtualY(); + + // Check interaction radius + float separation = vec3Length(agent_x - message_x, agent_y - message_y); + + if (separation < INTERACTION_RADIUS) { + // Update the percieved centre + perceived_centre_x += message_x; + perceived_centre_y += message_y; + perceived_count++; + + // Update percieved velocity matching + const float message_fx = message.getVariable("fx"); + const float message_fy = message.getVariable("fy"); + global_velocity_x += message_fx; + global_velocity_y += message_fy; + + // Update collision centre + if (separation < (SEPARATION_RADIUS)) { // dependant on model size + // Rule 3) Avoid other nearby boids (Separation) + float normalizedSeparation = (separation / SEPARATION_RADIUS); + float invNormSep = (1.0f - normalizedSeparation); + float invSqSep = invNormSep * invNormSep; + + const float collisionScale = FLAMEGPU->environment.getProperty("COLLISION_SCALE"); + velocity_change_x += collisionScale * (agent_x - message_x) * invSqSep; + velocity_change_y += collisionScale * (agent_y - message_y) * invSqSep; + } + } + } + } + + if (perceived_count) { + // Divide positions/velocities by relevant counts. 
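+        // perceived_count excludes this agent itself (self messages are skipped above),
+        // so these become the mean neighbour position and the mean neighbour velocity.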
+ vec3Div(perceived_centre_x, perceived_centre_y, perceived_count); + vec3Div(global_velocity_x, global_velocity_y, perceived_count); + + // Rule 1) Steer towards perceived centre of flock (Cohesion) + float steer_velocity_x = 0.f; + float steer_velocity_y = 0.f; + + const float STEER_SCALE = FLAMEGPU->environment.getProperty("STEER_SCALE"); + steer_velocity_x = (perceived_centre_x - agent_x) * STEER_SCALE; + steer_velocity_y = (perceived_centre_y - agent_y) * STEER_SCALE; + + velocity_change_x += steer_velocity_x; + velocity_change_y += steer_velocity_y; + + // Rule 2) Match neighbours speeds (Alignment) + float match_velocity_x = 0.f; + float match_velocity_y = 0.f; + + const float MATCH_SCALE = FLAMEGPU->environment.getProperty("MATCH_SCALE"); + match_velocity_x = global_velocity_x * MATCH_SCALE; + match_velocity_y = global_velocity_y * MATCH_SCALE; + + velocity_change_x += match_velocity_x - agent_fx; + velocity_change_y += match_velocity_y - agent_fy; + } + + // Global scale of velocity change + vec3Mult(velocity_change_x, velocity_change_y, FLAMEGPU->environment.getProperty("GLOBAL_SCALE")); + + // Update agent velocity + agent_fx += velocity_change_x; + agent_fy += velocity_change_y; + + // Bound velocity + float agent_fscale = vec3Length(agent_fx, agent_fy); + if (agent_fscale > 1) { + vec3Div(agent_fx, agent_fy, agent_fscale); + } + + float minSpeed = 0.5f; + if (agent_fscale < minSpeed) { + // Normalise + vec3Div(agent_fx, agent_fy, agent_fscale); + + // Scale to min + vec3Mult(agent_fx, agent_fy, minSpeed); + } + + // Apply the velocity + const float TIME_SCALE = FLAMEGPU->environment.getProperty("TIME_SCALE"); + agent_x += agent_fx * TIME_SCALE; + agent_y += agent_fy * TIME_SCALE; + + // Wramp position + const float MIN_POSITION = FLAMEGPU->environment.getProperty("MIN_POSITION"); + const float MAX_POSITION = FLAMEGPU->environment.getProperty("MAX_POSITION"); + wrapPosition(agent_x, agent_y, MIN_POSITION, MAX_POSITION); + + // Update global agent memory. + FLAMEGPU->setVariable("x", agent_x); + FLAMEGPU->setVariable("y", agent_y); + + FLAMEGPU->setVariable("fx", agent_fx); + FLAMEGPU->setVariable("fy", agent_fy); + + return flamegpu::ALIVE; +} +""" +# outputdata agent function for Boid agents, which outputs publicly visible properties to a message list +outputdata = """ +FLAMEGPU_AGENT_FUNCTION(outputdata, flamegpu::MessageNone, flamegpu::MessageSpatial2D) { + // Output each agents publicly visible properties. 
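+    // "x" and "y" also serve as the spatial 2D message's implicit position members,
+    // which the wrapped neighbourhood iteration in inputdata relies on.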
+ FLAMEGPU->message_out.setVariable("id", FLAMEGPU->getID()); + FLAMEGPU->message_out.setVariable("x", FLAMEGPU->getVariable("x")); + FLAMEGPU->message_out.setVariable("y", FLAMEGPU->getVariable("y")); + FLAMEGPU->message_out.setVariable("fx", FLAMEGPU->getVariable("fx")); + FLAMEGPU->message_out.setVariable("fy", FLAMEGPU->getVariable("fy")); + return flamegpu::ALIVE; +} +""" + +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() +model = pyflamegpu.ModelDescription("Boids Spatial2D"); + +# Environment variables with default values +env = model.Environment(); + +# Population size to generate, if no agents are loaded from disk +env.newPropertyUInt("POPULATION_TO_GENERATE", 80000) + +# Environment Bounds +env.newPropertyFloat("MIN_POSITION", 0.0) +env.newPropertyFloat("MAX_POSITION", 400.0) + +# Initialisation parameter(s) +env.newPropertyFloat("INITIAL_SPEED", 1.0) # always start with a speed of 1.0 + +# Interaction radius +env.newPropertyFloat("INTERACTION_RADIUS", 5.0) +env.newPropertyFloat("SEPARATION_RADIUS", 1.0) + +# Global Scalers +env.newPropertyFloat("TIME_SCALE", 1.0) # 1.0 for benchmarking to behave the same as the other simulators. +env.newPropertyFloat("GLOBAL_SCALE", 1.0) # 1.0 for comparing to other benchamrks + +# Rule scalers +env.newPropertyFloat("STEER_SCALE", 0.03) # cohere scale? 0.03 +env.newPropertyFloat("COLLISION_SCALE", 0.015) # separate_scale? 0.015 +env.newPropertyFloat("MATCH_SCALE", 0.05) # match 0.05 + + +# Define the Location 2D spatial message list +message = model.newMessageSpatial2D("location") +# Set the range and bounds. +message.setRadius(env.getPropertyFloat("INTERACTION_RADIUS")) +message.setMin(env.getPropertyFloat("MIN_POSITION"), env.getPropertyFloat("MIN_POSITION")) +message.setMax(env.getPropertyFloat("MAX_POSITION"), env.getPropertyFloat("MAX_POSITION")) + +# A message to hold the location of an agent. +message.newVariableID("id") +# Spatial 2D messages implicitly have float members x and y, so they do not need to be defined +message.newVariableFloat("fx") +message.newVariableFloat("fy") +message.newVariableFloat("fz") + +# Boid agent +agent = model.newAgent("Boid") +agent.newVariableFloat("x") +agent.newVariableFloat("y") +agent.newVariableFloat("fx") +agent.newVariableFloat("fy") +# Define the agents methods +outputdataDescription = agent.newRTCFunction("outputdata", outputdata) +outputdataDescription.setMessageOutput("location") +inputdataDescription = agent.newRTCFunction("inputdata", inputdata) +inputdataDescription.setMessageInput("location") + +# Specify agent method dependencies, i.e. the execution order within a layer. 
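+# outputdata publishes each boid's location message and inputdata then reads that
+# message list, so outputdata is the execution root and inputdata depends on it.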
+model.addExecutionRoot(outputdataDescription) +inputdataDescription.dependsOn(outputdataDescription) +# Build the execution graph +model.generateLayers() + +# Create Model Runner +simulator = pyflamegpu.CUDASimulation(model) + +# If enabled, define the visualisation for the model +if pyflamegpu.VISUALISATION: + visualisation = simulator.getVisualisation(); + + visualisation.setSimulationSpeed(10) # slow down the simulation for visualisation purposes + env = model.Environment() + ENV_WIDTH = env.getPropertyFloat("MAX_POSITION") - env.getPropertyFloat("MIN_POSITION") + ENV_CENTER = env.getPropertyFloat("MIN_POSITION") + (ENV_WIDTH) / 2.0 + INIT_CAM = env.getPropertyFloat("MAX_POSITION") * 1.25 + visualisation.setInitialCameraLocation(ENV_CENTER, ENV_CENTER, INIT_CAM) + visualisation.setInitialCameraTarget(ENV_CENTER, ENV_CENTER, 0.0) + visualisation.setCameraSpeed(0.001 * ENV_WIDTH) + visualisation.setViewClips(0.00001, ENV_WIDTH) + agentVisualiser = visualisation.addAgent("Boid") + # Position vars are named x, y so they are used by default + agentVisualiser.setForwardXVariable("fx") + agentVisualiser.setForwardYVariable("fy") + agentVisualiser.setModel(pyflamegpu.STUNTPLANE) + agentVisualiser.setModelScale(env.getPropertyFloat("SEPARATION_RADIUS")/3.0) + # Add a settings UI + ui = visualisation.newUIPanel("Environment") + ui.newStaticLabel("Interaction") + ui.newEnvironmentPropertyDragFloat("INTERACTION_RADIUS", 0.0, env.getPropertyFloat("INTERACTION_RADIUS"), 0.01) # Can't go bigger than the comms radius, which is fixed at compile time. + ui.newEnvironmentPropertyDragFloat("SEPARATION_RADIUS", 0.0, env.getPropertyFloat("INTERACTION_RADIUS"), 0.01) # Can't go bigger than the initial interaction radius which is fixed at compile time. + ui.newStaticLabel("Environment Scalars") + ui.newEnvironmentPropertyDragFloat("TIME_SCALE", 0.0, 1.0, 0.0001) + ui.newEnvironmentPropertyDragFloat("GLOBAL_SCALE", 0.0, 0.5, 0.001) + ui.newStaticLabel("Force Scalars") + ui.newEnvironmentPropertyDragFloat("STEER_SCALE", 0.0, 10.0, 0.001) + ui.newEnvironmentPropertyDragFloat("COLLISION_SCALE", 0.0, 10.0, 0.001) + ui.newEnvironmentPropertyDragFloat("MATCH_SCALE", 0.0, 10.0, 0.001) + + visualisation.activate(); + +# Initialisation +simulator.initialise(sys.argv) +prePopulationTimer_stop = time.monotonic() +print("pre population (s): %.6f\n"%(prePopulationTimer_stop - prePopulationTimer_start)) + +populationGenerationTimer_start = time.monotonic() + +# If no agent states were provided, generate a population of randomly distributed agents within the environment space +if not simulator.SimulationConfig().input_file: + env = model.Environment() + # Uniformly distribute agents within space, with uniformly distributed initial velocity. 
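+    # Note on the direction sampling below: to keep the sampled direction uniform, the
+    # magnitude should be computed once before rescaling both components, e.g.
+    # n = float(np.hypot(fx, fy)); fx, fy = fx / n, fy / n. Dividing fy by a norm that
+    # is recomputed after fx has already been rescaled skews the resulting direction.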
+ # c++ random number generator engine + rng = random.Random(simulator.SimulationConfig().random_seed) + + # Generate a random location within the environment bounds + min_pos = env.getPropertyFloat("MIN_POSITION") + max_pos = env.getPropertyFloat("MAX_POSITION") + + # Generate a random speed between 0 and the maximum initial speed + fmagnitude = env.getPropertyFloat("INITIAL_SPEED") + + # Generate a population of agents, based on the relevant environment property + populationSize = env.getPropertyUInt("POPULATION_TO_GENERATE") + population = pyflamegpu.AgentVector(model.Agent("Boid"), populationSize) + for i in range(populationSize): + instance = population[i] + + # Agent position in space + instance.setVariableFloat("x", rng.uniform(min_pos, max_pos)); + instance.setVariableFloat("y", rng.uniform(min_pos, max_pos)); + + # Generate a random velocity direction + fx = rng.uniform(-1, 1) + fy = rng.uniform(-1, 1) + # Use the random speed for the velocity. + fx /= np.linalg.norm([fx, fy]) + fy /= np.linalg.norm([fx, fy]) + fx *= fmagnitude + fy *= fmagnitude + + # Set these for the agent. + instance.setVariableFloat("fx", fx) + instance.setVariableFloat("fy", fy) + + simulator.setPopulationData(population); + +populationGenerationTimer_stop = time.monotonic() +print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + +# Execute the simulation +simulator.simulate() + +# Print the exeuction time to stdout +print("simulate (s): %.6f"%simulator.getElapsedTimeSimulation()) +print("rtc (s): %.6f"%simulator.getElapsedTimeRTCInitialisation()) + +# Join the visualsition if required +if pyflamegpu.VISUALISATION: + visualisation.join() + +mainTimer_stop = time.monotonic() +print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) From 500f3108be1b06e238ed2fd13f9266c94b534efb Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Fri, 24 Nov 2023 09:22:16 +0000 Subject: [PATCH 03/20] Add arg to disable RTC cache These methods have now been exposed to Python in FLAMEGPU master branch --- pyflamegpu/src/boids2D.py | 5 +++++ pyflamegpu/src/schelling.py | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/pyflamegpu/src/boids2D.py b/pyflamegpu/src/boids2D.py index a9a9d5f4..2bdc9285 100644 --- a/pyflamegpu/src/boids2D.py +++ b/pyflamegpu/src/boids2D.py @@ -3,6 +3,11 @@ import sys, random, time import numpy as np +if "--disable-rtc-cache" in sys.argv: + rtc_cache = pyflamegpu.JitifyCache.getInstance() + rtc_cache.useMemoryCache(False) + rtc_cache.useDiskCache(False) + """ * pyFLAME GPU 2 implementation of the Boids flocking model in 2D, using spatial2D messaging. * This is based on the FLAME GPU 1 implementation, but with dynamic generation of agents. diff --git a/pyflamegpu/src/schelling.py b/pyflamegpu/src/schelling.py index 0983e1f1..ac51a66f 100644 --- a/pyflamegpu/src/schelling.py +++ b/pyflamegpu/src/schelling.py @@ -2,6 +2,11 @@ import typing import sys, random, time +if "--disable-rtc-cache" in sys.argv: + rtc_cache = pyflamegpu.JitifyCache.getInstance() + rtc_cache.useMemoryCache(False) + rtc_cache.useDiskCache(False) + # Configurable properties (note these are not dynamically updated in current agent functions) GRID_WIDTH: typing.Final = 500 POPULATED_COUNT: typing.Final = 200000 From adf7bb2290979b4436d25970fb883e5a1ab67eeb Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Fri, 24 Nov 2023 09:53:01 +0000 Subject: [PATCH 04/20] Update benchmark script/readme. 
--- pyflamegpu/.gitignore | 506 ++++++++++++++++++++++++++++++++++++++++ pyflamegpu/README.md | 11 + pyflamegpu/benchmark.py | 127 ++++++++++ 3 files changed, 644 insertions(+) create mode 100644 pyflamegpu/.gitignore create mode 100644 pyflamegpu/README.md create mode 100644 pyflamegpu/benchmark.py diff --git a/pyflamegpu/.gitignore b/pyflamegpu/.gitignore new file mode 100644 index 00000000..5d768891 --- /dev/null +++ b/pyflamegpu/.gitignore @@ -0,0 +1,506 @@ +# Temporary files for benchmarking +benchmark_config.txt + +# Model output files +*.xml +*.json +*.bin + +# Visualisation screenshots +*.png + +# GoogleTest directories (handled by CMake) +googletest-build/ +googletest-download/ +googletest-src/ + +# Doxygen / CMake + Doxygen generated files +DoxyGen*/ +CMakeDoxyfile.in +CMakeDoxygenDefaults.cmake +Doxyfile.docs + +# Visual Studio (these are gen by CMake, don't want tracked) +*.vcxproj +*.vcxproj.filters +*.sln + +# Directories +docs/ +bin/ +obj/ +build*/ + +# Editor configuration files/directories +.vscode +*.cbp +*.layout +.cbp +.layout + +# CUDA +*.i +*.ii +*.gpu +*.ptx +*.cubin +*.fatbin +*.tlog +*.cache + +# Valgrind log files (for syntax-highlight enabled editors such as vscode with plugin) +*.valgrind + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# ========================= +# Operating System Files +# ========================= + +# OSX +# ========================= + +.DS_Store +.AppleDouble +.LSOverride + +# Thumbnails +._* + +# Files that might appear on external disk +.Spotlight-V100 +.Trashes + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# Windows +# ========================= + +# Windows image file caches +Thumbs.db +ehthumbs.db +*.opendb + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# ========================= +# CMake +# From: https://github.com/github/gitignore/blob/master/CMake.gitignore +# Commit 3784768 +# ========================= +CMakeLists.txt.user +CMakeCache.txt +CMakeFiles +CMakeScripts +Testing +Makefile +cmake_install.cmake +install_manifest.txt +compile_commands.json +CTestTestfile.cmake +_deps + +# ========================= +# Visual Studio +# From: https://github.com/github/gitignore/blob/master/VisualStudio.gitignore +# Commit 3784768 +# ========================= + +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. 
+## +## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.ilk +*.meta +*.obj +*.iobj +*.pch +*.pdb +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*_wpftmp.csproj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# JustCode is a .NET coding add-in +.JustCode + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) +__pycache__/ +*.pyc + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ diff --git a/pyflamegpu/README.md b/pyflamegpu/README.md new file mode 100644 index 00000000..a0b9d70e --- /dev/null +++ b/pyflamegpu/README.md @@ -0,0 +1,11 @@ +# pyFLAMEGPU ABM Comparison Benchmarks + +This directory contains the implementation of several ABMs used to compare agent based models, including: + ++ `boids2D` - a 2D spatial implementation of boids flocking model ++ `schelling` - an implementation of Schelling's Model of Segregation + +The [`pyflamegpu` package](https://github.com/FLAMEGPU/FLAMEGPU2#installation) must be available within the Python environment used to launch the independent models or `benchmark.py`. The upto-date requirements for running pyFLAMEGPU can be found within the same document, however typically a recent CUDA capable GPU, CUDA toolkit and Python 3 are the core requirements. + +For details on how to develop a model using pyFLAMEGPU (or FLAME GPU 2) or the requirements for using pyFLAMEGPU, refer to the [userguide & API documentation](https://docs.flamegpu.com/). + diff --git a/pyflamegpu/benchmark.py b/pyflamegpu/benchmark.py new file mode 100644 index 00000000..817478a5 --- /dev/null +++ b/pyflamegpu/benchmark.py @@ -0,0 +1,127 @@ +#! /usr/bin/env python3 + +# Run 100 iterations of each the Release builds of boids and schelling models, capturing the time of each and emitting the minimum time. +# @todo - support scaling, do not just output minimum, time whole execution vs just simulation? 
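+# Each model prints timing lines of the form "simulate (s): 1.234567" (and similarly for
+# "pre population", "population generation", "rtc" and "main"); extract_times() below
+# parses these from the captured stdout of each run.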
+# @todo - this is only reporting the simulate method, not the rest of FLAMEGPUs runtime which may be biased compared to other timings (need to check) + +import sys +import subprocess +import pathlib +import re +import math +import statistics + +SCRIPT_PATH = pathlib.Path(__file__).parent +BUILD_DIR = "build" +CONFIG = "Release" +REPETITIONS = 10 + +def extract_times(lines): + PRE_POP_RE = re.compile("^(pre population \(s\): ([0-9]+(\.[0-9]+)?))$") + POP_GEN_RE = re.compile("^(population generation \(s\): ([0-9]+(\.[0-9]+)?))$") + MAIN_RE = re.compile("^(main \(s\): ([0-9]+(\.[0-9]+)?))$") + SIMULATE_RE = re.compile("^(simulate \(s\): ([0-9]+(\.[0-9]+)?))$") + RTC_RE = re.compile("^(rtc \(s\): ([0-9]+(\.[0-9]+)?))$") + pre_pop_time = math.inf + pop_gen_time = math.inf + main_time = math.inf + simulate_time = math.inf + rtc_time = math.inf + matched = False + for line in lines: + line_matched = False + if not line_matched: + match = PRE_POP_RE.match(line.strip()) + if match: + pre_pop_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = POP_GEN_RE.match(line.strip()) + if match: + pop_gen_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = MAIN_RE.match(line.strip()) + if match: + main_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = SIMULATE_RE.match(line.strip()) + if match: + simulate_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = RTC_RE.match(line.strip()) + if match: + rtc_time = float(match.group(2)) + line_matched = True + return pre_pop_time, pop_gen_time, main_time, simulate_time, rtc_time + + +# Benchmark flocking +flocking_model_path = SCRIPT_PATH / "src/boids2D.py" +if flocking_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + rtc_times = [] + for i in range(0, REPETITIONS): + result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + rtc_times.append(rtc_time) + min_main_time = min(main_times) + min_simulate_time = min(sim_times) + print(f"FLAMEGPU2 flocking prepop times (s) : {pre_pop_times}") + print(f"FLAMEGPU2 flocking popgen times (s) : {pop_gen_times}") + print(f"FLAMEGPU2 flocking simulate times (s): {sim_times}") + print(f"FLAMEGPU2 flocking rtc times (s): {rtc_times}") + print(f"FLAMEGPU2 flocking main times (s) : {main_times}") + print(f"FLAMEGPU2 flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"FLAMEGPU2 flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"FLAMEGPU2 flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"FLAMEGPU2 flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"FLAMEGPU2 flocking main (mean ms) : {statistics.mean(main_times)*1e3}") + + +else: + print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. 
Please check paths are correct.", file=sys.stderr) + +# Benchmark Schelling +schelling_model_path = SCRIPT_PATH / f"{BUILD_DIR}/bin/{CONFIG}/schelling" +if schelling_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + rtc_times = [] + for i in range(0, REPETITIONS): + result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + rtc_times.append(rtc_time) + print(f"FLAMEGPU2 schelling prepop times (s) : {pre_pop_times}") + print(f"FLAMEGPU2 schelling popgen times (s) : {pop_gen_times}") + print(f"FLAMEGPU2 schelling simulate times (s): {sim_times}") + print(f"FLAMEGPU2 schelling rtc times (s): {rtc_times}") + print(f"FLAMEGPU2 schelling main times (s) : {main_times}") + print(f"FLAMEGPU2 schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"FLAMEGPU2 schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"FLAMEGPU2 schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"FLAMEGPU2 schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"FLAMEGPU2 schelling main (mean ms) : {statistics.mean(main_times)*1e3}") + +else: + print(f"Error: pyFLAMEGPU schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + From e595e28c808b5d34960f8d80d95645446468a95d Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 27 Nov 2023 12:14:27 +0000 Subject: [PATCH 05/20] Agent Python versions of the two models. Visualisation appears the same between FLAMEGPU implementations. 
--- pyflamegpu-agentpy/.gitignore | 506 ++++++++++++++++++++++++++++ pyflamegpu-agentpy/LICENSE.MD | 9 + pyflamegpu-agentpy/README.md | 11 + pyflamegpu-agentpy/benchmark.py | 127 +++++++ pyflamegpu-agentpy/src/boids2D.py | 351 +++++++++++++++++++ pyflamegpu-agentpy/src/schelling.py | 336 ++++++++++++++++++ pyflamegpu/src/boids2D.py | 1 - 7 files changed, 1340 insertions(+), 1 deletion(-) create mode 100644 pyflamegpu-agentpy/.gitignore create mode 100644 pyflamegpu-agentpy/LICENSE.MD create mode 100644 pyflamegpu-agentpy/README.md create mode 100644 pyflamegpu-agentpy/benchmark.py create mode 100644 pyflamegpu-agentpy/src/boids2D.py create mode 100644 pyflamegpu-agentpy/src/schelling.py diff --git a/pyflamegpu-agentpy/.gitignore b/pyflamegpu-agentpy/.gitignore new file mode 100644 index 00000000..5d768891 --- /dev/null +++ b/pyflamegpu-agentpy/.gitignore @@ -0,0 +1,506 @@ +# Temporary files for benchmarking +benchmark_config.txt + +# Model output files +*.xml +*.json +*.bin + +# Visualisation screenshots +*.png + +# GoogleTest directories (handled by CMake) +googletest-build/ +googletest-download/ +googletest-src/ + +# Doxygen / CMake + Doxygen generated files +DoxyGen*/ +CMakeDoxyfile.in +CMakeDoxygenDefaults.cmake +Doxyfile.docs + +# Visual Studio (these are gen by CMake, don't want tracked) +*.vcxproj +*.vcxproj.filters +*.sln + +# Directories +docs/ +bin/ +obj/ +build*/ + +# Editor configuration files/directories +.vscode +*.cbp +*.layout +.cbp +.layout + +# CUDA +*.i +*.ii +*.gpu +*.ptx +*.cubin +*.fatbin +*.tlog +*.cache + +# Valgrind log files (for syntax-highlight enabled editors such as vscode with plugin) +*.valgrind + +# Compiled Object files +*.slo +*.lo +*.o +*.obj + +# Precompiled Headers +*.gch +*.pch + +# Compiled Dynamic libraries +*.so +*.dylib +*.dll + +# Fortran module files +*.mod + +# Compiled Static libraries +*.lai +*.la +*.a +*.lib + +# Executables +*.exe +*.out +*.app + +# ========================= +# Operating System Files +# ========================= + +# OSX +# ========================= + +.DS_Store +.AppleDouble +.LSOverride + +# Thumbnails +._* + +# Files that might appear on external disk +.Spotlight-V100 +.Trashes + +# Directories potentially created on remote AFP share +.AppleDB +.AppleDesktop +Network Trash Folder +Temporary Items +.apdisk + +# Windows +# ========================= + +# Windows image file caches +Thumbs.db +ehthumbs.db +*.opendb + +# Folder config file +Desktop.ini + +# Recycle Bin used on file shares +$RECYCLE.BIN/ + +# Windows Installer files +*.cab +*.msi +*.msm +*.msp + +# Windows shortcuts +*.lnk + +# ========================= +# CMake +# From: https://github.com/github/gitignore/blob/master/CMake.gitignore +# Commit 3784768 +# ========================= +CMakeLists.txt.user +CMakeCache.txt +CMakeFiles +CMakeScripts +Testing +Makefile +cmake_install.cmake +install_manifest.txt +compile_commands.json +CTestTestfile.cmake +_deps + +# ========================= +# Visual Studio +# From: https://github.com/github/gitignore/blob/master/VisualStudio.gitignore +# Commit 3784768 +# ========================= + +## Ignore Visual Studio temporary files, build results, and +## files generated by popular Visual Studio add-ons. 
+## +## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore + +# User-specific files +*.rsuser +*.suo +*.user +*.userosscache +*.sln.docstates + +# User-specific files (MonoDevelop/Xamarin Studio) +*.userprefs + +# Mono auto generated files +mono_crash.* + +# Build results +[Dd]ebug/ +[Dd]ebugPublic/ +[Rr]elease/ +[Rr]eleases/ +x64/ +x86/ +[Aa][Rr][Mm]/ +[Aa][Rr][Mm]64/ +bld/ +[Bb]in/ +[Oo]bj/ +[Ll]og/ +[Ll]ogs/ + +# Visual Studio 2015/2017 cache/options directory +.vs/ +# Uncomment if you have tasks that create the project's static files in wwwroot +#wwwroot/ + +# Visual Studio 2017 auto generated files +Generated\ Files/ + +# MSTest test Results +[Tt]est[Rr]esult*/ +[Bb]uild[Ll]og.* + +# NUnit +*.VisualState.xml +TestResult.xml +nunit-*.xml + +# Build Results of an ATL Project +[Dd]ebugPS/ +[Rr]eleasePS/ +dlldata.c + +# Benchmark Results +BenchmarkDotNet.Artifacts/ + +# .NET Core +project.lock.json +project.fragment.lock.json +artifacts/ + +# StyleCop +StyleCopReport.xml + +# Files built by Visual Studio +*_i.c +*_p.c +*_h.h +*.ilk +*.meta +*.obj +*.iobj +*.pch +*.pdb +*.ipdb +*.pgc +*.pgd +*.rsp +*.sbr +*.tlb +*.tli +*.tlh +*.tmp +*.tmp_proj +*_wpftmp.csproj +*.log +*.vspscc +*.vssscc +.builds +*.pidb +*.svclog +*.scc + +# Chutzpah Test files +_Chutzpah* + +# Visual C++ cache files +ipch/ +*.aps +*.ncb +*.opendb +*.opensdf +*.sdf +*.cachefile +*.VC.db +*.VC.VC.opendb + +# Visual Studio profiler +*.psess +*.vsp +*.vspx +*.sap + +# Visual Studio Trace Files +*.e2e + +# TFS 2012 Local Workspace +$tf/ + +# Guidance Automation Toolkit +*.gpState + +# ReSharper is a .NET coding add-in +_ReSharper*/ +*.[Rr]e[Ss]harper +*.DotSettings.user + +# JustCode is a .NET coding add-in +.JustCode + +# TeamCity is a build add-in +_TeamCity* + +# DotCover is a Code Coverage Tool +*.dotCover + +# AxoCover is a Code Coverage Tool +.axoCover/* +!.axoCover/settings.json + +# Visual Studio code coverage results +*.coverage +*.coveragexml + +# NCrunch +_NCrunch_* +.*crunch*.local.xml +nCrunchTemp_* + +# MightyMoose +*.mm.* +AutoTest.Net/ + +# Web workbench (sass) +.sass-cache/ + +# Installshield output folder +[Ee]xpress/ + +# DocProject is a documentation generator add-in +DocProject/buildhelp/ +DocProject/Help/*.HxT +DocProject/Help/*.HxC +DocProject/Help/*.hhc +DocProject/Help/*.hhk +DocProject/Help/*.hhp +DocProject/Help/Html2 +DocProject/Help/html + +# Click-Once directory +publish/ + +# Publish Web Output +*.[Pp]ublish.xml +*.azurePubxml +# Note: Comment the next line if you want to checkin your web deploy settings, +# but database connection strings (with potential passwords) will be unencrypted +*.pubxml +*.publishproj + +# Microsoft Azure Web App publish settings. Comment the next line if you want to +# checkin your Azure Web App publish settings, but sensitive information contained +# in these scripts will be unencrypted +PublishScripts/ + +# NuGet Packages +*.nupkg +# NuGet Symbol Packages +*.snupkg +# The packages folder can be ignored because of Package Restore +**/[Pp]ackages/* +# except build/, which is used as an MSBuild target. 
+!**/[Pp]ackages/build/ +# Uncomment if necessary however generally it will be regenerated when needed +#!**/[Pp]ackages/repositories.config +# NuGet v3's project.json files produces more ignorable files +*.nuget.props +*.nuget.targets + +# Microsoft Azure Build Output +csx/ +*.build.csdef + +# Microsoft Azure Emulator +ecf/ +rcf/ + +# Windows Store app package directories and files +AppPackages/ +BundleArtifacts/ +Package.StoreAssociation.xml +_pkginfo.txt +*.appx +*.appxbundle +*.appxupload + +# Visual Studio cache files +# files ending in .cache can be ignored +*.[Cc]ache +# but keep track of directories ending in .cache +!?*.[Cc]ache/ + +# Others +ClientBin/ +~$* +*~ +*.dbmdl +*.dbproj.schemaview +*.jfm +*.pfx +*.publishsettings +orleans.codegen.cs + +# Including strong name files can present a security risk +# (https://github.com/github/gitignore/pull/2483#issue-259490424) +#*.snk + +# Since there are multiple workflows, uncomment next line to ignore bower_components +# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622) +#bower_components/ + +# RIA/Silverlight projects +Generated_Code/ + +# Backup & report files from converting an old project file +# to a newer Visual Studio version. Backup files are not needed, +# because we have git ;-) +_UpgradeReport_Files/ +Backup*/ +UpgradeLog*.XML +UpgradeLog*.htm +ServiceFabricBackup/ +*.rptproj.bak + +# SQL Server files +*.mdf +*.ldf +*.ndf + +# Business Intelligence projects +*.rdl.data +*.bim.layout +*.bim_*.settings +*.rptproj.rsuser +*- [Bb]ackup.rdl +*- [Bb]ackup ([0-9]).rdl +*- [Bb]ackup ([0-9][0-9]).rdl + +# Microsoft Fakes +FakesAssemblies/ + +# GhostDoc plugin setting file +*.GhostDoc.xml + +# Node.js Tools for Visual Studio +.ntvs_analysis.dat +node_modules/ + +# Visual Studio 6 build log +*.plg + +# Visual Studio 6 workspace options file +*.opt + +# Visual Studio 6 auto-generated workspace file (contains which files were open etc.) 
+*.vbw + +# Visual Studio LightSwitch build output +**/*.HTMLClient/GeneratedArtifacts +**/*.DesktopClient/GeneratedArtifacts +**/*.DesktopClient/ModelManifest.xml +**/*.Server/GeneratedArtifacts +**/*.Server/ModelManifest.xml +_Pvt_Extensions + +# Paket dependency manager +.paket/paket.exe +paket-files/ + +# FAKE - F# Make +.fake/ + +# CodeRush personal settings +.cr/personal + +# Python Tools for Visual Studio (PTVS) +__pycache__/ +*.pyc + +# Cake - Uncomment if you are using it +# tools/** +# !tools/packages.config + +# Tabs Studio +*.tss + +# Telerik's JustMock configuration file +*.jmconfig + +# BizTalk build output +*.btp.cs +*.btm.cs +*.odx.cs +*.xsd.cs + +# OpenCover UI analysis results +OpenCover/ + +# Azure Stream Analytics local run output +ASALocalRun/ + +# MSBuild Binary and Structured Log +*.binlog + +# NVidia Nsight GPU debugger configuration file +*.nvuser + +# MFractors (Xamarin productivity tool) working folder +.mfractor/ + +# Local History for Visual Studio +.localhistory/ + +# BeatPulse healthcheck temp database +healthchecksdb + +# Backup folder for Package Reference Convert tool in Visual Studio 2017 +MigrationBackup/ + +# Ionide (cross platform F# VS Code tools) working folder +.ionide/ diff --git a/pyflamegpu-agentpy/LICENSE.MD b/pyflamegpu-agentpy/LICENSE.MD new file mode 100644 index 00000000..662b75ef --- /dev/null +++ b/pyflamegpu-agentpy/LICENSE.MD @@ -0,0 +1,9 @@ +MIT License for FLAME GPU 2 + +Copyright (c) 2019 University of Sheffield + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/pyflamegpu-agentpy/README.md b/pyflamegpu-agentpy/README.md new file mode 100644 index 00000000..a0b9d70e --- /dev/null +++ b/pyflamegpu-agentpy/README.md @@ -0,0 +1,11 @@ +# pyFLAMEGPU ABM Comparison Benchmarks + +This directory contains the implementation of several ABMs used to compare agent based models, including: + ++ `boids2D` - a 2D spatial implementation of boids flocking model ++ `schelling` - an implementation of Schelling's Model of Segregation + +The [`pyflamegpu` package](https://github.com/FLAMEGPU/FLAMEGPU2#installation) must be available within the Python environment used to launch the independent models or `benchmark.py`. The upto-date requirements for running pyFLAMEGPU can be found within the same document, however typically a recent CUDA capable GPU, CUDA toolkit and Python 3 are the core requirements. 
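+
+Each model can also be run directly; for example, `benchmark.py` launches them as `python3 src/boids2D.py -s 100 --disable-rtc-cache`, where `-s` sets the number of simulation steps and `--disable-rtc-cache` forces the agent functions to be recompiled at runtime rather than loaded from the RTC cache.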
+ +For details on how to develop a model using pyFLAMEGPU (or FLAME GPU 2) or the requirements for using pyFLAMEGPU, refer to the [userguide & API documentation](https://docs.flamegpu.com/). + diff --git a/pyflamegpu-agentpy/benchmark.py b/pyflamegpu-agentpy/benchmark.py new file mode 100644 index 00000000..817478a5 --- /dev/null +++ b/pyflamegpu-agentpy/benchmark.py @@ -0,0 +1,127 @@ +#! /usr/bin/env python3 + +# Run 100 iterations of each the Release builds of boids and schelling models, capturing the time of each and emitting the minimum time. +# @todo - support scaling, do not just output minimum, time whole execution vs just simulation? +# @todo - this is only reporting the simulate method, not the rest of FLAMEGPUs runtime which may be biased compared to other timings (need to check) + +import sys +import subprocess +import pathlib +import re +import math +import statistics + +SCRIPT_PATH = pathlib.Path(__file__).parent +BUILD_DIR = "build" +CONFIG = "Release" +REPETITIONS = 10 + +def extract_times(lines): + PRE_POP_RE = re.compile("^(pre population \(s\): ([0-9]+(\.[0-9]+)?))$") + POP_GEN_RE = re.compile("^(population generation \(s\): ([0-9]+(\.[0-9]+)?))$") + MAIN_RE = re.compile("^(main \(s\): ([0-9]+(\.[0-9]+)?))$") + SIMULATE_RE = re.compile("^(simulate \(s\): ([0-9]+(\.[0-9]+)?))$") + RTC_RE = re.compile("^(rtc \(s\): ([0-9]+(\.[0-9]+)?))$") + pre_pop_time = math.inf + pop_gen_time = math.inf + main_time = math.inf + simulate_time = math.inf + rtc_time = math.inf + matched = False + for line in lines: + line_matched = False + if not line_matched: + match = PRE_POP_RE.match(line.strip()) + if match: + pre_pop_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = POP_GEN_RE.match(line.strip()) + if match: + pop_gen_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = MAIN_RE.match(line.strip()) + if match: + main_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = SIMULATE_RE.match(line.strip()) + if match: + simulate_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = RTC_RE.match(line.strip()) + if match: + rtc_time = float(match.group(2)) + line_matched = True + return pre_pop_time, pop_gen_time, main_time, simulate_time, rtc_time + + +# Benchmark flocking +flocking_model_path = SCRIPT_PATH / "src/boids2D.py" +if flocking_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + rtc_times = [] + for i in range(0, REPETITIONS): + result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + rtc_times.append(rtc_time) + min_main_time = min(main_times) + min_simulate_time = min(sim_times) + print(f"FLAMEGPU2 flocking prepop times (s) : {pre_pop_times}") + print(f"FLAMEGPU2 flocking popgen times (s) : {pop_gen_times}") + print(f"FLAMEGPU2 flocking simulate times (s): {sim_times}") + print(f"FLAMEGPU2 flocking rtc times (s): {rtc_times}") + print(f"FLAMEGPU2 flocking main times (s) : {main_times}") + print(f"FLAMEGPU2 flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"FLAMEGPU2 flocking popgen (mean ms) : 
{statistics.mean(pop_gen_times)*1e3}") + print(f"FLAMEGPU2 flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"FLAMEGPU2 flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"FLAMEGPU2 flocking main (mean ms) : {statistics.mean(main_times)*1e3}") + + +else: + print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + +# Benchmark Schelling +schelling_model_path = SCRIPT_PATH / f"{BUILD_DIR}/bin/{CONFIG}/schelling" +if schelling_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + rtc_times = [] + for i in range(0, REPETITIONS): + result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + rtc_times.append(rtc_time) + print(f"FLAMEGPU2 schelling prepop times (s) : {pre_pop_times}") + print(f"FLAMEGPU2 schelling popgen times (s) : {pop_gen_times}") + print(f"FLAMEGPU2 schelling simulate times (s): {sim_times}") + print(f"FLAMEGPU2 schelling rtc times (s): {rtc_times}") + print(f"FLAMEGPU2 schelling main times (s) : {main_times}") + print(f"FLAMEGPU2 schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"FLAMEGPU2 schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"FLAMEGPU2 schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"FLAMEGPU2 schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"FLAMEGPU2 schelling main (mean ms) : {statistics.mean(main_times)*1e3}") + +else: + print(f"Error: pyFLAMEGPU schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + diff --git a/pyflamegpu-agentpy/src/boids2D.py b/pyflamegpu-agentpy/src/boids2D.py new file mode 100644 index 00000000..b0d17492 --- /dev/null +++ b/pyflamegpu-agentpy/src/boids2D.py @@ -0,0 +1,351 @@ +from pyflamegpu import * +import pyflamegpu.codegen +import typing +import sys, random, time +import numpy as np +import math + +if "--disable-rtc-cache" in sys.argv: + rtc_cache = pyflamegpu.JitifyCache.getInstance() + rtc_cache.useMemoryCache(False) + rtc_cache.useDiskCache(False) +id = 2 +id += 2 +""" + * pyFLAME GPU 2 implementation of the Boids flocking model in 2D, using spatial2D messaging. + * This is based on the FLAME GPU 1 implementation, but with dynamic generation of agents. + * The environment is wrapped, effectively the surface of a torus. +""" + +# inputdata agent function for Boid agents, which reads data from neighbouring Boid agents, to perform the boid +""" + * Get the length of a vector + * @param x x component of the vector + * @param y y component of the vector + * @return the length of the vector +""" +@pyflamegpu.device_function +def vec3Length(x: float, y: float) -> float : + return math.sqrtf(x * x + y * y) + +""" + * inputdata agent function for Boid agents, which reads data from neighbouring Boid agents, to perform the boid flocking model. 
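+ * The separation, cohesion and alignment rules are accumulated into a single velocity change, which is scaled by GLOBAL_SCALE, applied to the agent's velocity (whose magnitude is then bounded to [0.5, 1.0]) and finally used to update and wrap the agent's position.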
+""" +@pyflamegpu.agent_function +def inputdata(message_in: pyflamegpu.MessageSpatial2D, message_out: pyflamegpu.MessageNone): + # Agent properties in local register + id = pyflamegpu.getID() + # Agent position + agent_x = pyflamegpu.getVariableFloat("x") + agent_y = pyflamegpu.getVariableFloat("y") + # Agent velocity + agent_fx = pyflamegpu.getVariableFloat("fx") + agent_fy = pyflamegpu.getVariableFloat("fy") + + # Boids perceived center + perceived_centre_x = 0.0 + perceived_centre_y = 0.0 + perceived_count = 0 + + # Boids global velocity matching + global_velocity_x = 0.0 + global_velocity_y = 0.0 + + # Total change in velocity + velocity_change_x = 0.0 + velocity_change_y = 0.0 + + INTERACTION_RADIUS = pyflamegpu.environment.getPropertyFloat("INTERACTION_RADIUS") + SEPARATION_RADIUS = pyflamegpu.environment.getPropertyFloat("SEPARATION_RADIUS") + # Iterate location messages, accumulating relevant data and counts. + for message in message_in.wrap(agent_x, agent_y): + # Ignore self messages. + if message.getVariableID("id") != id: + # Get the message location and velocity. + message_x = message.getVirtualX() + message_y = message.getVirtualY() + + # Check interaction radius + separation = vec3Length(agent_x - message_x, agent_y - message_y) + + if separation < INTERACTION_RADIUS: + # Update the perceived centre + perceived_centre_x += message_x + perceived_centre_y += message_y + perceived_count+=1 + + # Update perceived velocity matching + message_fx = message.getVariableFloat("fx") + message_fy = message.getVariableFloat("fy") + global_velocity_x += message_fx; + global_velocity_y += message_fy; + + # Update collision centre + if separation < (SEPARATION_RADIUS): # dependant on model size + # Rule 3) Avoid other nearby boids (Separation) + normalizedSeparation = (separation / SEPARATION_RADIUS) + invNormSep = (1.0 - normalizedSeparation) + invSqSep = invNormSep * invNormSep + + collisionScale = pyflamegpu.environment.getPropertyFloat("COLLISION_SCALE") + velocity_change_x += collisionScale * (agent_x - message_x) * invSqSep + velocity_change_y += collisionScale * (agent_y - message_y) * invSqSep + + if perceived_count: + # Divide positions/velocities by relevant counts. 
+ perceived_centre_x /= perceived_count + perceived_centre_y /= perceived_count + global_velocity_x /= perceived_count + global_velocity_y /= perceived_count + + # Rule 1) Steer towards perceived centre of flock (Cohesion) + steer_velocity_x = 0.0 + steer_velocity_y = 0.0 + + STEER_SCALE = pyflamegpu.environment.getPropertyFloat("STEER_SCALE") + steer_velocity_x = (perceived_centre_x - agent_x) * STEER_SCALE + steer_velocity_y = (perceived_centre_y - agent_y) * STEER_SCALE + + velocity_change_x += steer_velocity_x; + velocity_change_y += steer_velocity_y; + + # Rule 2) Match neighbours speeds (Alignment) + match_velocity_x = 0.0 + match_velocity_y = 0.0 + + MATCH_SCALE = pyflamegpu.environment.getPropertyFloat("MATCH_SCALE") + match_velocity_x = global_velocity_x * MATCH_SCALE + match_velocity_y = global_velocity_y * MATCH_SCALE + + velocity_change_x += match_velocity_x - agent_fx + velocity_change_y += match_velocity_y - agent_fy + + + # Global scale of velocity change + GLOBAL_SCALE = pyflamegpu.environment.getPropertyFloat("GLOBAL_SCALE") + velocity_change_x *= GLOBAL_SCALE + velocity_change_y *= GLOBAL_SCALE + + # Update agent velocity + agent_fx += velocity_change_x + agent_fy += velocity_change_y + + # Bound velocity + agent_fscale = vec3Length(agent_fx, agent_fy) + if agent_fscale > 1: + agent_fx /= agent_fscale + agent_fy /= agent_fscale + + minSpeed = 0.5 + if agent_fscale < minSpeed: + # Normalise + agent_fx /= agent_fscale + agent_fy /= agent_fscale + + # Scale to min + agent_fx *= minSpeed + agent_fy *= minSpeed + + + # Apply the velocity + TIME_SCALE = pyflamegpu.environment.getPropertyFloat("TIME_SCALE"); + agent_x += agent_fx * TIME_SCALE + agent_y += agent_fy * TIME_SCALE + + # Wrap position + MIN_POSITION = pyflamegpu.environment.getPropertyFloat("MIN_POSITION") + MAX_POSITION = pyflamegpu.environment.getPropertyFloat("MAX_POSITION") + width = MAX_POSITION-MIN_POSITION + if agent_x < MIN_POSITION : + agent_x += width + if agent_y < MIN_POSITION : + agent_y += width + + if agent_x > MAX_POSITION : + agent_x -= width + if agent_y > MAX_POSITION : + agent_y -= width + + # Update global agent memory. + pyflamegpu.setVariableFloat("x", agent_x) + pyflamegpu.setVariableFloat("y", agent_y) + + pyflamegpu.setVariableFloat("fx", agent_fx) + pyflamegpu.setVariableFloat("fy", agent_fy) + + return pyflamegpu.ALIVE + + +# outputdata agent function for Boid agents, which outputs publicly visible properties to a message list +@pyflamegpu.agent_function +def outputdata(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.MessageSpatial2D): + # Output each agents publicly visible properties. 
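+    # x and y are set like any other message variable, but Spatial2D message lists treat them as the
+    # implicit message coordinates used for spatial binning when neighbours later iterate the list.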
+ message_out.setVariableID("id", pyflamegpu.getID()); + message_out.setVariableFloat("x", pyflamegpu.getVariableFloat("x")) + message_out.setVariableFloat("y", pyflamegpu.getVariableFloat("y")) + message_out.setVariableFloat("fx", pyflamegpu.getVariableFloat("fx")) + message_out.setVariableFloat("fy", pyflamegpu.getVariableFloat("fy")) + return pyflamegpu.ALIVE; + + +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() +model = pyflamegpu.ModelDescription("Boids Spatial2D"); + +# Environment variables with default values +env = model.Environment(); + +# Population size to generate, if no agents are loaded from disk +env.newPropertyUInt("POPULATION_TO_GENERATE", 80000) + +# Environment Bounds +env.newPropertyFloat("MIN_POSITION", 0.0) +env.newPropertyFloat("MAX_POSITION", 400.0) + +# Initialisation parameter(s) +env.newPropertyFloat("INITIAL_SPEED", 1.0) # always start with a speed of 1.0 + +# Interaction radius +env.newPropertyFloat("INTERACTION_RADIUS", 5.0) +env.newPropertyFloat("SEPARATION_RADIUS", 1.0) + +# Global Scalers +env.newPropertyFloat("TIME_SCALE", 1.0) # 1.0 for benchmarking to behave the same as the other simulators. +env.newPropertyFloat("GLOBAL_SCALE", 0.5) # 1.0 for comparing to other benchmarks + +# Rule scalers +env.newPropertyFloat("STEER_SCALE", 0.03) # cohere scale? 0.03 +env.newPropertyFloat("COLLISION_SCALE", 0.015) # separate_scale? 0.015 +env.newPropertyFloat("MATCH_SCALE", 0.05) # match 0.05 + + +# Define the Location 2D spatial message list +message = model.newMessageSpatial2D("location") +# Set the range and bounds. +message.setRadius(env.getPropertyFloat("INTERACTION_RADIUS")) +message.setMin(env.getPropertyFloat("MIN_POSITION"), env.getPropertyFloat("MIN_POSITION")) +message.setMax(env.getPropertyFloat("MAX_POSITION"), env.getPropertyFloat("MAX_POSITION")) + +# A message to hold the location of an agent. +message.newVariableID("id") +# Spatial 2D messages implicitly have float members x and y, so they do not need to be defined +message.newVariableFloat("fx") +message.newVariableFloat("fy") + +# Boid agent +agent = model.newAgent("Boid") +agent.newVariableFloat("x") +agent.newVariableFloat("y") +agent.newVariableFloat("fx") +agent.newVariableFloat("fy") +# Define the agents methods +outputdataDescription = agent.newRTCFunction("outputdata", pyflamegpu.codegen.translate(outputdata)) +outputdataDescription.setMessageOutput("location") +inputdataDescription = agent.newRTCFunction("inputdata", pyflamegpu.codegen.translate(inputdata)) +inputdataDescription.setMessageInput("location") + +# Specify agent method dependencies, i.e. the execution order within a layer. 
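+# outputdata is the execution root and inputdata depends on it, so generateLayers() below builds a layer
+# plan in which every agent has output its location message before any agent reads the message list.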
+model.addExecutionRoot(outputdataDescription) +inputdataDescription.dependsOn(outputdataDescription) +# Build the execution graph +model.generateLayers() + +# Create Model Runner +simulator = pyflamegpu.CUDASimulation(model) + +# If enabled, define the visualisation for the model +if pyflamegpu.VISUALISATION: + visualisation = simulator.getVisualisation(); + + visualisation.setSimulationSpeed(10) # slow down the simulation for visualisation purposes + env = model.Environment() + ENV_WIDTH = env.getPropertyFloat("MAX_POSITION") - env.getPropertyFloat("MIN_POSITION") + ENV_CENTER = env.getPropertyFloat("MIN_POSITION") + (ENV_WIDTH) / 2.0 + INIT_CAM = env.getPropertyFloat("MAX_POSITION") * 1.25 + visualisation.setInitialCameraLocation(ENV_CENTER, ENV_CENTER, INIT_CAM) + visualisation.setInitialCameraTarget(ENV_CENTER, ENV_CENTER, 0.0) + visualisation.setCameraSpeed(0.001 * ENV_WIDTH) + visualisation.setViewClips(0.00001, ENV_WIDTH) + agentVisualiser = visualisation.addAgent("Boid") + # Position vars are named x, y so they are used by default + agentVisualiser.setForwardXVariable("fx") + agentVisualiser.setForwardYVariable("fy") + agentVisualiser.setModel(pyflamegpu.STUNTPLANE) + agentVisualiser.setModelScale(env.getPropertyFloat("SEPARATION_RADIUS")/3.0) + # Add a settings UI + ui = visualisation.newUIPanel("Environment") + ui.newStaticLabel("Interaction") + ui.newEnvironmentPropertyDragFloat("INTERACTION_RADIUS", 0.0, env.getPropertyFloat("INTERACTION_RADIUS"), 0.01) # Can't go bigger than the comms radius, which is fixed at compile time. + ui.newEnvironmentPropertyDragFloat("SEPARATION_RADIUS", 0.0, env.getPropertyFloat("INTERACTION_RADIUS"), 0.01) # Can't go bigger than the initial interaction radius which is fixed at compile time. + ui.newStaticLabel("Environment Scalars") + ui.newEnvironmentPropertyDragFloat("TIME_SCALE", 0.0, 1.0, 0.0001) + ui.newEnvironmentPropertyDragFloat("GLOBAL_SCALE", 0.0, 0.5, 0.001) + ui.newStaticLabel("Force Scalars") + ui.newEnvironmentPropertyDragFloat("STEER_SCALE", 0.0, 10.0, 0.001) + ui.newEnvironmentPropertyDragFloat("COLLISION_SCALE", 0.0, 10.0, 0.001) + ui.newEnvironmentPropertyDragFloat("MATCH_SCALE", 0.0, 10.0, 0.001) + + visualisation.activate(); + +# Initialisation +simulator.initialise(sys.argv) +prePopulationTimer_stop = time.monotonic() +print("pre population (s): %.6f\n"%(prePopulationTimer_stop - prePopulationTimer_start)) + +populationGenerationTimer_start = time.monotonic() + +# If no agent states were provided, generate a population of randomly distributed agents within the environment space +if not simulator.SimulationConfig().input_file: + env = model.Environment() + # Uniformly distribute agents within space, with uniformly distributed initial velocity. 
+ # c++ random number generator engine + rng = random.Random(simulator.SimulationConfig().random_seed) + + # Generate a random location within the environment bounds + min_pos = env.getPropertyFloat("MIN_POSITION") + max_pos = env.getPropertyFloat("MAX_POSITION") + + # Generate a random speed between 0 and the maximum initial speed + fmagnitude = env.getPropertyFloat("INITIAL_SPEED") + + # Generate a population of agents, based on the relevant environment property + populationSize = env.getPropertyUInt("POPULATION_TO_GENERATE") + population = pyflamegpu.AgentVector(model.Agent("Boid"), populationSize) + for i in range(populationSize): + instance = population[i] + + # Agent position in space + instance.setVariableFloat("x", rng.uniform(min_pos, max_pos)); + instance.setVariableFloat("y", rng.uniform(min_pos, max_pos)); + + # Generate a random velocity direction + fx = rng.uniform(-1, 1) + fy = rng.uniform(-1, 1) + # Use the random speed for the velocity. + fx /= np.linalg.norm([fx, fy]) + fy /= np.linalg.norm([fx, fy]) + fx *= fmagnitude + fy *= fmagnitude + + # Set these for the agent. + instance.setVariableFloat("fx", fx) + instance.setVariableFloat("fy", fy) + + simulator.setPopulationData(population); + +populationGenerationTimer_stop = time.monotonic() +print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + +# Execute the simulation +simulator.simulate() + +# Print the exeuction time to stdout +print("simulate (s): %.6f"%simulator.getElapsedTimeSimulation()) +print("rtc (s): %.6f"%simulator.getElapsedTimeRTCInitialisation()) + +# Join the visualsition if required +if pyflamegpu.VISUALISATION: + visualisation.join() + +mainTimer_stop = time.monotonic() +print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) diff --git a/pyflamegpu-agentpy/src/schelling.py b/pyflamegpu-agentpy/src/schelling.py new file mode 100644 index 00000000..54fdf6c8 --- /dev/null +++ b/pyflamegpu-agentpy/src/schelling.py @@ -0,0 +1,336 @@ +from pyflamegpu import * +import pyflamegpu.codegen +from typing import Final +import sys, random, time + +if "--disable-rtc-cache" in sys.argv: + rtc_cache = pyflamegpu.JitifyCache.getInstance() + rtc_cache.useMemoryCache(False) + rtc_cache.useDiskCache(False) + +# Configurable properties (note these are not dynamically updated in current agent functions) +GRID_WIDTH: Final = 500 +POPULATED_COUNT: Final = 200000 + +THRESHOLD: Final = 0.375 # 0.375 == 3/8s to match integer models + +A: Final = 0 +B: Final = 1 +UNOCCUPIED: Final = 2 + +# Agents output their type +@pyflamegpu.agent_function +def output_type(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.MessageArray2D): + message_out.setVariableUInt("type", pyflamegpu.getVariableUInt("type")) + message_out.setIndex(pyflamegpu.getVariableUIntArray2("pos", 0), pyflamegpu.getVariableUIntArray2("pos", 1)) + return pyflamegpu.ALIVE + +# Agents decide whether they are happy or not and whether or not their space is available +@pyflamegpu.agent_function +def determine_status(message_in: pyflamegpu.MessageArray2D, message_out: pyflamegpu.MessageNone): + my_x = pyflamegpu.getVariableUIntArray2("pos", 0) + my_y = pyflamegpu.getVariableUIntArray2("pos", 1) + + same_type_neighbours = 0 + diff_type_neighbours = 0 + + # Iterate 3x3 Moore neighbourhood (this does not include the central cell) + my_type = pyflamegpu.getVariableUInt("type") + for message in message_in.wrap(my_x, my_y): + message_type = message.getVariableUInt("type") + same_type_neighbours += my_type == 
message_type + diff_type_neighbours += (my_type != message_type) and (message_type != UNOCCUPIED) + + isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD + pyflamegpu.setVariableUInt("happy", isHappy); + my_next_type = my_type if ((my_type != UNOCCUPIED) and isHappy) else UNOCCUPIED + pyflamegpu.setVariableUInt("next_type", my_next_type) + pyflamegpu.setVariableUInt("movement_resolved", (my_type == UNOCCUPIED) or isHappy) + my_availability = (my_type == UNOCCUPIED) or (isHappy == 0) + pyflamegpu.setVariableUInt("available", my_availability) + + return pyflamegpu.ALIVE + +@pyflamegpu.agent_function_condition +def is_available() -> bool: + return pyflamegpu.getVariableUInt("available") + + +@pyflamegpu.agent_function +def output_available_locations(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.MessageArray): + message_out.setIndex(pyflamegpu.getIndex()) + message_out.setVariableID("id", pyflamegpu.getID()) + return pyflamegpu.ALIVE + + +class count_available_spaces(pyflamegpu.HostFunction): + def run(self,FLAMEGPU): + FLAMEGPU.environment.setPropertyUInt("spaces_available", FLAMEGPU.agent("agent").countUInt("available", 1)) + +@pyflamegpu.agent_function_condition +def is_moving() -> bool: + movementResolved = pyflamegpu.getVariableUInt("movement_resolved") + return not movementResolved + +@pyflamegpu.agent_function +def bid_for_location(message_in: pyflamegpu.MessageArray, message_out: pyflamegpu.MessageBucket): + # Select a location + selected_index = pyflamegpu.random.uniformUInt(0, pyflamegpu.environment.getPropertyUInt("spaces_available") - 1) + + # Get the location at that index + message = message_in.at(selected_index) + selected_location = message.getVariableID("id") + + # Bid for that location + message_out.setKey(selected_location - 1) + message_out.setVariableID("id", pyflamegpu.getID()) + message_out.setVariableUInt("type", pyflamegpu.getVariableUInt("type")) + return pyflamegpu.ALIVE + +# @todo - device exception triggered when running +@pyflamegpu.agent_function +def select_winners(message_in: pyflamegpu.MessageBucket, message_out: pyflamegpu.MessageArray): + # First agent in the bucket wins + for message in message_in(pyflamegpu.getID() - 1): + winning_id = message.getVariableID("id") + pyflamegpu.setVariableUInt("next_type", message.getVariableUInt("type")) + pyflamegpu.setVariableUInt("available", 0) + message_out.setIndex(winning_id - 1) + message_out.setVariableUInt("won", 1) + break + return pyflamegpu.ALIVE + +@pyflamegpu.agent_function +def has_moved(message_in: pyflamegpu.MessageArray, message_out: pyflamegpu.MessageNone): + message = message_in.at(pyflamegpu.getID() - 1) + if message.getVariableUInt("won"): + pyflamegpu.setVariableUInt("movement_resolved", 1) + return pyflamegpu.ALIVE + + +class movement_resolved(pyflamegpu.HostCondition): + def run(self, FLAMEGPU): + return pyflamegpu.EXIT if (FLAMEGPU.agent("agent").countUInt("movement_resolved", 0) == 0) else pyflamegpu.CONTINUE + +@pyflamegpu.agent_function +def update_locations(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.MessageNone): + pyflamegpu.setVariableUInt("type", pyflamegpu.getVariableUInt("next_type")) + return pyflamegpu.ALIVE + + +# flamegpu::util::nvtx::Range("main"); +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() + +# Define the model +model = pyflamegpu.ModelDescription("Schelling_segregation") + +# Define the message list(s) +message = model.newMessageArray2D("type_message") 
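+# Array2D message lists hold exactly one message per cell of the GRID_WIDTH x GRID_WIDTH grid; output_type
+# writes its own cell via setIndex(x, y) and determine_status reads the surrounding Moore neighbourhood
+# via message_in.wrap(x, y).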
+message.newVariableUInt("type") +message.setDimensions(GRID_WIDTH, GRID_WIDTH) + +# Define the agent types +# Agents representing the cells. +agent = model.newAgent("agent") +agent.newVariableArrayUInt("pos", 2) +agent.newVariableUInt("type") +agent.newVariableUInt("next_type") +agent.newVariableUInt("happy") +agent.newVariableUInt("available") +agent.newVariableUInt("movement_resolved") +if pyflamegpu.VISUALISATION: + # Redundant separate floating point position vars for vis + agent.newVariableFloat("x") + agent.newVariableFloat("y") + +# Functions +outputTypeFunction = agent.newRTCFunction("output_type", pyflamegpu.codegen.translate(output_type)) +outputTypeFunction.setMessageOutput("type_message") +determineStatusFunction = agent.newRTCFunction("determine_status", pyflamegpu.codegen.translate(determine_status)) +determineStatusFunction.setMessageInput("type_message") +updateLocationsFunction = agent.newRTCFunction("update_locations", pyflamegpu.codegen.translate(update_locations)) + + +# Define a submodel for conflict resolution for agent movement +# This is necessary for parallel random movement of agents, to resolve conflict between agents moveing to the same location +submodel = pyflamegpu.ModelDescription("plan_movement") +# Submodels require an exit condition function, so they do not run forever +submodel.addExitCondition(movement_resolved()) + +# Define the submodel environment +s_env = submodel.Environment() +s_env.newPropertyUInt("spaces_available", 0) + +# Define message lists used within the submodel +s_message = submodel.newMessageArray("available_location_message") +s_message.newVariableID("id") +s_message.setLength(GRID_WIDTH*GRID_WIDTH) + +s_message = submodel.newMessageBucket("intent_to_move_message") +s_message.newVariableID("id") +s_message.newVariableUInt("type") +s_message.setBounds(0, GRID_WIDTH * GRID_WIDTH) + +s_message = submodel.newMessageArray("movement_won_message") +s_message.newVariableUInt("won"); +s_message.setLength(GRID_WIDTH*GRID_WIDTH); + +# Define agents within the submodel +s_agent = submodel.newAgent("agent") +s_agent.newVariableArrayUInt("pos", 2) +s_agent.newVariableUInt("type") +s_agent.newVariableUInt("next_type") +s_agent.newVariableUInt("happy") +s_agent.newVariableUInt("available") +s_agent.newVariableUInt("movement_resolved") + +# Functions +outputLocationsFunction = s_agent.newRTCFunction("output_available_locations", pyflamegpu.codegen.translate(output_available_locations)) +outputLocationsFunction.setMessageOutput("available_location_message") +outputLocationsFunction.setRTCFunctionCondition(pyflamegpu.codegen.translate(is_available)) + +bidFunction = s_agent.newRTCFunction("bid_for_location", pyflamegpu.codegen.translate(bid_for_location)) +bidFunction.setRTCFunctionCondition(pyflamegpu.codegen.translate(is_moving)) +bidFunction.setMessageInput("available_location_message") +bidFunction.setMessageOutput("intent_to_move_message") + +selectWinnersFunction = s_agent.newRTCFunction("select_winners", pyflamegpu.codegen.translate(select_winners)) +selectWinnersFunction.setMessageInput("intent_to_move_message") +selectWinnersFunction.setMessageOutput("movement_won_message") +selectWinnersFunction.setMessageOutputOptional(True) + +hasMovedFunction = s_agent.newRTCFunction("has_moved", pyflamegpu.codegen.translate(has_moved)) +hasMovedFunction.setMessageInput("movement_won_message") + +# Specify control flow for the submodel (@todo - dependencies) +# Available agents output their location (indexed by thread ID) +s_layer1 = submodel.newLayer() 
+s_layer1.addAgentFunction(outputLocationsFunction) + +# Count the number of available spaces +s_layer2 = submodel.newLayer() +s_layer2.addHostFunction(count_available_spaces()) + +# Unhappy agents bid for a new location +s_layer3 = submodel.newLayer() +s_layer3.addAgentFunction(bidFunction) + +# Available locations check if anyone wants to move to them. If so, approve one and mark as unavailable +# Update next type to the type of the mover +# Output a message to inform the mover that they have been successful +s_layer4 = submodel.newLayer() +s_layer4.addAgentFunction(selectWinnersFunction); + +# Movers mark themselves as resolved +s_layer5 = submodel.newLayer() +s_layer5.addAgentFunction(hasMovedFunction); + +# Attach the submodel to the model, +plan_movement = model.newSubModel("plan_movement", submodel) +# Bind the agents within the submodel to the same agents outside of the submodel +plan_movement.bindAgent("agent", "agent", True, True) + +# Define the control flow of the outer/parent model (@todo - use dependencies) +layer1 = model.newLayer() +layer1.addAgentFunction(outputTypeFunction) + +layer2 = model.newLayer() +layer2.addAgentFunction(determineStatusFunction) + +layer3 = model.newLayer() +layer3.addSubModel(plan_movement) + +layer4 = model.newLayer() +layer4.addAgentFunction(updateLocationsFunction) + +# Trying calling this again to fix vis +if pyflamegpu.VISUALISATION: + layer5 = model.newLayer(); + layer5.addAgentFunction(determineStatusFunction) + + +# Create the simulator for the model +cudaSimulation = pyflamegpu.CUDASimulation(model) + +""" + * Create visualisation + * @note FLAMEGPU2 doesn't currently have proper support for discrete/2d visualisations +""" +if pyflamegpu.VISUALISATION: + visualisation = cudaSimulation.getVisualisation() + visualisation.setSimulationSpeed(10) # slow down the simulation for visualisation purposes + visualisation.setInitialCameraLocation(GRID_WIDTH / 2.0, GRID_WIDTH / 2.0, 225.0) + visualisation.setInitialCameraTarget(GRID_WIDTH / 2.0, GRID_WIDTH /2.0, 0.0) + visualisation.setCameraSpeed(0.001 * GRID_WIDTH) + visualisation.setViewClips(0.1, 5000) + agt = visualisation.addAgent("agent") + # Position vars are named x, y, z; so they are used by default + agt.setModel(pyflamegpu.CUBE) # 5 unwanted faces! + agt.setModelScale(1.0) + + cell_colors = pyflamegpu.uDiscreteColor("type", pyflamegpu.Color("#666")) + cell_colors[A] = pyflamegpu.RED + cell_colors[B] = pyflamegpu.BLUE + agt.setColor(cell_colors) + visualisation.activate() + + +# Initialise the simulation +cudaSimulation.initialise(sys.argv) +prePopulationTimer_stop = time.monotonic() +print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + +populationGenerationTimer_start = time.monotonic() +# Generate a population if not provided from disk +if not cudaSimulation.SimulationConfig().input_file: + # Use a seeded generator + rng = random.Random(cudaSimulation.SimulationConfig().random_seed) + # Calculate the total number of cells + CELL_COUNT = GRID_WIDTH * GRID_WIDTH + # Generate a population of agents, which are just default initialised for now + population = pyflamegpu.AgentVector(model.Agent("agent"), CELL_COUNT) + + # Select POPULATED_COUNT unique random integers in the range [0, CELL_COUNT), by shuffling the iota. + shuffledIota = [i for i in range(CELL_COUNT)] + rng.shuffle(shuffledIota) + + # Then iterate the shuffled iota, the first POPULATED_COUNT agents will randomly select a type/group. The remaining agents will not. 
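+    # (Shuffling the full index range and taking the first POPULATED_COUNT entries samples the occupied
+    # cells uniformly, without replacement.)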
+ # The agent index provides the X and Y coordinate for the position. + for elementIdx in range(len(shuffledIota)): + idx = shuffledIota[elementIdx] + instance = population[idx] + # the position can be computed from the index, given the width. + x = int(idx % GRID_WIDTH) + y = int(idx / GRID_WIDTH) + instance.setVariableArrayUInt("pos", [ x, y ]) + + # If the elementIDX is below the populated count, generated a populated type, otherwise it is unoccupied + if elementIdx < POPULATED_COUNT: + type_ = A if rng.random() < 0.5 else B + instance.setVariableUInt("type", type_) + else: + instance.setVariableUInt("type", UNOCCUPIED) + instance.setVariableUInt("happy", 0) + if pyflamegpu.VISUALISATION: + # Redundant separate floating point position vars for vis + instance.setVariableFloat("x", x) + instance.setVariableFloat("y", y) + + cudaSimulation.setPopulationData(population) + +populationGenerationTimer_stop = time.monotonic() +print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + +# Run the simulation +cudaSimulation.simulate() + +# Print the execution time to stdout +print("simulate (s): %.6f"%cudaSimulation.getElapsedTimeSimulation()) +print("rtc (s): %.6f"%cudaSimulation.getElapsedTimeRTCInitialisation()) + +if pyflamegpu.VISUALISATION: + visualisation.join() + +mainTimer_stop = time.monotonic() +print("main (s): %.6f"%(mainTimer_stop - mainTimer_start)) diff --git a/pyflamegpu/src/boids2D.py b/pyflamegpu/src/boids2D.py index 2bdc9285..d5b29b0f 100644 --- a/pyflamegpu/src/boids2D.py +++ b/pyflamegpu/src/boids2D.py @@ -298,7 +298,6 @@ # Spatial 2D messages implicitly have float members x and y, so they do not need to be defined message.newVariableFloat("fx") message.newVariableFloat("fy") -message.newVariableFloat("fz") # Boid agent agent = model.newAgent("Boid") From 87b92abc9f100b46edc34828484b5ca6d6e44c39 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 27 Nov 2023 12:17:16 +0000 Subject: [PATCH 06/20] Trivial fixes to FLAMEGPU models. --- FLAMEGPU2/src/boids2D.cu | 13 ++++++------- FLAMEGPU2/src/schelling.cu | 5 +++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/FLAMEGPU2/src/boids2D.cu b/FLAMEGPU2/src/boids2D.cu index 06475879..eb895609 100644 --- a/FLAMEGPU2/src/boids2D.cu +++ b/FLAMEGPU2/src/boids2D.cu @@ -76,7 +76,7 @@ FLAMEGPU_HOST_DEVICE_FUNCTION void vec3Normalize(float &x, float &y) { } /** - * Ensure that the x and y position are withini the defined boundary area, wrapping to the far side if out of bounds. + * Ensure that the x and y position are within the defined boundary area, wrapping to the far side if out of bounds. 
* Performs the operation in place * @param x x component of the vector * @param y y component of the vector @@ -126,7 +126,7 @@ FLAMEGPU_AGENT_FUNCTION(inputdata, flamegpu::MessageSpatial2D, flamegpu::Message float agent_fx = FLAMEGPU->getVariable("fx"); float agent_fy = FLAMEGPU->getVariable("fy"); - // Boids percieved center + // Boids perceived center float perceived_centre_x = 0.0f; float perceived_centre_y = 0.0f; int perceived_count = 0; @@ -234,7 +234,7 @@ FLAMEGPU_AGENT_FUNCTION(inputdata, flamegpu::MessageSpatial2D, flamegpu::Message agent_x += agent_fx * TIME_SCALE; agent_y += agent_fy * TIME_SCALE; - // Wramp position + // Wrap position const float MIN_POSITION = FLAMEGPU->environment.getProperty("MIN_POSITION"); const float MAX_POSITION = FLAMEGPU->environment.getProperty("MAX_POSITION"); wrapPosition(agent_x, agent_y, MIN_POSITION, MAX_POSITION); @@ -296,7 +296,6 @@ int main(int argc, const char ** argv) { // Spatial 2D messages implicitly have float members x and y, so they do not need to be defined message.newVariable("fx"); message.newVariable("fy"); - message.newVariable("fz"); // Boid agent flamegpu::AgentDescription agent = model.newAgent("Boid"); @@ -319,7 +318,7 @@ int main(int argc, const char ** argv) { // Create Model Runner flamegpu::CUDASimulation simulator(model); - // If enabled, define the visualsiation for the model + // If enabled, define the visualisation for the model #ifdef FLAMEGPU_VISUALISATION flamegpu::visualiser::ModelVis visualisation = simulator.getVisualisation(); { @@ -405,10 +404,10 @@ int main(int argc, const char ** argv) { // Execute the simulation simulator.simulate(); - // Print the exeuction time to stdout + // Print the execution time to stdout fprintf(stdout, "simulate (s): %.6f\n", simulator.getElapsedTimeSimulation()); - // Join the visualsition if required + // Join the visualisation if required #ifdef FLAMEGPU_VISUALISATION visualisation.join(); #endif diff --git a/FLAMEGPU2/src/schelling.cu b/FLAMEGPU2/src/schelling.cu index 615497a3..082be9c1 100644 --- a/FLAMEGPU2/src/schelling.cu +++ b/FLAMEGPU2/src/schelling.cu @@ -4,6 +4,7 @@ #include #include #include +#include #include "flamegpu/flamegpu.h" #include "flamegpu/detail/SteadyClockTimer.h" @@ -149,12 +150,12 @@ int main(int argc, const char ** argv) { // Define a submodel for conflict resolution for agent movement - // This is neccesary for parlalel random movement of agents, to resolve conflict between agents moveing to the same location + // This is necessary for parallel random movement of agents, to resolve conflict between agents moving to the same location flamegpu::ModelDescription submodel("plan_movement"); // Submodels require an exit condition function, so they do not run forever submodel.addExitCondition(movement_resolved); { - // Define the submodel environemnt + // Define the submodel environment flamegpu::EnvironmentDescription env = submodel.Environment(); env.newProperty("spaces_available", 0); From bff754b69937d2c1c48f6915770efe2d5047c05c Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 27 Nov 2023 13:14:54 +0000 Subject: [PATCH 07/20] Correct schelling path in pyflamegpu benchmark scripts. 
--- pyflamegpu-agentpy/benchmark.py | 2 +- pyflamegpu/benchmark.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyflamegpu-agentpy/benchmark.py b/pyflamegpu-agentpy/benchmark.py index 817478a5..f20ea671 100644 --- a/pyflamegpu-agentpy/benchmark.py +++ b/pyflamegpu-agentpy/benchmark.py @@ -94,7 +94,7 @@ def extract_times(lines): print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) # Benchmark Schelling -schelling_model_path = SCRIPT_PATH / f"{BUILD_DIR}/bin/{CONFIG}/schelling" +schelling_model_path = SCRIPT_PATH / "src/schelling.py" if schelling_model_path.is_file(): pre_pop_times = [] pop_gen_times = [] diff --git a/pyflamegpu/benchmark.py b/pyflamegpu/benchmark.py index 817478a5..f20ea671 100644 --- a/pyflamegpu/benchmark.py +++ b/pyflamegpu/benchmark.py @@ -94,7 +94,7 @@ def extract_times(lines): print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) # Benchmark Schelling -schelling_model_path = SCRIPT_PATH / f"{BUILD_DIR}/bin/{CONFIG}/schelling" +schelling_model_path = SCRIPT_PATH / "src/schelling.py" if schelling_model_path.is_file(): pre_pop_times = [] pop_gen_times = [] From 96007b79d5c53706291c0be9860dd11bf69acd8a Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Thu, 30 Nov 2023 10:53:37 +0000 Subject: [PATCH 08/20] Draft repast4py flocking model Runs to completion on Waimu, but need to decide how to validate it. --- pyflamegpu-agentpy/src/boids2D.py | 23 +-- pyflamegpu/src/boids2D.py | 15 +- repast4py/src/flocking/flocking.py | 291 +++++++++++++++++++++++++++++ repast4py/src/flocking/params.yaml | 3 + 4 files changed, 311 insertions(+), 21 deletions(-) create mode 100644 repast4py/src/flocking/flocking.py create mode 100644 repast4py/src/flocking/params.yaml diff --git a/pyflamegpu-agentpy/src/boids2D.py b/pyflamegpu-agentpy/src/boids2D.py index b0d17492..bc5529a6 100644 --- a/pyflamegpu-agentpy/src/boids2D.py +++ b/pyflamegpu-agentpy/src/boids2D.py @@ -99,20 +99,14 @@ def inputdata(message_in: pyflamegpu.MessageSpatial2D, message_out: pyflamegpu.M global_velocity_y /= perceived_count # Rule 1) Steer towards perceived centre of flock (Cohesion) - steer_velocity_x = 0.0 - steer_velocity_y = 0.0 - STEER_SCALE = pyflamegpu.environment.getPropertyFloat("STEER_SCALE") steer_velocity_x = (perceived_centre_x - agent_x) * STEER_SCALE steer_velocity_y = (perceived_centre_y - agent_y) * STEER_SCALE - velocity_change_x += steer_velocity_x; - velocity_change_y += steer_velocity_y; + velocity_change_x += steer_velocity_x + velocity_change_y += steer_velocity_y # Rule 2) Match neighbours speeds (Alignment) - match_velocity_x = 0.0 - match_velocity_y = 0.0 - MATCH_SCALE = pyflamegpu.environment.getPropertyFloat("MATCH_SCALE") match_velocity_x = global_velocity_x * MATCH_SCALE match_velocity_y = global_velocity_y * MATCH_SCALE @@ -211,7 +205,7 @@ def outputdata(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.Messa # Global Scalers env.newPropertyFloat("TIME_SCALE", 1.0) # 1.0 for benchmarking to behave the same as the other simulators. -env.newPropertyFloat("GLOBAL_SCALE", 0.5) # 1.0 for comparing to other benchmarks +env.newPropertyFloat("GLOBAL_SCALE",1.0) # 1.0 for comparing to other benchmarks # Rule scalers env.newPropertyFloat("STEER_SCALE", 0.03) # cohere scale? 
0.03 @@ -290,7 +284,7 @@ def outputdata(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.Messa # Initialisation simulator.initialise(sys.argv) prePopulationTimer_stop = time.monotonic() -print("pre population (s): %.6f\n"%(prePopulationTimer_stop - prePopulationTimer_start)) +print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) populationGenerationTimer_start = time.monotonic() @@ -322,8 +316,9 @@ def outputdata(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.Messa fx = rng.uniform(-1, 1) fy = rng.uniform(-1, 1) # Use the random speed for the velocity. - fx /= np.linalg.norm([fx, fy]) - fy /= np.linalg.norm([fx, fy]) + fxy_len = np.linalg.norm([fx, fy]) + fx /= fxy_len + fy /= fxy_len fx *= fmagnitude fy *= fmagnitude @@ -339,11 +334,11 @@ def outputdata(message_in: pyflamegpu.MessageNone, message_out: pyflamegpu.Messa # Execute the simulation simulator.simulate() -# Print the exeuction time to stdout +# Print the execution time to stdout print("simulate (s): %.6f"%simulator.getElapsedTimeSimulation()) print("rtc (s): %.6f"%simulator.getElapsedTimeRTCInitialisation()) -# Join the visualsition if required +# Join the visualisation if required if pyflamegpu.VISUALISATION: visualisation.join() diff --git a/pyflamegpu/src/boids2D.py b/pyflamegpu/src/boids2D.py index d5b29b0f..24e35fc5 100644 --- a/pyflamegpu/src/boids2D.py +++ b/pyflamegpu/src/boids2D.py @@ -227,7 +227,7 @@ agent_x += agent_fx * TIME_SCALE; agent_y += agent_fy * TIME_SCALE; - // Wramp position + // Wrap position const float MIN_POSITION = FLAMEGPU->environment.getProperty("MIN_POSITION"); const float MAX_POSITION = FLAMEGPU->environment.getProperty("MAX_POSITION"); wrapPosition(agent_x, agent_y, MIN_POSITION, MAX_POSITION); @@ -278,7 +278,7 @@ # Global Scalers env.newPropertyFloat("TIME_SCALE", 1.0) # 1.0 for benchmarking to behave the same as the other simulators. -env.newPropertyFloat("GLOBAL_SCALE", 1.0) # 1.0 for comparing to other benchamrks +env.newPropertyFloat("GLOBAL_SCALE", 1.0) # 1.0 for comparing to other benchmarks # Rule scalers env.newPropertyFloat("STEER_SCALE", 0.03) # cohere scale? 0.03 @@ -357,7 +357,7 @@ # Initialisation simulator.initialise(sys.argv) prePopulationTimer_stop = time.monotonic() -print("pre population (s): %.6f\n"%(prePopulationTimer_stop - prePopulationTimer_start)) +print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) populationGenerationTimer_start = time.monotonic() @@ -389,8 +389,9 @@ fx = rng.uniform(-1, 1) fy = rng.uniform(-1, 1) # Use the random speed for the velocity. 
- fx /= np.linalg.norm([fx, fy]) - fy /= np.linalg.norm([fx, fy]) + fxy_len = np.linalg.norm([fx, fy]) + fx /= fxy_len + fy /= fxy_len fx *= fmagnitude fy *= fmagnitude @@ -406,11 +407,11 @@ # Execute the simulation simulator.simulate() -# Print the exeuction time to stdout +# Print the execution time to stdout print("simulate (s): %.6f"%simulator.getElapsedTimeSimulation()) print("rtc (s): %.6f"%simulator.getElapsedTimeRTCInitialisation()) -# Join the visualsition if required +# Join the visualisation if required if pyflamegpu.VISUALISATION: visualisation.join() diff --git a/repast4py/src/flocking/flocking.py b/repast4py/src/flocking/flocking.py new file mode 100644 index 00000000..8658860f --- /dev/null +++ b/repast4py/src/flocking/flocking.py @@ -0,0 +1,291 @@ +from repast4py import core, space, schedule, logging, random +from repast4py.space import ContinuousPoint as cpt +from repast4py.space import DiscretePoint as dpt +from repast4py.space import BorderType, OccupancyType +from repast4py.geometry import find_2d_nghs_periodic + +from repast4py import context as ctx +from repast4py.parameters import create_args_parser, init_params + +import numpy as np +from typing import Final, Dict, Tuple +from mpi4py import MPI + +import math, sys, csv, time + + +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() + +# Environment Bounds +MIN_POSITION: Final = 0.0 +MAX_POSITION: Final = 400.0 + +# Initialisation parameter(s) +INITIAL_SPEED: Final = 1.0 + +# Interaction radius +INTERACTION_RADIUS: Final = 5.0 +SEPARATION_RADIUS: Final = 1.0 + +# Global Scalers +TIME_SCALE: Final = 1.0 # 1.0 for benchmarking to behave the same as the other simulators. +GLOBAL_SCALE: Final = 1.0 # 1.0 for comparing to other benchmarks + +# Rule scalers +STEER_SCALE: Final = 0.03 # cohere scale? 0.03 +COLLISION_SCALE: Final = 0.015 # separate_scale? 0.015 +MATCH_SCALE: Final = 0.05 # match 0.05 + +class Boid(core.Agent): + + TYPE = 0 + + def __init__(self, a_id, rank): + super().__init__(id=a_id, type=Boid.TYPE, rank=rank) + self.fxy = np.array((0.0, 0.0)) + + def save(self) -> Tuple: + """Saves the state of this Boid as a Tuple. + + Used to move this Boid from one MPI rank to another. + + Returns: + The saved state of this Boid. + """ + return (self.uid, self.fxy) + + def step(self): + grid = model.grid + pt = grid.get_location(self) + nghs = np.transpose(find_2d_nghs_periodic(np.array((pt.x, pt.y)), model.grid_box)) + + # Agent position + agent_xy = model.space.get_location(self) + agent_xy = np.array((agent_xy.x, agent_xy.y)) + + # Boids perceived center + perceived_centre_xy = np.array((0.0, 0.0)) + perceived_count = 0 + + # Boids global velocity matching + global_velocity_xy = np.array((0.0, 0.0)) + + # Total change in velocity + velocity_change_xy = np.array((0.0, 0.0)) + + at = dpt(0, 0) + maximum = [[], -(sys.maxsize - 1)] + # Iterate moore neighbourhood of grid + for ngh in nghs: + at = dpt(ngh[0], ngh[1]) # at._reset_from_array(ngh) does not work correctly??? + # Iterate agents within current grid cell + for obj in grid.get_agents(at): + # Ignore self messages. 
+ if obj.uid[0] != self.uid[0]: + # Get the message location + message_xy = model.space.get_location(obj) + message_xy = np.array((message_xy.x, message_xy.y)) + + # Convert message location to virtual coordinates to account for wrapping + xy21 = message_xy - agent_xy; + message_xy = np.where(abs(xy21) > MAX_POSITION / 2, message_xy - (xy21 / abs(xy21) * MAX_POSITION), message_xy) + + # Check interaction radius + separation = np.linalg.norm(agent_xy - message_xy) + + if separation < INTERACTION_RADIUS: + # Update the perceived centre + perceived_centre_xy += message_xy + perceived_count+=1 + + # Update perceived velocity matching + global_velocity_xy += obj.fxy; + + # Update collision centre + if separation < SEPARATION_RADIUS: # dependant on model size + # Rule 3) Avoid other nearby boids (Separation) + normalizedSeparation = (separation / SEPARATION_RADIUS) + invNormSep = (1.0 - normalizedSeparation) + invSqSep = invNormSep * invNormSep + + velocity_change_xy += COLLISION_SCALE * (agent_xy - message_xy) * invSqSep + + if perceived_count: + # Divide positions/velocities by relevant counts. + perceived_centre_xy /= perceived_count + global_velocity_xy /= perceived_count + + # Rule 1) Steer towards perceived centre of flock (Cohesion) + steer_velocity_xy = (perceived_centre_xy - agent_xy) * STEER_SCALE + + velocity_change_xy += steer_velocity_xy + + # Rule 2) Match neighbours speeds (Alignment) + match_velocity_xy = global_velocity_xy * MATCH_SCALE + + velocity_change_xy += match_velocity_xy - self.fxy + + # Global scale of velocity change + velocity_change_xy *= GLOBAL_SCALE + + # Update agent velocity + self.fxy += velocity_change_xy + + # Bound velocity + if not (self.fxy[0] == 0 and self.fxy[1] == 0): + agent_fscale = np.linalg.norm(self.fxy) + if agent_fscale > 1: + self.fxy /= agent_fscale + + minSpeed = 0.5 + if agent_fscale < minSpeed: + # Normalise + self.fxy /= agent_fscale + + # Scale to min + self.fxy *= minSpeed + + + # Apply the velocity + agent_xy += self.fxy * TIME_SCALE + + # Wrap position + width = MAX_POSITION-MIN_POSITION + for i in range(len(agent_xy)): + if agent_xy[i] < MIN_POSITION : + agent_xy[i] += width + elif agent_xy[i] > MAX_POSITION : + agent_xy[i] -= width + + # Move agent + model.move(self, agent_xy[0], agent_xy[1]) + + + +agent_cache = {} + + +def restore_agent(agent_data: Tuple): + """Creates an agent from the specified agent_data. + + This is used to re-create agents when they have moved from one MPI rank to another. + The tuple returned by the agent's save() method is moved between ranks, and restore_agent + is called for each tuple in order to create the agent on that rank. Here we also use + a cache to cache any agents already created on this rank, and only update their state + rather than creating from scratch. + + Args: + agent_data: the data to create the agent from. This is the tuple returned from the agent's save() method + where the first element is the agent id tuple, and any remaining arguments encapsulate + agent state. 
+ """ + uid = agent_data[0] + if uid in agent_cache: + h = agent_cache[uid] + else: + h = Boid(uid[0], uid[2]) + agent_cache[uid] = h + + # restore the agent state from the agent_data tuple + h.fx = agent_data[1] + return h + + +class Model: + + def __init__(self, comm, params): + self.comm = comm + self.context = ctx.SharedContext(comm) + self.rank = self.comm.Get_rank() + self.csv_log = params['csv.log'] + + self.runner = schedule.init_schedule_runner(comm) + self.runner.schedule_repeating_event(1, 1, self.step) + self.runner.schedule_stop(params['stop.at']) + self.runner.schedule_end_event(self.at_end) + + grid_box = space.BoundingBox(int(MIN_POSITION), int(math.floor(MAX_POSITION/INTERACTION_RADIUS)), int(MIN_POSITION), int(math.floor(MAX_POSITION/INTERACTION_RADIUS)), 0, 0) + box = space.BoundingBox(int(MIN_POSITION), int(MAX_POSITION), int(MIN_POSITION), int(MAX_POSITION), 0, 0) + self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, + buffer_size=2, comm=comm) + self.grid_box = np.array((grid_box.xmin, grid_box.xmin + grid_box.xextent - 1, grid_box.ymin, grid_box.ymin + grid_box.yextent - 1)) + self.context.add_projection(self.grid) + self.space = space.SharedCSpace('space', bounds=box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, + buffer_size=int(math.ceil(INTERACTION_RADIUS)), comm=comm, tree_threshold=100) + self.context.add_projection(self.space) + + prePopulationTimer_stop = time.monotonic() + print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + + populationGenerationTimer_start = time.monotonic() + + local_bounds = self.space.get_local_bounds() + for i in range(int(params['boid.count'])): + h = Boid(i, self.rank) + self.context.add(h) + x = random.default_rng.uniform(local_bounds.xmin, local_bounds.xmin + local_bounds.xextent) + y = random.default_rng.uniform(local_bounds.ymin, local_bounds.ymin + local_bounds.yextent) + self.move(h, x, y) + fxy = np.array((random.default_rng.uniform(-1, 1), random.default_rng.uniform(-1, 1))) + h.fxy = INITIAL_SPEED * fxy / np.linalg.norm(fxy) + + populationGenerationTimer_stop = time.monotonic() + print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + + + def at_end(self): + if self.csv_log: + # Log final agent positions to file + with open(self.csv_log, 'w', newline='') as file: + writer = csv.writer(file) + writer.writerow(['"x"', '"y"', '"fx"', '"fy"']) + for b in self.context.agents(Boid.TYPE): + agent_xy = self.space.get_location(b) + writer.writerow([agent_xy.x, agent_xy.y, b.fxy[0], b.fxy[1]]) + + def move(self, agent, x, y): + # timer.start_timer('space_move') + self.space.move(agent, cpt(x, y)) + # timer.stop_timer('space_move') + # timer.start_timer('grid_move') + self.grid.move(agent, dpt(int(math.floor(x/INTERACTION_RADIUS)), int(math.floor(y/INTERACTION_RADIUS)))) + # timer.stop_timer('grid_move') + + def step(self): + # print("{}: {}".format(self.rank, len(self.context.local_agents))) + tick = self.runner.schedule.tick + self.context.synchronize(restore_agent) + + # timer.start_timer('b_step') + for b in self.context.agents(Boid.TYPE): + b.step() + # timer.stop_timer('b_step') + + def run(self): + simulateTimer_start = time.monotonic() + self.runner.execute() + simulateTimer_stop = time.monotonic() + print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) + + def remove_agent(self, agent): + self.context.remove(agent) + +def run(params: Dict): + 
"""Creates and runs the Zombies Model. + + Args: + params: the model input parameters + """ + global model + model = Model(MPI.COMM_WORLD, params) + model.run() + + +if __name__ == "__main__": + parser = create_args_parser() + args = parser.parse_args() + params = init_params(args.parameters_file, args.parameters) + run(params) + mainTimer_stop = time.monotonic() + print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) diff --git a/repast4py/src/flocking/params.yaml b/repast4py/src/flocking/params.yaml new file mode 100644 index 00000000..d904e8df --- /dev/null +++ b/repast4py/src/flocking/params.yaml @@ -0,0 +1,3 @@ +stop.at: 10.0 +boid.count: 80000 +csv.log: "testwrap.csv" From e1072c30a7833af2a1351c7b76c321cd9d45f290 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 4 Dec 2023 11:22:04 +0000 Subject: [PATCH 09/20] Schelling draft +minor tweaks to pyfgpu boids --- repast4py/src/flocking/flocking.py | 3 +- repast4py/src/schelling/params.yaml | 4 + repast4py/src/schelling/schelling.py | 272 +++++++++++++++++++++++++++ 3 files changed, 277 insertions(+), 2 deletions(-) create mode 100644 repast4py/src/schelling/params.yaml create mode 100644 repast4py/src/schelling/schelling.py diff --git a/repast4py/src/flocking/flocking.py b/repast4py/src/flocking/flocking.py index 8658860f..199a241f 100644 --- a/repast4py/src/flocking/flocking.py +++ b/repast4py/src/flocking/flocking.py @@ -74,7 +74,6 @@ def step(self): # Total change in velocity velocity_change_xy = np.array((0.0, 0.0)) - at = dpt(0, 0) maximum = [[], -(sys.maxsize - 1)] # Iterate moore neighbourhood of grid for ngh in nghs: @@ -272,7 +271,7 @@ def remove_agent(self, agent): self.context.remove(agent) def run(params: Dict): - """Creates and runs the Zombies Model. + """Creates and runs the Flocking Model. Args: params: the model input parameters diff --git a/repast4py/src/schelling/params.yaml b/repast4py/src/schelling/params.yaml new file mode 100644 index 00000000..eb314e48 --- /dev/null +++ b/repast4py/src/schelling/params.yaml @@ -0,0 +1,4 @@ +stop.at: 10.0 +grid.width: 500 +population.count: 200000 +csv.log: "testwrap.csv" diff --git a/repast4py/src/schelling/schelling.py b/repast4py/src/schelling/schelling.py new file mode 100644 index 00000000..361f8837 --- /dev/null +++ b/repast4py/src/schelling/schelling.py @@ -0,0 +1,272 @@ +from repast4py import core, space, schedule, logging, random +from repast4py.space import ContinuousPoint as cpt +from repast4py.space import DiscretePoint as dpt +from repast4py.space import BorderType, OccupancyType +from repast4py.geometry import find_2d_nghs_periodic + +from repast4py import context as ctx +from repast4py.parameters import create_args_parser, init_params + +import numpy as np +from typing import Final, Dict, Tuple +from mpi4py import MPI + +import math, sys, csv, time + +from functools import reduce + +mainTimer_start = time.monotonic() +prePopulationTimer_start = time.monotonic() + +# Configurable properties +THRESHOLD: Final = 0.375 # 0.375 == 3/8s to match integer models + +A: Final = 0 +B: Final = 1 +UNOCCUPIED: Final = 2 + + +class Agent(core.Agent): + + TYPE = 0 + + def __init__(self, a_id, rank): + super().__init__(id=a_id, type=Agent.TYPE, rank=rank) + self.current_type = UNOCCUPIED + self.next_type = UNOCCUPIED + self.happy = False + self.available = False + self.movement_resolved = False + + def save(self) -> Tuple: + """Saves the state of this Boid as a Tuple. + + Used to move this Boid from one MPI rank to another. + + Returns: + The saved state of this Boid. 
+ """ + return (self.uid, self.current_type, self.next_type, self.happy, self.available, self.movement_resolved) + + def determineStatus(self): + grid = model.grid + pt = grid.get_location(self) + nghs = np.transpose(find_2d_nghs_periodic(np.array((pt.x, pt.y)), model.grid_box)) + + same_type_neighbours = 0 + diff_type_neighbours = 0 + + # Iterate 3x3 Moore neighbourhood (but skip ourself) + for ngh in nghs: + at = dpt(ngh[0], ngh[1]) # at._reset_from_array(ngh) does not work correctly??? + if pt == at: + continue + # Iterate agents within current grid cell + for obj in grid.get_agents(at): + same_type_neighbours += int(self.current_type == obj.current_type) + diff_type_neighbours += int(self.current_type != obj.current_type and obj.current_type != UNOCCUPIED) + + self.isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD if same_type_neighbours else False + self.next_type = self.current_type if ((self.current_type != UNOCCUPIED) and self.isHappy) else UNOCCUPIED + self.movement_resolved = (self.current_type == UNOCCUPIED) or self.isHappy + self.available = (self.current_type == UNOCCUPIED) or not self.isHappy + + def bid(self, available_locations): + if self.movement_resolved: + return + + # Select a random location + selected_location = random.default_rng.choice(available_locations) + + # Bid for that location + bid_grid = model.bidding_grid + bid_grid.add(self) + bid_grid.move(self, selected_location) + + def selectWinner(self): + grid = model.grid + pt = grid.get_location(self) + + bid_grid = model.bidding_grid + bid_agents = [x for x in bid_grid.get_agents(pt)] + # Random agent in the bucket wins + if len(bid_agents): + winner = random.default_rng.choice(bid_agents) + self.next_type = winner.current_type # @todo: Does winner need to be a ghost agent? + self.available = False + # Remove losers from the bidding process + for ba in bid_agents: + if ba != winner: + bid_grid.remove(ba) + + def hasMoved(self): + bid_grid = model.bidding_grid + # Check if I won, update self and remove winning bid + if bid_grid.contains(self): + self.movement_resolved = True + bid_grid.remove(self) + +agent_cache = {} + +def restore_agent(agent_data: Tuple): + """Creates an agent from the specified agent_data. + + This is used to re-create agents when they have moved from one MPI rank to another. + The tuple returned by the agent's save() method is moved between ranks, and restore_agent + is called for each tuple in order to create the agent on that rank. Here we also use + a cache to cache any agents already created on this rank, and only update their state + rather than creating from scratch. + + Args: + agent_data: the data to create the agent from. This is the tuple returned from the agent's save() method + where the first element is the agent id tuple, and any remaining arguments encapsulate + agent state. 
+ """ + uid = agent_data[0] + if uid in agent_cache: + h = agent_cache[uid] + else: + h = Agent(uid[0], uid[2]) + agent_cache[uid] = h + + # restore the agent state from the agent_data tuple + h.current_type = agent_data[1] + h.next_type = agent_data[2] + h.happy = agent_data[3] + h.available = agent_data[4] + h.movement_resolved = agent_data[5] + return h + +class Model: + + def __init__(self, comm, params): + self.comm = comm + self.context = ctx.SharedContext(comm) + self.rank = self.comm.Get_rank() + self.csv_log = params['csv.log'] + + self.runner = schedule.init_schedule_runner(comm) + self.runner.schedule_repeating_event(1, 1, self.step) + self.runner.schedule_stop(params['stop.at']) + self.runner.schedule_end_event(self.at_end) + + GRID_WIDTH = int(params['grid.width']) + grid_box = space.BoundingBox(int(0), int(GRID_WIDTH), int(0), int(GRID_WIDTH), 0, 0) + self.grid_box = np.array((grid_box.xmin, grid_box.xmin + grid_box.xextent - 1, grid_box.ymin, grid_box.ymin + grid_box.yextent - 1)) + self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Single, buffer_size=2, comm=comm) + self.bidding_grid = space.SharedGrid('bidding grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, buffer_size=1, comm=comm) + + self.context.add_projection(self.grid) + self.context.add_projection(self.bidding_grid) + + + prePopulationTimer_stop = time.monotonic() + print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + + populationGenerationTimer_start = time.monotonic() + + # Calculate the total number of cells + CELL_COUNT = GRID_WIDTH * GRID_WIDTH + POPULATED_COUNT = int(params['population.count']) + # Select POPULATED_COUNT unique random integers in the range [0, CELL_COUNT), by shuffling the iota. 
+ shuffledIota = [i for i in range(CELL_COUNT)] + random.default_rng.shuffle(shuffledIota) + for elementIdx in range(CELL_COUNT): + # Create agent + h = Agent(elementIdx, self.rank) + self.context.add(h) + # Setup it's location + idx = shuffledIota[elementIdx] + x = int(idx % GRID_WIDTH) + y = int(idx / GRID_WIDTH) + self.move(h, x, y) + # If the elementIDX is below the populated count, generated a populated type, otherwise it defaults unoccupied + if elementIdx < POPULATED_COUNT: + h.current_type = A if random.default_rng.uniform() < 0.5 else B + + populationGenerationTimer_stop = time.monotonic() + print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + + + def at_end(self): + if self.csv_log: + # Log final agent positions to file + with open(self.csv_log, 'w', newline='') as file: + writer = csv.writer(file) + writer.writerow(['"x"', '"y"', '"fx"', '"fy"']) + for b in self.context.agents(Agent.TYPE): + agent_xy = self.grid.get_location(b) + t = b.current_type + writer.writerow([agent_xy.x, agent_xy.y, int(t==A or t==UNOCCUPIED), int(t==B or t==UNOCCUPIED)]) + + def move(self, agent, x, y): + # timer.start_timer('grid_move') + self.grid.move(agent, dpt(x, y)) + # timer.stop_timer('grid_move') + + def step(self): + # print("{}: {}".format(self.rank, len(self.context.local_agents))) + tick = self.runner.schedule.tick + self.context.synchronize(restore_agent) + + # timer.start_timer('b_step') + for a in self.context.agents(Agent.TYPE): + a.determineStatus() + # Plan Movement Submodel + # Break from submodel once all agents have resolved their movement + while True: + self.context.synchronize(restore_agent) + # Available agents output their location + available_spaces = [self.grid.get_location(a) for a in self.context.agents(Agent.TYPE) if a.available] + + # Unhappy agents bid for a new location + for a in self.context.agents(Agent.TYPE): + a.bid(available_spaces) + + self.context.synchronize(restore_agent) + # Available locations check if anyone wants to move to them. If so, approve one and mark as unavailable + # Update next type to the type of the mover + for a in self.context.agents(Agent.TYPE): + a.selectWinner() + + self.context.synchronize(restore_agent) + # Successful movers mark themselves as resolved + for a in self.context.agents(Agent.TYPE): + a.hasMoved() + + self.context.synchronize(restore_agent) + if not any([not a.movement_resolved for a in self.context.agents(Agent.TYPE)]): + break + + self.context.synchronize(restore_agent) + for a in self.context.agents(Agent.TYPE): + a.current_type = a.next_type + # timer.stop_timer('b_step') + + def run(self): + simulateTimer_start = time.monotonic() + self.runner.execute() + simulateTimer_stop = time.monotonic() + print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) + + def remove_agent(self, agent): + self.context.remove(agent) + +def run(params: Dict): + """Creates and runs the Schelling Model. 
+ + Args: + params: the model input parameters + """ + global model + model = Model(MPI.COMM_WORLD, params) + model.run() + + +if __name__ == "__main__": + parser = create_args_parser() + args = parser.parse_args() + params = init_params(args.parameters_file, args.parameters) + run(params) + mainTimer_stop = time.monotonic() + print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) From 028081185e57b0bba6e3d9733fa387db6d04eaec Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 4 Dec 2023 16:57:43 +0000 Subject: [PATCH 10/20] Fixes so that flocking.py runs on mpi Remove space and store xy pos as an agent variable Init and print timing info only at rank 0 Write output csv to split log file --- repast4py/src/flocking/flocking.py | 65 +++++++++++++++--------------- 1 file changed, 32 insertions(+), 33 deletions(-) diff --git a/repast4py/src/flocking/flocking.py b/repast4py/src/flocking/flocking.py index 199a241f..c7b8d997 100644 --- a/repast4py/src/flocking/flocking.py +++ b/repast4py/src/flocking/flocking.py @@ -1,5 +1,4 @@ from repast4py import core, space, schedule, logging, random -from repast4py.space import ContinuousPoint as cpt from repast4py.space import DiscretePoint as dpt from repast4py.space import BorderType, OccupancyType from repast4py.geometry import find_2d_nghs_periodic @@ -43,6 +42,7 @@ class Boid(core.Agent): def __init__(self, a_id, rank): super().__init__(id=a_id, type=Boid.TYPE, rank=rank) + self.xy = np.array((0.0, 0.0)) self.fxy = np.array((0.0, 0.0)) def save(self) -> Tuple: @@ -53,7 +53,7 @@ def save(self) -> Tuple: Returns: The saved state of this Boid. """ - return (self.uid, self.fxy) + return (self.uid, self.xy, self.fxy) def step(self): grid = model.grid @@ -61,8 +61,7 @@ def step(self): nghs = np.transpose(find_2d_nghs_periodic(np.array((pt.x, pt.y)), model.grid_box)) # Agent position - agent_xy = model.space.get_location(self) - agent_xy = np.array((agent_xy.x, agent_xy.y)) + agent_xy = self.xy # Boids perceived center perceived_centre_xy = np.array((0.0, 0.0)) @@ -83,8 +82,7 @@ def step(self): # Ignore self messages. 
if obj.uid[0] != self.uid[0]: # Get the message location - message_xy = model.space.get_location(obj) - message_xy = np.array((message_xy.x, message_xy.y)) + message_xy = obj.xy # Convert message location to virtual coordinates to account for wrapping xy21 = message_xy - agent_xy; @@ -158,7 +156,7 @@ def step(self): agent_xy[i] -= width # Move agent - model.move(self, agent_xy[0], agent_xy[1]) + model.move(self, agent_xy) @@ -187,7 +185,8 @@ def restore_agent(agent_data: Tuple): agent_cache[uid] = h # restore the agent state from the agent_data tuple - h.fx = agent_data[1] + h.xy = agent_data[1] + h.fxy = agent_data[2] return h @@ -206,49 +205,47 @@ def __init__(self, comm, params): grid_box = space.BoundingBox(int(MIN_POSITION), int(math.floor(MAX_POSITION/INTERACTION_RADIUS)), int(MIN_POSITION), int(math.floor(MAX_POSITION/INTERACTION_RADIUS)), 0, 0) box = space.BoundingBox(int(MIN_POSITION), int(MAX_POSITION), int(MIN_POSITION), int(MAX_POSITION), 0, 0) - self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, + self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Periodic, occupancy=OccupancyType.Multiple, buffer_size=2, comm=comm) self.grid_box = np.array((grid_box.xmin, grid_box.xmin + grid_box.xextent - 1, grid_box.ymin, grid_box.ymin + grid_box.yextent - 1)) self.context.add_projection(self.grid) - self.space = space.SharedCSpace('space', bounds=box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, - buffer_size=int(math.ceil(INTERACTION_RADIUS)), comm=comm, tree_threshold=100) - self.context.add_projection(self.space) prePopulationTimer_stop = time.monotonic() - print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + if self.rank == 0: + print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) populationGenerationTimer_start = time.monotonic() - local_bounds = self.space.get_local_bounds() - for i in range(int(params['boid.count'])): - h = Boid(i, self.rank) - self.context.add(h) - x = random.default_rng.uniform(local_bounds.xmin, local_bounds.xmin + local_bounds.xextent) - y = random.default_rng.uniform(local_bounds.ymin, local_bounds.ymin + local_bounds.yextent) - self.move(h, x, y) - fxy = np.array((random.default_rng.uniform(-1, 1), random.default_rng.uniform(-1, 1))) - h.fxy = INITIAL_SPEED * fxy / np.linalg.norm(fxy) + # Only rank zero generates agents, for simplicity/to avoid RNG conflict + if self.rank == 0: + for i in range(int(params['boid.count'])): + h = Boid(i, self.rank) + self.context.add(h) + x = random.default_rng.uniform(MIN_POSITION, MAX_POSITION) + y = random.default_rng.uniform(MIN_POSITION, MAX_POSITION) + self.move(h, np.array((x, y))) + fxy = np.array((random.default_rng.uniform(-1, 1), random.default_rng.uniform(-1, 1))) + h.fxy = INITIAL_SPEED * fxy / np.linalg.norm(fxy) populationGenerationTimer_stop = time.monotonic() - print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + if self.rank == 0: + print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) def at_end(self): if self.csv_log: # Log final agent positions to file + self.csv_log = self.csv_log.replace(".csv", "%d.csv"%(self.comm.rank)) with open(self.csv_log, 'w', newline='') as file: writer = csv.writer(file) writer.writerow(['"x"', '"y"', '"fx"', '"fy"']) for b in self.context.agents(Boid.TYPE): - agent_xy = self.space.get_location(b) - 
writer.writerow([agent_xy.x, agent_xy.y, b.fxy[0], b.fxy[1]]) + writer.writerow([b.xy[0], b.xy[1], b.fxy[0], b.fxy[1]]) - def move(self, agent, x, y): - # timer.start_timer('space_move') - self.space.move(agent, cpt(x, y)) - # timer.stop_timer('space_move') + def move(self, agent, xy): + agent.xy = xy # timer.start_timer('grid_move') - self.grid.move(agent, dpt(int(math.floor(x/INTERACTION_RADIUS)), int(math.floor(y/INTERACTION_RADIUS)))) + self.grid.move(agent, dpt(int(math.floor(xy[0]/INTERACTION_RADIUS)), int(math.floor(xy[1]/INTERACTION_RADIUS)))) # timer.stop_timer('grid_move') def step(self): @@ -265,7 +262,8 @@ def run(self): simulateTimer_start = time.monotonic() self.runner.execute() simulateTimer_stop = time.monotonic() - print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) + if self.rank == 0: + print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) def remove_agent(self, agent): self.context.remove(agent) @@ -279,6 +277,9 @@ def run(params: Dict): global model model = Model(MPI.COMM_WORLD, params) model.run() + mainTimer_stop = time.monotonic() + if model.rank == 0: + print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) if __name__ == "__main__": @@ -286,5 +287,3 @@ def run(params: Dict): args = parser.parse_args() params = init_params(args.parameters_file, args.parameters) run(params) - mainTimer_stop = time.monotonic() - print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) From 91da3b82a9b13f4f0ed7aaf5ce6f5871b62f0a05 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 4 Dec 2023 17:02:16 +0000 Subject: [PATCH 11/20] Rewrite schelling to support MPI --- repast4py/src/schelling/schelling.py | 258 ++++++++++++++++----------- 1 file changed, 154 insertions(+), 104 deletions(-) diff --git a/repast4py/src/schelling/schelling.py b/repast4py/src/schelling/schelling.py index 361f8837..7bed63b5 100644 --- a/repast4py/src/schelling/schelling.py +++ b/repast4py/src/schelling/schelling.py @@ -11,7 +11,7 @@ from typing import Final, Dict, Tuple from mpi4py import MPI -import math, sys, csv, time +import math, sys, csv, time, itertools from functools import reduce @@ -25,18 +25,25 @@ B: Final = 1 UNOCCUPIED: Final = 2 +def dp_to_array(dp): + if dp is not None: + return np.array((dp.x, dp.y)) + return None + +def array_to_dp(arr): + if arr is not None: + return dpt(arr[0], arr[1]) + return None -class Agent(core.Agent): + +class Cell(core.Agent): TYPE = 0 def __init__(self, a_id, rank): - super().__init__(id=a_id, type=Agent.TYPE, rank=rank) + super().__init__(id=a_id, type=Cell.TYPE, rank=rank) self.current_type = UNOCCUPIED - self.next_type = UNOCCUPIED - self.happy = False - self.available = False - self.movement_resolved = False + self.winner = None def save(self) -> Tuple: """Saves the state of this Boid as a Tuple. @@ -46,11 +53,22 @@ def save(self) -> Tuple: Returns: The saved state of this Boid. """ - return (self.uid, self.current_type, self.next_type, self.happy, self.available, self.movement_resolved) + return (self.uid, self.current_type, self.winner) + + + def update(self, data: Tuple): + """Updates the state of this agent when it is a ghost + agent on a rank other than its local one. + + Args: + data: the new agent state + """ + # The example only updates if there is a change, is there a performance reason for doing this? 
+ self.current_type = data[1] + self.winner = data[2] def determineStatus(self): - grid = model.grid - pt = grid.get_location(self) + pt = model.grid.get_location(self) nghs = np.transpose(find_2d_nghs_periodic(np.array((pt.x, pt.y)), model.grid_box)) same_type_neighbours = 0 @@ -58,53 +76,74 @@ def determineStatus(self): # Iterate 3x3 Moore neighbourhood (but skip ourself) for ngh in nghs: - at = dpt(ngh[0], ngh[1]) # at._reset_from_array(ngh) does not work correctly??? + at = array_to_dp(ngh) # at._reset_from_array(ngh) does not work correctly??? if pt == at: continue - # Iterate agents within current grid cell - for obj in grid.get_agents(at): - same_type_neighbours += int(self.current_type == obj.current_type) - diff_type_neighbours += int(self.current_type != obj.current_type and obj.current_type != UNOCCUPIED) + # Iterate cell agents within current grid cell + for obj in model.grid.get_agents(at): + if obj.uid[1] == Cell.TYPE: + same_type_neighbours += int(self.current_type == obj.current_type) + diff_type_neighbours += int(self.current_type != obj.current_type and obj.current_type != UNOCCUPIED) - self.isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD if same_type_neighbours else False - self.next_type = self.current_type if ((self.current_type != UNOCCUPIED) and self.isHappy) else UNOCCUPIED - self.movement_resolved = (self.current_type == UNOCCUPIED) or self.isHappy - self.available = (self.current_type == UNOCCUPIED) or not self.isHappy - - def bid(self, available_locations): - if self.movement_resolved: - return - - # Select a random location - selected_location = random.default_rng.choice(available_locations) - - # Bid for that location - bid_grid = model.bidding_grid - bid_grid.add(self) - bid_grid.move(self, selected_location) + isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD if same_type_neighbours else False + if not isHappy and self.winner is not None: + old_winner = model.context.agent(self.winner) + old_winner.movement_resolved = False + self.winner = None + self.current_type = UNOCCUPIED def selectWinner(self): - grid = model.grid - pt = grid.get_location(self) + if self.winner is not None: + return + pt = model.grid.get_location(self) - bid_grid = model.bidding_grid - bid_agents = [x for x in bid_grid.get_agents(pt)] + bid_agents = [x for x in model.grid.get_agents(pt) if x.uid[1] == Agent.TYPE] # Random agent in the bucket wins if len(bid_agents): winner = random.default_rng.choice(bid_agents) - self.next_type = winner.current_type # @todo: Does winner need to be a ghost agent? - self.available = False - # Remove losers from the bidding process - for ba in bid_agents: - if ba != winner: - bid_grid.remove(ba) - - def hasMoved(self): - bid_grid = model.bidding_grid - # Check if I won, update self and remove winning bid - if bid_grid.contains(self): - self.movement_resolved = True - bid_grid.remove(self) + winner.movement_resolved = True + self.winner = winner.uid + self.current_type = winner.my_type + +class Agent(core.Agent): + + TYPE = 1 + + def __init__(self, a_id, rank): + super().__init__(id=a_id, type=Agent.TYPE, rank=rank) + self.my_type = UNOCCUPIED + self.movement_resolved = True + + def save(self) -> Tuple: + """Saves the state of this Boid as a Tuple. + + Used to move this Boid from one MPI rank to another. + + Returns: + The saved state of this Boid. 
+ """ + return (self.uid, self.my_type, self.movement_resolved) + + + def update(self, data: Tuple): + """Updates the state of this agent when it is a ghost + agent on a rank other than its local one. + + Args: + data: the new agent state + """ + # The example only updates if there is a change, is there a performance reason for doing this? + self.my_type = data[1] + self.movement_resolved = data[2] + + + def bid(self, available_locations): + if self.movement_resolved: + return + + # Select and bid for a random location + selected_location = random.default_rng.choice(available_locations) + model.grid.move(self, array_to_dp(selected_location)) agent_cache = {} @@ -123,18 +162,17 @@ def restore_agent(agent_data: Tuple): agent state. """ uid = agent_data[0] + if uid in agent_cache: h = agent_cache[uid] else: - h = Agent(uid[0], uid[2]) + if uid[1] == Cell.TYPE: + h = Cell(uid[0], uid[2]) + elif uid[1] == Agent.TYPE: + h = Agent(uid[0], uid[2]) agent_cache[uid] = h - # restore the agent state from the agent_data tuple - h.current_type = agent_data[1] - h.next_type = agent_data[2] - h.happy = agent_data[3] - h.available = agent_data[4] - h.movement_resolved = agent_data[5] + h.update(agent_data) return h class Model: @@ -153,52 +191,64 @@ def __init__(self, comm, params): GRID_WIDTH = int(params['grid.width']) grid_box = space.BoundingBox(int(0), int(GRID_WIDTH), int(0), int(GRID_WIDTH), 0, 0) self.grid_box = np.array((grid_box.xmin, grid_box.xmin + grid_box.xextent - 1, grid_box.ymin, grid_box.ymin + grid_box.yextent - 1)) - self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Single, buffer_size=2, comm=comm) - self.bidding_grid = space.SharedGrid('bidding grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, buffer_size=1, comm=comm) + self.grid = space.SharedGrid('grid', bounds=grid_box, borders=BorderType.Sticky, occupancy=OccupancyType.Multiple, buffer_size=0, comm=comm) self.context.add_projection(self.grid) - self.context.add_projection(self.bidding_grid) prePopulationTimer_stop = time.monotonic() - print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) + if self.rank == 0: + print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) populationGenerationTimer_start = time.monotonic() - # Calculate the total number of cells - CELL_COUNT = GRID_WIDTH * GRID_WIDTH - POPULATED_COUNT = int(params['population.count']) - # Select POPULATED_COUNT unique random integers in the range [0, CELL_COUNT), by shuffling the iota. - shuffledIota = [i for i in range(CELL_COUNT)] - random.default_rng.shuffle(shuffledIota) - for elementIdx in range(CELL_COUNT): - # Create agent - h = Agent(elementIdx, self.rank) - self.context.add(h) - # Setup it's location - idx = shuffledIota[elementIdx] - x = int(idx % GRID_WIDTH) - y = int(idx / GRID_WIDTH) - self.move(h, x, y) - # If the elementIDX is below the populated count, generated a populated type, otherwise it defaults unoccupied - if elementIdx < POPULATED_COUNT: - h.current_type = A if random.default_rng.uniform() < 0.5 else B - + # Only rank zero generates agents, for simplicity/to avoid conflict + if self.rank == 0: + # Calculate the total number of cells + CELL_COUNT = GRID_WIDTH * GRID_WIDTH + POPULATED_COUNT = int(params['population.count']) + # Select POPULATED_COUNT unique random integers in the range [0, CELL_COUNT), by shuffling the iota. 
+ shuffledIota = [i for i in range(CELL_COUNT)] + random.default_rng.shuffle(shuffledIota) + for elementIdx in range(CELL_COUNT): + # Create cell agent + cell = Cell(elementIdx, self.rank) + self.context.add(cell) + # Setup it's location + idx = shuffledIota[elementIdx] + x = int(idx % GRID_WIDTH) + y = int(idx / GRID_WIDTH) + self.move(cell, x, y) + # If the elementIDX is below the populated count, generated a mobile agent and make it the current winner + if elementIdx < POPULATED_COUNT: + agent = Agent(elementIdx, self.rank) + self.context.add(agent) + agent.my_type = A if random.default_rng.uniform() < 0.5 else B + cell.current_type = agent.my_type + cell.winner = agent.uid + self.move(agent, x, y) + + populationGenerationTimer_stop = time.monotonic() - print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) + if self.rank == 0: + print("population generation (s): %.6f"%(populationGenerationTimer_stop - populationGenerationTimer_start)) def at_end(self): if self.csv_log: # Log final agent positions to file + self.csv_log = self.csv_log.replace(".csv", "%d.csv"%(self.comm.rank)) with open(self.csv_log, 'w', newline='') as file: writer = csv.writer(file) writer.writerow(['"x"', '"y"', '"fx"', '"fy"']) - for b in self.context.agents(Agent.TYPE): + for b in self.context.agents(Cell.TYPE): agent_xy = self.grid.get_location(b) t = b.current_type writer.writerow([agent_xy.x, agent_xy.y, int(t==A or t==UNOCCUPIED), int(t==B or t==UNOCCUPIED)]) - + + def remove(self, agent): + self.grid.remove(agent) + def move(self, agent, x, y): # timer.start_timer('grid_move') self.grid.move(agent, dpt(x, y)) @@ -210,44 +260,43 @@ def step(self): self.context.synchronize(restore_agent) # timer.start_timer('b_step') - for a in self.context.agents(Agent.TYPE): - a.determineStatus() + for c in self.context.agents(Cell.TYPE): + c.determineStatus() # Plan Movement Submodel # Break from submodel once all agents have resolved their movement while True: self.context.synchronize(restore_agent) # Available agents output their location - available_spaces = [self.grid.get_location(a) for a in self.context.agents(Agent.TYPE) if a.available] - + # Perform a local gather + my_available_spaces = [dp_to_array(self.grid.get_location(c)) for c in self.context.agents(Cell.TYPE) if c.winner is None] + # Collectively do a global gather of available spaces + # TODO: this could potentially be made cheaper with a 2-pass + # step 1: move agents to a rank based on ratio of available spaces in each rank + # step 2: move agent to a random available space within it's new rank + available_spaces = list(itertools.chain.from_iterable(self.comm.allgather(my_available_spaces))) # Unhappy agents bid for a new location for a in self.context.agents(Agent.TYPE): a.bid(available_spaces) - + self.context.synchronize(restore_agent) # Available locations check if anyone wants to move to them. 
If so, approve one and mark as unavailable # Update next type to the type of the mover - for a in self.context.agents(Agent.TYPE): - a.selectWinner() - - self.context.synchronize(restore_agent) - # Successful movers mark themselves as resolved - for a in self.context.agents(Agent.TYPE): - a.hasMoved() - - self.context.synchronize(restore_agent) - if not any([not a.movement_resolved for a in self.context.agents(Agent.TYPE)]): + # todo request winner as a ghost so their UID can be marked as winner + for c in self.context.agents(Cell.TYPE): + c.selectWinner() + + my_mvmt_resolved = any([not a.movement_resolved for a in self.context.agents(Agent.TYPE)]) + if not any(self.comm.allgather(my_mvmt_resolved)): + #if not self.comm.allreduce(my_mvmt_resolved, op=MPI.ANY): break - - self.context.synchronize(restore_agent) - for a in self.context.agents(Agent.TYPE): - a.current_type = a.next_type # timer.stop_timer('b_step') def run(self): simulateTimer_start = time.monotonic() self.runner.execute() simulateTimer_stop = time.monotonic() - print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) + if self.rank == 0: + print("simulate (s): %.6f"%(simulateTimer_stop - simulateTimer_start)) def remove_agent(self, agent): self.context.remove(agent) @@ -261,12 +310,13 @@ def run(params: Dict): global model model = Model(MPI.COMM_WORLD, params) model.run() + mainTimer_stop = time.monotonic() + if model.rank == 0: + print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) if __name__ == "__main__": parser = create_args_parser() args = parser.parse_args() params = init_params(args.parameters_file, args.parameters) - run(params) - mainTimer_stop = time.monotonic() - print("main (s): %.6f\n"%(mainTimer_stop - mainTimer_start)) + run(params) From efd8f10f959ab763d1aaf449c3e477003af949e7 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Wed, 6 Dec 2023 16:38:07 +0000 Subject: [PATCH 12/20] Script that has been used to visualise repast outputs. --- repast4py/draw_agent.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 repast4py/draw_agent.py diff --git a/repast4py/draw_agent.py b/repast4py/draw_agent.py new file mode 100644 index 00000000..5ac6b39e --- /dev/null +++ b/repast4py/draw_agent.py @@ -0,0 +1,26 @@ +import csv, os, re +import matplotlib.pyplot as plt +import numpy as np + +regex = re.compile('testwrap[0-9]*.csv') + +x = [] +y = [] +fx = [] +fy = [] +color = [] +for root, dirs, files in os.walk(os.getcwd()): + for file in files: + if regex.match(file): + with open(file, newline='') as csvfile: + reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONNUMERIC) + next(reader, None) # skip the headers + for row in reader: + x.append(row[0]) + y.append(row[1]) + fx.append(row[2]) + fy.append(row[3]) + color.append(((row[2] + 1.0)/2, (row[3] + 1.0)/2, 0.0)) + +plt.scatter(np.array(x), np.array(y), s=2, c=color, marker=",", linewidths=0.1) +plt.show() \ No newline at end of file From d657a4a4dd4eabd9310dbae6bf79a8c4c57469f4 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 10:14:40 +0000 Subject: [PATCH 13/20] Resolve potential division by zero in FLAMEGPU schelling. 
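An agent with no occupied neighbours makes the denominator `same_type_neighbours + diff_type_neighbours` zero, so the happiness ratio divided by zero. Guarding on `same_type_neighbours` alone is enough: when it is zero the ratio is either 0 or 0/0, and in both cases the agent is now treated as unhappy (the 0/N case already fell below the threshold).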
--- FLAMEGPU2/src/schelling.cu | 2 +- pyflamegpu-agentpy/src/schelling.py | 2 +- pyflamegpu/src/schelling.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/FLAMEGPU2/src/schelling.cu b/FLAMEGPU2/src/schelling.cu index 082be9c1..5012bc10 100644 --- a/FLAMEGPU2/src/schelling.cu +++ b/FLAMEGPU2/src/schelling.cu @@ -42,7 +42,7 @@ FLAMEGPU_AGENT_FUNCTION(determine_status, flamegpu::MessageArray2D, flamegpu::Me diff_type_neighbours += (my_type != message_type) && (message_type != UNOCCUPIED); } - int isHappy = (static_cast(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD; + int isHappy = same_type_neighbours ? (static_cast(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD : false; FLAMEGPU->setVariable("happy", isHappy); unsigned int my_next_type = ((my_type != UNOCCUPIED) && isHappy) ? my_type : UNOCCUPIED; FLAMEGPU->setVariable("next_type", my_next_type); diff --git a/pyflamegpu-agentpy/src/schelling.py b/pyflamegpu-agentpy/src/schelling.py index 54fdf6c8..220409ef 100644 --- a/pyflamegpu-agentpy/src/schelling.py +++ b/pyflamegpu-agentpy/src/schelling.py @@ -41,7 +41,7 @@ def determine_status(message_in: pyflamegpu.MessageArray2D, message_out: pyflame same_type_neighbours += my_type == message_type diff_type_neighbours += (my_type != message_type) and (message_type != UNOCCUPIED) - isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD + isHappy = (float(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD if same_type_neighbours else False pyflamegpu.setVariableUInt("happy", isHappy); my_next_type = my_type if ((my_type != UNOCCUPIED) and isHappy) else UNOCCUPIED pyflamegpu.setVariableUInt("next_type", my_next_type) diff --git a/pyflamegpu/src/schelling.py b/pyflamegpu/src/schelling.py index ac51a66f..cc4be3b5 100644 --- a/pyflamegpu/src/schelling.py +++ b/pyflamegpu/src/schelling.py @@ -45,7 +45,7 @@ diff_type_neighbours += (my_type != message_type) && (message_type != UNOCCUPIED); } - int isHappy = (static_cast(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD; + int isHappy = same_type_neighbours ? (static_cast(same_type_neighbours) / (same_type_neighbours + diff_type_neighbours)) > THRESHOLD : false; FLAMEGPU->setVariable("happy", isHappy); unsigned int my_next_type = ((my_type != UNOCCUPIED) && isHappy) ? my_type : UNOCCUPIED; FLAMEGPU->setVariable("next_type", my_next_type); From 442e42c21d0a4caf6a4307310b7d06c3daae50bf Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 10:24:07 +0000 Subject: [PATCH 14/20] Repast: Seed MPI ranks with unique seed. 
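If every rank shares one seed, all ranks draw identical random streams, which correlates population generation and bidding decisions across ranks. The numpy parallel-RNG guidance linked in the diff below suggests deriving independent per-rank streams from a seed sequence; a minimal sketch of that pattern (base seed and variable names are illustrative) is:

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    ss = np.random.SeedSequence(12)                     # shared base seed
    child = ss.spawn(comm.Get_size())[comm.Get_rank()]  # one child sequence per rank
    rng = np.random.default_rng(child)                  # independent generator on this rank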
--- repast4py/src/flocking/flocking.py | 4 ++++ repast4py/src/schelling/schelling.py | 3 +++ 2 files changed, 7 insertions(+) diff --git a/repast4py/src/flocking/flocking.py b/repast4py/src/flocking/flocking.py index c7b8d997..ad1824f2 100644 --- a/repast4py/src/flocking/flocking.py +++ b/repast4py/src/flocking/flocking.py @@ -210,6 +210,10 @@ def __init__(self, comm, params): self.grid_box = np.array((grid_box.xmin, grid_box.xmin + grid_box.xextent - 1, grid_box.ymin, grid_box.ymin + grid_box.yextent - 1)) self.context.add_projection(self.grid) + # Each rank must generate a unique seed + # https://numpy.org/doc/stable/reference/random/parallel.html#sequence-of-integer-seeds + random.default_rng = np.random.default_rng([self.rank, random.default_rng.bytes(4)]) + prePopulationTimer_stop = time.monotonic() if self.rank == 0: print("pre population (s): %.6f"%(prePopulationTimer_stop - prePopulationTimer_start)) diff --git a/repast4py/src/schelling/schelling.py b/repast4py/src/schelling/schelling.py index 7bed63b5..d55479a8 100644 --- a/repast4py/src/schelling/schelling.py +++ b/repast4py/src/schelling/schelling.py @@ -195,6 +195,9 @@ def __init__(self, comm, params): self.context.add_projection(self.grid) + # Each rank must generate a unique seed + # https://numpy.org/doc/stable/reference/random/parallel.html#sequence-of-integer-seeds + random.default_rng = np.random.default_rng([self.rank, random.default_rng.bytes(4)]) prePopulationTimer_stop = time.monotonic() if self.rank == 0: From fc78b8b988cd132e0c91da52b5666f2b878ed6e1 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 11:10:16 +0000 Subject: [PATCH 15/20] Repast benchmark script. --- FLAMEGPU2/benchmark.py | 8 +- pyflamegpu-agentpy/benchmark.py | 52 ++++++------ pyflamegpu/benchmark.py | 52 ++++++------ repast4py/.gitignore | 3 + repast4py/README.md | 11 +++ repast4py/benchmark.py | 140 ++++++++++++++++++++++++++++++++ 6 files changed, 216 insertions(+), 50 deletions(-) create mode 100644 repast4py/.gitignore create mode 100644 repast4py/README.md create mode 100644 repast4py/benchmark.py diff --git a/FLAMEGPU2/benchmark.py b/FLAMEGPU2/benchmark.py index b784e264..173695ee 100644 --- a/FLAMEGPU2/benchmark.py +++ b/FLAMEGPU2/benchmark.py @@ -10,11 +10,13 @@ import re import math import statistics +import random SCRIPT_PATH = pathlib.Path(__file__).parent BUILD_DIR = "build" CONFIG = "Release" REPETITIONS = 10 +SEED = 12 def extract_times(lines): @@ -59,8 +61,9 @@ def extract_times(lines): pop_gen_times = [] main_times = [] sim_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([str(flocking_binary_path), "-s", "100"], stdout=subprocess.PIPE) + result = subprocess.run([str(flocking_binary_path), "-s", "100", "-r", str(random.randint(0, 999999999))], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time = extract_times(lines) @@ -90,8 +93,9 @@ def extract_times(lines): pop_gen_times = [] main_times = [] sim_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([str(schelling_binary_path), "-s", "100"], stdout=subprocess.PIPE) + result = subprocess.run([str(schelling_binary_path), "-s", "100", "-r", str(random.randint(0, 999999999))], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time = extract_times(lines) diff --git 
a/pyflamegpu-agentpy/benchmark.py b/pyflamegpu-agentpy/benchmark.py index f20ea671..7aaa90c0 100644 --- a/pyflamegpu-agentpy/benchmark.py +++ b/pyflamegpu-agentpy/benchmark.py @@ -10,11 +10,13 @@ import re import math import statistics +import random SCRIPT_PATH = pathlib.Path(__file__).parent BUILD_DIR = "build" CONFIG = "Release" REPETITIONS = 10 +SEED = 12 def extract_times(lines): PRE_POP_RE = re.compile("^(pre population \(s\): ([0-9]+(\.[0-9]+)?))$") @@ -66,8 +68,9 @@ def extract_times(lines): main_times = [] sim_times = [] rtc_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "-r", str(random.randint(0, 999999999)), "--disable-rtc-cache"], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) @@ -78,20 +81,20 @@ def extract_times(lines): rtc_times.append(rtc_time) min_main_time = min(main_times) min_simulate_time = min(sim_times) - print(f"FLAMEGPU2 flocking prepop times (s) : {pre_pop_times}") - print(f"FLAMEGPU2 flocking popgen times (s) : {pop_gen_times}") - print(f"FLAMEGPU2 flocking simulate times (s): {sim_times}") - print(f"FLAMEGPU2 flocking rtc times (s): {rtc_times}") - print(f"FLAMEGPU2 flocking main times (s) : {main_times}") - print(f"FLAMEGPU2 flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") - print(f"FLAMEGPU2 flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") - print(f"FLAMEGPU2 flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") - print(f"FLAMEGPU2 flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") - print(f"FLAMEGPU2 flocking main (mean ms) : {statistics.mean(main_times)*1e3}") + print(f"pyflamegpu-agentpy flocking prepop times (s) : {pre_pop_times}") + print(f"pyflamegpu-agentpy flocking popgen times (s) : {pop_gen_times}") + print(f"pyflamegpu-agentpy flocking simulate times (s): {sim_times}") + print(f"pyflamegpu-agentpy flocking rtc times (s): {rtc_times}") + print(f"pyflamegpu-agentpy flocking main times (s) : {main_times}") + print(f"pyflamegpu-agentpy flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"pyflamegpu-agentpy flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"pyflamegpu-agentpy flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"pyflamegpu-agentpy flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"pyflamegpu-agentpy flocking main (mean ms) : {statistics.mean(main_times)*1e3}") else: - print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + print(f"Error: pyflamegpu-agentpy flocking model ({flocking_model_path}) does not exist. 
Please check paths are correct.", file=sys.stderr) # Benchmark Schelling schelling_model_path = SCRIPT_PATH / "src/schelling.py" @@ -101,8 +104,9 @@ def extract_times(lines): main_times = [] sim_times = [] rtc_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "-r", str(random.randint(0, 999999999)), "--disable-rtc-cache"], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) @@ -111,17 +115,17 @@ def extract_times(lines): main_times.append(main_time) sim_times.append(sim_time) rtc_times.append(rtc_time) - print(f"FLAMEGPU2 schelling prepop times (s) : {pre_pop_times}") - print(f"FLAMEGPU2 schelling popgen times (s) : {pop_gen_times}") - print(f"FLAMEGPU2 schelling simulate times (s): {sim_times}") - print(f"FLAMEGPU2 schelling rtc times (s): {rtc_times}") - print(f"FLAMEGPU2 schelling main times (s) : {main_times}") - print(f"FLAMEGPU2 schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") - print(f"FLAMEGPU2 schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") - print(f"FLAMEGPU2 schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") - print(f"FLAMEGPU2 schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") - print(f"FLAMEGPU2 schelling main (mean ms) : {statistics.mean(main_times)*1e3}") + print(f"pyflamegpu-agentpy schelling prepop times (s) : {pre_pop_times}") + print(f"pyflamegpu-agentpy schelling popgen times (s) : {pop_gen_times}") + print(f"pyflamegpu-agentpy schelling simulate times (s): {sim_times}") + print(f"pyflamegpu-agentpy schelling rtc times (s): {rtc_times}") + print(f"pyflamegpu-agentpy schelling main times (s) : {main_times}") + print(f"pyflamegpu-agentpy schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"pyflamegpu-agentpy schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"pyflamegpu-agentpy schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"pyflamegpu-agentpy schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"pyflamegpu-agentpy schelling main (mean ms) : {statistics.mean(main_times)*1e3}") else: - print(f"Error: pyFLAMEGPU schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + print(f"Error: pyflamegpu-agentpy schelling model ({schelling_model_path}) does not exist. 
Please check paths are correct.", file=sys.stderr) diff --git a/pyflamegpu/benchmark.py b/pyflamegpu/benchmark.py index f20ea671..1708770e 100644 --- a/pyflamegpu/benchmark.py +++ b/pyflamegpu/benchmark.py @@ -10,11 +10,13 @@ import re import math import statistics +import random SCRIPT_PATH = pathlib.Path(__file__).parent BUILD_DIR = "build" CONFIG = "Release" REPETITIONS = 10 +SEED = 12 def extract_times(lines): PRE_POP_RE = re.compile("^(pre population \(s\): ([0-9]+(\.[0-9]+)?))$") @@ -66,8 +68,9 @@ def extract_times(lines): main_times = [] sim_times = [] rtc_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + result = subprocess.run([sys.executable, str(flocking_model_path), "-s", "100", "-r", str(random.randint(0, 999999999)), "--disable-rtc-cache"], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) @@ -78,20 +81,20 @@ def extract_times(lines): rtc_times.append(rtc_time) min_main_time = min(main_times) min_simulate_time = min(sim_times) - print(f"FLAMEGPU2 flocking prepop times (s) : {pre_pop_times}") - print(f"FLAMEGPU2 flocking popgen times (s) : {pop_gen_times}") - print(f"FLAMEGPU2 flocking simulate times (s): {sim_times}") - print(f"FLAMEGPU2 flocking rtc times (s): {rtc_times}") - print(f"FLAMEGPU2 flocking main times (s) : {main_times}") - print(f"FLAMEGPU2 flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") - print(f"FLAMEGPU2 flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") - print(f"FLAMEGPU2 flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") - print(f"FLAMEGPU2 flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") - print(f"FLAMEGPU2 flocking main (mean ms) : {statistics.mean(main_times)*1e3}") + print(f"pyflamegpu flocking prepop times (s) : {pre_pop_times}") + print(f"pyflamegpu flocking popgen times (s) : {pop_gen_times}") + print(f"pyflamegpu flocking simulate times (s): {sim_times}") + print(f"pyflamegpu flocking rtc times (s): {rtc_times}") + print(f"pyflamegpu flocking main times (s) : {main_times}") + print(f"pyflamegpu flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"pyflamegpu flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"pyflamegpu flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"pyflamegpu flocking rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"pyflamegpu flocking main (mean ms) : {statistics.mean(main_times)*1e3}") else: - print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + print(f"Error: pyflamegpu flocking model ({flocking_model_path}) does not exist. 
Please check paths are correct.", file=sys.stderr) # Benchmark Schelling schelling_model_path = SCRIPT_PATH / "src/schelling.py" @@ -101,8 +104,9 @@ def extract_times(lines): main_times = [] sim_times = [] rtc_times = [] + random.seed(SEED) for i in range(0, REPETITIONS): - result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "--disable-rtc-cache"], stdout=subprocess.PIPE) + result = subprocess.run([sys.executable, str(schelling_model_path), "-s", "100", "-r", str(random.randint(0, 999999999)), "--disable-rtc-cache"], stdout=subprocess.PIPE) # @todo make this less brittle lines = result.stdout.decode('utf-8').splitlines() pre_pop_time, pop_gen_time, main_time, sim_time, rtc_time = extract_times(lines) @@ -111,17 +115,17 @@ def extract_times(lines): main_times.append(main_time) sim_times.append(sim_time) rtc_times.append(rtc_time) - print(f"FLAMEGPU2 schelling prepop times (s) : {pre_pop_times}") - print(f"FLAMEGPU2 schelling popgen times (s) : {pop_gen_times}") - print(f"FLAMEGPU2 schelling simulate times (s): {sim_times}") - print(f"FLAMEGPU2 schelling rtc times (s): {rtc_times}") - print(f"FLAMEGPU2 schelling main times (s) : {main_times}") - print(f"FLAMEGPU2 schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") - print(f"FLAMEGPU2 schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") - print(f"FLAMEGPU2 schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") - print(f"FLAMEGPU2 schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") - print(f"FLAMEGPU2 schelling main (mean ms) : {statistics.mean(main_times)*1e3}") + print(f"pyflamegpu schelling prepop times (s) : {pre_pop_times}") + print(f"pyflamegpu schelling popgen times (s) : {pop_gen_times}") + print(f"pyflamegpu schelling simulate times (s): {sim_times}") + print(f"pyflamegpu schelling rtc times (s): {rtc_times}") + print(f"pyflamegpu schelling main times (s) : {main_times}") + print(f"pyflamegpu schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"pyflamegpu schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"pyflamegpu schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"pyflamegpu schelling rtc (mean ms): {statistics.mean(rtc_times)*1e3}") + print(f"pyflamegpu schelling main (mean ms) : {statistics.mean(main_times)*1e3}") else: - print(f"Error: pyFLAMEGPU schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + print(f"Error: pyflamegpu schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) diff --git a/repast4py/.gitignore b/repast4py/.gitignore new file mode 100644 index 00000000..7982c769 --- /dev/null +++ b/repast4py/.gitignore @@ -0,0 +1,3 @@ +# Temporary files for benchmarking +benchmark_config.txt +temp_params.yaml \ No newline at end of file diff --git a/repast4py/README.md b/repast4py/README.md new file mode 100644 index 00000000..18962bd2 --- /dev/null +++ b/repast4py/README.md @@ -0,0 +1,11 @@ +# pyFLAMEGPU ABM Comparison Benchmarks + +This directory contains the implementation of several ABMs used to compare agent based models, including: + ++ `flocking` - a 2D spatial implementation of boids flocking model ++ `schelling` - an implementation of Schelling's Model of Segregation + +The [`repast4py` package](https://repast.github.io/repast4py.site/index.html) must be available within the Python environment used to launch the independent models or `benchmark.py`. 
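+## Running
+
+Each model reads its parameters from a YAML file passed as the first argument and can be launched under MPI with `mpirun`/`mpiexec`, for example (rank count is illustrative):
+
+    mpirun -n 4 python3 src/schelling/schelling.py src/schelling/params.yaml
+
+`benchmark.py` runs both models repeatedly with seeded temporary parameter files and reports the captured timings.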
+ + + diff --git a/repast4py/benchmark.py b/repast4py/benchmark.py new file mode 100644 index 00000000..9bb08537 --- /dev/null +++ b/repast4py/benchmark.py @@ -0,0 +1,140 @@ +#! /usr/bin/env python3 + +# Run 100 iterations of each the Release builds of boids and schelling models, capturing the time of each and emitting the minimum time. +# @todo - support scaling, do not just output minimum, time whole execution vs just simulation? +# @todo - this is only reporting the simulate method, not the rest of FLAMEGPUs runtime which may be biased compared to other timings (need to check) + +import sys +import subprocess +import pathlib +import re +import math +import statistics +import random +import os + +SCRIPT_PATH = pathlib.Path(__file__).parent +BUILD_DIR = "build" +CONFIG = "Release" +REPETITIONS = 10 +SEED = 12 + +def extract_times(lines): + PRE_POP_RE = re.compile("^(pre population \(s\): ([0-9]+(\.[0-9]+)?))$") + POP_GEN_RE = re.compile("^(population generation \(s\): ([0-9]+(\.[0-9]+)?))$") + MAIN_RE = re.compile("^(main \(s\): ([0-9]+(\.[0-9]+)?))$") + SIMULATE_RE = re.compile("^(simulate \(s\): ([0-9]+(\.[0-9]+)?))$") + pre_pop_time = math.inf + pop_gen_time = math.inf + main_time = math.inf + simulate_time = math.inf + matched = False + for line in lines: + line_matched = False + if not line_matched: + match = PRE_POP_RE.match(line.strip()) + if match: + pre_pop_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = POP_GEN_RE.match(line.strip()) + if match: + pop_gen_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = MAIN_RE.match(line.strip()) + if match: + main_time = float(match.group(2)) + line_matched = True + if not line_matched: + match = SIMULATE_RE.match(line.strip()) + if match: + simulate_time = float(match.group(2)) + line_matched = True + return pre_pop_time, pop_gen_time, main_time, simulate_time + + +# Benchmark flocking +flocking_model_path = SCRIPT_PATH / "src/flocking/flocking.py" +flocking_params_path = SCRIPT_PATH / "src/flocking/temp_params.yaml" +if flocking_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + rtc_times = [] + random.seed(SEED) + for i in range(0, REPETITIONS): + # Create a seeded param file + with open(flocking_params_path, "w") as f: + params = f.write( +f"""stop.at: 10.0 +boid.count: 80000 +csv.log: "" +random.seed: {random.randint(0, 999999999)} +""") + result = subprocess.run([sys.executable, str(flocking_model_path), str(flocking_params_path)], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + min_main_time = min(main_times) + min_simulate_time = min(sim_times) + print(f"repast4py flocking prepop times (s) : {pre_pop_times}") + print(f"repast4py flocking popgen times (s) : {pop_gen_times}") + print(f"repast4py flocking simulate times (s): {sim_times}") + print(f"repast4py flocking main times (s) : {main_times}") + print(f"repast4py flocking prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"repast4py flocking popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"repast4py flocking simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"repast4py flocking main (mean ms) : {statistics.mean(main_times)*1e3}") + # Cleanup + 
os.remove(flocking_params_path) + + +else: + print(f"Error: pyFLAMEGPU flocking model ({flocking_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + +# Benchmark Schelling +schelling_model_path = SCRIPT_PATH / "src/schelling/schelling.py" +schelling_params_path = SCRIPT_PATH / "src/schelling/temp_params.yaml" +if schelling_model_path.is_file(): + pre_pop_times = [] + pop_gen_times = [] + main_times = [] + sim_times = [] + random.seed(SEED) + for i in range(0, REPETITIONS): + # Create a seeded param file + with open(schelling_params_path, "w") as f: + params = f.write( +f"""stop.at: 10.0 +grid.width: 500 +population.count: 200000 +csv.log: "" +random.seed: {random.randint(0, 999999999)} +""") + result = subprocess.run([sys.executable, str(schelling_model_path), str(schelling_params_path)], stdout=subprocess.PIPE) + # @todo make this less brittle + lines = result.stdout.decode('utf-8').splitlines() + pre_pop_time, pop_gen_time, main_time, sim_time = extract_times(lines) + pre_pop_times.append(pre_pop_time) + pop_gen_times.append(pop_gen_time) + main_times.append(main_time) + sim_times.append(sim_time) + print(f"repast4py schelling prepop times (s) : {pre_pop_times}") + print(f"repast4py schelling popgen times (s) : {pop_gen_times}") + print(f"repast4py schelling simulate times (s): {sim_times}") + print(f"repast4py schelling main times (s) : {main_times}") + print(f"repast4py schelling prepop (mean ms) : {statistics.mean(pre_pop_times)*1e3}") + print(f"repast4py schelling popgen (mean ms) : {statistics.mean(pop_gen_times)*1e3}") + print(f"repast4py schelling simulate (mean ms): {statistics.mean(sim_times)*1e3}") + print(f"repast4py schelling main (mean ms) : {statistics.mean(main_times)*1e3}") + # Cleanup + os.remove(schelling_params_path) +else: + print(f"Error: pyFLAMEGPU schelling model ({schelling_model_path}) does not exist. Please check paths are correct.", file=sys.stderr) + From 4a23f9349be84ebd495af0271deb9be02fdbfc57 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 15:10:27 +0000 Subject: [PATCH 16/20] Fixup repast gitignore --- repast4py/.gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/repast4py/.gitignore b/repast4py/.gitignore index 7982c769..f61dd100 100644 --- a/repast4py/.gitignore +++ b/repast4py/.gitignore @@ -1,3 +1,4 @@ # Temporary files for benchmarking benchmark_config.txt -temp_params.yaml \ No newline at end of file +temp_params.yaml +*.csv \ No newline at end of file From 2f87ac506cba29b3fd0c7f8afb7033ffe7115c88 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 15:26:31 +0000 Subject: [PATCH 17/20] Seed julia models (untested) Not julia script does not log time of first run where model is compiled. --- Agents/benchmark.jl | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/Agents/benchmark.jl b/Agents/benchmark.jl index f77f25f0..1a875c5e 100644 --- a/Agents/benchmark.jl +++ b/Agents/benchmark.jl @@ -3,13 +3,16 @@ using Pkg; Pkg.instantiate() using Agents using Test using Statistics +using Random -# Does not use @bencmark, due to jobs being OOM killed for long-running models, with a higher maximum runtime to allow the required repetitions. +# Does not use @benchmark, due to jobs being OOM killed for long-running models, with a higher maximum runtime to allow the required repetitions. 
# enabling the gc between samples did not resolve this BenchmarkTools.DEFAULT_PARAMETERS.gcsample = false -# Runs each model SAMPLE_COUNT + 1 times, discarding hte first timing (which includes compilation) +# Runs each model SAMPLE_COUNT + 1 times, discarding the first timing (which includes compilation) SAMPLE_COUNT = 10 +SEED = 12 # Boids +Random.seed!(SEED) times = [] for i in 0:SAMPLE_COUNT (model, agent_step!, model_step!) = Models.flocking( @@ -20,6 +23,7 @@ for i in 0:SAMPLE_COUNT match_factor = 0.05, visual_distance = 5.0, extent = (400, 400), + seed = rand(Int), ) step_stats = @timed step!(model, agent_step!, model_step!, 100) if i > 0 @@ -30,9 +34,14 @@ println("Agents.jl Flocking times (ms)", map(x -> x * 1e3, times)) println("Agents.jl Flocking (mean ms): ", (Statistics.mean(times)) * 1e3) # Schelling +Random.seed!(SEED) times = [] for i in 0:SAMPLE_COUNT - (model, agent_step!, model_step!) = Models.schelling(griddims = (500, 500), numagents = 200000) + (model, agent_step!, model_step!) = Models.schelling( + griddims = (500, 500), + numagents = 200000, + seed = rand(Int), + ) step_stats = @timed step!(model, agent_step!, model_step!, 100) if i > 0 append!(times, step_stats.time) From ac174f2f268a10adc69c8eec39a03cedb524595a Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 15:26:57 +0000 Subject: [PATCH 18/20] Update main script for running all benchmarks. --- runall.sh | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/runall.sh b/runall.sh index fa8046b7..09d8aa82 100755 --- a/runall.sh +++ b/runall.sh @@ -3,6 +3,16 @@ echo "Benchmarking FLAMEGPU2" python3 FLAMEGPU2/benchmark.py +echo "Benchmarking pyflamegpu" +python3 pyflamegpu/benchmark.py + +echo "Benchmarking pyflamegpu-agentpy" +python3 pyflamegpu-agentpy/benchmark.py + +echo "Benchmarking repast4py" +echo "(todo setup MPI)" +python3 repast4py/benchmark.py + echo "Benchmarking Julia" julia --project=@. 
Agents/benchmark.jl From 6f0672a847a6902842960a3945f27261293e784a Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 15:36:16 +0000 Subject: [PATCH 19/20] seed mesa (untested) --- Mesa/Flocking/benchmark.py | 18 ++++++++++++------ Mesa/Schelling/benchmark.py | 21 ++++++++++++--------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/Mesa/Flocking/benchmark.py b/Mesa/Flocking/benchmark.py index a778f72a..779f6a0b 100644 --- a/Mesa/Flocking/benchmark.py +++ b/Mesa/Flocking/benchmark.py @@ -5,8 +5,15 @@ import timeit import gc import statistics +import random -setup = f""" +REPETITIONS = 3 +SEED = 12 + +random.seed(SEED) +a = [] +for i in range(0, REPETITIONS): + setup=f""" gc.enable() import os, sys sys.path.insert(0, os.path.abspath(".")) @@ -21,13 +28,12 @@ def runthemodel(flock): flock = BoidFlockers( population=80000, width=400, - height=400 + height=400, + seed={random.randint(0, 999999999)} ) """ - -tt = timeit.Timer('runthemodel(flock)', setup=setup) -SAMPLES=3 -a = tt.repeat(SAMPLES, 1) + tt = timeit.Timer('runthemodel(flock)', setup=setup) + a.append(tt.timeit(1)) print("Mesa Flocking times (ms):", list(map(lambda x: x * 1e3, a))) print("Mesa Flocking (mean ms):", statistics.mean(a)*1e3) diff --git a/Mesa/Schelling/benchmark.py b/Mesa/Schelling/benchmark.py index cb7cb3e4..d9c2b813 100644 --- a/Mesa/Schelling/benchmark.py +++ b/Mesa/Schelling/benchmark.py @@ -3,17 +3,21 @@ import timeit import gc import statistics +import random + +REPETITIONS = 3 +SEED = 12 -setup = f""" +random.seed(SEED) +a = [] +for i in range(0, REPETITIONS): + setup = f""" gc.enable() import os, sys sys.path.insert(0, os.path.abspath(".")) from model import SchellingModel -import random -random.seed(2) - def runthemodel(schelling): for i in range(0, 100): schelling.step() @@ -21,13 +25,12 @@ def runthemodel(schelling): schelling = SchellingModel( height=500, width=500, - density=0.8 + density=0.8, + seed={random.randint(0, 999999999)} ) """ - -tt = timeit.Timer('runthemodel(schelling)', setup=setup) -SAMPLES=3 -a = tt.repeat(SAMPLES, 1) + tt = timeit.Timer('runthemodel(schelling)', setup=setup) + a.append(tt.timeit(1)) print("Mesa schelling times (ms):", list(map(lambda x: x * 1e3, a))) print("Mesa schelling (mean ms):", statistics.mean(a)*1e3) From 6b4bc751925109023c44b21924c9950821a828e5 Mon Sep 17 00:00:00 2001 From: Robert Chisholm Date: Mon, 11 Dec 2023 16:12:46 +0000 Subject: [PATCH 20/20] Speedup repast schelling, don't use np.rng.choice() --- repast4py/src/schelling/schelling.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/repast4py/src/schelling/schelling.py b/repast4py/src/schelling/schelling.py index d55479a8..a9cd8a89 100644 --- a/repast4py/src/schelling/schelling.py +++ b/repast4py/src/schelling/schelling.py @@ -142,7 +142,8 @@ def bid(self, available_locations): return # Select and bid for a random location - selected_location = random.default_rng.choice(available_locations) + # DO NOT USE random.default_rng.choice() HERE, VERY SLOW!!! + selected_location = available_locations[random.default_rng.integers(len(available_locations))] model.grid.move(self, array_to_dp(selected_location)) agent_cache = {}
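The speed difference largely comes from how `numpy.random.Generator.choice()` handles a plain Python list: it coerces the list to an ndarray on every call, which is linear in the number of available locations, and the call runs once per unresolved bidding agent per iteration. Drawing an index with `Generator.integers()` and subscripting the existing list avoids that per-call conversion. A minimal sketch of the two patterns (sizes are illustrative):

    import numpy as np

    rng = np.random.default_rng(12)
    available_locations = [np.array((x, y)) for x in range(100) for y in range(100)]

    # Coerces the whole list to a (10000, 2) ndarray on every call.
    slow_pick = rng.choice(available_locations)

    # Draws a single index and reuses the existing list; no per-call conversion.
    fast_pick = available_locations[rng.integers(len(available_locations))]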