Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

fix: Make ReEDS parser compatible with new cost functions #44

Merged
merged 12 commits into from
Sep 27, 2024
8 changes: 7 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ build-backend = "hatchling.build"
[tool.hatch.version]
path = "src/r2x/__version__.py"

[tool.hatch.metadata]
allow-direct-references=true

[project]
name = "r2x"
dynamic = ["version"]
Expand All @@ -25,10 +28,10 @@ classifiers = [
"Operating System :: OS Independent",
]
dependencies = [
"infrasys @ git+https://github.com/NREL/infrasys@ps/value_curves",
"jsonschema~=4.23",
"loguru~=0.7.2",
"pandas~=2.2",
"infrasys>=0.1.0",
"plexosdb>=0.0.4",
"polars~=1.1.0",
"pyyaml~=6.0.1",
Expand Down Expand Up @@ -142,7 +145,10 @@ pythonpath = [
]
markers = [
"exporters: Tests related to exporters",
"exporter_utils: Tests related to exporters utils",
"plexos: Tests related to plexos",
"sienna: Tests related to sienna",
"utils: Util functions"
]

[tool.coverage.run]
Expand Down
117 changes: 16 additions & 101 deletions src/r2x/api.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,17 @@
"""R2X API for data model."""

import csv
import json
from collections.abc import Callable
from os import PathLike
from pathlib import Path
from itertools import chain
from collections.abc import Iterable
from loguru import logger

import inspect
from infrasys.component import Component
from infrasys.system import System as ISSystem

from .__version__ import __data_model_version__
import uuid
import infrasys.cost_curves


class System(ISSystem):
Expand Down Expand Up @@ -52,7 +50,11 @@ def export_component_to_csv(
# Get desired components to offload to csv
components = map(
lambda component: component.model_dump(
exclude={}, exclude_none=True, mode="json", context={"magnitude_only": True}
exclude={},
exclude_none=True,
mode="json",
context={"magnitude_only": True},
# serialize_as_any=True,
),
self.get_components(component, filter_func=filter_func),
)
Expand All @@ -68,114 +70,27 @@ def export_component_to_csv(
**dict_writer_kwargs,
)

def _add_operation_cost_data( # noqa: C901
self,
data: Iterable[dict],
fields: list | None = None,
):
operation_cost_fields = set()
x_y_coords = None
for sub_dict in data:
if "operation_cost" not in sub_dict.keys():
continue

operation_cost = sub_dict["operation_cost"]
for cost_field_key, cost_field_value in operation_cost.items():
if isinstance(cost_field_value, dict):
assert (
"uuid" in cost_field_value.keys()
), f"Operation cost field {cost_field_key} was assumed to be a component but is not."
variable_cost = self.get_component_by_uuid(uuid.UUID(cost_field_value["uuid"]))
sub_dict["variable_cost"] = variable_cost.vom_units.function_data.proportional_term
if "fuel_cost" in variable_cost.model_fields:
# Note: We multiply the fuel price by 1000 to offset the division
# done by Sienna when it parses .csv files
sub_dict["fuel_price"] = variable_cost.fuel_cost * 1000
operation_cost_fields.add("fuel_price")

function_data = variable_cost.value_curve.function_data
if "constant_term" in function_data.model_fields:
sub_dict["heat_rate_a0"] = function_data.constant_term
operation_cost_fields.add("heat_rate_a0")
if "proportional_term" in function_data.model_fields:
sub_dict["heat_rate_a1"] = function_data.proportional_term
operation_cost_fields.add("heat_rate_a1")
if "quadratic_term" in function_data.model_fields:
sub_dict["heat_rate_a2"] = function_data.quadratic_term
operation_cost_fields.add("heat_rate_a2")
if "x_coords" in function_data.model_fields:
x_y_coords = dict(zip(function_data.x_coords, function_data.y_coords))
if "points" in function_data.model_fields:
x_y_coords = dict((xyCoord.x, xyCoord.y) for xyCoord in function_data.points)
if x_y_coords:
match type(variable_cost):
case infrasys.cost_curves.CostCurve:
for i, (x_coord, y_coord) in enumerate(x_y_coords.items()):
output_point_col = f"output_point_{i}"
sub_dict[output_point_col] = x_coord
operation_cost_fields.add(output_point_col)

cost_point_col = f"cost_point_{i}"
sub_dict[cost_point_col] = y_coord
operation_cost_fields.add(cost_point_col)

case infrasys.cost_curves.FuelCurve:
for i, (x_coord, y_coord) in enumerate(x_y_coords.items()):
output_point_col = f"output_point_{i}"
sub_dict[output_point_col] = x_coord
operation_cost_fields.add(output_point_col)

heat_rate_col = "heat_rate_avg_0" if i == 0 else f"heat_rate_incr_{i}"
sub_dict[heat_rate_col] = y_coord
operation_cost_fields.add(heat_rate_col)
elif cost_field_key not in sub_dict.keys():
sub_dict[cost_field_key] = cost_field_value
operation_cost_fields.add(cost_field_key)
else:
pass

fields.remove("operation_cost") # type: ignore
fields.extend(list(operation_cost_fields)) # type: ignore

return data, fields

def _export_dict_to_csv(
self,
data: Iterable[dict],
fpath: PathLike,
fields: list | None = None,
key_mapping: dict | None = None,
unnest_key: str = "name",
# key_mapping: dict | None = None,
# unnest_key: str = "name",
**dict_writer_kwargs,
):
# Remapping keys
# NOTE: It does not work recursively for nested components
if key_mapping:
data = [
{key_mapping.get(key, key): value for key, value in sub_dict.items()} for sub_dict in data
]
if fields:
fields = list(map(lambda key: key_mapping.get(key, key), fields))

if fields is None:
fields = list(set(chain.from_iterable(data)))

if "operation_cost" in fields:
data, fields = self._add_operation_cost_data(data, fields)
dict_writer_kwargs = {
key: value
for key, value in dict_writer_kwargs.items()
if key in inspect.getfullargspec(csv.DictWriter).args
}

with open(str(fpath), "w", newline="") as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fields, extrasaction="ignore", **dict_writer_kwargs) # type: ignore
writer.writeheader()
for row in data:
filter_row = {
key: json.dumps(value)
if key == "ext" and isinstance(value, dict)
else value
if not isinstance(value, dict)
else value.get(unnest_key)
for key, value in row.items()
}
writer.writerow(filter_row)
writer.writerow(row)
return


if __name__ == "__main__":
Expand Down
2 changes: 1 addition & 1 deletion src/r2x/defaults/plexos_input.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"plexos_device_map": {},
"plexos_fuel_map": {},
"plexos_property_map": {
"plexos_input_property_map": {
"Charge Efficiency": "charge_efficiency",
"Commit": "must_run",
"Discharge Efficiency": "discharge_efficiency",
Expand Down
21 changes: 11 additions & 10 deletions src/r2x/defaults/plexos_output.json
Original file line number Diff line number Diff line change
Expand Up @@ -76,6 +76,7 @@
"min_up_time": "Min Up Time"
},
"plexos_property_map": {
"active_power": "Max Capacity",
"available": "Units",
"base_power": "Max Capacity",
"base_voltage": "Voltage",
Expand Down Expand Up @@ -135,24 +136,24 @@
},
"reserve_types": {
"1": {
"direction": "up",
"type": "spinning"
"direction": "UP",
"type": "SPINNING"
},
"2": {
"direction": "down",
"type": "spinning"
"direction": "DOWN",
"type": "SPINNING"
},
"3": {
"direction": "up",
"type": "regulation"
"direction": "UP",
"type": "REGULATION"
},
"4": {
"direction": "down",
"type": "regulation"
"direction": "DOWN",
"type": "REGULATION"
},
"default": {
"direction": "up",
"type": "spinning"
"direction": "UP",
"type": "SPINNING"
}
},
"spin_reserve_file_prefix": "Spin_reserve",
Expand Down
94 changes: 94 additions & 0 deletions src/r2x/defaults/sienna_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -215,6 +215,100 @@
"ES",
"FW"
],
"table_data": {
"generator": [
"name",
"available",
"bus_id",
"fuel",
"fuel_price",
"active_power",
"reactive_power",
"active_power_limits_max",
"active_power_limits_min",
"reactive_power_limits_max",
"reactive_power_limits_min",
"min_down_time",
"min_up_time",
"ramp_limits",
"ramp_up",
"ramp_down",
"startup_heat_cold_cost",
"heat_rate_a0",
"heat_rate_a1",
"heat_rate_a2",
"heat_rate_avg_0",
"heat_rate_incr_1",
"heat_rate_incr_2",
"heat_rate_incr_3",
"heat_rate_incr_4",
"heat_rate_incr_5",
"heat_rate_incr_6",
"heat_rate_incr_7",
"heat_rate_incr_8",
"heat_rate_incr_9",
"heat_rate_incr_10",
"heat_rate_incr_11",
"heat_rate_incr_12",
"cost_point_0",
"cost_point_1",
"cost_point_2",
"cost_point_3",
"cost_point_4",
"cost_point_5",
"cost_point_6",
"cost_point_7",
"cost_point_8",
"cost_point_9",
"cost_point_10",
"cost_point_11",
"cost_point_12",
"output_point_0",
"output_point_1",
"output_point_2",
"output_point_3",
"output_point_4",
"output_point_5",
"output_point_6",
"output_point_7",
"output_point_8",
"output_point_9",
"output_point_10",
"output_point_11",
"output_point_12",
"base_mva",
"variable_cost",
"fixed_cost",
"startup_cost",
"shutdown_cost",
"curtailment_cost",
"power_factor",
"unit_type",
"category",
"cold_start_time",
"warm_start_time",
"hot_start_time",
"startup_ramp",
"shutdown_ramp",
"status_at_start",
"time_at_status",
"cold_start_cost",
"warm_start_cost",
"hot_start_cost",
"must_run",
"pump_load",
"pump_active_power_limits_max",
"pump_active_power_limits_min",
"pump_reactive_power_limits_max",
"pump_reactive_power_limits_min",
"pump_min_down_time",
"pump_min_up_time",
"pump_ramp_limits",
"pump_ramp_up",
"pump_ramp_down",
"generator_category"
]
},
"valid_branch_types": [
"Line",
"TapTransformer",
Expand Down
4 changes: 4 additions & 0 deletions src/r2x/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,7 @@ class MultipleFilesError(Exception):

class ParserError(Exception):
pass


class FieldRemovalError(Exception):
pass
Loading