Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[pivotal] Assign a Shortcut Team/Group to Imported Epics & Stories #59

Merged
merged 6 commits into from
Apr 12, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions pivotal-import/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ In order to run this, you will require a Pivotal account and the ability to sign
1. Run `make import` to perform a dry-run of the import.
- Follow instructions printed to the console to ensure the mapping of Pivotal and Shortcut data is complete and correct.
- Refer to `data/priorities.csv`, `data/states.csv`, and `data/users.csv` to review these mappings.
- Ensure a `group_id` is set in your `config.json` file if you want to assign the epics and stories you import to a Shortcut Team/Group.
1. If the dry-run output looks correct, you can apply the import to your Shortcut workspace by running `make import-apply`
- The console should print a link to an import-specific Shortcut label page that you can review to find all imported Stories and Epics.
- If you run the importer multiple times, you can review all imported Stories and Epics by visiting Settings > Labels and then searching for the `pivotal->shortcut` label and clicking on it.
Expand Down
44 changes: 34 additions & 10 deletions pivotal-import/lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -127,6 +127,10 @@ def printerr(s):


# File locations
# Default paths (relative to the working directory) for the importer's
# input/output CSV files; these are written into config.json as defaults.
data_pivotal_export_csv = "data/pivotal_export.csv"  # Pivotal Tracker export (import input)
data_priorities_csv = "data/priorities.csv"  # Pivotal priority -> Shortcut mapping
data_states_csv = "data/states.csv"  # Pivotal state -> Shortcut workflow-state mapping
data_users_csv = "data/users.csv"  # Pivotal user -> Shortcut member mapping
emails_to_invite = "data/emails_to_invite.csv"  # emails needing a Shortcut invite
shortcut_custom_fields_csv = "data/shortcut_custom_fields.csv"  # reference: workspace Custom Fields
shortcut_groups_csv = "data/shortcut_groups.csv"  # reference: workspace Teams/Groups
Expand Down Expand Up @@ -340,11 +344,11 @@ def populate_config():
workflow_id = default_workflow_id()
data = {
"group_id": group_id,
"pt_csv_file": "data/pivotal_export.csv",
"priorities_csv_file": "data/priorities.csv",
"pt_csv_file": data_pivotal_export_csv,
"priorities_csv_file": data_priorities_csv,
"priority_custom_field_id": priority_custom_field_id,
"states_csv_file": "data/states.csv",
"users_csv_file": "data/users.csv",
"states_csv_file": data_states_csv,
"users_csv_file": data_users_csv,
"workflow_id": workflow_id,
}
json.dump(data, f, indent=2)
Expand Down Expand Up @@ -402,25 +406,45 @@ def validate_config(cfg):
" - Your config.json file must be a JSON object, please check its formatting."
)
else:
if "workflow_id" not in cfg or not cfg["workflow_id"]:
if "group_id" not in cfg:
problems.append(
f' - Your config.json file needs a "group_id" entry, which may be set to `null`, or may be set to one of the Teams/Groups listed in {shortcut_groups_csv}'
)
if "priorities_csv_file" not in cfg or not cfg["priorities_csv_file"]:
problems.append(
' - Your config.json file needs a "workflow_id" entry whose value is the ID of the Shortcut Workflow this importer should use.'
f' - Your config.json is missing an expected field "priorities_csv_file" whose default value is {data_priorities_csv}. Add it manually or run `make clean import` to regenerate a valid config.json'
)
if "priority_custom_field_id" not in cfg or not cfg["priority_custom_field_id"]:
problems.append(
f' - Your config.json file needs a "priority_custom_field_id" entry whose value is the ID of the built-in Shortcut Custom Field called "Priority" which you can find in {shortcut_custom_fields_csv}'
)
if "pt_csv_file" not in cfg or not cfg["pt_csv_file"]:
problems.append(
' - Your config.json file needs a "pt_csv_file" entry whose value is the path to your Pivotal Tracker export CSV.'
f' - Your config.json file needs a "pt_csv_file" entry whose default value is {data_pivotal_export_csv}. Add it manually or run `make clean import` to regenerate a valid config.json'
)
if "states_csv_file" not in cfg or not cfg["states_csv_file"]:
problems.append(
f' - Your config.json is missing an expected field "states_csv_file" whose default value is {data_states_csv}. Add it manually or run `make clean import` to regenerate a valid config.json'
)
if "users_csv_file" not in cfg or not cfg["users_csv_file"]:
problems.append(
f' - Your config.json is missing an expected field "users_csv_file" whose default value is {data_users_csv}. Add it manually or run `make clean import` to regenerate a valid config.json'
)
if "workflow_id" not in cfg or not cfg["workflow_id"]:
problems.append(
f' - Your config.json file needs a "workflow_id" entry whose value is the ID of the Shortcut Workflow this importer should use, refer to {shortcut_workflows_csv} to pick one.'
)
if problems:
msg = "\n".join(problems)
printerr(f"Problems:\n{msg}")
sys.exit(1)
else:
return cfg


def load_config():
    """Validate the environment, then build, validate, and return the config.

    Returns:
        The validated configuration dict (validate_config returns its input
        when all checks pass; otherwise it exits the process).
    """
    validate_environment()
    # The previous implementation bound the config to a local, validated it,
    # and returned it, leaving an unreachable second return after the diff
    # merge; validate_config already returns the config, so chain directly.
    return validate_config(populate_config())


def get_user_info(member):
Expand Down
42 changes: 42 additions & 0 deletions pivotal-import/lib_test.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,7 @@
from copy import deepcopy
import tempfile

import pytest
from lib import *


Expand Down Expand Up @@ -26,6 +29,45 @@ def test_read_config_from_disk_malformed():
fp.close()


# A complete, valid configuration fixture: every key validate_config checks
# is present and non-blank. group_id is the one key that may be null.
cfg_ok = {
    "group_id": None,
    "pt_csv_file": data_pivotal_export_csv,
    "priorities_csv_file": data_priorities_csv,
    "priority_custom_field_id": "123",
    "states_csv_file": data_states_csv,
    "users_csv_file": data_users_csv,
    "workflow_id": "456",
}


def assoc(dict, key, value):
    """Return a copy of `dict` with `key` assigned to `value`.

    The input dict is not modified (the copy is deep, so nested values are
    independent of the original as well).
    """
    d = deepcopy(dict)
    d[key] = value
    # Bug fix: the copy was built and mutated but never returned, so every
    # caller received None instead of the updated mapping.
    return d


def dissoc(dict, key_to_remove):
    """Return a copy of `dict` with `key_to_remove` absent.

    Raises KeyError if `key_to_remove` is not present; the input mapping is
    left untouched (the copy is deep).
    """
    copied = deepcopy(dict)
    # pop() without a default raises KeyError for a missing key, matching del.
    copied.pop(key_to_remove)
    return copied


def test_validate_config_ok():
    """A fully-populated config passes validation and is returned unchanged."""
    result = validate_config(cfg_ok)
    assert result == cfg_ok


def test_validate_config_missing_fields():
    """validate_config exits for an empty config, for any missing key, and
    for any blank value (group_id excepted — it may be null/blank)."""
    with pytest.raises(SystemExit):
        validate_config({})
    for k in cfg_ok.keys():
        with pytest.raises(SystemExit):
            validate_config(dissoc(cfg_ok, k))
    # Bug fix: the filter previously read `if k != "group_id"`, referencing
    # the loop variable leaked from the loop above instead of the
    # comprehension's own `key`, so group_id was never actually excluded.
    for k in [key for key in cfg_ok.keys() if key != "group_id"]:
        with pytest.raises(SystemExit):
            validate_config(assoc(cfg_ok, k, ""))


def test_parse_comment_good():
comment = """Testing comments (Daniel Gregoire - Mar 25, 2024)"""
assert parse_comment(comment) == {
Expand Down
65 changes: 48 additions & 17 deletions pivotal-import/pivotal_import.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@ def parse_priority(priority):
"external_id",
"external_links",
"follower_ids",
"group_id",
"labels",
"name",
"owner_ids",
Expand All @@ -147,6 +148,7 @@ def parse_priority(priority):
"created_at",
"description",
"external_id",
"group_ids",
"labels",
"name",
],
Expand Down Expand Up @@ -203,6 +205,10 @@ def build_entity(ctx, d):
[{"name": PIVOTAL_TO_SHORTCUT_LABEL}, {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}]
)

# The Shortcut Team/Group ID to assign to stories/epics,
# may be None which the REST API interprets correctly.
group_id = ctx["group_id"]

# reconcile entity types
type = "story"
if d["story_type"] == "epic":
Expand All @@ -228,6 +234,8 @@ def build_entity(ctx, d):
d.setdefault("labels", []).append({"name": PIVOTAL_RELEASE_TYPE_LABEL})

if type == "story":
# assign to team/group
d["group_id"] = group_id
# process workflow state
pt_state = d.get("pt_state")
if pt_state:
Expand Down Expand Up @@ -311,7 +319,9 @@ def build_entity(ctx, d):
del d["comments"]

elif type == "epic":
pass
# While Pivotal's model does not have a requester or owners for
# Epics, we can still apply the provided Team/Group assignment.
d["group_ids"] = [group_id] if group_id is not None else []

entity = {k: d[k] for k in select_keys[type] if k in d}
return {"type": type, "entity": entity, "parsed_row": d}
Expand Down Expand Up @@ -375,6 +385,37 @@ def mock_emitter(items):
return mock_emitter


def collect_epic_label_mapping(epics):
    """Return a dict mapping label names to Shortcut Epic ID.

    The importer's bookkeeping labels are excluded: they are attached to
    every imported entity and therefore do not identify a specific epic.
    """
    # Bug fix: the exclusion previously used `is not` (object identity).
    # Label names originate from parsed import data, so they are generally
    # distinct string objects from the module constants even when equal,
    # which made the identity checks unreliable. Compare by value instead.
    bookkeeping_labels = {PIVOTAL_TO_SHORTCUT_LABEL, PIVOTAL_TO_SHORTCUT_RUN_LABEL}
    epic_label_map = {}
    for epic in epics:
        for label in epic["entity"]["labels"]:
            label_name = label["name"]
            if label_name not in bookkeeping_labels:
                epic_label_map[label_name] = epic["imported_entity"]["id"]
    return epic_label_map


def assign_stories_to_epics(stories, epics):
    """
    Mutate the `stories` to set an epic_id if that story is assigned to that epic.
    """
    label_to_epic = collect_epic_label_mapping(epics)
    for story in stories:
        entity = story["entity"]
        for label in entity.get("labels", []):
            epic_id = label_to_epic.get(label["name"])
            logger.debug(f"story epic id {epic_id}")
            if epic_id is not None:
                entity["epic_id"] = epic_id
    return stories


class EntityCollector:
"""Collect and process entities for import into Shortcut.

Expand Down Expand Up @@ -422,20 +463,8 @@ def commit(self):
self.epics = self.emitter(self.epics)

print("Finished creating {} epics".format(len(self.epics)))
epic_label_map = {}
for epic in self.epics:
for label in epic["entity"]["labels"]:
label_name = label["name"]
if label_name is not PIVOTAL_TO_SHORTCUT_LABEL:
epic_label_map[label_name] = epic["imported_entity"]["id"]

# update all the stories with the appropriate epic ids
for story in self.stories:
for label in story["entity"].get("labels", []):
label_name = label["name"]
epic_id = epic_label_map.get(label_name)
if epic_id is not None:
story["entity"]["epic_id"] = epic_id

assign_stories_to_epics(self.stories, self.epics)

# create all the stories
self.stories = self.emitter(self.stories)
Expand Down Expand Up @@ -473,21 +502,23 @@ def process_pt_csv_export(ctx, pt_csv_file, entity_collector):

def write_created_entities_csv(created_entities):
    """Write a CSV summary of the entities created during the import.

    Columns: id, type, name, epic_id (empty for epics and for stories not
    assigned to an epic), url.
    """
    with open(shortcut_imported_entities_csv, "w") as f:
        # Cleanup: the previous body retained a stale duplicate DictWriter
        # assignment (without "epic_id") and a duplicate "name" row key left
        # over from the old revision; only the five-column writer is needed.
        writer = csv.DictWriter(f, ["id", "type", "name", "epic_id", "url"])
        writer.writeheader()
        for entity in created_entities:
            writer.writerow(
                {
                    "id": entity["id"],
                    "type": entity["entity_type"],
                    "name": entity["name"],
                    "epic_id": entity.get("epic_id"),
                    "url": entity["app_url"],
                }
            )


def build_ctx(cfg):
ctx = {
"group_id": cfg["group_id"],
"priority_config": load_priorities(cfg["priorities_csv_file"]),
"priority_custom_field_id": cfg["priority_custom_field_id"],
"user_config": load_users(cfg["users_csv_file"]),
Expand Down
Loading
Loading