diff --git a/pivotal-import/README.md b/pivotal-import/README.md index 5fbb4bb..78b7650 100644 --- a/pivotal-import/README.md +++ b/pivotal-import/README.md @@ -15,6 +15,7 @@ In order to run this, you will require a Pivotal account and the ability to sign 1. Run `make import` to perform a dry-run of the import. - Follow instructions printed to the console to ensure the mapping of Pivotal and Shortcut data is complete and correct. - Refer to `data/priorities.csv`, `data/states.csv`, and `data/users.csv` to review these mappings. + - Ensure a `group_id` is set in your `config.json` file if you want to assign the epics and stories you import to a Shortcut Team/Group. 1. If the dry-run output looks correct, you can apply the import to your Shortcut workspace by running `make import-apply` - The console should print a link to an import-specific Shortcut label page that you can review to find all imported Stories and Epics. - If you run the importer multiple times, you can review all imported Stories and Epics by visiting Settings > Labels and then searching for the `pivotal->shortcut` label and clicking on it. 
diff --git a/pivotal-import/lib.py b/pivotal-import/lib.py index 36b6bed..c5ad024 100644 --- a/pivotal-import/lib.py +++ b/pivotal-import/lib.py @@ -127,6 +127,10 @@ def printerr(s): # File locations +data_pivotal_export_csv = "data/pivotal_export.csv" +data_priorities_csv = "data/priorities.csv" +data_states_csv = "data/states.csv" +data_users_csv = "data/users.csv" emails_to_invite = "data/emails_to_invite.csv" shortcut_custom_fields_csv = "data/shortcut_custom_fields.csv" shortcut_groups_csv = "data/shortcut_groups.csv" @@ -340,11 +344,11 @@ def populate_config(): workflow_id = default_workflow_id() data = { "group_id": group_id, - "pt_csv_file": "data/pivotal_export.csv", - "priorities_csv_file": "data/priorities.csv", + "pt_csv_file": data_pivotal_export_csv, + "priorities_csv_file": data_priorities_csv, "priority_custom_field_id": priority_custom_field_id, - "states_csv_file": "data/states.csv", - "users_csv_file": "data/users.csv", + "states_csv_file": data_states_csv, + "users_csv_file": data_users_csv, "workflow_id": workflow_id, } json.dump(data, f, indent=2) @@ -402,25 +406,45 @@ def validate_config(cfg): " - Your config.json file must be a JSON object, please check its formatting." ) else: - if "workflow_id" not in cfg or not cfg["workflow_id"]: + if "group_id" not in cfg: + problems.append( + f' - Your config.json file needs a "group_id" entry, which may be set to `null`, or may be set to one of the Teams/Groups listed in {shortcut_groups_csv}' + ) + if "priorities_csv_file" not in cfg or not cfg["priorities_csv_file"]: problems.append( - ' - Your config.json file needs a "workflow_id" entry whose value is the ID of the Shortcut Workflow this importer should use.' + f' - Your config.json is missing an expected field "priorities_csv_file" whose default value is {data_priorities_csv}. 
Add it manually or run `make clean import` to regenerate a valid config.json' + ) + if "priority_custom_field_id" not in cfg or not cfg["priority_custom_field_id"]: + problems.append( + f' - Your config.json file needs a "priority_custom_field_id" entry whose value is the ID of the built-in Shortcut Custom Field called "Priority" which you can find in {shortcut_custom_fields_csv}' ) if "pt_csv_file" not in cfg or not cfg["pt_csv_file"]: problems.append( - ' - Your config.json file needs a "pt_csv_file" entry whose value is the path to your Pivotal Tracker export CSV.' + f' - Your config.json file needs a "pt_csv_file" entry whose default value is {data_pivotal_export_csv}. Add it manually or run `make clean import` to regenerate a valid config.json' + ) + if "states_csv_file" not in cfg or not cfg["states_csv_file"]: + problems.append( + f' - Your config.json is missing an expected field "states_csv_file" whose default value is {data_states_csv}. Add it manually or run `make clean import` to regenerate a valid config.json' + ) + if "users_csv_file" not in cfg or not cfg["users_csv_file"]: + problems.append( + f' - Your config.json is missing an expected field "users_csv_file" whose default value is {data_users_csv}. Add it manually or run `make clean import` to regenerate a valid config.json' + ) + if "workflow_id" not in cfg or not cfg["workflow_id"]: + problems.append( + f' - Your config.json file needs a "workflow_id" entry whose value is the ID of the Shortcut Workflow this importer should use, refer to {shortcut_workflows_csv} to pick one.' 
) if problems: msg = "\n".join(problems) printerr(f"Problems:\n{msg}") sys.exit(1) + else: + return cfg def load_config(): validate_environment() - cfg = populate_config() - validate_config(cfg) - return cfg + return validate_config(populate_config()) def get_user_info(member): diff --git a/pivotal-import/lib_test.py b/pivotal-import/lib_test.py index ac4b694..d2181e8 100644 --- a/pivotal-import/lib_test.py +++ b/pivotal-import/lib_test.py @@ -1,4 +1,7 @@ +from copy import deepcopy import tempfile + +import pytest from lib import * @@ -26,6 +29,46 @@ def test_read_config_from_disk_malformed(): fp.close() +cfg_ok = { + "group_id": None, + "pt_csv_file": data_pivotal_export_csv, + "priorities_csv_file": data_priorities_csv, + "priority_custom_field_id": "123", + "states_csv_file": data_states_csv, + "users_csv_file": data_users_csv, + "workflow_id": "456", +} + + +def assoc(dict, key, value): + """Return a copy of `dict` with `key` assigned to `value`""" + d = deepcopy(dict) + d[key] = value + return d + + +def dissoc(dict, key_to_remove): + """Return a copy of `dict` with `key_to_remove` absent.""" + d = deepcopy(dict) + del d[key_to_remove] + return d + + +def test_validate_config_ok(): + assert cfg_ok == validate_config(cfg_ok) + + +def test_validate_config_missing_fields(): + with pytest.raises(SystemExit): + validate_config({}) + for k in cfg_ok.keys(): + with pytest.raises(SystemExit): + validate_config(dissoc(cfg_ok, k)) + for k in [key for key in cfg_ok.keys() if key != "group_id"]: # group_id may be null + with pytest.raises(SystemExit): + validate_config(assoc(cfg_ok, k, "")) + + def test_parse_comment_good(): comment = """Testing comments (Daniel Gregoire - Mar 25, 2024)""" assert parse_comment(comment) == { diff --git a/pivotal-import/pivotal_import.py b/pivotal-import/pivotal_import.py index da95353..91190d3 100644 --- a/pivotal-import/pivotal_import.py +++ b/pivotal-import/pivotal_import.py @@ -135,6 +135,7 @@ def parse_priority(priority): "external_id",
"external_links", "follower_ids", + "group_id", "labels", "name", "owner_ids", @@ -147,6 +148,7 @@ def parse_priority(priority): "created_at", "description", "external_id", + "group_ids", "labels", "name", ], @@ -203,6 +205,10 @@ def build_entity(ctx, d): [{"name": PIVOTAL_TO_SHORTCUT_LABEL}, {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}] ) + # The Shortcut Team/Group ID to assign to stories/epics, + # may be None which the REST API interprets correctly. + group_id = ctx["group_id"] + # reconcile entity types type = "story" if d["story_type"] == "epic": @@ -228,6 +234,8 @@ def build_entity(ctx, d): d.setdefault("labels", []).append({"name": PIVOTAL_RELEASE_TYPE_LABEL}) if type == "story": + # assign to team/group + d["group_id"] = group_id # process workflow state pt_state = d.get("pt_state") if pt_state: @@ -311,7 +319,9 @@ def build_entity(ctx, d): del d["comments"] elif type == "epic": - pass + # While Pivotal's model does not have a requester or owners for + # Epics, we can still apply the provided Team/Group assignment. + d["group_ids"] = [group_id] if group_id is not None else [] entity = {k: d[k] for k in select_keys[type] if k in d} return {"type": type, "entity": entity, "parsed_row": d} @@ -375,6 +385,37 @@ def mock_emitter(items): return mock_emitter +def collect_epic_label_mapping(epics): + """ + Return a dict mapping label names to Shortcut Epic ID. + """ + epic_label_map = {} + for epic in epics: + for label in epic["entity"]["labels"]: + label_name = label["name"] + if ( + label_name is not PIVOTAL_TO_SHORTCUT_LABEL + and label_name is not PIVOTAL_TO_SHORTCUT_RUN_LABEL + ): + epic_label_map[label_name] = epic["imported_entity"]["id"] + return epic_label_map + + +def assign_stories_to_epics(stories, epics): + """ + Mutate the `stories` to set an epic_id if that story is assigned to that epic. 
+ """ + epic_label_map = collect_epic_label_mapping(epics) + for story in stories: + for label in story["entity"].get("labels", []): + label_name = label["name"] + epic_id = epic_label_map.get(label_name) + logger.debug(f"story epic id {epic_id}") + if epic_id is not None: + story["entity"]["epic_id"] = epic_id + return stories + + class EntityCollector: """Collect and process entities for import into Shortcut. @@ -422,20 +463,8 @@ def commit(self): self.epics = self.emitter(self.epics) print("Finished creating {} epics".format(len(self.epics))) - epic_label_map = {} - for epic in self.epics: - for label in epic["entity"]["labels"]: - label_name = label["name"] - if label_name is not PIVOTAL_TO_SHORTCUT_LABEL: - epic_label_map[label_name] = epic["imported_entity"]["id"] - - # update all the stories with the appropriate epic ids - for story in self.stories: - for label in story["entity"].get("labels", []): - label_name = label["name"] - epic_id = epic_label_map.get(label_name) - if epic_id is not None: - story["entity"]["epic_id"] = epic_id + + assign_stories_to_epics(self.stories, self.epics) # create all the stories self.stories = self.emitter(self.stories) @@ -473,14 +502,15 @@ def process_pt_csv_export(ctx, pt_csv_file, entity_collector): def write_created_entities_csv(created_entities): with open(shortcut_imported_entities_csv, "w") as f: - writer = csv.DictWriter(f, ["id", "type", "name", "url"]) + writer = csv.DictWriter(f, ["id", "type", "name", "epic_id", "url"]) writer.writeheader() for entity in created_entities: writer.writerow( { "id": entity["id"], - "name": entity["name"], "type": entity["entity_type"], + "name": entity["name"], + "epic_id": entity["epic_id"] if "epic_id" in entity else None, "url": entity["app_url"], } ) @@ -488,6 +518,7 @@ def write_created_entities_csv(created_entities): def build_ctx(cfg): ctx = { + "group_id": cfg["group_id"], "priority_config": load_priorities(cfg["priorities_csv_file"]), "priority_custom_field_id": 
cfg["priority_custom_field_id"], "user_config": load_users(cfg["users_csv_file"]), diff --git a/pivotal-import/pivotal_import_test.py b/pivotal-import/pivotal_import_test.py index 8ea791e..d475651 100644 --- a/pivotal-import/pivotal_import_test.py +++ b/pivotal-import/pivotal_import_test.py @@ -3,6 +3,7 @@ def create_test_ctx(): return { + "group_id": "group_123", "priority_config": {"p2 - medium": "priority_medium_123"}, "priority_custom_field_id": "priority_123", "user_config": { @@ -102,6 +103,7 @@ def test_build_story_with_comments(): "type": "story", "entity": { "story_type": "feature", + "group_id": "group_123", "comments": [ {"text": "Comment 1"}, {"text": "Comment 2"}, @@ -147,6 +149,7 @@ def test_build_story_with_reviews(): "type": "story", "entity": { "story_type": "feature", + "group_id": "group_123", "comments": [ { "author_id": None, @@ -173,6 +176,7 @@ def test_build_story_with_reviews(): "entity": { "story_type": "bug", "requested_by_id": "daniel_member_id", + "group_id": "group_123", "comments": [ {"text": "Comment 1"}, {"text": "Comment 2"}, @@ -222,6 +226,7 @@ def test_build_story_priority_mapping(): "type": "story", "entity": { "story_type": "feature", + "group_id": "group_123", "custom_fields": [ { "field_id": "priority_123", @@ -239,6 +244,7 @@ def test_build_story_priority_mapping(): "type": "story", "entity": { "story_type": "bug", + "group_id": "group_123", "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}, @@ -267,6 +273,7 @@ def test_build_story_workflow_mapping(): "type": "story", "entity": { "story_type": "feature", + "group_id": "group_123", "workflow_state_id": ctx["workflow_config"]["unstarted"], "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, @@ -279,6 +286,7 @@ def test_build_story_workflow_mapping(): "type": "story", "entity": { "story_type": "bug", + "group_id": "group_123", "workflow_state_id": ctx["workflow_config"]["started"], "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, @@ -308,6 
+316,7 @@ def test_build_story_user_mapping(): "type": "story", "entity": { "story_type": "feature", + "group_id": "group_123", "requested_by_id": ctx["user_config"]["Daniel McFadden"], "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, @@ -320,6 +329,7 @@ def test_build_story_user_mapping(): "type": "story", "entity": { "story_type": "bug", + "group_id": "group_123", "owner_ids": [ ctx["user_config"]["Amy Williams"], ctx["user_config"]["Daniel McFadden"], @@ -334,6 +344,49 @@ def test_build_story_user_mapping(): ] == [build_entity(ctx, d) for d in rows] +def test_build_no_group(): + ctx = create_test_ctx() + ctx["group_id"] = None # field is allowed to be null/None + rows = [ + { + "story_type": "feature", + "requester": "Daniel McFadden", + }, + { + "story_type": "epic", + "name": "An Epic Name", + }, + ] + + assert [ + { + "type": "story", + "entity": { + "story_type": "feature", + "group_id": None, + "requested_by_id": ctx["user_config"]["Daniel McFadden"], + "labels": [ + {"name": PIVOTAL_TO_SHORTCUT_LABEL}, + {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}, + ], + }, + "parsed_row": rows[0], + }, + { + "type": "epic", + "entity": { + "name": "An Epic Name", + "group_ids": [], + "labels": [ + {"name": PIVOTAL_TO_SHORTCUT_LABEL}, + {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}, + ], + }, + "parsed_row": rows[1], + }, + ] == [build_entity(ctx, d) for d in rows] + + def test_build_release(): ctx = create_test_ctx() d = { @@ -346,6 +399,7 @@ def test_build_release(): "entity": { "name": "A Release", "story_type": "chore", + "group_id": "group_123", "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}, @@ -372,6 +426,7 @@ def test_build_epic(): "type": "epic", "entity": { "name": "An Epic Name", + "group_ids": ["group_123"], "labels": [ {"name": PIVOTAL_TO_SHORTCUT_LABEL}, {"name": PIVOTAL_TO_SHORTCUT_RUN_LABEL}, @@ -381,6 +436,44 @@ def test_build_epic(): } == build_entity(ctx, d) +def test_assign_stories_to_epics(): + assert 
assign_stories_to_epics( + [ + { + "type": "story", + "entity": { + "name": "A Story 1", + # This label is used to determine epic membership of the story; see the epic's labels + "labels": [{"name": "an epic name"}], + }, + }, + # This story is not assigned to an epic, and so should not have an epic_id. + {"type": "story", "entity": {"name": "A Story 2"}}, + ], + [ + { + "type": "epic", + # This label is used to determine epic membership of the story; see the story's labels + "entity": {"id": 1234, "labels": [{"name": "an epic name"}]}, + "imported_entity": {"id": 1234}, + } + ], + ) == [ + { + "type": "story", + "entity": { + "name": "A Story 1", + "epic_id": 1234, + "labels": [{"name": "an epic name"}], + }, + }, + # Note the absence of the epic_id, fixing a bug where we unintentionally assigned + # an epic to every story; bug introduced in commit + # efbb2ddb691c7c91b0f2e3c817cfead663adc5db on 2024-04-08 + {"type": "story", "entity": {"name": "A Story 2"}}, + ] + + def test_entity_collector(): entity_collector = EntityCollector()