Skip to content

Commit

Permalink
Add build folder to pylint checks
Browse files Browse the repository at this point in the history
Signed-off-by: ted chang <[email protected]>
  • Loading branch information
tedhtchang committed Mar 5, 2024
1 parent 9d789b3 commit 677f28a
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 11 deletions.
4 changes: 3 additions & 1 deletion .pylintrc
Original file line number Diff line number Diff line change
Expand Up @@ -443,7 +443,9 @@ disable=raw-checker-failed,
attribute-defined-outside-init,
abstract-method,
pointless-statement,
wrong-import-order
wrong-import-order,
duplicate-code,
unbalanced-tuple-unpacking

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
Expand Down
26 changes: 18 additions & 8 deletions build/launch_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ def get_highest_checkpoint(dir_path):
for curr_dir in os.listdir(dir_path):
if curr_dir.startswith("checkpoint"):
if checkpoint_dir:
curr_dir_num = int(checkpoint_dir.split("-")[-1])
curr_dir_num = int(checkpoint_dir.rsplit("-", maxsplit=1)[-1])
new_dir_num = int(curr_dir.split("-")[-1])
if new_dir_num > curr_dir_num:
checkpoint_dir = curr_dir
Expand Down Expand Up @@ -87,13 +87,13 @@ def main():
) = parser.parse_json_file(json_path, allow_extra_keys=True)

contents = ""
with open(json_path, "r") as f:
with open(json_path, "r", encoding="utf-8") as f:
contents = json.load(f)
peft_method_parsed = contents.get("peft_method")
logging.debug(f"Input params parsed: {contents}")
logging.debug("Input params parsed: %s", contents)
elif json_env_var:
job_config_dict = txt_to_obj(json_env_var)
logging.debug(f"Input params parsed: {job_config_dict}")
logging.debug("Input params parsed: %s", job_config_dict)

(
model_args,
Expand All @@ -106,7 +106,8 @@ def main():
peft_method_parsed = job_config_dict.get("peft_method")
else:
raise ValueError(
"Must set environment variable 'SFT_TRAINER_CONFIG_JSON_PATH' or 'SFT_TRAINER_CONFIG_JSON_ENV_VAR'."
"Must set environment variable 'SFT_TRAINER_CONFIG_JSON_PATH' \
or 'SFT_TRAINER_CONFIG_JSON_ENV_VAR'."
)

tune_config = None
Expand All @@ -118,7 +119,12 @@ def main():
tune_config = prompt_tuning_config

logging.debug(
f"Parameters used to launch training: model_args {model_args}, data_args {data_args}, training_args {training_args}, tune_config {tune_config}"
"Parameters used to launch training: \
model_args %s, data_args %s, training_args %s, tune_config %s",
model_args,
data_args,
training_args,
tune_config,
)

original_output_dir = training_args.output_dir
Expand All @@ -138,7 +144,9 @@ def main():
)

logging.info(
f"Merging lora tuned checkpoint {lora_checkpoint_dir} with base model into output path: {export_path}"
"Merging lora tuned checkpoint %s with base model into output path: %s",
lora_checkpoint_dir,
export_path,
)

create_merged_model(
Expand All @@ -151,7 +159,9 @@ def main():
# copy last checkpoint into mounted output dir
pt_checkpoint_dir = get_highest_checkpoint(training_args.output_dir)
logging.info(
f"Copying last checkpoint {pt_checkpoint_dir} into output dir {original_output_dir}"
"Copying last checkpoint %s into output dir %s",
pt_checkpoint_dir,
original_output_dir,
)
shutil.copytree(
os.path.join(training_args.output_dir, pt_checkpoint_dir),
Expand Down
2 changes: 1 addition & 1 deletion tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -18,5 +18,5 @@ allowlist_externals = ./scripts/fmt.sh
description = lint with pylint
deps = pylint>=2.16.2,<=3.1.0
-r requirements.txt
commands = pylint tuning scripts/*.py
commands = pylint tuning scripts/*.py build/*.py
allowlist_externals = pylint
2 changes: 1 addition & 1 deletion tuning/sft_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -290,7 +290,7 @@ def main(**kwargs): # pylint: disable=unused-argument
choices=["pt", "lora", None, "none"],
default="pt",
)
( # pylint: disable=unbalanced-tuple-unpacking
(
model_args,
data_args,
training_args,
Expand Down

0 comments on commit 677f28a

Please sign in to comment.